diff --git a/.github/workflows/documentation.yaml b/.github/workflows/documentation.yaml
new file mode 100644
index 00000000..0f2e3b0f
--- /dev/null
+++ b/.github/workflows/documentation.yaml
@@ -0,0 +1,49 @@
+# This is a basic workflow to help you get started with Actions
+
+name: Build-sphinx-docs
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events but only for the main branch
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # This workflow contains a single job called "build"
+  build:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v4
+
+      - name: Set up Python 3.10
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install sphinx sphinx_rtd_theme recommonmark ghp-import
+#          pip install torch numpy
+
+      # Building html requires all necessary dependencies to be installed.
+      # For cosense3d, the installation requires a lot of time,
+      # therefore the docs are temporarily built locally and the built html files are copied to the gh-pages branch.
+
+#      - name: Build HTML
+#        run: |
+#          cd docs/
+#          make html
+      - name: Run ghp-import
+        run: |
+          ghp-import -n -p -f docs/_build/html
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..dca9dc22
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,43 @@
+FROM pytorch/pytorch:2.2.1-cuda11.8-cudnn8-devel
+LABEL hostname="cosense3d-docker"
+
+##############################################
+# You should modify this to match your GPU compute capability
+# ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
+ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 6.2 7.0 7.2 7.5 8.0 8.6"
+##############################################
+
+ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+ENV OMP_NUM_THREADS 16
+
+WORKDIR /project
+COPY requirements.txt /project/requirements.txt
+COPY ./cosense3d/ops/ /project/ops/
+
+# Install dependencies
+RUN apt-get update
+RUN apt-get install -y git ninja-build cmake build-essential libopenblas-dev \
+    xterm xauth openssh-server tmux wget mate-desktop-environment-core
+
+RUN apt-get clean
+RUN rm -rf /var/lib/apt/lists/*
+
+# For faster build, use more jobs.
+ENV MAX_JOBS=4
+RUN git clone --recursive "https://github.com/NVIDIA/MinkowskiEngine"
+RUN cd MinkowskiEngine; python setup.py install --force_cuda --blas=openblas
+RUN cd ..
+
+RUN conda update conda -y
+RUN apt-get update
+RUN apt install python3-dev -y
+RUN apt install libgl1-mesa-glx libglib2.0-0 -y
+
+RUN cd ops && pip install . && cd ..
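# A simple sanity check could optionally follow here to confirm that the compiled
# dependencies import correctly. This line is a suggestion by the editor, not part of
# the original Dockerfile, and assumes the installs above succeeded:
# RUN python -c "import torch, MinkowskiEngine; print(torch.__version__, MinkowskiEngine.__version__)"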
+ +RUN pip install -r requirements.txt + +#RUN conda install pybind11 -y +#RUN conda install -c conda-forge libstdcxx-ng -y + +WORKDIR /workspace diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..bd34f6ac --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Yunshuang Yuan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/cosense3d/__init__.py b/cosense3d/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/cosense3d/__init__.py @@ -0,0 +1 @@ + diff --git a/cosense3d/agents/__init__.py b/cosense3d/agents/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/agents/cav_prototype/RLseg.py b/cosense3d/agents/cav_prototype/RLseg.py new file mode 100644 index 00000000..dd6e4f70 --- /dev/null +++ b/cosense3d/agents/cav_prototype/RLseg.py @@ -0,0 +1,98 @@ + + +import torch +import torch_scatter +from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP +from cosense3d.agents.cav_prototype.base_cav import BaseCAV + + +class RLsegCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dataset = kwargs.get('dataset', None) + self.lidar_range = torch.nn.Parameter(self.lidar_range) + self.prepare_data_keys = ['points', 'annos_local', 'roadline_tgts'] + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + self.use_aug = True + + def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e = self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug = T_e2aug @ T_g2e + + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = 
DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + self.T_aug2g = T_c2aug + + def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data, res=0.5, min_h=0) + DOP.generate_sparse_target_roadline_points(self.data) + + def get_request_cpm(self): + return {'lidar_pose': self.lidar_pose} + + def get_response_cpm(self): + cpm = {} + for k in ['bev_feat']: + if k in self.data: + cpm[k] = self.data[k] + return cpm + + def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:backbone_neck', {})) + + def forward_fusion(self, tasks, training_mode, **kwargs): + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '21:rlseg_head', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '31:rlseg_head', {})) + return tasks + + + diff --git a/cosense3d/agents/cav_prototype/__init__.py b/cosense3d/agents/cav_prototype/__init__.py new file mode 100644 index 00000000..5b786e4a --- /dev/null +++ b/cosense3d/agents/cav_prototype/__init__.py @@ -0,0 +1,15 @@ +# This module provides prototypes for CAVs/agents. +# The prototype has the following features: +# 1. Data processing logics for each prototyped agent/CAV. +# 2. All intermediate data processed are stored locally at prototype class. +# 3. Specify the requesting and responding CPMs + +import importlib + + +def get_prototype(module_full_path: str): + module_name, cls_name = module_full_path.rsplit('.', 1) + module = importlib.import_module(f'cosense3d.agents.cav_prototype.{module_name}') + cls_obj = getattr(module, cls_name, None) + assert cls_obj is not None, f'Class \'{module_name}\' not found.' + return cls_obj \ No newline at end of file diff --git a/cosense3d/agents/cav_prototype/base_cav.py b/cosense3d/agents/cav_prototype/base_cav.py new file mode 100644 index 00000000..ccc4cb1e --- /dev/null +++ b/cosense3d/agents/cav_prototype/base_cav.py @@ -0,0 +1,264 @@ +import torch +from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP + + +class BaseCAV: + def __init__(self, id: str, mapped_id: int, is_ego: bool, + lidar_range: torch.Tensor, memory_len: int, + lidar_pose: torch.Tensor=None, require_grad: bool=False, + seq_len: int=1, **kwargs): + """ + Base class for CAV prototype. + + :param id: agent id. + :param mapped_id: remapped id. + :param is_ego: if the agent is an ego agent. + :param lidar_range: visible lidar range, + :param memory_len: memory length for memory queue. + :param lidar_pose: lidar pose in shape (4, 4). + :param require_grad: if True, the gradients will be calculated for this agent during training. + :param seq_len: sequence length of the input data. + :param kwargs: additional key-value arguments. 
+ """ + self.id = id + self.mapped_id = mapped_id + self.is_ego = is_ego + self.lidar_pose = lidar_pose + self.lidar_range = lidar_range + self.memory_len = memory_len + self.require_grad = require_grad + self.seq_len = seq_len + for k, v in kwargs.items(): + setattr(self, k, v) + self.data = {} # memory FIFO + self.prepare_data_keys = ['img', 'points', 'annos_global', 'annos_local'] + + def update(self, lidar_pose, is_ego, require_grad): + self.lidar_pose = lidar_pose + self.is_ego = is_ego + self.require_grad = require_grad + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(id={self.id}, ' + repr_str += f'is_ego={self.is_ego}, ' + repr_str += f'data={self.data.keys()})' + return repr_str + + def apply_transform(self): + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + DOP.cav_aug_transform(self.data, transform, self.data['augment_params'], + apply_to=self.prepare_data_keys) + + def prepare_data(self): + pass + + def transform_data(self): + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + + def has_request(self): + if 'received_request' in self.data and self.data['received_request'] is not None: + return True + else: + return False + + def get_request_cpm(self): + return {'lidar_pose': self.lidar_pose} + + def get_response_cpm(self): + cpm = {} + for k in ['points']: + if k in self.data: + cpm[k] = self.data[k] + return cpm + + def receive_request(self, request): + self.data['received_request'] = request + + def receive_response(self, response): + self.data['received_response'] = response + + def forward(self, tasks, training_mode, **kwargs): + self.forward_localization(tasks, training_mode, **kwargs) + self.forward_local(tasks, training_mode, **kwargs) + self.forward_fusion(tasks, training_mode, **kwargs) + self.forward_head(tasks, training_mode, **kwargs) + return tasks + + def forward_localization(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks + + def forward_local(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks + + def forward_fusion(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks + + def loss(self, tasks, **kwargs): + """To be overloaded.""" + return tasks + + def reset_data(self, *args, **kwargs): + del self.data + self.data = {} + + def pre_update_memory(self): + """Update memory before each forward run of a single frame.""" + pass + + def post_update_memory(self): + """Update memory after each forward run of a single frame.""" + pass + + +class BaseSeqCAV: + def __init__(self, id, mapped_id, is_ego, lidar_range, memory_len, + lidar_pose=None, require_grad=False, seq_len=1, **kwargs): + self.id = id + self.mapped_id = mapped_id + self.is_ego = is_ego + self.lidar_pose = lidar_pose + self.lidar_range = lidar_range + self.memory_len = memory_len + self.require_grad = require_grad + self.seq_len = seq_len + for k, v in kwargs.items(): + setattr(self, k, v) + self.data = {} # memory FIFO + self.memory = {} + self.prepare_data_keys = ['img', 'points', 'annos_global', 'annos_local'] + + def update(self, lidar_pose): + self.lidar_pose = lidar_pose + + def task_id(self, seq_idx): + return f"{self.id}.{seq_idx}" + + def get_data(self, keys, seq_idx=None): + if 
seq_idx is None: + out = {} + for i, d in self.data.items(): + out[i] = {} + for k in keys: + out[i][k] = d[k] + else: + out = {k: self.data[seq_idx][k] for k in keys} + return out + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(id={self.id}, ' + repr_str += f'is_ego={self.is_ego}, ' + repr_str += f'data={self.data.keys()})' + return repr_str + + def apply_transform(self, seq_idx): + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + DOP.cav_aug_transform(self.data, transform, self.data['augment_params'], + apply_to=self.prepare_data_keys) + + def prepare_data(self, seq_idx): + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + + def has_request(self): + has_req = False + for d in self.data.values(): + if 'received_request' in d and d['received_request'] is not None: + has_req = True + break + return has_req + + def get_request_cpm(self): + return self.get_data(['lidar_poses']) + + def get_response_cpm(self): + cpm = {} + for k in ['points']: + if k in self.data[0]: + cpm[k] = {i: d[k] for i, d in self.data.items()} + return cpm + + def receive_request(self, request): + for i, req in request.items(): + if i not in self.data: + continue + self.data[i]['received_request'] = req + + def receive_response(self, response, seq_idx): + for cav_id, resp in response.items(): + self.data[seq_idx]['received_response'][cav_id] = {k: v[seq_idx] for k, v in resp.items()} + + def forward(self, tasks, training_mode, seq_idx, with_loss): + self.prepare_data(seq_idx) + self.forward_local(tasks, training_mode, seq_idx, with_loss) + self.forward_fusion(tasks, training_mode, seq_idx, with_loss) + self.forward_head(tasks, training_mode, seq_idx, with_loss) + return tasks + + def forward_local(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks + + def forward_fusion(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks + + def forward_head(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks + + def loss(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks + + def reset_data(self, *args, **kwargs): + del self.data + self.data = {} + + def pre_update_memory(self, seq_idx, **kwargs): + """Update memory before each forward run of a single frame.""" + pass + + def post_update_memory(self, seq_idx, **kwargs): + """Update memory after each forward run of a single frame.""" + pass + + +class OPV2VtCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global'] + + +class OPV2VtCAV_v2(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.is_ego: + self.prepare_data_keys = ['points', 'annos_local', 'annos_global', 'annos_global_pred'] + else: + self.prepare_data_keys = ['points', 'annos_local', 'annos_global'] + + +class DairV2XCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_global', 'annos_local'] diff --git a/cosense3d/agents/cav_prototype/co_perception.py b/cosense3d/agents/cav_prototype/co_perception.py new file mode 100644 index 00000000..78882507 --- /dev/null +++ b/cosense3d/agents/cav_prototype/co_perception.py @@ -0,0 
+1,483 @@ + + +import torch +import torch_scatter +from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP +from cosense3d.agents.cav_prototype.base_cav import BaseCAV + + +class StreamLidarCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dataset = kwargs.get('dataset', None) + self.lidar_range = torch.nn.Parameter(self.lidar_range) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global'] + self.data['memory'] = None + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + self.use_aug = True + + def refresh_memory(self, prev_exists): + x = prev_exists.float() + init_pose = torch.eye(4, device=self.lidar_pose.device).unsqueeze(0).unsqueeze(0) + if not x: + self.data['memory'] = { + 'embeddings': x.new_zeros(self.memory_len, self.memory_num_propagated, self.memory_emb_dims), + 'ref_pts': x.new_zeros(self.memory_len, self.memory_num_propagated, self.ref_pts_dim), + 'timestamp': x.new_zeros(self.memory_len, self.memory_num_propagated, 1), + 'pose': x.new_zeros(self.memory_len, self.memory_num_propagated, 4, 4) , + 'pose_no_aug': x.new_zeros(self.memory_len, self.memory_num_propagated, 4, 4) , + 'velo': x.new_zeros(self.memory_len, self.memory_num_propagated, 2), + } + self.data['memory']['pose_no_aug'] = self.data['memory']['pose'] + init_pose + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + else: + for k, v in self.data['memory'].items(): + self.data['memory'][k] = self.data['memory'][k][:self.memory_len] * x + if not x: + self.data['memory']['pose_no_aug'][0] = init_pose[0].repeat(self.memory_num_propagated, 1, 1) + self.data['memory']['prev_exists'] = x + + def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e = self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug = T_e2aug @ T_g2e + + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + if self.data['prev_exists']: + self.data['memory']['pose_no_aug'] = T_g2e @ self.data['memory']['pose_no_aug'] + self.data['memory']['ref_pts'] = self.transform_ref_pts( + self.data['memory']['ref_pts'], T_g2aug) + self.data['memory']['pose'] = self.aug_transform @ self.data['memory']['pose_no_aug'] + + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + self.T_aug2g = T_c2aug + + def prepare_data(self): + self.prepare_time_scale() + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1) + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + # 
self.vis_data('transformed', 4) + + def prepare_time_scale(self): + # hash time + azi = torch.arctan2(self.data['points'][:, 1], self.data['points'][:, 0]) + azi, inds = (torch.rad2deg(azi) + 180).floor().long().unique(return_inverse=True) + times = torch.zeros_like(azi).float() + torch_scatter.scatter_mean(self.data['points'][:, -1], inds, dim=0, out=times) + if len(times) < 360: + time360 = times.new_zeros(360) + time360[azi] = times + time360[time360 == 0] = times.mean() + else: + time360 = times + self.data['time_scale'] = time360 + self.data['time_scale_reduced'] = time360 - self.timestamp + # self.data['points'] = self.data['points'][:, :-1] + + def update_memory_timestamps(self, ref_pts): + # transform ref pts to coop coordinates + transform = self.lidar_pose.inverse() @ self.T_aug2g + pts = self.transform_ref_pts(ref_pts, transform) + timestamp = torch.rad2deg(torch.arctan2(pts[:, 1], pts[:, 0])) + 180 + timestamp = - self.data['time_scale'][(timestamp % 360).floor().long()].unsqueeze(-1) + return timestamp + + def get_response_cpm(self): + cpm = {} + for k in ['temp_fusion_feat']: + if k in self.data: + cpm[k] = self.data[k] + return cpm + + def forward_local(self, tasks, training_mode): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '01:pts_backbone', {})) + tasks[grad_mode].append((self.id, '02:backbone_neck', {})) + tasks[grad_mode].append((self.id, '03:roi_head', {})) + tasks[grad_mode].append((self.id, '04:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '05:det1_head', {})) + + def forward_fusion(self, tasks, training_mode): + if self.is_ego: + tasks['with_grad'].append((self.id, '11:spatial_fusion', {})) + return tasks + + def forward_head(self, tasks, training_mode): + if self.is_ego: + tasks['with_grad'].append((self.id, '13:det2_head', {})) + return tasks + + def loss(self, tasks): + if self.is_ego: + tasks['loss'].append((self.id, '21:roi_head', {})) + tasks['loss'].append((self.id, '22:det1_head', {})) + tasks['loss'].append((self.id, '23:det2_head', {})) + return tasks + + def pre_update_memory(self): + """Update memory before each forward run of a single frame.""" + if self.data['memory'] is not None: + self.data['memory']['timestamp'] += self.timestamp + # pose_inv = self.lidar_pose.inverse() + # self.data['memory']['pose'] = pose_inv @ self.data['memory']['pose'] + # self.data['memory']['ref_pts'] = self.transform_ref_pts( + # self.data['memory']['ref_pts'], pose_inv) + + self.refresh_memory(self.data['prev_exists']) + + def post_update_memory(self): + """Update memory after each forward run of a single frame.""" + x = self.data['detection_local'] + scores = x['all_cls_scores'][-1][..., + min(x['all_cls_scores'][-1].shape[-1] - 1, 1):].topk(1, dim=-1).values[..., 0] + topk = torch.topk(scores, k=self.memory_num_propagated).indices + + ref_pts = x['all_bbox_preds'][-1][:, :self.ref_pts_dim] + velo = x['all_bbox_preds'][-1][:, -2:] + embeddings = self.data['temp_fusion_feat']['outs_dec'][-1] + + timestamp = self.update_memory_timestamps(ref_pts) + pose_no_aug = torch.eye(4, device=ref_pts.device).unsqueeze(0).repeat( + timestamp.shape[0], 1, 1) + + vars = locals() + for k, v in self.data['memory'].items(): + if k == 'prev_exists' or k == 'pose': + continue + rec_topk = vars[k][topk].unsqueeze(0) + self.data['memory'][k] = torch.cat([rec_topk, v], dim=0) + + # self.vis_ref_pts('post update') + + # ego aug to global + self.data['memory']['ref_pts'] = 
self.transform_ref_pts( + self.data['memory']['ref_pts'], self.T_aug2g) + self.data['memory']['timestamp'][1:] -= self.timestamp + self.data['memory']['pose_no_aug'] = self.T_e2g[(None,) * 2] @ self.data['memory']['pose_no_aug'] # aug -->global + + def transform_ref_pts(self, reference_points, matrix): + reference_points = torch.cat( + [reference_points, torch.ones_like(reference_points[..., 0:1])], dim=-1) + if reference_points.ndim == 3: + reference_points = matrix.unsqueeze(0) @ reference_points.permute(0, 2, 1) + reference_points = reference_points.permute(0, 2, 1)[..., :3] + elif reference_points.ndim == 2: + reference_points = matrix @ reference_points.T + reference_points = reference_points.T[..., :3] + else: + raise NotImplementedError + return reference_points + + @property + def timestamp(self): + if self.dataset == 'opv2vt': + timestamp = float(self.data['frame']) * 0.1 / 2 + elif self.dataset == 'dairv2xt': + timestamp = self.data['global_time'] + else: + raise NotImplementedError + return timestamp + + def vis_ref_pts(self, ax=None, label=None, his_len=1, **kwargs): + import matplotlib.pyplot as plt + from cosense3d.utils.vislib import draw_points_boxes_plt + if ax is None: + fig = plt.figure(figsize=(8, 4)) + ax = fig.add_subplot() + pcd = self.data['points'][:, :3].detach().cpu().numpy() + gt_boxes = self.data['local_bboxes_3d'].detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.lidar_range.tolist(), + boxes_gt=gt_boxes[:, :7], + points=pcd, + ax=ax, + return_ax=True, + ) + + ref_pts = self.data['memory']['ref_pts'].detach().cpu().numpy() + markers = ['.r', '.m', '.b', '.c'] + for i in range(his_len): + plt.plot(ref_pts[i, :, 0], ref_pts[i, :, 1], markers[i], markersize=2) + ax.set_title(f"{label}: {self.data['scenario']}, {self.data['frame']}") + plt.show() + plt.close() + + return ax + + def vis_poses(self, ax=None, label=None, his_len=1, **kwargs): + import matplotlib.pyplot as plt + markers = ['r', 'm', 'b', 'c'] + mem_poses = self.data['memory']['pose'][:, 0].detach().cpu() + p0 = mem_poses[:his_len, :2, -1].numpy() + p1 = mem_poses[:his_len] @ torch.tensor([1., 0., 0., 1.]).view(1, 4, 1).repeat(his_len, 1, 1) + p2 = mem_poses[:his_len] @ torch.tensor([0., 1., 0., 1.]).view(1, 4, 1).repeat(his_len, 1, 1) + p1 = p1.squeeze(-1)[:, :2].numpy() + p2 = p2.squeeze(-1)[:, :2].numpy() + + if ax is None: + fig = plt.figure() + ax = fig.add_subplot() + ax.axis('equal') + for i in range(his_len): + ax.plot([p0[i, 0], p1[i, 0]], [p0[i, 1], p1[i, 1]], markers[i]) + ax.plot([p0[i, 0], p2[i, 0]], [p0[i, 1], p2[i, 1]], markers[i]) + return ax + + +class slcDenseToSparse(StreamLidarCAV): + + def prepare_data(self): + self.prepare_time_scale() + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + + def forward_local(self, tasks, training_mode): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '01:pts_backbone', {})) + tasks[grad_mode].append((self.id, '02:roi_head', {})) + tasks[grad_mode].append((self.id, '03:formatting', {})) + tasks[grad_mode].append((self.id, '04:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '05:det1_head', {})) + + +slcFcooper = slcDenseToSparse +slcAttnFusion = slcDenseToSparse + + +class slcFPVRCNN(StreamLidarCAV): + def prepare_data(self): + self.prepare_time_scale() + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + + 
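For illustration, here is a minimal, self-contained sketch of the homogeneous-coordinate convention that transform_ref_pts above relies on: (N, 3) reference points are padded with a ones column, multiplied by a 4x4 pose matrix, and cropped back to three coordinates. The tensors below are toy values, not data from the pipeline.

import torch

def transform_points(points: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
    # points: (N, 3); matrix: (4, 4) rigid transform
    homo = torch.cat([points, torch.ones_like(points[:, :1])], dim=-1)  # (N, 4)
    return (matrix @ homo.T).T[:, :3]                                   # back to (N, 3)

# toy example: a pure translation by (1, 2, 0) moves every point accordingly
T = torch.eye(4)
T[:3, 3] = torch.tensor([1., 2., 0.])
print(transform_points(torch.zeros(5, 3), T))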
def forward_local(self, tasks, training_mode): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '01:pts_backbone', {})) + tasks[grad_mode].append((self.id, '02:roi_head', {})) + tasks[grad_mode].append((self.id, '03:keypoint_composer', {})) + tasks[grad_mode].append((self.id, '04:formatting', {})) + tasks[grad_mode].append((self.id, '05:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '06:det1_head', {})) + + # def forward_fusion(self, tasks, training_mode): + # # if self.is_ego: + # # tasks['with_grad'].append((self.id, '11:spatial_fusion', {})) + # return tasks + # + # def forward_head(self, tasks, training_mode): + # # if self.is_ego: + # # tasks['with_grad'].append((self.id, '13:det2_head', {})) + # return tasks + # + # def pre_update_memory(self): + # pass + # + # def post_update_memory(self): + # pass + # + # def get_response_cpm(self): + # return {} + # + # def loss(self, tasks): + # if self.is_ego: + # tasks['loss'].append((self.id, '21:roi_head', {})) + # return tasks + # + # def apply_transform(self): + # if self.use_aug: + # if self.is_ego: + # T_e2g = self.lidar_pose + # T_g2e = self.lidar_pose.inverse() + # T_c2e = torch.eye(4).to(self.lidar_pose.device) + # else: + # # cav to ego + # T_e2g = self.data['received_request']['lidar_pose'] + # T_g2e = self.data['received_request']['lidar_pose'].inverse() + # T_c2e = T_g2e @ self.lidar_pose + # + # if self.aug_transform is None: + # self.aug_transform = DOP.update_transform_with_aug( + # torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + # T_e2aug = self.aug_transform + # else: + # # adapt aug params to the current ego frame + # T_e2aug = self.T_g2aug @ T_e2g + # + # T_c2aug = T_e2aug @ T_c2e + # T_g2aug = T_e2aug @ T_g2e + # + # DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + # + # self.T_e2g = T_e2g + # self.T_g2aug = T_g2aug + # self.T_aug2g = T_g2aug.inverse() # ego aug to global + # + # else: + # if self.is_ego: + # transform = torch.eye(4).to(self.lidar_pose.device) + # else: + # # cav to ego + # request = self.data['received_request'] + # transform = request['lidar_pose'].inverse() @ self.lidar_pose + # + # T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + # DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + # self.T_aug2g = T_c2aug + + +class slcNoBoxTime(StreamLidarCAV): + + def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1) + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + + def update_memory_timestamps(self, ref_pts): + timestamp = torch.zeros_like(ref_pts[..., :1]) + return timestamp + + +class slcCIASSD(StreamLidarCAV): + def prepare_data(self): + self.prepare_time_scale() + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + + def forward_local(self, tasks, training_mode): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '01:pts_backbone', {})) + tasks[grad_mode].append((self.id, '02:roi_head', {})) + + def forward_fusion(self, tasks, training_mode): + return tasks + + def forward_head(self, tasks, training_mode): + return tasks + + def pre_update_memory(self): + pass + + def post_update_memory(self): + pass + + def get_response_cpm(self): + return {} + + 
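Note that the forward_* hooks in these prototypes never run network modules directly; they only enqueue work items of the form (cav_id, 'order:module_name', kwargs) into the shared tasks dict under 'with_grad', 'no_grad' or 'loss'. The runner that drains the queue lives outside this file; the sketch below is only a hypothetical illustration of how such a queue could be consumed, using the numeric prefix as execution order.

import torch

def run_queue(tasks: dict, modules: dict):
    # Hypothetical consumer, not CoSense3D code: sort by numeric prefix, then dispatch.
    for grad_mode in ('with_grad', 'no_grad'):
        queue = sorted(tasks.get(grad_mode, []), key=lambda t: int(t[1].split(':')[0]))
        ctx = torch.enable_grad() if grad_mode == 'with_grad' else torch.no_grad()
        with ctx:
            for cav_id, name, kwargs in queue:
                modules[name.split(':')[1]](cav_id, **kwargs)

# e.g. run_queue({'with_grad': [('cav0', '01:pts_backbone', {})], 'no_grad': []},
#                {'pts_backbone': lambda cav_id: None})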
def loss(self, tasks): + if self.is_ego: + tasks['loss'].append((self.id, '21:roi_head', {})) + return tasks + + def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e = self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug = T_e2aug @ T_g2e + + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + self.T_aug2g = T_c2aug + + +class StreamLidarCAVLocCorr(StreamLidarCAV): + def get_response_cpm(self): + cpm = {} + cpm['coop_det_ctr'] = self.data['detection_local']['preds']['box'][:, :3] + for k in ['temp_fusion_feat']: + if k in self.data: + cpm[k] = self.data[k] + return cpm + + def forward_fusion(self, tasks, training_mode): + if self.is_ego: + tasks['with_grad'].append((self.id, '11:spatial_alignment', {})) + tasks['with_grad'].append((self.id, '12:spatial_fusion', {})) + return tasks + + + + + + diff --git a/cosense3d/agents/cav_prototype/cood_collection.py b/cosense3d/agents/cav_prototype/cood_collection.py new file mode 100644 index 00000000..c3b82e93 --- /dev/null +++ b/cosense3d/agents/cav_prototype/cood_collection.py @@ -0,0 +1,117 @@ +# Copyright (c) 2024. Yunshuang Yuan. +# Project: CoSense3D +# Author: Yunshuang Yuan +# Affiliation: Institut für Kartographie und Geoinformatik, Lebniz University Hannover, Germany +# Email: yunshuang.yuan@ikg.uni-hannover.de +# All rights reserved. 
+# --------------- + +from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP +from cosense3d.agents.cav_prototype.base_cav import BaseCAV + + +class CoodCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_global'] + + def get_response_cpm(self): + cpm = {} + for k in ['pts_feat']: + if k in self.data: + cpm[k] = self.data[k] + return cpm + + def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + tasks['with_grad'].append((self.id, '11:pts_backbone', {})) + else: + tasks['no_grad'].append((self.id, '11:pts_backbone', {})) + + def forward_fusion(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '21:fusion', {})) + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '22:detection_head', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '32:detection_head', {})) + return tasks + + def reset_data(self): + del self.data + self.data = {} + + +class FpvrcnnCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_global', 'annos_local'] + + def get_response_cpm(self): + cpm = {} + for k in ['keypoint_feat']: + if k in self.data: + cpm[k] = self.data[k] + return cpm + + def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + tasks['with_grad'].append((self.id, '11:pts_backbone', {})) + tasks['with_grad'].append((self.id, '12:detection_head_local', {})) + tasks['with_grad'].append((self.id, '13:keypoint_composer', {})) + else: + tasks['no_grad'].append((self.id, '11:pts_backbone', {})) + tasks['no_grad'].append((self.id, '12:detection_head_local', {})) + tasks['no_grad'].append((self.id, '13:keypoint_composer', {})) + + def forward_fusion(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '21:fusion', {})) + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '22:detection_head_global', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '31:detection_head_local', {})) + tasks['loss'].append((self.id, '32:detection_head_global', {})) + return tasks + + def reset_data(self): + del self.data + self.data = {} + + +class Sp3DCAV(CoodCAV): + def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data) + + def forward_fusion(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '21:fusion', {})) + tasks['with_grad'].append((self.id, '22:fusion_neck', {})) + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '23:bev_head', {})) + tasks['with_grad'].append((self.id, '24:detection_head', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '31:bev_head', {})) + tasks['loss'].append((self.id, '32:detection_head', {})) + return tasks + + + diff --git a/cosense3d/agents/cav_prototype/gevBEV_collection.py b/cosense3d/agents/cav_prototype/gevBEV_collection.py new file mode 100644 index 00000000..c5631be3 --- /dev/null +++ b/cosense3d/agents/cav_prototype/gevBEV_collection.py @@ -0,0 
+1,136 @@ + + +import torch +import torch_scatter +from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP +from cosense3d.agents.cav_prototype.base_cav import BaseCAV + + +class BEVSemsegCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dataset = kwargs.get('dataset', None) + self.lidar_range = torch.nn.Parameter(self.lidar_range) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global', 'bev_tgt_pts'] + self.data['memory'] = None + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + self.use_aug = True + + def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e = self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug = T_e2aug @ T_g2e + + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + self.T_aug2g = T_c2aug + + def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data, res=0.5, min_h=0) + # if self.require_grad: + # if not self.is_ego: + # self.data['bevmap'] = self.data['received_request']['bevmap'] + # self.data['bevmap_coor'] = self.data['received_request']['bevmap_coor'] + # DOP.generate_sparse_target_bev_points(self.data) + # self.vis_data('transformed', 4) + + def get_request_cpm(self): + return {'lidar_pose': self.lidar_pose} + # return {'lidar_pose': self.lidar_pose, 'bevmap': self.data['bevmap'], 'bevmap_coor': self.data['bevmap_coor']} + + def get_response_cpm(self): + cpm = {} + for k in ['bev_feat']: + if k in self.data: + cpm[k] = self.data[k] + return cpm + + def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + # tasks[grad_mode].append((self.id, '13:semseg_head_local', {})) + + def forward_fusion(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '21:spatial_fusion', {})) + tasks['with_grad'].append((self.id, '22:fusion_neck', {})) + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + # tasks['with_grad'].append((self.id, '22:det_head', {})) + tasks['with_grad'].append((self.id, '23:semseg_head', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.is_ego: + # tasks['loss'].append((self.id, '31:semseg_head_local', {})) + # tasks['loss'].append((self.id, '32:det_head', {})) + tasks['loss'].append((self.id, '33:semseg_head', {})) + 
return tasks + + +GevBEV = BEVSemsegCAV + + +class GevBEVwDet(BEVSemsegCAV): + """GevBEV with object detection.""" + + def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '22:det_head', {})) + tasks['with_grad'].append((self.id, '23:semseg_head', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.is_ego: + # tasks['loss'].append((self.id, '21:semseg_head_local', {})) + tasks['loss'].append((self.id, '32:det_head', {})) + tasks['loss'].append((self.id, '33:semseg_head', {})) + return tasks + + +class EviBEV(BEVSemsegCAV): + def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data) + DOP.generate_sparse_target_bev_points(self.data, discrete=True) + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) \ No newline at end of file diff --git a/cosense3d/agents/cav_prototype/streamLTS_collection.py b/cosense3d/agents/cav_prototype/streamLTS_collection.py new file mode 100644 index 00000000..b27ffc53 --- /dev/null +++ b/cosense3d/agents/cav_prototype/streamLTS_collection.py @@ -0,0 +1,707 @@ +import copy + +import torch +import torch_scatter +from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP +from cosense3d.agents.cav_prototype.base_cav import BaseCAV + + +class StreamLidarCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dataset = kwargs.get('dataset', None) + self.lidar_range = torch.nn.Parameter(self.lidar_range) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global'] + self.data['memory'] = None + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + self.use_aug = True + + def refresh_memory(self, prev_exists): + x = prev_exists.float() + init_pose = torch.eye(4, device=self.lidar_pose.device).unsqueeze(0).unsqueeze(0) + if not x: + self.data['memory'] = { + 'embeddings': x.new_zeros(self.memory_len, self.memory_num_propagated, self.memory_emb_dims), + 'ref_pts': x.new_zeros(self.memory_len, self.memory_num_propagated, self.ref_pts_dim), + 'timestamp': x.new_zeros(self.memory_len, self.memory_num_propagated, 1), + 'pose': x.new_zeros(self.memory_len, self.memory_num_propagated, 4, 4) , + 'pose_no_aug': x.new_zeros(self.memory_len, self.memory_num_propagated, 4, 4) , + 'velo': x.new_zeros(self.memory_len, self.memory_num_propagated, 2), + } + self.data['memory']['pose_no_aug'] = self.data['memory']['pose'] + init_pose + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + else: + for k, v in self.data['memory'].items(): + self.data['memory'][k] = self.data['memory'][k][:self.memory_len] * x + if not x: + self.data['memory']['pose_no_aug'][0] = init_pose[0].repeat(self.memory_num_propagated, 1, 1) + self.data['memory']['prev_exists'] = x + + def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e = self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug 
= T_e2aug @ T_g2e + if self.is_ego: + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + else: + data_keys = [k for k in self.prepare_data_keys if k != 'annos_global'] + DOP.apply_transform(self.data, T_c2aug, apply_to=data_keys) + # global bboxes share the same memory with the ego cav, therefore it is already transformed to the aug coor + # DOP.apply_transform(self.data, T_e2aug, apply_to=['annos_global']) + if self.data['prev_exists']: + self.data['memory']['pose_no_aug'] = T_g2e @ self.data['memory']['pose_no_aug'] + self.data['memory']['ref_pts'] = self.transform_ref_pts( + self.data['memory']['ref_pts'], T_g2aug) + self.data['memory']['pose'] = self.aug_transform @ self.data['memory']['pose_no_aug'] + + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=['points', 'annos_local']) + self.T_aug2g = T_c2aug + + def prepare_data(self): + self.prepare_time_scale() + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1) + + def transform_data(self): + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + # self.vis_data('transformed', 4) + + def prepare_time_scale(self): + # hash time + azi = torch.arctan2(self.data['points'][:, 1], self.data['points'][:, 0]) + azi, inds = (torch.rad2deg(azi) + 180).floor().long().unique(return_inverse=True) + times = torch.zeros_like(azi).float() + torch_scatter.scatter_mean(self.data['points'][:, -1], inds, dim=0, out=times) + if len(times) < 360: + time360 = times.new_zeros(360) + time360[azi] = times + time360[time360 == 0] = times.mean() + else: + time360 = times + self.data['time_scale'] = time360 + self.data['time_scale_reduced'] = time360 - self.timestamp + # self.data['points'] = self.data['points'][:, :-1] + + def update_memory_timestamps(self, ref_pts): + # transform ref pts to coop coordinates + transform = self.lidar_pose.inverse() @ self.T_aug2g + pts = self.transform_ref_pts(ref_pts, transform) + timestamp = torch.rad2deg(torch.arctan2(pts[:, 1], pts[:, 0])) + 180 + timestamp = - self.data['time_scale'][(timestamp % 360).floor().long()].unsqueeze(-1) + return timestamp + + def get_response_cpm(self): + cpm = {} + feat = self.data['temp_fusion_feat'] + scores = self.data['detection_local']['all_cls_scores'][-1][..., + min(self.data['detection_local']['all_cls_scores' + ][-1].shape[-1] - 1, 1):].topk(1, dim=-1).values[..., 0] + mask = scores > self.share_score_thr + cpm['temp_fusion_feat'] = {'ref_pts': feat['ref_pts'][mask], 'outs_dec': feat['outs_dec'][:, mask]} + return cpm + + def forward_local(self, tasks, training_mode, **kwargs): + if self.is_ego and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:backbone_neck', {})) + tasks[grad_mode].append((self.id, '13:roi_head', {})) + + if self.require_grad and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '14:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '15:det1_head', {})) + + def forward_fusion(self, tasks, training_mode, 
**kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego: + tasks[grad_mode].append((self.id, '21:spatial_fusion', {})) + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego: + tasks[grad_mode].append((self.id, '23:det2_head', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '31:roi_head', {})) + tasks['loss'].append((self.id, '32:det1_head', {})) + tasks['loss'].append((self.id, '33:det2_head', {})) + elif self.require_grad: + tasks['loss'].append((self.id, '32:det1_head', {})) + return tasks + + def pre_update_memory(self): + """Update memory before each forward run of a single frame.""" + if self.data['memory'] is not None: + self.data['memory']['timestamp'] += self.timestamp + # pose_inv = self.lidar_pose.inverse() + # self.data['memory']['pose'] = pose_inv @ self.data['memory']['pose'] + # self.data['memory']['ref_pts'] = self.transform_ref_pts( + # self.data['memory']['ref_pts'], pose_inv) + + self.refresh_memory(self.data['prev_exists']) + + def post_update_memory(self): + """Update memory after each forward run of a single frame.""" + x = self.data['detection_local'] + scores = x['all_cls_scores'][-1][..., + min(x['all_cls_scores'][-1].shape[-1] - 1, 1):].topk(1, dim=-1).values[..., 0] + topk = torch.topk(scores, k=self.memory_num_propagated).indices + + ref_pts = x['all_bbox_preds'][-1][:, :self.ref_pts_dim] + velo = x['all_bbox_preds'][-1][:, -2:] + embeddings = self.data['temp_fusion_feat']['outs_dec'][-1] + + timestamp = self.update_memory_timestamps(ref_pts) + pose_no_aug = torch.eye(4, device=ref_pts.device).unsqueeze(0).repeat( + timestamp.shape[0], 1, 1) + + vars = locals() + for k, v in self.data['memory'].items(): + if k == 'prev_exists' or k == 'pose': + continue + rec_topk = vars[k][topk].unsqueeze(0) + self.data['memory'][k] = torch.cat([rec_topk, v], dim=0) + + # self.vis_ref_pts('post update') + + # ego aug to global + self.data['memory']['ref_pts'] = self.transform_ref_pts( + self.data['memory']['ref_pts'], self.T_aug2g) + self.data['memory']['timestamp'][1:] -= self.timestamp + self.data['memory']['pose_no_aug'] = self.T_e2g[(None,) * 2] @ self.data['memory']['pose_no_aug'] # aug -->global + + # if self.require_grad: + # # self.vis_local_detection() + # self.vis_local_pred() + # print('d') + + def transform_ref_pts(self, reference_points, matrix): + reference_points = torch.cat( + [reference_points, torch.ones_like(reference_points[..., 0:1])], dim=-1) + if reference_points.ndim == 3: + reference_points = matrix.unsqueeze(0) @ reference_points.permute(0, 2, 1) + reference_points = reference_points.permute(0, 2, 1)[..., :3] + elif reference_points.ndim == 2: + reference_points = matrix @ reference_points.T + reference_points = reference_points.T[..., :3] + else: + raise NotImplementedError + return reference_points + + @property + def timestamp(self): + if self.dataset == 'opv2vt': + timestamp = float(self.data['frame']) * 0.1 / 2 + elif self.dataset == 'dairv2xt': + timestamp = self.data['global_time'] + else: + raise NotImplementedError + return timestamp + + def vis_ref_pts(self, ax=None, label=None, his_len=1, **kwargs): + import matplotlib.pyplot as plt + from cosense3d.utils.vislib import draw_points_boxes_plt + if ax is None: + fig = plt.figure(figsize=(8, 4)) + ax = fig.add_subplot() + pcd = self.data['points'][:, :3].detach().cpu().numpy() + gt_boxes = 
self.data['local_bboxes_3d'].detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.lidar_range.tolist(), + boxes_gt=gt_boxes[:, :7], + points=pcd, + ax=ax, + return_ax=True, + ) + + ref_pts = self.data['memory']['ref_pts'].detach().cpu().numpy() + markers = ['.r', '.m', '.b', '.c'] + for i in range(his_len): + plt.plot(ref_pts[i, :, 0], ref_pts[i, :, 1], markers[i], markersize=2) + ax.set_title(f"{label}: {self.data['scenario']}, {self.data['frame']}") + plt.show() + plt.close() + + return ax + + def vis_poses(self, ax=None, label=None, his_len=1, **kwargs): + import matplotlib.pyplot as plt + markers = ['r', 'm', 'b', 'c'] + mem_poses = self.data['memory']['pose'][:, 0].detach().cpu() + p0 = mem_poses[:his_len, :2, -1].numpy() + p1 = mem_poses[:his_len] @ torch.tensor([1., 0., 0., 1.]).view(1, 4, 1).repeat(his_len, 1, 1) + p2 = mem_poses[:his_len] @ torch.tensor([0., 1., 0., 1.]).view(1, 4, 1).repeat(his_len, 1, 1) + p1 = p1.squeeze(-1)[:, :2].numpy() + p2 = p2.squeeze(-1)[:, :2].numpy() + + if ax is None: + fig = plt.figure() + ax = fig.add_subplot() + ax.axis('equal') + for i in range(his_len): + ax.plot([p0[i, 0], p1[i, 0]], [p0[i, 1], p1[i, 1]], markers[i]) + ax.plot([p0[i, 0], p2[i, 0]], [p0[i, 1], p2[i, 1]], markers[i]) + return ax + + def vis_local_detection(self): + import matplotlib.pyplot as plt + from cosense3d.utils.vislib import draw_points_boxes_plt + points = self.data['points'][:, :3].detach().cpu().numpy() + # pred_boxes = self.data['det_local']['preds']['box'].detach().cpu().numpy() + gt_boxes = self.data['local_bboxes_3d'][:, :7].detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.lidar_range.tolist(), + boxes_gt=gt_boxes[:, :7], + # boxes_pred=pred_boxes, + points=points, + return_ax=True + ) + + ax.set_title('ego' if self.is_ego else 'coop') + plt.savefig("/home/yuan/Pictures/local_det.png") + plt.close() + + def vis_local_pred(self): + import matplotlib.pyplot as plt + from cosense3d.utils.vislib import draw_points_boxes_plt + points = self.data['points'][:, :3].detach().cpu().numpy() + # pred_boxes = self.data['detection_local']['preds']['box'].detach().cpu().numpy() + ref_pts = self.data['temp_fusion_feat']['ref_pts'].cpu() * (self.lidar_range[3:] - self.lidar_range[:3]) + self.lidar_range[:3] + ref_pts = ref_pts.detach().numpy() + gt_boxes = self.data['global_bboxes_3d'][:, :7].detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.lidar_range.tolist(), + boxes_gt=gt_boxes[:, :7], + # boxes_pred=pred_boxes, + points=points, + return_ax=True + ) + ax.plot(ref_pts[:, 0], ref_pts[:, 1], '.r', markersize=1) + + ax.set_title('ego' if self.is_ego else 'coop') + plt.savefig("/home/yuan/Pictures/local_pred.png") + plt.close() + + +class slcDenseToSparse(StreamLidarCAV): + + def prepare_data(self): + self.prepare_time_scale() + + def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:roi_head', {})) + tasks[grad_mode].append((self.id, '13:formatting', {})) + tasks[grad_mode].append((self.id, '14:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '15:det1_head', {})) + + +slcFcooper = slcDenseToSparse +slcAttnFusion = slcDenseToSparse + + +class slcFPVRCNN(StreamLidarCAV): + def prepare_data(self): + self.prepare_time_scale() + + def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or 
self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:roi_head', {})) + tasks[grad_mode].append((self.id, '13:keypoint_composer', {})) + tasks[grad_mode].append((self.id, '14:formatting', {})) + tasks[grad_mode].append((self.id, '15:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '16:det1_head', {})) + + # def forward_fusion(self, tasks, training_mode, **kwargs): + # # if self.is_ego: + # # tasks['with_grad'].append((self.id, '21:spatial_fusion', {})) + # return tasks + # + # def forward_head(self, tasks, training_mode, **kwargs): + # # if self.is_ego: + # # tasks['with_grad'].append((self.id, '23:det2_head', {})) + # return tasks + # + # def pre_update_memory(self): + # pass + # + # def post_update_memory(self): + # pass + # + # def get_response_cpm(self): + # return {} + # + # def loss(self, tasks, **kwargs): + # if self.is_ego: + # tasks['loss'].append((self.id, '31:roi_head', {})) + # return tasks + # + # def apply_transform(self): + # if self.use_aug: + # if self.is_ego: + # T_e2g = self.lidar_pose + # T_g2e = self.lidar_pose.inverse() + # T_c2e = torch.eye(4).to(self.lidar_pose.device) + # else: + # # cav to ego + # T_e2g = self.data['received_request']['lidar_pose'] + # T_g2e = self.data['received_request']['lidar_pose'].inverse() + # T_c2e = T_g2e @ self.lidar_pose + # + # if self.aug_transform is None: + # self.aug_transform = DOP.update_transform_with_aug( + # torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + # T_e2aug = self.aug_transform + # else: + # # adapt aug params to the current ego frame + # T_e2aug = self.T_g2aug @ T_e2g + # + # T_c2aug = T_e2aug @ T_c2e + # T_g2aug = T_e2aug @ T_g2e + # + # DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + # + # self.T_e2g = T_e2g + # self.T_g2aug = T_g2aug + # self.T_aug2g = T_g2aug.inverse() # ego aug to global + # + # else: + # if self.is_ego: + # transform = torch.eye(4).to(self.lidar_pose.device) + # else: + # # cav to ego + # request = self.data['received_request'] + # transform = request['lidar_pose'].inverse() @ self.lidar_pose + # + # T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + # DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + # self.T_aug2g = T_c2aug + + +class slcNoBoxTime(StreamLidarCAV): + + def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1) + + def update_memory_timestamps(self, ref_pts): + timestamp = torch.zeros_like(ref_pts[..., :1]) + return timestamp + + +class slcCIASSD(StreamLidarCAV): + def prepare_data(self): + self.prepare_time_scale() + + def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:roi_head', {})) + + def forward_fusion(self, tasks, training_mode, **kwargs): + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + return tasks + + def pre_update_memory(self): + pass + + def post_update_memory(self): + pass + + def get_response_cpm(self): + return {} + + def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '21:roi_head', {})) + return tasks + + def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e 
= self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug = T_e2aug @ T_g2e + + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + self.T_aug2g = T_c2aug + + +class LTSDairV2X(StreamLidarCAV): + def forward_local(self, tasks, training_mode, **kwargs): + if self.require_grad and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:backbone_neck', {})) + tasks[grad_mode].append((self.id, '13:roi_head', {})) + tasks[grad_mode].append((self.id, '14:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '15:det1_head', {})) + + def forward_fusion(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '21:spatial_fusion', {})) + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '23:det2_head', {})) + return tasks + + def loss(self, tasks, **kwargs): + if self.require_grad: + tasks['loss'].append((self.id, '31:roi_head', {})) + tasks['loss'].append((self.id, '32:det1_head', {})) + if self.is_ego: + tasks['loss'].append((self.id, '33:det2_head', {})) + return tasks + + +class slcNoBoxTimeDairV2X(LTSDairV2X): + + def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1) + + def update_memory_timestamps(self, ref_pts): + timestamp = torch.zeros_like(ref_pts[..., :1]) + return timestamp + + +class LTSCAVLocCorr(StreamLidarCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global'] + self.rl_range = torch.nn.Parameter(torch.Tensor([-50, -50, -3.0, 50, 50, 1.0])) + self.seq_idx = 0 + + def apply_transform(self): + super().apply_transform() + self.data['lidar_pose_aug'] = self.T_aug2g + + def prepare_data(self): + self.prepare_time_scale() + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1) + # DOP.adaptive_free_space_augmentation(self.data, res=0.5, min_h=0) + DOP.generate_sparse_target_roadline_points(self.data, range=75) + self.data['points_rl'] = copy.deepcopy(self.data['points']) + DOP.filter_range(self.data, self.rl_range, apply_to=['points_rl']) + + # import matplotlib.pyplot as plt + # points = torch.cat([self.data['points'][:, :3], + # torch.ones_like(self.data['points'][:, :1])], dim=-1) + # points = (self.data['lidar_poses_gt'] @ points.T)[:3].T.detach().cpu().numpy() + # rl = self.data['roadline'].detach().cpu().numpy() + # fig = 
plt.figure(figsize=(14, 6)) + # ax = fig.add_subplot() + # ax.plot(points[:, 0], points[:, 1], 'g.', markersize=1) + # ax.plot(rl[:, 0], rl[:, 1], 'k.', markersize=1) + # plt.savefig("/home/yys/Downloads/tmp.jpg") + # plt.close() + + def transform_data(self): + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys) + + def forward_localization(self, tasks, training_mode, **kwargs): + self.seq_idx = kwargs['seq_idx'] + if self.is_ego and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + if kwargs['seq_idx'] == self.memory_len - 1: + # only do localization correction for the last frame for easier matching during data fusion + # the relative transformations between the subsequent frame in the sequence is assumed to be correct. + tasks[grad_mode].append((self.id, '01:rl_backbone', {})) + tasks[grad_mode].append((self.id, '02:rl_neck', {})) + tasks[grad_mode].append((self.id, '03:rlseg_head', {})) + tasks[grad_mode].append((self.id, '04:localization', {})) + + def forward_local(self, tasks, training_mode, **kwargs): + if self.is_ego and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '14:pts_backbone', {})) + tasks[grad_mode].append((self.id, '15:backbone_neck', {})) + tasks[grad_mode].append((self.id, '16:roi_head', {})) + + if self.require_grad and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '17:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '18:det1_head', {})) + + def forward_fusion(self, tasks, training_mode, **kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego and self.seq_idx == self.memory_len - 1: + tasks[grad_mode].append((self.id, '21:spatial_fusion', {})) + return tasks + + def forward_head(self, tasks, training_mode, **kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego and self.seq_idx == self.memory_len - 1: + tasks[grad_mode].append((self.id, '23:det2_head', {})) + return tasks + + def get_response_cpm(self): + if self.seq_idx < self.memory_len - 1: + return {} + pose_corrected = self.data['lidar_poses_gt'] + pose = self.data['lidar_poses'] + ego_pose = self.data['received_request']['lidar_pose'] + box_ctrs = copy.deepcopy(self.data['detection_local']['preds']['box'][:, :4]) + box_ctrs[:, 3] = 1 + ref_pts = self.data['temp_fusion_feat']['ref_pts'] + lr = self.lidar_range.to(ref_pts.device) + ref_pts = ref_pts * (lr[3:6] - lr[:3]) + lr[:3] + # ref_pts = torch.cat([ref_pts, torch.ones_like(ref_pts[:, :1])], dim=-1) + # transformation matrix from augment-frame to corrected world-frame + transform = pose_corrected @ pose.inverse() @ self.T_aug2g + # transform = pose.inverse() @ ego_pose + box_ctrs = (transform @ box_ctrs.T)[:2].T + # ref_pts = (transform @ ref_pts.T)[:3].T + # transform roadline points to corrected world-frame + roadline = self.data.get('roadline_pred', None) + roadline = torch.cat([roadline, torch.zeros_like(roadline[:, :1]), + torch.ones_like(roadline[:, :1])], dim=-1) + roadline = (pose_corrected @ roadline.T)[:2].T + + # points is only for GL-visualization + # points = torch.cat([self.data['points'][:, :3], + # torch.ones_like(self.data['points'][:, :1])], dim=-1) + # self.data['points'][:, :3] = (transform @ points.T)[:3].T + + # import matplotlib.pyplot as plt + # + # pts = self.data['points_rl'].detach().cpu().numpy() + # rl_vis = self.data.get('roadline_pred', 
None).detach().cpu().numpy() + # plt.plot(pts[:, 0], pts[:, 1], 'k.', markersize=1) + # plt.plot(rl_vis[:, 0], rl_vis[:, 1], 'r.', markersize=1) + # plt.show() + # plt.close() + + # import matplotlib.pyplot as plt + # fig = plt.figure(figsize=(6, 6)) + # ax = fig.add_subplot() + # rl_gt = self.data['points'].detach().cpu().numpy() + # rl_vis = roadline.detach().cpu().numpy() + # box_ctrs_vis = box_ctrs.detach().cpu().numpy() + # ref_pts_vis = ref_pts.detach().cpu().numpy() + # ax.plot(rl_gt[:, 0], rl_gt[:, 1], 'g.', markersize=1) + # ax.plot(rl_vis[:, 0], rl_vis[:, 1], 'k.', markersize=1) + # ax.plot(box_ctrs_vis[:, 0], box_ctrs_vis[:, 1], 'bo', markersize=3) + # ax.plot(ref_pts_vis[:, 0], ref_pts_vis[:, 1], 'r.', markersize=1) + # plt.savefig("/home/yys/Downloads/tmp.jpg") + # plt.close() + + return { + # 'pose_corrected': self.data['lidar_poses_corrected'], + 'box_ctrs': box_ctrs, + 'roadline': roadline, + 'ref_pts': ref_pts, + 'feat': self.data['temp_fusion_feat']['outs_dec'], + 'Taug2caw': transform, + 'points': self.data['points'], + } + + + + + + + + + + + diff --git a/cosense3d/agents/center_controller.py b/cosense3d/agents/center_controller.py new file mode 100644 index 00000000..2baf9f64 --- /dev/null +++ b/cosense3d/agents/center_controller.py @@ -0,0 +1,216 @@ +import matplotlib.pyplot as plt +import torch + +from cosense3d.agents import core + + +class CenterController: + def __init__(self, cfg, data_loader, dist=False): + self.mode = data_loader.dataset.mode + self.dist = dist + self.seq_len = data_loader.dataset.seq_len + self.data_info = data_loader.dataset.cfgs['data_info'] + self.num_loss_frame = cfg.get('num_loss_frame', 1) + self.batch_seq = cfg.get('batch_seq', False) + self.setup_core(cfg) + self.global_data = {} + + def setup_core(self, cfg): + if self.batch_seq: + cav_manager = core.SeqCAVManager + data_manager = core.SeqDataManager + task_manager = core.SeqTaskManager(self.seq_len) + + else: + cav_manager = core.CAVManager + data_manager = core.DataManager + task_manager = core.TaskManager() + self.cav_manager = cav_manager(**self.update_cfg(cfg['cav_manager'], + self.data_info)) + self.data_manager = data_manager( + self.cav_manager, **self.update_cfg( + cfg['data_manager'][self.mode], self.data_info)) + self.task_manager = task_manager + self.forward_runner = core.ForwardRunner(cfg['shared_modules'], + self.data_manager, + self.dist, **cfg.get('forward_runner', {})) + + def update_cfg(self, cfg, *args): + for arg in args: + cfg.update(arg) + return cfg + + @property + def modules(self): + return self.forward_runner.shared_modules + + @property + def model(self): + return self.forward_runner + + @property + def parameters(self): + return self.forward_runner.parameters() + + def train_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + self.data_manager.add_loc_err(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + self.cav_manager.reset() + + if self.batch_seq: + return self.run_seq(seq_data, training_mode=True, **kwargs) + else: + loss = 0 + loss_dict = {} + for i, data in enumerate(seq_data): # a few seqs from dataloader might < self.seq_lens + with_loss = i >= self.seq_len - self.num_loss_frame + kwargs['seq_idx'] = i + frame_loss_dict = self.run_frame(data, with_loss, training_mode=True, **kwargs) + for k, v in frame_loss_dict.items(): + if 'loss' in k: + loss = loss + v + loss_dict[f'f{i}.{k}'] = v + loss_dict['total_loss'] = loss + return loss, 
loss_dict + + def test_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + self.data_manager.add_loc_err(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + self.cav_manager.reset() + + # cav_idx = 1 + # import matplotlib.pyplot as plt + # import torch + # fig = plt.figure(figsize=(16, 10)) + # ax = fig.add_subplot() + # + # for i, frame_data in enumerate(seq_data): + # points = frame_data['points'][0][cav_idx] + # lidar_pose = frame_data['lidar_poses'][0][0].inverse() @ frame_data['lidar_poses'][0][cav_idx] + # # lidar_pose = frame_data['lidar_poses'][0][cav_idx] + # points = lidar_pose @ torch.cat([points[:, :3], torch.ones_like(points[:, :1])], dim=-1).T + # points = points.detach().cpu().numpy() + # ax.plot(points[0], points[1], '.', markersize=1) + # + # plt.savefig("/home/yys/Downloads/tmp.png") + # plt.close() + + for i in range(self.seq_len): + kwargs['seq_idx'] = i + self.run_frame(seq_data[i], + with_loss=False, + training_mode=False, + **kwargs) + + def vis_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + self.data_manager.add_loc_err(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + frame_data = seq_data[0] + self.cav_manager.update_cav_info(**frame_data) + self.data_manager.distribute_to_cav(**frame_data) + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + # apply data online transform + self.cav_manager.forward(False, False) + + def run_frame(self, frame_data, with_loss, training_mode, **kwargs): + self.cav_manager.update_cav_info(**frame_data) + self.data_manager.distribute_to_cav(**frame_data) + self.cav_manager.apply_cav_function('pre_update_memory') + + # get pseudo forward tasks + tasks = self.cav_manager.forward(with_loss, training_mode, **kwargs) + batched_tasks = self.task_manager.summarize_tasks(tasks) + + # prepare local data + self.cav_manager.apply_cav_function('prepare_data') + + # correct localization errors + self.forward_runner(batched_tasks[0]['no_grad'], with_grad=False, **kwargs) + self.forward_runner(batched_tasks[0]['with_grad'], with_grad=True, **kwargs) + + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + + # apply data transformation with the corrected localization + self.cav_manager.apply_cav_function('transform_data') + + # preprocess after transformation to ego frame + self.data_manager.apply_preprocess() + # self.data_manager.vis_global_data_plt(['vis_ref_pts', 'vis_poses'], kwargs['seq_idx'] + 1) + + # from cosense3d.utils.vislib import plot_cavs_points + # plot_cavs_points(self.cav_manager.cavs[0]) + + # process local cav data + self.forward_runner(batched_tasks[1]['no_grad'], with_grad=False, **kwargs) + self.forward_runner(batched_tasks[1]['with_grad'], with_grad=training_mode, **kwargs) + + # send coop cav feature-level cpm to ego cav + response = self.cav_manager.send_response() + self.cav_manager.receive_response(response) + + # process ego cav data and fuse data from coop cav with grad if training + self.forward_runner(batched_tasks[2]['with_grad'], with_grad=training_mode, **kwargs) + self.forward_runner(batched_tasks[2]['no_grad'], with_grad=False, **kwargs) + self.cav_manager.apply_cav_function('post_update_memory') + + frame_loss_dict = {} + if with_loss: + frame_loss_dict = 
self.forward_runner.frame_loss(batched_tasks[3]['loss'], **kwargs) + return frame_loss_dict + + def run_seq(self, seq_data, training_mode, **kwargs): + cur_len = len(seq_data) + self.cav_manager.update_cav_info(seq_data) + self.data_manager.distribute_to_cav(seq_data) + self.cav_manager.apply_cav_function('init_memory') + + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + # get pseudo forward tasks + tasks = self.cav_manager.forward(training_mode, self.num_loss_frame, cur_len) + batched_tasks = self.task_manager.summarize_tasks(tasks) + # preprocess after transformation to ego frame + self.data_manager.apply_preprocess() + + # process local cav data + if 'no_grad' in batched_tasks[0] and len(batched_tasks[0]['no_grad']) > 0: + self.forward_runner(batched_tasks[0]['no_grad'], with_grad=False, **kwargs) + + self.forward_runner(batched_tasks[0]['with_grad'], with_grad=training_mode, **kwargs) + + # process tasks that needs to be run sequentially + seq_tasks = self.task_manager.parallel_to_sequential(batched_tasks[1]) + for i in range(cur_len): + self.cav_manager.apply_cav_function('pre_update_memory', seq_idx=i) + if 'no_grad' in seq_tasks and len(seq_tasks['no_grad'][i]) > 0: + self.forward_runner(seq_tasks['no_grad'][i], with_grad=False, **kwargs) + self.forward_runner(seq_tasks['with_grad'][i], with_grad=training_mode, **kwargs) + self.cav_manager.apply_cav_function('post_update_memory', seq_idx=i) + + # send coop cav feature-level cpm to ego cav + response = self.cav_manager.send_response() + self.cav_manager.receive_response(response) + + if 2 not in batched_tasks: + print([d['valid_agent_ids'] for d in seq_data]) + # process ego cav data and fuse data from coop cav with grad if training + self.forward_runner(batched_tasks[2]['with_grad'], with_grad=training_mode, **kwargs) + if 'no_grad' in batched_tasks[2]: + self.forward_runner(batched_tasks[2]['no_grad'], with_grad=False, **kwargs) + loss, loss_dict = self.forward_runner.loss(batched_tasks[3]['loss'], with_grad=False, **kwargs) + return loss, loss_dict + + + + + + + diff --git a/cosense3d/agents/center_controller_v1.py b/cosense3d/agents/center_controller_v1.py new file mode 100644 index 00000000..4c62a330 --- /dev/null +++ b/cosense3d/agents/center_controller_v1.py @@ -0,0 +1,173 @@ +from cosense3d.agents import core + + +class CenterController: + def __init__(self, cfg, data_loader, dist=False): + self.mode = data_loader.dataset.mode + self.dist = dist + self.seq_len = data_loader.dataset.seq_len + self.data_info = data_loader.dataset.cfgs['data_info'] + self.num_loss_frame = cfg.get('num_loss_frame', 1) + self.batch_seq = cfg.get('batch_seq', False) + self.setup_core(cfg) + self.global_data = {} + + def setup_core(self, cfg): + if self.batch_seq: + cav_manager = core.SeqCAVManager + data_manager = core.SeqDataManager + task_manager = core.SeqTaskManager(self.seq_len) + + else: + cav_manager = core.CAVManager + data_manager = core.DataManager + task_manager = core.TaskManager() + self.cav_manager = cav_manager(**self.update_cfg(cfg['cav_manager'], + self.data_info)) + self.data_manager = data_manager( + self.cav_manager, **self.update_cfg( + cfg['data_manager'][self.mode], self.data_info)) + self.task_manager = task_manager + self.forward_runner = core.ForwardRunner(cfg['shared_modules'], + self.data_manager, + self.dist, **cfg.get('forward_runner', {})) + + def update_cfg(self, cfg, *args): + for arg in args: + cfg.update(arg) + return cfg + + @property + def 
modules(self): + return self.forward_runner.shared_modules + + @property + def model(self): + return self.forward_runner + + @property + def parameters(self): + return self.forward_runner.parameters() + + def train_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + self.cav_manager.reset() + + if self.batch_seq: + return self.run_seq(seq_data, training_mode=True, **kwargs) + else: + loss = 0 + loss_dict = {} + for i, data in enumerate(seq_data): # a few seqs from dataloader might < self.seq_lens + with_loss = i >= self.seq_len - self.num_loss_frame + kwargs['seq_idx'] = i + frame_loss_dict = self.run_frame(data, with_loss, training_mode=True, **kwargs) + for k, v in frame_loss_dict.items(): + if 'loss' in k: + loss = loss + v + loss_dict[f'f{i}.{k}'] = v + loss_dict['total_loss'] = loss + return loss, loss_dict + + def test_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + self.cav_manager.reset() + for i in range(self.seq_len): + self.run_frame(seq_data[i], with_loss=False, training_mode=False, **kwargs) + + def vis_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + frame_data = seq_data[0] + self.cav_manager.update_cav_info(**frame_data) + self.data_manager.distribute_to_cav(**frame_data) + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + # apply data online transform + self.cav_manager.forward(False, False) + + def run_frame(self, frame_data, with_loss, training_mode, **kwargs): + self.cav_manager.update_cav_info(**frame_data) + self.data_manager.distribute_to_cav(**frame_data) + self.cav_manager.apply_cav_function('pre_update_memory') + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + # get pseudo forward tasks + tasks = self.cav_manager.forward(with_loss, training_mode) + batched_tasks = self.task_manager.summarize_tasks(tasks) + # preprocess after transformation to ego frame + self.data_manager.apply_preprocess() + # self.data_manager.vis_global_data_plt(['vis_ref_pts', 'vis_poses'], kwargs['seq_idx'] + 1) + + # process local cav data + if len(batched_tasks[1]['no_grad']) > 0: + self.forward_runner(batched_tasks[1]['no_grad'], with_grad=False, **kwargs) + self.forward_runner(batched_tasks[1]['with_grad'], with_grad=training_mode, **kwargs) + + # send coop cav feature-level cpm to ego cav + response = self.cav_manager.send_response() + self.cav_manager.receive_response(response) + + # process ego cav data and fuse data from coop cav with grad if training + self.forward_runner(batched_tasks[2]['with_grad'], with_grad=training_mode, **kwargs) + self.forward_runner(batched_tasks[2]['no_grad'], with_grad=False, **kwargs) + self.cav_manager.apply_cav_function('post_update_memory') + + frame_loss_dict = {} + if with_loss: + frame_loss_dict = self.forward_runner.frame_loss(batched_tasks[3]['loss'], **kwargs) + return frame_loss_dict + + def run_seq(self, seq_data, training_mode, **kwargs): + cur_len = len(seq_data) + self.cav_manager.update_cav_info(seq_data) + self.data_manager.distribute_to_cav(seq_data) + 
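+        # initialize the temporal memory of every CAV before the batched sequence forward passes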
self.cav_manager.apply_cav_function('init_memory') + + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + # get pseudo forward tasks + tasks = self.cav_manager.forward(training_mode, self.num_loss_frame, cur_len) + batched_tasks = self.task_manager.summarize_tasks(tasks) + # preprocess after transformation to ego frame + self.data_manager.apply_preprocess() + + # process local cav data + if 'no_grad' in batched_tasks[0] and len(batched_tasks[0]['no_grad']) > 0: + self.forward_runner(batched_tasks[0]['no_grad'], with_grad=False, **kwargs) + + self.forward_runner(batched_tasks[0]['with_grad'], with_grad=training_mode, **kwargs) + + # process tasks that needs to be run sequentially + seq_tasks = self.task_manager.parallel_to_sequential(batched_tasks[1]) + for i in range(cur_len): + self.cav_manager.apply_cav_function('pre_update_memory', seq_idx=i) + if 'no_grad' in seq_tasks and len(seq_tasks['no_grad'][i]) > 0: + self.forward_runner(seq_tasks['no_grad'][i], with_grad=False, **kwargs) + self.forward_runner(seq_tasks['with_grad'][i], with_grad=training_mode, **kwargs) + self.cav_manager.apply_cav_function('post_update_memory', seq_idx=i) + + # send coop cav feature-level cpm to ego cav + response = self.cav_manager.send_response() + self.cav_manager.receive_response(response) + + if 2 not in batched_tasks: + print([d['valid_agent_ids'] for d in seq_data]) + # process ego cav data and fuse data from coop cav with grad if training + self.forward_runner(batched_tasks[2]['with_grad'], with_grad=training_mode, **kwargs) + if 'no_grad' in batched_tasks[2]: + self.forward_runner(batched_tasks[2]['no_grad'], with_grad=False, **kwargs) + loss, loss_dict = self.forward_runner.loss(batched_tasks[3]['loss'], with_grad=False, **kwargs) + return loss, loss_dict + + + + + + + diff --git a/cosense3d/agents/core/__init__.py b/cosense3d/agents/core/__init__.py new file mode 100644 index 00000000..103d14f8 --- /dev/null +++ b/cosense3d/agents/core/__init__.py @@ -0,0 +1,6 @@ + + +from .cav_manager import CAVManager +from .forward_runner import ForwardRunner +from .data_manager import DataManager +from .task_manager import TaskManager diff --git a/cosense3d/agents/core/base_runner.py b/cosense3d/agents/core/base_runner.py new file mode 100644 index 00000000..979f4405 --- /dev/null +++ b/cosense3d/agents/core/base_runner.py @@ -0,0 +1,69 @@ + + +from cosense3d.utils.train_utils import * +from cosense3d.agents.core.hooks import Hooks + + +class BaseRunner: + def __init__(self, + dataloader, + controller, + gpus=0, + log_every=10, + hooks=None, + **kwargs + ): + self.dataloader = dataloader + self.data_iter = iter(dataloader) + self.total_iter = len(dataloader) + self.iter = 1 + self.epoch = 1 + + self.controller = controller + self.forward_runner = controller.forward_runner + self.hooks = Hooks(hooks) + + self.gpus = gpus + self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') + self.log_every = log_every + + self.init() + + def init(self): + if self.forward_runner is not None: + self.forward_runner.to(self.device) + + def setup_logger(self, *args, **kwargs): + pass + + def set_logdir(self, logdir): + self.logger.log_path = logdir + + @property + def logdir(self): + if hasattr(self, 'logger'): + return self.logger.logdir + else: + return None + + def run(self): + raise NotImplementedError + + def next_batch(self): + if self.iter >= self.total_iter: + self.iter = 1 + self.epoch += 1 + self.data_iter = 
iter(self.dataloader) + batch = next(self.data_iter) + return batch + + def vis_data(self, keys=None, **kwargs): + if keys is None: + keys = ['points', 'imgs', 'bboxes2d', 'lidar2img', 'global_labels', 'local_labels'] + else: + keys = list(set(keys)) + return self.controller.data_manager.gather_vis_data(keys=keys) + + + + diff --git a/cosense3d/agents/core/cav_manager.py b/cosense3d/agents/core/cav_manager.py new file mode 100644 index 00000000..1dfac99f --- /dev/null +++ b/cosense3d/agents/core/cav_manager.py @@ -0,0 +1,145 @@ + +import torch +import numpy as np + +from cosense3d.agents.cav_prototype import get_prototype +from cosense3d.utils.data_statistics import StatsRecorder + + +class CAVManager: + def __init__(self, lidar_range, prototype=None, memory_len=1, all_grad=False, + num_grad_cav=1, seq_len=0, cpm_statistic=False, **kwargs): + self.lidar_range = torch.tensor(lidar_range) + self.memory_len = memory_len + self.all_grad = all_grad + self.num_grad_cav = num_grad_cav + self.seq_len = seq_len + self.cpm_statistic = cpm_statistic + self.kwargs = kwargs + self.cavs = [] + self.cav_dict = {} + assert prototype is not None, "CAV prototype should be defined." + self.prototype = get_prototype(prototype) + + if self.cpm_statistic: + self.cpm_size_recorder = StatsRecorder() + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(cavs={self.cav_dict.keys()})' + return repr_str + + def reset(self): + self.cavs = [] + self.cav_dict = {} + + def update_cav_info(self, valid_agent_ids=None, lidar_poses=None, **data): + B = len(valid_agent_ids) # batch_size + cavs = [] + cav_dict = {} + for b in range(B): + batch_cavs = [] + for i, cav_id in enumerate(valid_agent_ids[b]): + is_ego = True if i==0 else False # assume the first car is ego car + require_grad = True if (i < self.num_grad_cav or self.all_grad) else False + # pad id with batch idx to avoid duplicated ids across different batches + cav_id = f'{b}.{cav_id}' + cav = self.get_cav_with_id(cav_id) + if not cav: + cav = self.prototype(cav_id, i, is_ego, + self.lidar_range, + self.memory_len, + lidar_pose=lidar_poses[b][i], + require_grad=require_grad, + **self.kwargs) + else: + cav.update(lidar_poses[b][i], is_ego, require_grad) + batch_cavs.append(cav) + cav_dict[cav_id] = (b, i) + cavs.append(batch_cavs) + self.cavs = cavs + self.cav_dict = cav_dict + + def has_cav(self, cav_id): + return cav_id in self.cav_dict + + def get_cav_with_id(self, id): + if id not in self.cav_dict: + return False + item = self.cav_dict[id] + if isinstance(item, tuple): + b, i = item + return self.cavs[b][i] + else: + return item + + def send_request(self): + request = [] + for b, cavs in enumerate(self.cavs): + req = {} + for cav in cavs: + if cav.is_ego: + req[cav.id] = cav.get_request_cpm() + request.append(req) + return request + + def receive_request(self, request): + for b, req in enumerate(request): + for ai, req_cpm in req.items(): + for cav in self.cavs[b]: + if ai != cav.id: + cav.receive_request(req_cpm) + + def send_response(self): + response = [] + for b, cavs in enumerate(self.cavs): + ans = {} + for cav in cavs: + if cav.has_request(): + ans[cav.id] = cav.get_response_cpm() + response.append(ans) + if self.cpm_statistic: + self.update_cpm_statistic(response) + return response + + def receive_response(self, response): + for cavs, resp in zip(self.cavs, response): + for cav in cavs: + if cav.is_ego: + cav.receive_response(resp) + + def forward(self, with_loss, training_mode, **kwargs): + tasks = {'with_grad': [], 'no_grad': [], 
'loss': []} + for i, cavs in enumerate(self.cavs): + for cav in cavs: + cav.forward(tasks, training_mode, **kwargs) + if with_loss and training_mode: + cav.loss(tasks, **kwargs) + return tasks + + def apply_cav_function(self, func_name): + for b, cavs in enumerate(self.cavs): + for cav in cavs: + getattr(cav, func_name)() + + def update_cpm_statistic(self, response): + sizes = [] + for resp in response: + for ai, data_dict in resp.items(): + def count_size(data): + if isinstance(data, dict): + s = 0 + for k, v in data.items(): + s += count_size(v) + return s + elif isinstance(data, torch.Tensor): + return data.numel() + sizes.append(count_size(data_dict)) + if len(sizes) > 0: + self.cpm_size_recorder.update(np.array(sizes).reshape(-1, 1)) + + + + + + diff --git a/cosense3d/agents/core/data_manager.py b/cosense3d/agents/core/data_manager.py new file mode 100644 index 00000000..a56c7cf4 --- /dev/null +++ b/cosense3d/agents/core/data_manager.py @@ -0,0 +1,414 @@ + + +import os +import random + +import matplotlib.pyplot as plt +import torch +import torch_scatter + +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.agents.utils.transform import generate_bev_tgt_pts + + +class DataManager: + def __init__(self, + cav_manager, + lidar_range, + voxel_size=None, + aug=None, + pre_process=[], + loc_err=None): + self.cav_manager = cav_manager + self.lidar_range = lidar_range + self.voxel_size = voxel_size + self.aug = aug + self.pre_process = pre_process + self.loc_err = loc_err + + def apply_preprocess(self): + if isinstance(self.pre_process, list): + for p in self.pre_process: + getattr(self, p)() + elif isinstance(self.pre_process, dict): + for p, args in self.pre_process.items(): + getattr(self, p)(**args) + + def remove_global_empty_boxes(self): + for cavs in self.cav_manager.cavs: + if cavs[0].data.get('global_bboxes_3d', None) is None: + continue + assert cavs[0].is_ego + points = torch.cat([cav.data['points'] for cav in cavs], dim=0) + global_boxes = cavs[0].data['global_bboxes_3d'] + box_idx = points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + global_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(global_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cavs[0].data['global_bboxes_3d'] = global_boxes[mask] + cavs[0].data['global_labels_3d'] = cavs[0].data['global_labels_3d'][mask] + if 'bboxes_3d_pred' in cavs[0].data: + cavs[0].data['bboxes_3d_pred'] = cavs[0].data['bboxes_3d_pred'][:, mask] + + def generate_global_non_empty_mask(self): + for cavs in self.cav_manager.cavs: + if cavs[0].data.get('global_bboxes_3d', None) is None: + continue + assert cavs[0].is_ego + points = torch.cat([cav.data['points'] for cav in cavs], dim=0) + global_boxes = cavs[0].data['global_bboxes_3d'] + box_idx = points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + global_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(global_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cavs[0].data['global_bboxes_mask'] = mask + + def remove_local_empty_boxes(self, ego_only=False): + for cavs in self.cav_manager.cavs: + for cav in cavs: + if not cav.is_ego and ego_only: + continue + if cav.data.get('local_bboxes_3d', None) is None: + continue + points = cav.data['points'] + local_boxes = cav.data['local_bboxes_3d'] + box_idx = 
points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + local_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(local_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cav.data['local_bboxes_3d'] = local_boxes[mask] + cav.data['local_labels_3d'] = cav.data['local_labels_3d'][mask] + + def generate_local_non_empty_mask(self, ego_only=False): + for cavs in self.cav_manager.cavs: + for cav in cavs: + if not cav.is_ego and ego_only: + continue + if cav.data.get('local_bboxes_3d', None) is None: + continue + points = cav.data['points'] + local_boxes = cav.data['local_bboxes_3d'] + box_idx = points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + local_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(local_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cav.data['local_bboxes_mask'] = mask + + def sample_global_bev_tgt_pts(self, sam_res=0.4, map_res=0.2, range=50, max_num_pts=5000, discrete=False): + for cavs in self.cav_manager.cavs: + assert cavs[0].is_ego + points = torch.cat([cav.data['points'] for cav in cavs], dim=0) + transform = cavs[0].T_e2g.inverse() @ cavs[0].T_aug2g + bev_pts = generate_bev_tgt_pts(points, cavs[0].data, transform, + sam_res, map_res, range, max_num_pts, discrete) + cavs[0].data['global_bev_tgt_pts'] = bev_pts + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # lidar = points.cpu().numpy() + # pts = bev_pts.cpu().numpy() + # pos = pts[:, 2] == 1 + # neg = pts[:, 2] == 0 + # + # ax = draw_points_boxes_plt( + # pc_range=50, + # points=pts[pos, :], + # points_c='r', + # return_ax=True + # ) + # ax.plot(pts[neg, 0], pts[neg, 1], '.', c='b', markersize=1) + # ax.plot(lidar[:, 0], lidar[:, 1], '.', c='gray', markersize=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + + def distribute_to_seq_list(self, batch_dict, seq_len): + result = [] + for l in range(seq_len): + res = {} + for k, v in batch_dict.items(): + x_list = [x[l] for x in v if l < len(x)] + if len(x_list) == 0: + res = {} + break + res[k] = x_list + if len(res) > 0: + result.append(res) + return result + + def distribute_to_cav(self, valid_agent_ids=None, **data): + cavs = self.cav_manager.cavs + global_data_list = [] + for b, agent_ids in enumerate(valid_agent_ids): + global_data = {} + for j, ai in enumerate(agent_ids): + assert cavs[b][j].id == f'{b}.{ai}' + for k, v in data.items(): + if isinstance(v[b], list) and len(v[b]) == len(agent_ids): + cavs[b][j].data[k] = v[b][j] + elif k == 'chosen_cams': + cavs[b][j].data[k] = v[b][ai] + elif k == 'augment_params': + cavs[b][j].data[k] = v[b] + global_data[k] = v[b] + # elif cavs[b][j].is_ego: + else: + cavs[b][j].data[k] = v[b] + global_data_list.append(global_data) + return global_data_list + + def distribute_to_seq_cav(self, data): + for l, d in enumerate(data): + valid_agent_ids = d['valid_agent_ids'] + global_data_list = [] + for b, agent_ids in enumerate(valid_agent_ids): + global_data = {} + for j, ai in enumerate(agent_ids): + new_data = {} + for k, v in d.items(): + if isinstance(v[b], list) and len(v[b]) == len(agent_ids): + new_data[k] = v[b][j] + elif k == 'chosen_cams': + new_data[k] = v[b][ai] + elif k == 'augment_params': + new_data[k] = v[b] + global_data[k] = v[b] + # elif cavs[b][j].is_ego: + else: + new_data[k] = v[b] + self.cav_manager.get_cav_with_id(f'{b}.{ai}').data[l] 
= new_data + global_data_list.append(global_data) + return global_data_list + + def generate_augment_params(self, batch_dict, seq_len): + B = len(batch_dict['scenario']) + if self.aug is None: + rand_aug = [[None] * seq_len] * B + else: + rand_aug = [] + def rand_from_range(r): + return torch.rand(1) * (r[1] - r[0]) + r[0] + for i in range(B): + cur_aug = {} + if 'rot_range' in self.aug: + theta = rand_from_range(self.aug['rot_range']) + ct = torch.cos(theta) + st = torch.sin(theta) + transform = torch.eye(4) + transform[0, 0] = ct + transform[0, 1] = -st + transform[1, 0] = st + transform[1, 1] = ct + cur_aug['rot'] = transform + if 'trans_std' in self.aug: + cur_aug['trans'] = torch.randn(len(self.aug['trans_std'])) * torch.tensor(self.aug['trans_std']) + if 'scale_ratio_range' in self.aug: + cur_aug['scale'] = rand_from_range(self.aug['scale_ratio_range']) + if 'flip' in self.aug: + cur_aug['flip'] = {'flip_idx': random.randint(0, 3), 'flip_axis': self.aug['flip']} + rand_aug.append([cur_aug for _ in range(seq_len)]) + batch_dict['augment_params'] = rand_aug + + def add_loc_err(self, batch_dict, seq_len): + if self.loc_err is None: + return + # TODO + + def gather(self, cav_list, data_keys): + data_list = [] + for k in data_keys: + data = [] + for cav_id in cav_list: + data.append(self.cav_manager.get_cav_with_id(cav_id).data[k]) + data_list.append(data) + return data_list + + def scatter(self, cav_list, data_dict): + for k, data_list in data_dict.items(): + for cav_id, data in zip(cav_list, data_list): + self.update(cav_id, k, data) + + def update(self, cav_id, data_key, data): + self.cav_manager.get_cav_with_id(cav_id).data[data_key] = data + + def gather_batch(self, batch_idx, key, to_numpy=False): + data = {} + for cav in self.cav_manager.cavs[batch_idx]: + if key not in cav.data: + continue + d = cav.data[key] + if isinstance(d, torch.Tensor) and to_numpy: + d = d.cpu().numpy() + elif isinstance(d, list) and len(d) > 0 and isinstance(d[0], torch.Tensor): + d = [x.cpu().numpy() for x in d] + data[cav.id] = d + return data + + def gather_ego_data(self, key): + data = {} + for cavs in self.cav_manager.cavs: + assert cavs[0].is_ego + if key not in cavs[0].data: + continue + d = cavs[0].data[key] + data[cavs[0].id] = d + return data + + def gather_cav_data(self, key): + data = {} + for cavs in self.cav_manager.cavs: + for cav in cavs: + data[cav.id] = cav.data.get(key, {}) + return data + + def boxes_to_vis_format(self, boxes, labels, id_appendix=0): + boxes_vis = {} + gt_labels = labels.tolist() + for i, box in enumerate(boxes.tolist()): + cur_id = i + 1 + if id_appendix != 0: + cur_id = cur_id * 10 + id_appendix + try: + boxes_vis[cur_id] = [gt_labels[i]] + box[:6] + [0, 0] + [box[6]] + except: + print('d') + return boxes_vis + + def get_gt_boxes_as_vis_format(self, batch_idx, coor='global', successors=False): + gt_boxes = self.gather_batch(batch_idx, f'{coor}_bboxes_3d' ) + gt_labels = self.gather_batch(batch_idx, f'{coor}_labels_3d') + if successors and coor=='global': + bboxes_3d_pred = self.gather_batch(batch_idx, 'bboxes_3d_pred') + labels = {} + successor_labels = {} + for k in gt_boxes.keys(): + labels[k] = self.boxes_to_vis_format(gt_boxes[k], gt_labels[k]) + if successors and coor=='global' and k in bboxes_3d_pred: + successor_labels[k] = {} + for i, cur_preds in enumerate(bboxes_3d_pred[k]): + tmp_boxes = gt_boxes[k].detach().clone() + tmp_boxes[:, :3] = cur_preds[:, :3] + tmp_boxes[:, 6] = cur_preds[:, -1] + successor_labels[k].update(self.boxes_to_vis_format(tmp_boxes, 
gt_labels[k], i)) + return labels, successor_labels + + + def gather_vis_data(self, batch_idx=0, keys=['points']): + gather_dict = {} + successors = 'global_pred_gt' in keys + for k in keys: + if k in ['global_labels', 'local_labels']: + ref_coor = k.split('_')[0] + gather_dict[f'{ref_coor}_labels'], successor_labels = ( + self.get_gt_boxes_as_vis_format(batch_idx, ref_coor, successors)) + if successors and ref_coor=='global': + gather_dict['global_pred_gt'] = successor_labels + elif k == 'global_pred_gt' or k == 'global_pred': + continue + elif k == 'detection' or k == 'detection_global': + detection = self.gather_ego_data(k) + global_pred = {} + for cav_id, det in detection.items(): + global_pred[cav_id] = {} + if 'preds' in det: + det = det['preds'] # todo: without nms hook, keywork preds is not removed + if 'box' in det and 'lbl' in det: + detection[cav_id]['labels'] = self.boxes_to_vis_format(det['box'], det['lbl']) + if 'pred' in det: + global_pred[cav_id]['labels'] = self.boxes_to_vis_format( + det['pred'].view(-1, 7), det['lbl'].unsqueeze(0).repeat(2, 1).view(-1)) + gather_dict['detection'] = detection + gather_dict['global_pred'] = global_pred + elif k == 'detection_local': + detection = self.gather_cav_data(k) + for cav_id, det in detection.items(): + if len(det) == 0: + continue + if 'preds' in det: + det = det['preds'] + if 'box' in det and 'lbl' in det: + detection[cav_id]['labels'] = self.boxes_to_vis_format(det['box'], det['lbl']) + gather_dict['detection_local'] = detection + else: + gather_dict[k] = self.gather_batch(batch_idx, k, True) + return gather_dict + + def get_vis_data_input(self, batch_idx=0, keys=None): + """ + + Parameters + ---------- + batch_idx + key: additional gt keys that are not standarlized in consense3d data API + + Returns + ------- + + """ + pcds = self.gather_batch(batch_idx, 'points', True) + imgs = self.gather_batch(batch_idx, 'img', True) + global_labels = self.get_gt_boxes_as_vis_format(batch_idx, 'global') + local_labels = self.get_gt_boxes_as_vis_format(batch_idx, 'local') + bboxes2d = self.gather_batch(batch_idx, 'bboxes2d', True) + lidar2img = self.gather_batch(batch_idx, 'lidar2img', True) + out_dict = { + 'pcds': pcds, + 'imgs': imgs, + 'bboxes2d': bboxes2d, + 'lidar2img': lidar2img, + 'global_labels': global_labels, + 'local_labels': local_labels + } + if keys is not None: + for k in keys: + out_dict[k] = self.gather_batch(batch_idx, k, True) + return out_dict + + def get_vis_data_detection(self, batch_idx=0, keys='detection'): + """ + + Parameters + ---------- + batch_idx: batch index + key: the default key for detection is 'detection', customized key can also be used, + depending on which key is used for saving detection result in the CAV data pool. + + Returns + ------- + detection: result with boxes and labels converted to the visualizing format. 
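+            Each entry maps a box id to [label] + box[:6] + [0, 0, box[6]], as produced by boxes_to_vis_format.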
+ """ + detection = self.gather_batch(batch_idx, 'detection') + for cav_id, det in detection.items(): + detection[cav_id]['labels'] = self.boxes_to_vis_format(det['box'], det['lbl']) + return detection + + def get_vis_data_bev(self, batch_idx=0, keys='bev'): + return self.gather_batch(batch_idx, 'bev') + + def get_vis_data_meta(self, batch_idx=0, keys=None): + return { + 'scenario': self.gather_batch(batch_idx, 'scenario'), + 'frame': self.gather_batch(batch_idx, 'frame') + } + + def vis_global_data_plt(self, vis_funcs, seq_len=1): + for func in vis_funcs: + ax = None + for cav in self.cav_manager.cavs[0]: + ax = getattr(cav, func)(ax, his_len=seq_len) + plt.savefig(f"{os.environ['HOME']}/Pictures/{func}_{seq_len}.png") + plt.close() + + + + + + + + diff --git a/cosense3d/agents/core/forward_runner.py b/cosense3d/agents/core/forward_runner.py new file mode 100644 index 00000000..57a521ac --- /dev/null +++ b/cosense3d/agents/core/forward_runner.py @@ -0,0 +1,99 @@ + +import math +import torch +from torch import nn + +from cosense3d.modules import build_module + + +class ForwardRunner(nn.Module): + def __init__(self, shared_modules, data_manager, dist=False, chunk_size=24, **kwargs): + super().__init__() + self.lidar_range = torch.tensor(data_manager.lidar_range) + self.data_manager = data_manager + self.dist = dist + # if the fwd items of a module exits the GPU capacity, run them in several mini batches + self.chunk_size = chunk_size + + module_dict = {} + self.module_keys = [] + for k, v in shared_modules.items(): + if 'type' not in v: + continue + v['dist'] = dist + module = build_module(v) + if module.freeze: + module.freeze_parameters() + module_dict[k] = module + self.module_keys.append(k) + + self.shared_modules = nn.ModuleDict(module_dict) + + def to_gpu(self, gpu_id): + for n, m in self.shared_modules.items(): + sync_func = m.to_gpu(gpu_id) + if sync_func is not None: + self.shared_modules[n] = sync_func(m) + + def gather_cav_ids(self, tasks): + return [t[0] for t in tasks] + + def forward(self, tasks, with_grad=True, **kwargs): + if with_grad: + self._forward(tasks, **kwargs) + else: + with torch.no_grad(): + self._forward(tasks, **kwargs) + + def _forward(self, tasks, **kwargs): + for task_name, task_list in tasks.items(): + module = getattr(self.shared_modules, task_name) + task_ids = self.gather_cav_ids(task_list) + n_task = len(task_ids) + s = self.chunk_size + if n_task > s and 0 < n_task % s < 4: + s = int(math.ceil(n_task / math.ceil(n_task / s))) + chunks = [task_ids[i:i + s] for i in range(0, len(task_ids), s)] + res = {k: [] for k in module.scatter_keys} + for tids in chunks: + data = self.data_manager.gather(tids, module.gather_keys) + cur_res = module(*data, **kwargs) + for k in module.scatter_keys: + res[k].extend(cur_res[k]) + self.data_manager.scatter(task_ids, res) + + def loss(self, tasks, **kwargs): + loss_dict = {} + loss = 0 + for task_name, task_list in tasks.items(): + module = getattr(self.shared_modules, task_name) + if module.freeze: + continue + cav_ids = self.gather_cav_ids(task_list) + data = self.data_manager.gather(cav_ids, module.scatter_keys + module.gt_keys) + ldict = module.loss(*data, **kwargs) + for k, v in ldict.items(): + prefix = task_name.replace('_head', '') + loss_dict[f'{prefix}.{k}'] = v + loss = loss + v + loss_dict['total_loss'] = loss + return loss, loss_dict + + def frame_loss(self, tasks, **kwargs): + loss_dict = {} + for task_name, task_list in tasks.items(): + module = getattr(self.shared_modules, task_name) + if module.freeze: + 
continue + cav_ids = self.gather_cav_ids(task_list) + data = self.data_manager.gather(cav_ids, module.scatter_keys + module.gt_keys) + ldict = module.loss(*data, **kwargs) + for k, v in ldict.items(): + prefix = task_name.replace('_head', '') + loss_dict[f'{prefix}.{k}'] = v + return loss_dict + + + + + diff --git a/cosense3d/agents/core/gui.py b/cosense3d/agents/core/gui.py new file mode 100644 index 00000000..748353db --- /dev/null +++ b/cosense3d/agents/core/gui.py @@ -0,0 +1,190 @@ + + +import functools +import os + +from PyQt5 import QtCore, QtGui, QtWidgets + +from cosense3d.agents.viewer.gl_viewer import GLViewer +from cosense3d.agents.viewer.output_viewer import OutputViewer +from cosense3d.agents.viewer.img_viewer import ImgViewer +from cosense3d.agents.viewer.img_anno3d_viewer import ImgAnno3DViewer + + +class GUI(QtWidgets.QMainWindow): + def __init__(self, mode, cfg) -> None: + super(GUI, self).__init__() + self.mode = mode + self.header_height = 30 + path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + self.css_dir = os.path.join(path, 'viewer', 'css') + self.data_keys = [ + 'scenario', 'frame', + 'points', 'img', 'bboxes2d', 'lidar2img', + 'global_labels', 'local_labels', 'global_pred_gt', + 'detection', 'detection_local', 'global_pred' + ] + self.setupUI(cfg) + self.setWindowTitle("Cosense3D") + + # Set window size to screen size + screen = QtWidgets.QDesktopWidget().screenGeometry() + width, height = screen.width(), screen.height() + self.setGeometry(0, 0, width, height) + + self.timer = QtCore.QTimer(self) + self.timer.timeout.connect(self.step) + self.data = None + self.colo_mode = 'united' + + def setupUI(self, cfg): + self.tabs = QtWidgets.QTabWidget() + + self.glViewer0 = GLViewer('MAINVIEW', self) + self.tabs.addTab(self.glViewer0, 'GLViewer') + + self.img_viewer = ImgViewer(**cfg.get('img_viewer', {})) + self.tabs.addTab(self.img_viewer, 'ImgViewer') + + self.img_anno3d_viewer = ImgAnno3DViewer(**cfg.get('img_anno3d_viewer', {})) + self.tabs.addTab(self.img_anno3d_viewer, 'ImgAnno3DViewer') + + self.output_viewer = OutputViewer(**cfg['output_viewer']) + self.tabs.addTab(self.output_viewer, 'OutputViewer') + self.data_keys.extend(self.output_viewer.gather_data_keys) + + self.setCentralWidget(self.tabs) + self.get_toolbar() + + def setRunner(self, runner): + self.runner = runner + + def initGUI(self): + # connect all events + self.connect_events_to_funcs() + + def get_toolbar(self): + self.toolbar = self.addToolBar("Toolbar") + self.infos = ['scene', 'frame', 'PCDcolor'] + self.tools = ['start', 'stop', 'step'] + self.visible_objects = ['localDet', 'globalDet', 'localGT', 'globalGT', 'globalPred', 'globalPredGT'] + + # add label combo pairs + for name in self.infos: + qlabel = QtWidgets.QLabel(f' {name[0].upper()}{name[1:]}:') + w1 = qlabel.sizeHint().width() + qlabel.setMinimumWidth(w1 + 25) + qlabel.setMaximumWidth(w1 + 50) + qcombo = QtWidgets.QComboBox() + w2 = qcombo.sizeHint().width() + qcombo.setMinimumWidth(w2 + 25) + qcombo.setMaximumWidth(w2 + 50) + if name=='PCDcolor': + qcombo.addItem('united') + qcombo.addItem('height') + qcombo.addItem('cav') + qcombo.addItem('time') + else: + qcombo.addItem('---------') + setattr(self, f'label_{name}', qlabel) + setattr(self, f'combo_{name}', qcombo) + setattr(self, f'cur_{name}', None) + + self.toolbar.addWidget(getattr(self, f'label_{name}')) + self.toolbar.addWidget(getattr(self, f'combo_{name}')) + + for name in self.tools: + bname = f'{name[0].upper()}{name[1:]}' + qbutton = QtWidgets.QToolButton() + 
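+            # tool buttons (Start/Stop/Step) drive the playback timer of the runner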
qbutton.setText(bname) + # qbutton.setIcon(QtGui.QIcon(f"./interface/ui/icons/{name}.png")) + w = qbutton.sizeHint().width() + 1 + qbutton.setMaximumWidth(w) + setattr(self, f'button_{name}', qbutton) + self.toolbar.addWidget(getattr(self, f'button_{name}')) + + for name in ['glcolor'] + self.visible_objects: + bname = f'{name[0].upper()}{name[1:]}' + qbutton = QtWidgets.QPushButton() + qbutton.setText(bname) + w = qbutton.sizeHint().width() + 1 + qbutton.setMaximumWidth(w) + setattr(self, f'button_{name}', qbutton) + self.toolbar.addWidget(getattr(self, f'button_{name}')) + + for name in self.visible_objects: + setattr(self, f"{name.lower()}_visible", False) + + self.button_glcolor.setStyleSheet("background-color: black; color: white") + + def change_visible(self, name): + button = getattr(self, f'button_{name}') + current_color = button.palette().button().color() + + if current_color != QtGui.QColor('lightblue'): + button.setStyleSheet("background-color: lightblue") + setattr(self, f"{name.lower()}_visible", True) + else: + button.setStyleSheet("background-color: #efefef") + setattr(self, f"{name.lower()}_visible", False) + self.refresh() + + def change_glcolor(self): + button = self.button_glcolor + current_color = button.palette().button().color() + if current_color == QtGui.QColor('black'): + button.setStyleSheet("background-color: white; color: black") + self.glViewer0.setBackgroundColor('w') + else: + button.setStyleSheet("background-color: black; color: white") + self.glViewer0.setBackgroundColor('k') + self.refresh() + + def change_color_mode(self): + self.colo_mode = self.combo_PCDcolor.currentText() + self.refresh() + + def connect_events_to_funcs(self): + self.combo_PCDcolor.currentIndexChanged.connect(self.change_color_mode) + self.button_step.clicked.connect(self.step) + self.button_start.clicked.connect(self.start) + self.button_stop.clicked.connect(self.stop) + self.tabs.currentChanged.connect(self.refresh) + self.button_glcolor.clicked.connect(self.change_glcolor) + for name in self.visible_objects: + if getattr(self, f"{name.lower()}_visible"): + self.change_visible(name) + getattr(self, f'button_{name}').clicked.connect( + functools.partial(self.change_visible, name=name)) + + def step(self): + self.runner.step() + self.data = self.runner.vis_data(self.data_keys) + self.refresh() + if self.runner.iter == self.runner.total_iter: + self.timer.stop() + + def refresh(self): + active_widget = self.tabs.currentWidget() + if self.data is not None: + visible_keys = [k for k in self.visible_objects if getattr(self, f"{k.lower()}_visible")] + active_widget.refresh(self.data, visible_keys=visible_keys, color_mode=self.colo_mode) + scene = list(self.data['scenario'].values())[0] + frame = list(self.data['frame'].values())[0] + # todo adapt scenario and frame selection + self.combo_frame.clear() + self.combo_frame.addItem(frame) + self.combo_scene.clear() + self.combo_scene.addItem(scene) + + def start(self): + self.timer.start(300) # Trigger the animate method every 100ms + + def stop(self): + self.timer.stop() + + + + + + diff --git a/cosense3d/agents/core/hooks.py b/cosense3d/agents/core/hooks.py new file mode 100644 index 00000000..2b496237 --- /dev/null +++ b/cosense3d/agents/core/hooks.py @@ -0,0 +1,589 @@ + + +import os +import time + +import torch +import torch.nn.functional as F +from importlib import import_module + +from cosense3d.ops.utils import points_in_boxes_gpu + + +class Hooks: + def __init__(self, cfg): + self.hooks = [] + if cfg is None: + return + for hook_cfg in 
cfg: + self.hooks.append( + globals()[hook_cfg['type']](**hook_cfg) + ) + + def __call__(self, runner, hook_stage, **kwargs): + for hook in self.hooks: + getattr(hook, hook_stage)(runner, **kwargs) + + def set_logger(self, logger): + for hook in self.hooks: + hook.set_logger(logger) + + +class BaseHook: + def __init__(self, **kwargs): + pass + + def pre_iter(self, runner, **kwargs): + pass + + def post_iter(self, runner, **kwargs): + pass + + def pre_epoch(self, runner, **kwargs): + pass + + def post_epoch(self, runner, **kwargs): + pass + + def set_logger(self, logger): + self.logger = logger + + +class MemoryUsageHook(BaseHook): + def __init__(self, device='cuda:0', **kwargs): + super().__init__(**kwargs) + self.device = device + + def post_iter(self, runner, **kwargs): + memory = torch.cuda.max_memory_allocated(self.device) / 1024 / 1024 + torch.cuda.empty_cache() + runner.logger.update(memory=memory) + + +class CPMStatisticHook(BaseHook): + def __init__(self, device='cuda:0', **kwargs): + super().__init__(**kwargs) + self.device = device + + def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'detection_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir + + def post_epoch(self, runner, **kwargs): + cpm_rec = runner.controller.cav_manager.cpm_size_recorder + thr = runner.controller.cav_manager.cavs[0][0].share_score_thr + ss = (f"########## CPM size @ {thr} ###########\n" + f"Mean: {cpm_rec.mean[0] * 4 / 1024:.2f} KB, Std: {cpm_rec.std[0] * 4 / 1024:.2f} KB") + print(ss) + self.logger.log(ss) + + +class TrainTimerHook(BaseHook): + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.elapsed_time = 0 + self.start_time = None + self.mean_time_per_itr = None + self.observations = 0 + + def pre_epoch(self, runner, **kwargs): + if self.start_time is None: + self.start_time = time.time() + self.last_time = time.time() + + def post_iter(self, runner, **kwargs): + cur_time = time.time() + self.elapsed_time = (cur_time - self.start_time) / 3600 + # total_run_iter = (runner.total_iter * (runner.epoch - runner.start_epoch)) + runner.iter + # time_per_iter = self.elapsed_time / total_run_iter + time_per_iter = (cur_time - self.last_time) / 3600 + m = self.observations + if self.mean_time_per_itr is None: + self.mean_time_per_itr = time_per_iter + else: + self.mean_time_per_itr = m / (m + 1) * self.mean_time_per_itr + 1 / (m + 1) * time_per_iter + iter_remain = runner.total_iter * (runner.total_epochs - runner.epoch + 1) - runner.iter + time_remain = self.mean_time_per_itr * iter_remain + runner.logger.update(t_remain=time_remain, t_used=self.elapsed_time) + self.last_time = cur_time + self.observations += 1 + + +class CheckPointsHook(BaseHook): + def __init__(self, max_ckpt=3, epoch_every=None, iter_every=None, **kwargs): + super().__init__(**kwargs) + self.max_ckpt = max_ckpt + self.epoch_every = epoch_every + self.iter_every = iter_every + + def post_epoch(self, runner, **kwargs): + if runner.gpu_id != 0: + return + self.save(runner, f'epoch{runner.epoch}.pth') + if runner.epoch > self.max_ckpt: + if (self.epoch_every is None or not + (runner.epoch - self.max_ckpt) % self.epoch_every == 0): + filename = os.path.join( + runner.logger.logdir, + f'epoch{runner.epoch - self.max_ckpt}.pth') + if os.path.exists(filename): + os.remove(filename) + + def post_iter(self, runner, **kwargs): + if runner.gpu_id != 0: + return + if self.iter_every is not None and runner.iter % self.iter_every == 0: + self.save(runner, f'latest.pth') + + 
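+    # The checkpoint written below bundles the epoch index with the model, optimizer and
+    # lr-scheduler state dicts. Minimal usage sketch (hedged: the list-of-dicts format follows
+    # the Hooks container above, it is not taken from the repo's actual config files):
+    #   hooks = Hooks([dict(type='MemoryUsageHook'),
+    #                  dict(type='CheckPointsHook', max_ckpt=3, iter_every=1000)])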
@staticmethod + def save(runner, name): + save_path = os.path.join(runner.logger.logdir, name) + print(f'Saving checkpoint to {save_path}.') + torch.save({ + 'epoch': runner.epoch, + 'model': runner.forward_runner.state_dict(), + 'optimizer': runner.optimizer.state_dict(), + 'lr_scheduler': runner.lr_scheduler.state_dict(), + }, save_path) + + +class DetectionNMSHook(BaseHook): + def __init__(self, nms_thr, pre_max_size, + det_key='detection', + **kwargs): + super().__init__(**kwargs) + self.nms_thr = nms_thr + self.pre_max_size = pre_max_size + self.nms = import_module('cosense3d.ops.iou3d_nms_utils').nms_gpu + self.det_key = det_key + self.defual_pred_keys = ['box', 'scr', 'lbl', 'idx'] + + def post_iter(self, runner, **kwargs): + detection_out = runner.controller.data_manager.gather_ego_data(self.det_key) + preds = [] + cav_ids = [] + for cav_id, values in detection_out.items(): + cav_ids.append(cav_id) + + boxes = values['preds']['box'] + scores = values['preds']['scr'] + labels = values['preds']['lbl'] + indices = values['preds']['idx'] # map index for retrieving features + + out = {} + if 'center' in values: + out['ctr'] = values['center'] + if 'conf' in values: + out['conf'] = values['conf'] + + if len(values['preds']['box']) == 0: + out.update({ + 'box': torch.zeros((0, 7), device=boxes.device), + 'scr': torch.zeros((0,), device=scores.device), + 'lbl': torch.zeros((0,), device=labels.device), + 'idx': torch.zeros(indices.shape[0] if isinstance(indices, torch.Tensor) else (0,), + device=indices.device), + }) + if 'pred' in values['preds']: + out['pred'] = torch.zeros((0, 2, 7), device=boxes.device) + else: + keep = self.nms( + boxes[..., :7], + scores, + thresh=self.nms_thr, + pre_maxsize=self.pre_max_size + ) + out.update({ + 'box': boxes[keep], + 'scr': scores[keep], + 'lbl': labels[keep], + 'idx': indices[keep], + }) + if 'pred' in values['preds'] and values['preds']['pred'] is not None: + out['pred'] = values['preds']['pred'][keep] + assert len(out['pred']) != len(out['box']) + preds.append(out) + + # from cosense3d.utils.vislib import draw_points_boxes_plt + # points = out['ctr'].detach().cpu().numpy() + # boxes = out['box'].detach().cpu().numpy() + # draw_points_boxes_plt( + # pc_range=[-140.8, -38.4, -3.0, 140.8, 38.4, 1.0], + # boxes_pred=boxes, + # points=points, + # filename="/home/yuan/Pictures/tmp.png" + # ) + + runner.controller.data_manager.scatter(cav_ids, {self.det_key: preds}) + + +class EvalDetectionBEVHook(BaseHook): + def __init__(self, pc_range, iou_thr=[0.5, 0.7], save_result=False, + det_key='detection', gt_key='global_bboxes_3d', **kwargs): + super().__init__(**kwargs) + self.iou_thr = iou_thr + self.pc_range = pc_range + self.save_result = save_result + self.det_key = det_key + self.gt_key = gt_key + self.result = {iou: {'tp': [], 'fp': [], 'gt': 0, 'scr': []} for iou in iou_thr} + self.eval_funcs = import_module('cosense3d.utils.eval_detection_utils') + + def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'detection_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir + + def post_iter(self, runner, **kwargs): + detection = runner.controller.data_manager.gather_ego_data(self.det_key) + gt_boxes = runner.controller.data_manager.gather_ego_data(self.gt_key) + + for i, (cav_id, preds) in enumerate(detection.items()): + if 'preds' in preds: + preds = preds['preds'] + preds['box'], preds['scr'], preds['lbl'], preds['idx'], preds['time'] = \ + self.filter_box_ranges(preds['box'], preds['scr'], 
preds['lbl'], preds['idx'], preds.get('time', None)) + cur_gt_boxes = self.filter_box_ranges(gt_boxes[cav_id])[0] + cur_points = runner.controller.data_manager.gather_batch(i, 'points') + + if self.save_result: + ego_key = cav_id + senario = runner.controller.data_manager.gather_ego_data('scenario')[ego_key] + frame = runner.controller.data_manager.gather_ego_data('frame')[ego_key] + filename = f"{senario}.{frame}.{ego_key.split('.')[1]}.pth" + result = {'detection': preds, + 'gt_boxes': cur_gt_boxes, + 'points': cur_points} + torch.save(result, os.path.join(self.logdir, filename)) + + for iou in self.iou_thr: + self.eval_funcs.caluclate_tp_fp( + preds['box'][..., :7], preds['scr'], cur_gt_boxes[..., :7], self.result, iou + ) + + def filter_box_ranges(self, boxes, scores=None, labels=None, indices=None, times=None): + mask = boxes.new_ones((len(boxes),)).bool() + if boxes.ndim == 3: + centers = boxes.mean(dim=1) + else: + centers = boxes[:, :3] + for i in range(3): + mask = mask & (centers[:, i] > self.pc_range[i]) & (centers[:, i] < self.pc_range[i + 3]) + boxes = boxes[mask] + if scores is not None: + scores = scores[mask] + if labels is not None: + labels = labels[mask] + if indices is not None: + try: + indices = indices[mask] + except: + print("Number of boxes doesn't match the number of indices") + if times is not None: + times = times[mask] + return boxes, scores, labels, indices, times + + def post_epoch(self, runner, **kwargs): + fmt_str = ("################\n" + "DETECTION RESULT\n" + "################\n") + out_dict = self.eval_funcs.eval_final_results( + self.result, + self.iou_thr, + global_sort_detections=True + ) + fmt_str += "OPV2V BEV Global sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + + print(fmt_str) + self.logger.log(fmt_str) + + def format_final_result(self, out_dict): + fmt_str = "" + for iou in self.iou_thr: + iou_str = f"{int(iou * 100)}" + fmt_str += f"AP@{iou_str}: {out_dict[f'ap_{iou_str}']:.3f}\n" + return fmt_str + + +class EvalDetectionHook(BaseHook): + def __init__(self, pc_range, iou_thr=[0.5, 0.7], metrics=['CoSense3D'], save_result=False, + det_key='detection', gt_key='global_bboxes_3d', **kwargs): + super().__init__(**kwargs) + self.iou_thr = iou_thr + self.pc_range = pc_range + self.save_result = save_result + self.det_key = det_key + self.gt_key = gt_key + for m in metrics: + assert m in ['OPV2V', 'CoSense3D'] + setattr(self, f'{m.lower()}_result', + {iou: {'tp': [], 'fp': [], 'gt': 0, 'scr': []} for iou in iou_thr}) + self.metrics = metrics + self.eval_funcs = import_module('cosense3d.utils.eval_detection_utils') + + def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'detection_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir + + def post_iter(self, runner, **kwargs): + detection = runner.controller.data_manager.gather_ego_data(self.det_key) + gt_boxes = runner.controller.data_manager.gather_ego_data(self.gt_key) + + for i, (cav_id, preds) in enumerate(detection.items()): + if 'preds' in preds: + preds = preds['preds'] + preds['box'], preds['scr'], preds['lbl'], preds['idx'], preds['time'] = \ + self.filter_box_ranges(preds['box'], preds['scr'], preds['lbl'], preds['idx'], preds.get('time', None)) + cur_gt_boxes = self.filter_box_ranges(gt_boxes[cav_id])[0] + cur_points = runner.controller.data_manager.gather_batch(i, 'points') + + if self.save_result: + ego_key = cav_id + senario = 
runner.controller.data_manager.gather_ego_data('scenario')[ego_key] + frame = runner.controller.data_manager.gather_ego_data('frame')[ego_key] + filename = f"{senario}.{frame}.{ego_key.split('.')[1]}.pth" + result = {'detection': preds, + 'gt_boxes': cur_gt_boxes, + 'points': cur_points} + torch.save(result, os.path.join(self.logdir, filename)) + + for iou in self.iou_thr: + if 'OPV2V' in self.metrics: + result_dict = getattr(self, f'opv2v_result') + self.eval_funcs.caluclate_tp_fp( + preds['box'][..., :7], preds['scr'], cur_gt_boxes[..., :7], result_dict, iou + ) + if 'CoSense3D' in self.metrics: + result_dict = getattr(self, f'cosense3d_result') + tp = self.eval_funcs.ops_cal_tp( + preds['box'][..., :7].detach(), cur_gt_boxes[..., :7].detach(), IoU_thr=iou + ) + result_dict[iou]['tp'].append(tp.cpu()) + result_dict[iou]['gt'] += len(cur_gt_boxes) + result_dict[iou]['scr'].append(preds['scr'].detach().cpu()) + + def filter_box_ranges(self, boxes, scores=None, labels=None, indices=None, times=None): + mask = boxes.new_ones((len(boxes),)).bool() + if boxes.ndim == 3: + centers = boxes.mean(dim=1) + else: + centers = boxes[:, :3] + for i in range(3): + mask = mask & (centers[:, i] > self.pc_range[i]) & (centers[:, i] < self.pc_range[i + 3]) + boxes = boxes[mask] + if scores is not None: + scores = scores[mask] + if labels is not None: + labels = labels[mask] + if indices is not None: + try: + indices = indices[mask] + except: + print("Number of boxes doesn't match the number of indices") + if times is not None: + times = times[mask] + return boxes, scores, labels, indices, times + + def post_epoch(self, runner, **kwargs): + fmt_str = ("################\n" + "DETECTION RESULT\n" + "################\n") + if 'OPV2V' in self.metrics: + result_dict = getattr(self, f'opv2v_result') + out_dict = self.eval_funcs.eval_final_results( + result_dict, + self.iou_thr, + global_sort_detections=True + ) + fmt_str += "OPV2V BEV Global sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + + out_dict = self.eval_funcs.eval_final_results( + result_dict, + self.iou_thr, + global_sort_detections=False + ) + fmt_str += "OPV2V BEV Local sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + if 'CoSense3D' in self.metrics: + out_dict = self.eval_cosense3d_final() + fmt_str += "CoSense3D Global sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + print(fmt_str) + self.logger.log(fmt_str) + + def format_final_result(self, out_dict): + fmt_str = "" + for iou in self.iou_thr: + iou_str = f"{int(iou * 100)}" + fmt_str += f"AP@{iou_str}: {out_dict[f'ap_{iou_str}']:.3f}\n" + # fmt_str += f"Precision@{iou_str}: {out_dict[f'mpre_{iou_str}']:.3f}\n" + # fmt_str += f"Recall@{iou_str}: {out_dict[f'mrec_{iou_str}']:.3f}\n" + return fmt_str + + def eval_cosense3d_final(self): + out_dict = {} + result_dict = getattr(self, f'cosense3d_result') + for iou in self.iou_thr: + scores = torch.cat(result_dict[iou]['scr'], dim=0) + tps = torch.cat(result_dict[iou]['tp'], dim=0) + n_pred = len(scores) + n_gt = result_dict[iou]['gt'] + + ap, mpre, mrec, _ = self.eval_funcs.cal_ap_all_point(scores, tps, n_pred, n_gt) + iou_str = f"{int(iou * 100)}" + out_dict.update({f'ap_{iou_str}': ap, + f'mpre_{iou_str}': mpre, + f'mrec_{iou_str}': mrec}) + return out_dict + + +class EvalBEVSemsegHook(BaseHook): + def __init__(self, + test_range, + test_res=0.4, + save_result=False, + eval_static=True, + bev_semseg_key='bev_semseg', + 
gt_bev_key='bevmap', + gt_boxes_key='global_bboxes_3d', + **kwargs): + super().__init__(**kwargs) + self.test_range = test_range + self.test_res = test_res + self.save_result = save_result + self.eval_static = eval_static + self.bev_semseg_key = bev_semseg_key + self.gt_bev_key = gt_bev_key + self.gt_boxes_key = gt_boxes_key + self.thrs = torch.arange(0.1, 1.1, 0.1) + self.sx = int(round((self.test_range[3] - self.test_range[0]) / self.test_res)) + self.sy = int(round((self.test_range[4] - self.test_range[1]) / self.test_res)) + + self.res_dict = { + 'iou_dynamic_all': [], + 'iou_dynamic_obs': [], + 'iou_static_all': [], + 'iou_static_obs': [], + } + + def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'bev_semseg_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir + + def post_iter(self, runner, **kwargs): + scene_tokens = runner.controller.data_manager.gather_ego_data('scene_tokens') + frame = runner.controller.data_manager.gather_ego_data('frame') + semseg = runner.controller.data_manager.gather_ego_data(self.bev_semseg_key) + gt_bevmaps = runner.controller.data_manager.gather_ego_data(self.gt_bev_key) + gt_boxes = runner.controller.data_manager.gather_ego_data(self.gt_boxes_key) + for i, (cav_id, preds) in enumerate(semseg.items()): + token = f'{scene_tokens[cav_id]}.{frame[cav_id]}' + gt_dynamic_map = self.gt_dynamic_map(gt_boxes[cav_id]) + self.cal_ious(preds, gt_dynamic_map, 'dynamic', token) + if self.eval_static: + gt_static_map = self.gt_static_map(gt_bevmaps[cav_id]) + self.cal_ious(preds, gt_static_map, 'static', token) + + def cal_ious(self, preds, gt_map, tag, token=None): + conf = self.crop_map(preds[f'conf_map_{tag}']) + unc = self.crop_map(preds[f'unc_map_{tag}']) + obs_mask = self.crop_map(preds[f'obs_mask_{tag}']) + self.res_dict[f'iou_{tag}_all'].append(self.iou(conf, unc, gt_map)) + self.res_dict[f'iou_{tag}_obs'].append(self.iou(conf, unc, gt_map, obs_mask)) + + if self.save_result: + img = torch.cat([gt_map, unc, conf[..., 1]], dim=0).detach().cpu().numpy() + import matplotlib.pyplot as plt + plt.imshow(img.T) + plt.savefig(os.path.join(self.logdir, f'{token}.{tag}.jpg')) + plt.close() + + def iou(self, conf, unc, gt, obs_mask=None): + ious = [] + for thr in self.thrs: + if obs_mask is None: + pos_mask = torch.argmax(conf, dim=-1).bool() + pos_mask = torch.logical_and(pos_mask, unc <= thr) + gt_ = gt + else: + pos_mask = torch.argmax(conf[obs_mask], dim=-1).bool() + pos_mask = torch.logical_and(pos_mask, unc[obs_mask] <= thr) + gt_ = gt[obs_mask] + mi = torch.logical_and(pos_mask, gt_).sum() + mu = torch.logical_or(pos_mask, gt_).sum() + ious.append(mi / mu) + return torch.stack(ious, dim=0) + + def gt_dynamic_map(self, boxes): + # filter box range + mask = boxes.new_ones((len(boxes),)).bool() + dynamic_map = torch.ones((self.sx, self.sy), device=boxes.device) + centers = boxes[:, :3] + for i in range(3): + mask = mask & (centers[:, i] > self.test_range[i]) & (centers[:, i] < self.test_range[i + 3]) + boxes = boxes[mask] + if len(boxes) > 0: + indices = torch.stack(torch.where(dynamic_map), dim=1) + xy = indices.float() + xy = (xy + 0.5) * self.test_res + xy[:, 0] += self.test_range[0] + xy[:, 1] += self.test_range[1] + xyz = F.pad(xy, (1, 1), 'constant', 0.0) + boxes = F.pad(boxes, (1, 0), 'constant', 0.0) + boxes[:, 3] = 0 + boxes_decomposed, box_idx_of_pts = points_in_boxes_gpu( + xyz, boxes, batch_size=1 + ) + inds = indices[box_idx_of_pts >= 0].T + dynamic_map[inds[0], inds[1]] = 0 + dynamic_map = 
torch.logical_not(dynamic_map) + return dynamic_map + + def gt_static_map(self, bevmap): + # map has higher resolution, downsample 2x + # bevmap = torch.flip(bevmap, [0]) + return bevmap[::2, ::2] + + def crop_map(self, bevmap): + sx, sy = bevmap.shape[:2] + sx_crop = (sx - self.sx) // 2 + sy_crop = (sy - self.sy) // 2 + return bevmap[sx_crop:-sx_crop, sy_crop:-sy_crop] + + def post_epoch(self, runner, **kwargs): + fmt_str = ("#################\n" + "BEV SEMSEG RESULT\n" + "#################\n") + fmt_str += f"{'thr':18s} | " + " ".join([f"{v:4.1f} " for v in self.thrs]) + "\n" + fmt_str += "-" * (23 + 70) + "\n" + for k, vs in self.res_dict.items(): + vs = torch.stack(vs, dim=0).mean(dim=0) * 100 + if isinstance(vs, int): + continue + s1 = f"{k:18s} | " + if isinstance(vs, float): + s2 = f"{vs:4.1f} \n" + else: + s2 = " ".join([f"{v:4.1f} " for v in vs]) + "\n" + fmt_str += s1 + s2 + print(fmt_str) + self.logger.log(fmt_str) + + + + + + diff --git a/cosense3d/agents/core/task_manager.py b/cosense3d/agents/core/task_manager.py new file mode 100644 index 00000000..2b4f5248 --- /dev/null +++ b/cosense3d/agents/core/task_manager.py @@ -0,0 +1,59 @@ + + +from collections import OrderedDict + + +class TaskManager: + def __init__(self): + pass + + def summarize_tasks(self, tasks): + tasks_out = {0: {'no_grad': [], 'with_grad': []}, + 1: {'no_grad': [], 'with_grad': []}, + 2: {'no_grad': [], 'with_grad': []}, + 3: {'loss': []}} + no_grad0, no_grad1, no_grad2, _ = self.reformat_tasks(tasks['no_grad']) + with_grad0, with_grad1, with_grad2, _ = self.reformat_tasks(tasks['with_grad']) + tasks_out[0]['no_grad'] = no_grad0 + tasks_out[0]['with_grad'] = with_grad0 + tasks_out[1]['no_grad'] = no_grad1 + tasks_out[1]['with_grad'] = with_grad1 + tasks_out[2]['no_grad'] = no_grad2 + tasks_out[2]['with_grad'] = with_grad2 + tasks_out[3]['loss'] = self.reformat_tasks(tasks['loss'])[3] + return tasks_out + + def summarize_loss_tasks(self, tasks): + return self.reformat_tasks(tasks) + + def reformat_tasks(self, task_list): + task_out = ({}, {}, {}, {}) # two stages + if len(task_list) == 0: + return task_out + for task in task_list: + cav_id, task_label, args = task + stage_order, task_name = task_label.split(':') + stage = int(stage_order[0]) + order = int(stage_order[1:]) + task_name = task_name.strip() + if order not in task_out[stage]: + task_out[stage][order] = {} + if task_name not in task_out[stage][order]: + task_out[stage][order][task_name] = [] + task_out[stage][order][task_name].append((cav_id, args)) + + task_out = [self.task_to_ordered_dict(tasks) for tasks in task_out] + return task_out + + def task_to_ordered_dict(self, tasks): + orders = sorted(tasks) + ordered_task = OrderedDict() + for i in orders: + for k, v in tasks[i].items(): + ordered_task[k] = v + return ordered_task + + + + + diff --git a/cosense3d/agents/core/test_runner.py b/cosense3d/agents/core/test_runner.py new file mode 100644 index 00000000..3b3c6886 --- /dev/null +++ b/cosense3d/agents/core/test_runner.py @@ -0,0 +1,76 @@ + + +import os, glob, logging +from tqdm import tqdm + +from cosense3d.utils.train_utils import * +from cosense3d.utils.logger import TestLogger +from cosense3d.agents.core.base_runner import BaseRunner + + +class TestRunner(BaseRunner): + def __init__(self, + load_from=None, + logdir=None, + **kwargs + ): + super().__init__(**kwargs) + ckpt = self.load(load_from) + self.progress_bar = tqdm(total=self.total_iter) + self.setup_logger(ckpt, logdir) + self.forward_runner.eval() + + def setup_logger(self, ckpt, 
logdir): + if logdir is None: + logdir = ckpt[:-4] + else: + logdir = os.path.join(logdir, f'test_{os.path.basename(ckpt)[:-4]}') + self.logger = TestLogger(logdir) + self.hooks.set_logger(self.logger) + + def load(self, load_from): + assert load_from is not None, "load path not given." + assert os.path.exists(load_from), f'resume path does not exist: {load_from}.' + if os.path.isfile(load_from): + ckpt = load_from + else: + ckpts = glob.glob(os.path.join(load_from, 'epoch*.pth')) + if len(ckpts) > 0: + epochs = [int(os.path.basename(ckpt)[5:-4]) for ckpt in ckpts] + max_idx = epochs.index(max(epochs)) + ckpt = ckpts[max_idx] + elif os.path.exists(os.path.join(load_from, 'last.pth')): + ckpt = os.path.join(load_from, 'last.pth') + else: + raise IOError('No checkpoint found.') + logging.info(f"Resuming the model from checkpoint: {ckpt}") + ckpt_dict = torch.load(ckpt) + load_model_dict(self.forward_runner, ckpt_dict['model']) + return ckpt + + def run(self): + self.hooks(self, 'pre_epoch') + for data in self.dataloader: + self.run_itr(data) + self.progress_bar.close() + self.hooks(self, 'post_epoch') + + def step(self): + data = self.next_batch() + self.run_itr(data) + if self.iter == self.total_iter: + self.hooks(self, 'post_epoch') + + def run_itr(self, data): + # if self.iter > 140: + # print('d') + self.hooks(self, 'pre_iter') + load_tensors_to_gpu(data) + self.controller.test_forward(data) + self.hooks(self, 'post_iter') + self.iter += 1 + self.progress_bar.update(1) + + + + diff --git a/cosense3d/agents/core/train_runner.py b/cosense3d/agents/core/train_runner.py new file mode 100644 index 00000000..03b5f546 --- /dev/null +++ b/cosense3d/agents/core/train_runner.py @@ -0,0 +1,145 @@ + + +import os, glob, logging +from datetime import datetime + +from torch.nn.parallel import DistributedDataParallel as DDP + +from cosense3d.utils.train_utils import * +from cosense3d.utils.lr_scheduler import build_lr_scheduler +from cosense3d.utils.logger import LogMeter +from cosense3d.utils.misc import ensure_dir +from cosense3d.agents.core.base_runner import BaseRunner +from cosense3d.agents.utils.deco import save_ckpt_on_error + + +class TrainRunner(BaseRunner): + def __init__(self, + max_epoch, + optimizer, + lr_scheduler, + gpus=0, + resume_from=None, + load_from=None, + run_name='default', + log_dir='work_dir', + use_wandb=False, + debug=False, + **kwargs + ): + super().__init__(**kwargs) + self.gpus = gpus + self.gpu_id = 0 + self.dist = False + self.debug = debug + if gpus > 0: + self.dist = True + self.gpu_id = int(os.environ.get("LOCAL_RANK", 0)) + self.forward_runner.to_gpu(self.gpu_id) + self.forward_runner = DDP(self.forward_runner, device_ids=[self.gpu_id]) + self.optimizer = build_optimizer(self.forward_runner, optimizer) + self.lr_scheduler = build_lr_scheduler(self.optimizer, lr_scheduler, + len(self.dataloader)) + self.total_epochs = max_epoch + self.start_epoch = 1 + + self.resume(resume_from, load_from) + self.setup_logger(resume_from, run_name, log_dir, use_wandb) + + def setup_logger(self, resume_from, run_name, log_dir, use_wandb): + if resume_from is not None: + if os.path.isfile(resume_from): + log_path = os.path.dirname(resume_from) + else: + log_path = resume_from + else: + now = datetime.now().strftime('%m-%d-%H-%M-%S') + run_name = run_name + '_' + now + log_path = os.path.join(log_dir, run_name) + ensure_dir(log_path) + wandb_project_name = run_name if use_wandb else None + self.logger = LogMeter(self.total_iter, log_path, log_every=self.log_every, + 
wandb_project=wandb_project_name) + + def resume(self, resume_from, load_from): + if resume_from is not None or load_from is not None: + load_path = resume_from if resume_from is not None else load_from + assert os.path.exists(load_path), f'resume/load path does not exist: {resume_from}.' + if os.path.isdir(load_path): + ckpts = glob.glob(os.path.join(load_path, 'epoch*.pth')) + if len(ckpts) > 0: + epochs = [int(os.path.basename(ckpt)[5:-4]) for ckpt in ckpts] + max_idx = epochs.index(max(epochs)) + ckpt = ckpts[max_idx] + elif os.path.exists(os.path.join(load_path, 'last.pth')): + ckpt = os.path.join(load_path, 'last.pth') + else: + raise IOError(f'No checkpoint found in directory {load_path}.') + elif os.path.isfile(load_path): + ckpt = load_path + else: + raise IOError(f'Failed to load checkpoint from {load_path}.') + logging.info(f"Resuming the model from checkpoint: {ckpt}") + ckpt = torch.load(ckpt) + load_model_dict(self.forward_runner, ckpt['model']) + if resume_from is not None: + self.start_epoch = ckpt['epoch'] + 1 + self.epoch = ckpt['epoch'] + 1 + if 'lr_scheduler' in ckpt: + self.lr_scheduler.load_state_dict(ckpt['lr_scheduler']) + try: + if 'optimizer' in ckpt: + self.optimizer.load_state_dict(ckpt['optimizer']) + except: + warnings.warn("Cannot load optimizer state_dict, " + "there might be training parameter changes, " + "please consider using 'load-from'.") + + def run(self): + with torch.autograd.set_detect_anomaly(True): + for i in range(self.start_epoch, self.total_epochs + 1): + self.hooks(self, 'pre_epoch') + self.run_epoch() + self.hooks(self, 'post_epoch') + self.lr_scheduler.step_epoch(i) + self.epoch += 1 + self.iter = 1 + + def step(self): + data = self.next_batch() + self.run_itr(data) + + def run_epoch(self): + if self.dist: + self.dataloader.sampler.set_epoch(self.epoch) + for data in self.dataloader: + # print(f'{self.gpu_id}: run_itr{self.iter}: 0') + self.hooks(self, 'pre_iter') + self.run_itr(data) + self.hooks(self, 'post_iter') + + @save_ckpt_on_error + def run_itr(self, data): + load_tensors_to_gpu(data, self.gpu_id) + self.optimizer.zero_grad() + total_loss, loss_dict = self.controller.train_forward( + data, epoch=self.epoch, itr=self.iter, gpu_id=self.gpu_id) + total_loss.backward() + + grad_norm = clip_grads(self.controller.parameters) + loss_dict['grad_norm'] = grad_norm + # Updating parameters + self.optimizer.step() + + self.lr_scheduler.step_itr(self.iter + self.epoch * self.total_iter) + + if self.logger is not None and self.gpu_id == 0: + # rec_lr = self.lr_scheduler.optimizer.param_groups[0]['lr'] + rec_lr = self.lr_scheduler.get_last_lr()[0] + self.logger.log(self.epoch, self.iter, rec_lr, **loss_dict) + + del data + self.iter += 1 + + + diff --git a/cosense3d/agents/core/vis_runner.py b/cosense3d/agents/core/vis_runner.py new file mode 100644 index 00000000..14983f98 --- /dev/null +++ b/cosense3d/agents/core/vis_runner.py @@ -0,0 +1,63 @@ + + +import os, glob, logging +from tqdm import tqdm +from datetime import datetime + +from cosense3d.utils.train_utils import * +from cosense3d.utils.logger import TestLogger +from cosense3d.utils.misc import ensure_dir, setup_logger +from cosense3d.agents.core.base_runner import BaseRunner + + +class VisRunner(BaseRunner): + def __init__(self, + **kwargs + ): + super().__init__(**kwargs) + self.progress_bar = tqdm(total=self.total_iter) + + def load(self, load_from): + assert load_from is not None, "load path not given." + assert os.path.exists(load_from), f'resume path does not exist: {load_from}.' 
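+        # Checkpoint resolution: a direct path to a .pth file is used as-is;
+        # a directory is searched for the highest-numbered epoch*.pth, with
+        # last.pth as the fallback, mirroring TestRunner.load above.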
+ if os.path.isfile(load_from): + ckpt = load_from + else: + ckpts = glob.glob(os.path.join(load_from, 'epoch*.pth')) + if len(ckpts) > 0: + epochs = [int(os.path.basename(ckpt)[5:-4]) for ckpt in ckpts] + max_idx = epochs.index(max(epochs)) + ckpt = ckpts[max_idx] + elif os.path.exists(os.path.join(load_from, 'last.pth')): + ckpt = os.path.join(load_from, 'last.pth') + else: + raise IOError('No checkpoint found.') + logging.info(f"Resuming the model from checkpoint: {ckpt}") + ckpt_dict = torch.load(ckpt) + load_model_dict(self.forward_runner, ckpt_dict['model']) + return ckpt + + def run(self): + for data in self.dataloader: + self.run_itr(data) + self.progress_bar.close() + + def step(self): + data = self.next_batch() + self.run_itr(data) + + def run_itr(self, data): + self.hooks(self, 'pre_iter') + if data['scenario'][0][0] == '10.0' and data['frame'][0][0] == '018076': + print('d') + load_tensors_to_gpu(data) + self.controller.vis_forward(data) + + self.hooks(self, 'post_iter') + self.iter += 1 + self.progress_bar.update(1) + + + + + diff --git a/cosense3d/agents/utils/__init__.py b/cosense3d/agents/utils/__init__.py new file mode 100644 index 00000000..fd40910d --- /dev/null +++ b/cosense3d/agents/utils/__init__.py @@ -0,0 +1,4 @@ + + + + diff --git a/cosense3d/agents/utils/deco.py b/cosense3d/agents/utils/deco.py new file mode 100644 index 00000000..7bbd54fc --- /dev/null +++ b/cosense3d/agents/utils/deco.py @@ -0,0 +1,17 @@ + + +from cosense3d.agents.core.hooks import CheckPointsHook + + +def save_ckpt_on_error(func): + def wrapper(*args, **kwargs): + try: + result = func(*args, **kwargs) + return result + except Exception as e: + CheckPointsHook.save(args[0], f'debug_ep{args[0].epoch}.pth') + print(f"Exception caught in {func.__name__}: {e}") + raise e + return wrapper + + diff --git a/cosense3d/agents/utils/transform.py b/cosense3d/agents/utils/transform.py new file mode 100644 index 00000000..bf1d95f3 --- /dev/null +++ b/cosense3d/agents/utils/transform.py @@ -0,0 +1,367 @@ + + +import torch +import numpy as np +from scipy.spatial.transform.rotation import Rotation as R +from torch_scatter import scatter_mean + +from cosense3d.utils import pclib, box_utils +from cosense3d.modules.utils.common import limit_period + + +def add_rotate(tf, rot): + if isinstance(rot, list) and len(rot) == 3: + # param: [roll, pitch, yaw] in radian + rot = pclib.rotation_matrix(rot, degrees=False) + rot = torch.from_numpy(rot).to(tf.device) + tf[:3, :3] = rot @ tf[:3, :3] + elif isinstance(rot, torch.Tensor): + assert rot.shape[0] == 4 + tf = rot.to(tf.device) @ tf + else: + raise NotImplementedError + return tf + + +def add_flip(tf, flip_idx, flip_axis='xy'): + # flip_idx =1 : flip x + # flip_idx =2 : flip y + # flip_idx =3 : flip x & y + rot = torch.eye(4).to(tf.device) + # flip x + if 'x' in flip_axis and (flip_idx == 1 or flip_idx == 3): + rot[0, 0] *= -1 + # flip y + if 'y' in flip_axis and (flip_idx == 2 or flip_idx == 3): + rot[1, 1] *= -1 + tf = rot @ tf + return tf + + +def add_scale(tf, scale_ratio): + scale = torch.eye(4).to(tf.device) + scale[[0, 1, 2], [0, 1, 2]] = scale_ratio + tf = scale @ tf + return tf + + +def apply_transform(data, transform, key): + if (transform.cpu() == torch.eye(4)).all(): + return + if key == 'points': + C = data['points'].shape[-1] + points = data['points'][:, :3] + points = torch.cat([points, torch.ones_like(points[:, :1])], dim=-1).T + points = (transform @ points).T + + if C > 3: + data['points'] = torch.cat([points[:, :3], + data['points'][:, 3:]], dim=-1) 
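+            # only xyz is transformed; the remaining channels
+            # (e.g. intensity, timestamp) are re-attached unchanged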
+ else: + data['points'] = points + elif 'annos_global' == key or 'annos_local' == key: + box_key = f"{key.split('_')[1]}_bboxes_3d" + if box_key not in data or data[box_key] is None: + return + boxes = data[box_key] + data[box_key][:, :7] = box_utils.transform_boxes_3d(boxes[:, :7], transform, mode=7) + elif key == 'annos_global_pred': + preds = data['bboxes_3d_pred'] + boxes = data['global_bboxes_3d'][..., :7].detach().clone() + boxes = boxes.unsqueeze(0).repeat(2, 1, 1) + boxes[..., [0, 1, 2, 6]] = data['bboxes_3d_pred'] + boxes = box_utils.transform_boxes_3d(boxes.view(-1, 7), transform, mode=7) + data['bboxes_3d_pred'] = boxes[..., [0, 1, 2, 6]].reshape(*preds.shape) + elif key == 'img': + for i in range(len(data['img'])): + data['extrinsics'][i] = data['extrinsics'][i] @ transform.inverse() + data['lidar2img'][i] = data['intrinsics'][i] @ data['extrinsics'][i] + elif key == 'bev_tgt_pts' and key in data: + if key not in data or data[key] is None: + return + points = data['bev_tgt_pts'].clone() + points[:, 2] = 0 + points = torch.cat([points, torch.ones_like(points[:, :1])], dim=-1).T + points = (transform @ points).T + data['bev_tgt_pts'][:, :2] = points[:, :2] + elif 'roadline' in key and key in data: + if key not in data or data[key] is None: + return + points = data[key][:, :2].clone() + points = torch.cat([points, torch.ones_like(points)], dim=-1).T + points[2] = 0 + points = (transform @ points).T + data[key][:, :2] = points[:, :2] + + +def filter_range(data, lidar_range, key): + if 'points' in key: + mask = filter_range_mask(data[key], lidar_range) + points = data[key][mask] + if len(points) == 0: + # pad empty point cloud with random points to ensure batch norm validity + points = data[key].new_zeros((8, points.shape[-1])) + points[:, :2] = torch.rand_like(points[:, :2]) * 2 - 1 + points[:, 3] = -1 + points[:, -1] = data[key][:, -1].min() + data[key] = points + elif 'annos_global' == key or 'annos_local' == key: + coor = key.split('_')[1] + if f'{coor}_bboxes_3d' not in data or data[f'{coor}_bboxes_3d'] is None: + return + mask = filter_range_mask(data[f'{coor}_bboxes_3d'][:, :3], lidar_range) + data[f'{coor}_bboxes_3d'] = data[f'{coor}_bboxes_3d'][mask] + data[f'{coor}_labels_3d'] = data[f'{coor}_labels_3d'][mask] + data[f'{coor}_bboxes_id'] = data[f'{coor}_bboxes_id'][mask] + data[f'{coor}_names'] = [data[f'{coor}_names'][i] for i, m in enumerate(mask) if m] + if coor == 'global' and 'bboxes_3d_pred' in data: + data['bboxes_3d_pred'] = data['bboxes_3d_pred'][:, mask] + + +def filter_range_mask(points, lidar_range, eps=1e-4): + lr = lidar_range.to(points.device) + mask = (points[:, :3] > lr[:3].view(1, 3) + eps) & (points[:, :3] < lr[3:].view(1, 3) - eps) + return mask.all(dim=-1) + + +def generate_bev_tgt_pts(points, data, transform=None, sam_res=0.4, map_res=0.2, range=50, + max_num_pts=5000, discrete=False): + if 'bevmap' not in data or data['bevmap'] is None: + return None + bevmap = data['bevmap'] + bevmap_coor = data['bevmap_coor'] + sx, sy = bevmap.shape[:2] + points2d = points[:, :2] + points2d = points2d[(points2d.abs() <= range).all(1)] + device = points2d.device + + # sample random points + offsets = torch.randn((len(points2d), 10, 2), device=device) * 3 + points2d = (points2d.reshape(-1, 1, 2) + offsets).reshape(-1, 2) + points2d = torch.unique(torch.floor(points2d / sam_res).int(), dim=0) * sam_res + if not discrete: + points2d = points2d + torch.randn_like(points2d) + + # transform points to global coordinates + if transform is not None: + points = 
torch.cat([points2d, + torch.zeros_like(points2d[:, :1]), + torch.ones_like(points2d[:, :1])], + dim=-1) + points = transform @ points.T + else: + points = points2d.T + + xs = torch.floor((points[0] - bevmap_coor[0]) / map_res).int() + ys = torch.floor((points[1] - bevmap_coor[1]) / map_res).int() + xs = torch.clamp(xs, 0, sx - 1).long() + ys = torch.clamp(ys, 0, sy - 1).long() + road_mask = bevmap[xs, ys] + + bev_pts = torch.cat([points2d, road_mask.unsqueeze(1)], dim=1) + return bev_pts[torch.randperm(len(bev_pts))[:max_num_pts]] + + +class DataOnlineProcessor: + @staticmethod + def update_transform_with_aug(transform, aug_params): + if 'rot' in aug_params: + transform = add_rotate(transform, aug_params['rot']) + if 'flip' in aug_params: + transform = add_flip(transform, **aug_params['flip']) + if 'scale' in aug_params: + transform = add_scale(transform, aug_params['scale']) + return transform + + @staticmethod + def apply_transform(data, transform, apply_to=['points']): + for k in apply_to: + apply_transform(data, transform, k) + + @staticmethod + def cav_aug_transform(data, transform, aug_params, + apply_to=['points', 'imgs', 'annos_global']): + # augmentation + if aug_params is not None: + transform = DataOnlineProcessor.update_transform_with_aug(transform, aug_params) + + DataOnlineProcessor.apply_transform(data, transform, apply_to) + + @staticmethod + def filter_range(data, lidar_range, apply_to: list): + for k in apply_to: + filter_range(data, lidar_range, k) + + @staticmethod + @torch.no_grad() + def free_space_augmentation(data, d: float=10.0, h: float=-1.5, step: float=1.5, res=0.25): + lidar = data['points'] + # get point lower than z_min=1.5m + m = lidar[:, 2] < h + points = lidar[m][:, :3] + + # generate free space points based on points + dists = torch.norm(points[:, :2], dim=1).reshape(-1, 1) + delta_d = torch.arange(1, d, step, + device=lidar.device).reshape(1, -1) + steps = delta_d.shape[1] + tmp = (dists - delta_d) / dists # Nxsteps + xyz_new = points[:, None, :] * tmp[:, :, None] # Nxstepsx3 + + # 1.remove free space points with negative distances to lidar center + # 2.remove free space points higher than z_min + # 3.remove duplicated points with resolution 1m + xyz_new = xyz_new[tmp > 0] + xyz_new = xyz_new[(xyz_new[..., 2] < h)] + xyz_new = xyz_new[torch.randperm(len(xyz_new))] + selected = torch.unique(torch.floor(xyz_new * res).long(), return_inverse=True, dim=0)[1] + xyz_new = scatter_mean(src=xyz_new, index=selected, dim=0) + + # pad free space point intensity as -1 + xyz_new = torch.cat([xyz_new, - torch.ones_like(xyz_new[:, :1])], dim=-1) + data['points'] = torch.cat([lidar, xyz_new], dim=0) + + @staticmethod + @torch.no_grad() + def adaptive_free_space_augmentation(data: dict, min_h: float=-1.5, steps: int=20, + alpha: float=0.05, res: float=0.5, time_idx: int=None): + r""" + Add free space points according to the distance of points to the origin. + + .. raw:: html + +
+           <pre>
+           lidar origin ->  *
+                         *  *
+                      *     * h
+                   *  ele   *
+                 ************
+                        d
+           </pre>
+
+ + Assume the :math:`\theta = \frac{\\pi}{2} - \text{ele}` (elevation angle), + :math:`\alpha` = average angle between two lidar rings, + :math:`d_k` is the ground distance of the :math:`n_{th}` lidar ring to lidar origin, :math:`k=1,...,n`, + :math:`\delta_d` is the distance between two neighboring lidar rings, + then + + .. math:: + d &= h \tan(\theta) \\ + \delta_d &= d_n - d_{n-1} = d_n - h\tan(\arctan(\frac{h}{d_n}) - \alpha) + + we sample free space points in the ground distance of :math:`\delta_d` relative to each ring + with the given 'step' distance. + + :param data: input data dict containing 'points'. + :param min_h: minimum sample height relative to lidar origin. Default is -1.5. + :param steps: number of points to be sampled for each lidar ray. Default is 20. + :param alpha: average angle offset between two neighboring lidar casting rays. Default is 0.05. + :param res: resolution for down-sampling the free space points. Default is 0.5. + :param time_idx: if provided, time will be copied from the original points to free space points. + :return: + updated data. + """ + + lidar = data['points'] + # get point lower than z_min=1.5m + m = lidar[:, 2] < min_h + points = lidar[m] + + # generate free space points based on points + dn = torch.norm(points[:, :2], dim=1).view(-1, 1) + dn1 = - points[:, 2:3] * torch.tan(torch.atan2(dn, -points[:, 2:3]) - alpha) + delta_d = dn - dn1 + steps_arr = torch.linspace(0, 1, steps + 1)[:-1].view(1, steps).to(delta_d.device) + tmp = (dn - steps_arr * delta_d) / dn # Nxsteps + xyz_new = points[:, None, :3] * tmp[:, :, None] # Nxstepsx3 + if time_idx is not None: + times = points[:, time_idx].view(-1, 1, 1).repeat(1, steps, 1) + xyz_new = torch.cat([xyz_new, times], dim=-1) + + # 1.remove free space points with negative distances to lidar center + # 2.remove free space points higher than z_min + # 3.remove duplicated points with resolution 1m + xyz_new = xyz_new[tmp > 0] + # xyz_new = xyz_new[(xyz_new[..., 2] < min_h)] + xyz_new = xyz_new[torch.randperm(len(xyz_new))] + uniq, selected = torch.unique(torch.floor(xyz_new[..., :3] * res).long(), return_inverse=True, dim=0) + # xyz = torch.zeros_like(xyz_new[:len(uniq)]) + tmin = xyz_new[:, -1].min() + xyz_new[:, -1] -= tmin + xyz_new = scatter_mean(src=xyz_new, index=selected, dim=0) + xyz_new[:, -1] += tmin + + # pad free space point intensity as -1 + xyz_new = torch.cat([xyz_new[:, :3], - torch.ones_like(xyz_new[:, :1]), xyz_new[:, 3:]], dim=-1) + pad_dim = lidar.shape[-1] - xyz_new.shape[-1] + if pad_dim > 0: + xyz_new = torch.cat([xyz_new, torch.zeros_like(xyz_new[:, :1]).repeat(1, pad_dim)], dim=-1) + data['points'] = torch.cat([lidar, xyz_new], dim=0) + + @staticmethod + @torch.no_grad() + def generate_sparse_target_bev_points(data: dict, + transform=None, + sam_res=0.4, + map_res=0.2, + range=50, + max_num_pts=3000, + discrete=False): + data['bev_tgt_pts'] = generate_bev_tgt_pts( + data['points'], data, + transform, sam_res, map_res, range, max_num_pts, discrete + ) + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # lidar = data['points'].cpu().numpy() + # pts = data['bev_tgt_pts'].cpu().numpy() + # pos = pts[:, 2] == 1 + # neg = pts[:, 2] == 0 + # + # ax = draw_points_boxes_plt( + # pc_range=50, + # points=pts[pos, :], + # points_c='r', + # return_ax=True + # ) + # ax.plot(pts[neg, 0], pts[neg, 1], '.', c='b', markersize=1) + # ax.plot(lidar[:, 0], lidar[:, 1], '.', c='gray', markersize=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + + @staticmethod + 
@torch.no_grad() + def generate_sparse_target_roadline_points(data: dict, + transform=None, + map_res=0.2, + range=50, + kernel=3, + max_num_pts=3000): + if 'bevmap' not in data or data['bevmap'] is None: + return + bevmap = data['bevmap'].clone().float() + bevmap[bevmap==0] = -1 + bevmap_coor = data['bevmap_coor'] + sx, sy = bevmap.shape[:2] + filters = torch.ones(1, 1, kernel, kernel, device=bevmap.device) / (kernel ** 2 * 2) + road = torch.conv2d(bevmap[None, None], filters).squeeze() + mask = (road < 0.5) & (road > -0.5) + inds = torch.where(mask) + scores = 1 - road[mask].abs() + coords = torch.stack(inds).T * map_res + 2.5 * map_res - range + + data['roadline_tgts'] = torch.cat([coords, scores.unsqueeze(1)], dim=1) + + + + + + + + + + + + + diff --git a/cosense3d/agents/viewer/__init__.py b/cosense3d/agents/viewer/__init__.py new file mode 100644 index 00000000..fd40910d --- /dev/null +++ b/cosense3d/agents/viewer/__init__.py @@ -0,0 +1,4 @@ + + + + diff --git a/cosense3d/agents/viewer/gl_viewer.py b/cosense3d/agents/viewer/gl_viewer.py new file mode 100644 index 00000000..e8efc119 --- /dev/null +++ b/cosense3d/agents/viewer/gl_viewer.py @@ -0,0 +1,536 @@ + + +from typing import TYPE_CHECKING, List, Tuple, Union + +Color4f = Tuple[float, float, float, float] # type alias for type hinting + +import logging +import queue + +import numpy as np +from PyQt5.QtCore import Qt, QEvent, QPointF, QRectF +from PyQt5 import QtWidgets, QtGui, QtCore +import pyqtgraph as pg +from PyQt5.QtGui import QPen, QBrush, QColor +import pyqtgraph.opengl as gl +from matplotlib import colormaps +from OpenGL.GL import * +from OpenGL import GLU +from cosense3d.agents.viewer.utils import depth_min +from cosense3d.agents.viewer.items.graph_items import LineBoxItem + +SIZE_OF_FLOAT = ctypes.sizeof(ctypes.c_float) +TRANSLATION_FACTOR = 0.03 +jet = colormaps['jet'] +cav_colors = np.array([ + [0.745, 0.039, 1.000, 1.000], + [0.039, 0.937, 1.000, 1.000], + [0.078, 0.490, 0.961, 1.000], + [0.039, 1.000, 0.600, 1.000], + [1.000, 0.529, 0.000, 1.000], + [0.345, 0.039, 1.000, 1.000], + [0.631, 1.000, 0.039, 1.000], + [1.000, 0.827, 0.000, 1.000], +]) + + +# Main widget for presenting the point cloud and bounding boxes +class GLViewer(gl.GLViewWidget): + + def __init__(self, name: str, parent=None) -> None: + super(GLViewer, self).__init__(parent) + self.setObjectName(name) + self.controller = None + + self.setCameraPosition(distance=300, elevation=30, azimuth=-90) + self.pan(0, 0, 0) + self.draw_axes() + + self.tasks = queue.Queue() + + # point cloud data + self.pcd = None + self.boxes = [] + self.local_boxes = {} + self.pcd_items = {} + self.visibility = {} + + # drag window control + self.dragging = False + self.start_pos = None + self.end_pos = None + + # box control + self.rectangle = None # (pos1, pos2) + self.center = None # evt pose + self.highlight_mode = False + self.highlighted_item = None + self.activate_item = None + + def initializeGL(self): + glEnable(GL_DEPTH_TEST) # for visualization of depth + glDepthFunc(GL_LESS) # drawn if depth is less than the existing depth + glEnable(GL_BLEND) # enable transparency + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) + super().initializeGL() + + depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + print('viwer init:', depth_enabled) + + def paintGL(self, region=None, viewport=None, useItemNames=False): + super().paintGL(region, viewport, useItemNames) + # self.draw_depth_buffer() + self.addBox() + self.paintRect() + # depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + # 
print("paintGL", depth_enabled) + + def draw_axes(self): + axis = gl.GLAxisItem(size=QtGui.QVector3D(5, 5, 5)) + self.addItem(axis) + + def updatePCDs(self, pcds, color_mode='united', **kwargs): + self.pcds = pcds + if color_mode == 'height': + points_all = np.concatenate([pcd for pcd in pcds.values()], axis=0) + global_min = points_all[:, 2].min() + global_max = points_all[:, 2].max() + elif color_mode == 'time': + points_all = np.concatenate([pcd for pcd in pcds.values()], axis=0) + global_min = points_all[:, -1].min() + global_max = points_all[:, -1].max() + else: + global_min = None + global_max = None + + for i, (lidar_id, pcd)in enumerate(pcds.items()): + if color_mode == 'united': + colors = [1.0, 1.0, 1.0, 1.0] + elif color_mode == 'height': + height_norm = (pcd[:, 2] - global_min) / (global_max - global_min) + colors = jet(height_norm) + elif color_mode == 'cav': + colors = cav_colors[i] + colors[-1] = 0.5 + colors = colors.reshape(1, 4).repeat(len(pcd), 0) + elif color_mode == 'time': + time_norm = (pcd[:, -1] - global_min) / (global_max - global_min) + colors = jet(time_norm) + else: + raise NotImplementedError + item = gl.GLScatterPlotItem( + pos=pcd[:, :3], size=2, glOptions='opaque', color=colors + ) + if lidar_id in self.visibility: + item.setVisible(self.visibility[lidar_id]) + else: + self.visibility[lidar_id] = True + self.pcd_items[lidar_id] = item + self.addItem(item) + + def updateLabel(self, local_labels, global_labels, local_det, global_det, + successor=None, successor_gt=None, predecessor=None): + self.boxes = [] + if local_labels is not None: + for agent_id, labels in local_labels.items(): + self.local_boxes[agent_id] = [] + for id, label in labels.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='local_gt', line_width=2) + item.setVisible(self.visibility.get(f'{agent_id}.0', True)) + self.local_boxes[agent_id].append(item) + self.addItem(item) + if global_labels is not None: + for id, label in global_labels.items(): + prev_label = None if predecessor is None else predecessor[id] + item = LineBoxItem(box=[id, ] + label, last_pose=prev_label, + status='global_gt', line_width=2) + self.boxes.append(item) + self.addItem(item) + if local_det is not None: + for agent_id, labels in local_det.items(): + self.local_boxes[agent_id] = [] + for id, label in labels.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='det', line_width=2) + item.setVisible(self.visibility.get(f'{agent_id}.0', True)) + self.local_boxes[agent_id].append(item) + self.addItem(item) + if global_det is not None: + for id, label in global_det.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='det', line_width=2) + self.boxes.append(item) + self.addItem(item) + if successor is not None: + for id, label in successor.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='successor', line_width=2) + self.boxes.append(item) + self.addItem(item) + if successor_gt is not None: + for id, label in successor_gt.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='successor_gt', line_width=2) + self.boxes.append(item) + self.addItem(item) + + def updateFrameData(self, pcds, + local_label=None, + global_label=None, + local_det=None, + global_det=None, + predecessor=None, + successor=None, + successor_gt=None, + pcd_color='united'): + self.clear() + self.draw_axes() + self.updatePCDs(pcds, color_mode=pcd_color) + self.updateLabel(local_label, + global_label, + local_det, + global_det, + successor, + 
successor_gt, + predecessor,) + self.update() + + def refresh(self, data_dict, visible_keys=['globalGT'], color_mode='united', **kwargs): + pcds = data_dict.get('points', {}) + ego_id = list(data_dict['scenario'].keys())[0] + local_labels, global_labels, local_det, global_det = None, None, None, None + global_pred_gt, global_pred = None, None + if 'globalGT' in visible_keys: + global_labels = data_dict.get('global_labels', {}) + global_labels = global_labels[ego_id] + if 'localGT' in visible_keys: + local_labels = data_dict.get('local_labels', {}) + if pcds is None and global_labels is {} and local_labels is None: + return + + if 'localDet' in visible_keys: + if 'detection_local' in data_dict: + local_det = {k: v.get('labels', {}) for k, v in data_dict['detection_local'].items()} + if 'globalDet' in visible_keys: + if 'detection' in data_dict: + global_det = data_dict.get('detection', {}) + else: + global_det = data_dict.get('detection_global', {}) + global_det = global_det.get(ego_id, {'labels': {}})['labels'] + if 'globalPredGT' in visible_keys: + global_pred_gt = data_dict.get('global_pred_gt', {}) + global_pred_gt = global_pred_gt.get(ego_id, {}) + if 'globalPred' in visible_keys: + global_pred = data_dict.get('global_pred', {}) + global_pred = global_pred.get(ego_id, {'labels': {}})['labels'] + + self.updateFrameData(pcds, + local_label=local_labels, + global_label=global_labels, + local_det=local_det, + global_det=global_det, + successor=global_pred, + successor_gt=global_pred_gt, + pcd_color=color_mode) + + def addBox(self): + if self.rectangle is not None: + world_pos = self.evt_pos_to_world(*self.rectangle) + self.rectangle = None + if world_pos is not None: + box = LineBoxItem([self.controller.curr_box_type] + [0, 0, 0] + [4, 2, 1.7] + [0, 0, 0]) + azi = self.opts['azimuth'] + box.rotate(azi, 0, 0, 1) + box.translate(*world_pos, False) + self.boxes.append(box) + self.addItem(box) + self.controller.save_frame_labels(self.boxes) + logging.info("Add box: ", box.id) + if self.center is not None: + world_pos = self.evt_pos_to_world(self.center) + self.center = None + if world_pos is not None: + self.controller.track_singleton(world_pos) + + def highlightBox(self, pos): + w = 30 + h = 30 + x = pos.x() - w / 2 + y = pos.y() - h / 2 + self.removeHeilight() + items = self.itemsAt((x, y, w, h)) + for item in items: + if isinstance(item, LineBoxItem): + item.highlight() + self.highlighted_item = item + self.update() + return + + def removeHeilight(self): + if self.highlighted_item is not None: + self.highlighted_item.deactivate() + self.highlighted_item = None + self.update() + + def selectHeilight(self): + # remove previous activate item if exists + self.removeActivate() + self.highlighted_item.activate() + self.activate_item = self.highlighted_item + self.highlighted_item = None + self.controller.show_obj_info(self.activate_item) + self.update() + + def removeActivate(self): + if self.activate_item is not None: + self.activate_item.deactivate() + self.controller.hide_obj_info() + self.update() + + def mousePressEvent(self, evt: QtGui.QMouseEvent) -> None: + depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + print('mousePressEvent:', depth_enabled) + self.mousePos = evt.pos() + if evt.button() == Qt.LeftButton and \ + evt.modifiers() == Qt.ShiftModifier: + logging.debug("mousePress+Shift: drag box") + self.start_pos = evt.pos() + self.end_pos = evt.pos() + self.dragging = True + elif evt.button() == Qt.LeftButton and \ + self.highlighted_item is not None: + logging.debug("Select Highlighted 
box") + self.selectHeilight() + elif evt.button() == Qt.LeftButton and not self.highlight_mode: + self.removeActivate() + else: + super().mousePressEvent(evt) + + def mouseDoubleClickEvent(self, evt: QtGui.QMouseEvent) -> None: + if evt.button() == Qt.LeftButton: + self.center = evt.pos() + logging.debug('Double click left mouse button.') + self.update() + + def mouseMoveEvent(self, evt: QtGui.QMouseEvent) -> None: + if evt.buttons() == Qt.LeftButton and \ + evt.modifiers() == Qt.ShiftModifier: + logging.debug("mousePress+Shift+mouseMove") + if self.dragging: + self.end_pos = evt.pos() + self.update() + elif self.highlight_mode: + logging.debug("Highlight box") + self.highlightBox(evt.pos()) + else: + super().mouseMoveEvent(evt) + logging.debug("mouseMove-super") + + def mouseReleaseEvent(self, evt: QtGui.QMouseEvent): + if evt.button() == Qt.LeftButton and self.dragging: + self.dragging = False + self.rectangle = (self.start_pos, self.end_pos) + self.start_pos = None + self.end_pos = None + self.update() + else: + super().mouseReleaseEvent(evt) + + def keyPressEvent(self, evt: QEvent) -> None: + if evt.isAutoRepeat(): + return + if evt.key() == Qt.Key_Shift: + logging.debug("keyShiftPressed") + self.key_shift = True + elif evt.key() == Qt.Key_C: + logging.debug("keyCressed: highlight mode") + self.highlight_mode = True + self.setMouseTracking(True) + elif evt.key() == Qt.Key_3: + evt.accept() + self.controller.last_frame() + elif evt.key() == Qt.Key_4: + evt.accept() + self.controller.next_frame() + elif evt.key() == Qt.Key_T: + evt.accept() + self.controller.track() + elif evt.key() == Qt.Key_2: + evt.accept() + self.controller.next_frame() + self.controller.track() + else: + super().keyPressEvent(evt) + + def keyReleaseEvent(self, event: QEvent) -> None: + if event.isAutoRepeat(): + return + if event.key() == Qt.Key_C: + logging.debug("key C released: deactivate highlighted box") + self.highlight_mode = False + self.setMouseTracking(False) + self.removeHeilight() + + def model_pose_to_world(self, x, y, z): + modelview = glGetDoublev(GL_MODELVIEW_MATRIX) + projection = glGetDoublev(GL_PROJECTION_MATRIX) + viewport = self.getViewport() + world_pos = GLU.gluUnProject( + x, y, z, modelview, projection, viewport + ) + return world_pos + + def evt_pos_to_world(self, pos1, pos2=None): + """ + Args: + pos1: center pos if pos2 is None else start post of a region + pos2: end pos of a region + """ + if pos2 is None: + pos1 = QtCore.QPoint(pos1.x() - 20, pos1.y() - 20) + pos2 = QtCore.QPoint(pos1.x() + 20, pos1.y() + 20) + depths = self.get_region_depth(pos1, pos2) + valid = depths < 1 + if valid.sum() == 0: + logging.info("No point found, skip drawing box") + return None + else: + z = depths[valid].mean() + y = (pos1.y() + pos2.y()) / 2 + x = (pos1.x() + pos2.x()) / 2 + real_y = self.height() - y + world_pos = self.model_pose_to_world(x, real_y, z) + return world_pos + + def get_point_depth(self, x, y): + buffer_size = 201 + center = buffer_size // 2 + 1 + depths = glReadPixels( + x - center + 1, + y - center + 1, + buffer_size, + buffer_size, + GL_DEPTH_COMPONENT, + GL_FLOAT, + ) + z = depths[center][center] # Read selected pixel from depth buffer + + if z == 1: + z = depth_min(depths, center) + return z + + def get_region_depth(self, p1: QtCore.QPoint, p2: QtCore.QPoint) -> np.ndarray: + """ + Args: + p1: start point of region. 
+ p2: end point of region + """ + buffer_size_x = abs(p2.x() - p1.x()) + buffer_size_y = abs(p2.y() - p1.y()) + x = min(p1.x(), p2.x()) + y = self.height() - max(p1.y(), p2.y()) + + # Create a buffer to hold the depth values + depth_buffer = np.zeros((buffer_size_y, buffer_size_x), dtype=np.float32) + + glReadPixels( + x, y, + buffer_size_x, + buffer_size_y, + GL_DEPTH_COMPONENT, + GL_FLOAT, + depth_buffer + ) + depth_buffer = depth_buffer[::-1, :] + + return depth_buffer + + def draw_depth_buffer(self): + """!!!! + Remember the depth buffer is only available under paintGL loop. + Only in this loop the gl context is active. + """ + # Get the OpenGL extensions string + depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + print(depth_enabled) + # Retrieve the dimensions of the framebuffer + viewport = glGetIntegerv(GL_VIEWPORT) + width, height = viewport[2], viewport[3] + + # Create a buffer to hold the depth values + depth_buffer = np.zeros((height, width), dtype=np.float32) + + # Read the depth buffer into the buffer + glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT, depth_buffer) + depth_buffer = depth_buffer[::-1, :] + + # Convert the depth buffer to an image + print("min depth value:", depth_buffer.min()) + depth_image = ((1 - depth_buffer) * 500) * 255 + depth_image = np.repeat(depth_image[:, :, np.newaxis], 3, axis=2).astype(np.uint8) + + # Save the image to a file + import imageio + imageio.imwrite('/media/hdd/tmp/depth_image.png', depth_image) + + def box(self): + p1 = self.box_start_pos + p2 = self.box_end_pos + new_lines = np.array([ + [p1.x(), p1.y(), p1.z()], + [p2.x(), p2.y(), p2.z()], + ]) + + # create a GLLinePlotItem for the axes + line_item = gl.GLLinePlotItem(pos=new_lines, color=QtGui.QColor(255, 0, 0, 255), + width=3) + + # add the axes to the view + self.addItem(line_item) + + def drawRectangle(self): + if self.rectItem is None: + self.rectItem = pg.QtGui.QGraphicsRectItem() + self.scene.addItem(self.rectItem) + x1, y1 = self.startPoint.x(), self.startPoint.y() + x2, y2 = self.endPoint.x(), self.endPoint.y() + rect = QRectF(QPointF(x1, y1), QPointF(x2, y2)) + pen = QPen(QColor(255, 0, 0)) + brush = QBrush(QColor(0, 0, 0, 0)) + self.rectItem.setPen(pen) + self.rectItem.setBrush(brush) + self.rectItem.setRect(rect) + + def removeRectangle(self): + if self.rectItem is not None: + self.scene.removeItem(self.rectItem) + self.rectItem = None + self.update() + + def paintRect(self): + if self.dragging: + painter = QtGui.QPainter(self) + painter.setRenderHint(QtGui.QPainter.Antialiasing) + glDisable(GL_DEPTH_TEST) + glDisable(GL_BLEND) + # draw the rectangle + painter.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0))) + painter.setBrush(QtGui.QBrush(QtGui.QColor(255, 255, 0, 80))) + painter.drawRect(self.start_pos.x(), + self.start_pos.y(), + self.end_pos.x() - self.start_pos.x(), + self.end_pos.y() - self.start_pos.y()) + + glEnable(GL_DEPTH_TEST) + + def change_visibility(self, key, visible): + ai, li = key.split('.') + self.visibility[key] = visible + self.pcd_items[key].setVisible(visible) + for item in self.local_boxes[ai]: + item.setVisible(visible) + + + + diff --git a/cosense3d/agents/viewer/img_anno3d_viewer.py b/cosense3d/agents/viewer/img_anno3d_viewer.py new file mode 100644 index 00000000..c6542576 --- /dev/null +++ b/cosense3d/agents/viewer/img_anno3d_viewer.py @@ -0,0 +1,39 @@ + + +import matplotlib +import numpy as np +from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg +from matplotlib.figure import Figure + +from cosense3d.utils import vislib + 
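+# Use the Qt5Agg backend so the Matplotlib canvas can be embedded in the
+# PyQt5 GUI next to the OpenGL viewer.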
+matplotlib.use('Qt5Agg') + +class ImgAnno3DViewer(FigureCanvasQTAgg): + + def __init__(self, dpi=50): + self.fig = Figure(dpi=dpi) + super(ImgAnno3DViewer, self).__init__(self.fig) + + def refresh(self, data, **kwargs): + if len(data['img']) == 0: + return + self.fig.clear() + n_cavs = len(data['img']) + n_imgs = len(list(data['img'].values())[0]) + cav_ids = sorted(list(data['img'].keys())) + for i, cav_id in enumerate(cav_ids): + if cav_id in data['local_labels']: + bboxes3d = np.array(list(data['local_labels'][cav_id].values()) + )[:, [1, 2, 3, 4, 5, 6, 9]] + elif cav_id in data['global_labels']: + bboxes3d = np.array(list(data['global_labels'][cav_id].values()) + )[:, [1, 2, 3, 4, 5, 6, 9]] + else: + return + for j in range(n_imgs): + ax = self.fig.add_subplot(n_cavs, n_imgs, i * n_imgs + j + 1) + img = data['img'][cav_id][j].astype(np.uint8) + lidar2img = data['lidar2img'][cav_id][j] + vislib.draw_3d_points_boxes_on_img(ax, img, lidar2img, boxes=bboxes3d) + self.draw() diff --git a/cosense3d/agents/viewer/img_viewer.py b/cosense3d/agents/viewer/img_viewer.py new file mode 100644 index 00000000..eedb29b2 --- /dev/null +++ b/cosense3d/agents/viewer/img_viewer.py @@ -0,0 +1,40 @@ + + +import numpy as np +import matplotlib +from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg +from matplotlib.figure import Figure + +from cosense3d.utils import vislib + +matplotlib.use('Qt5Agg') + + +class ImgViewer(FigureCanvasQTAgg): + + def __init__(self, dpi=100, mean=None, std=None): + self.fig = Figure(dpi=dpi) + super(ImgViewer, self).__init__(self.fig) + self.mean = np.array(mean) if mean is not None else None + self.std = np.array(std) if std is not None else None + + def refresh(self, data, **kwargs): + if len(data['img']) == 0: + return + self.fig.clear() + n_cavs = len(data['img']) + n_imgs = len(list(data['img'].values())[0]) + cav_ids = sorted(list(data['img'].keys())) + for i, cav_id in enumerate(cav_ids): + for j in range(n_imgs): + ax = self.fig.add_subplot(n_cavs, n_imgs, i * n_imgs + j + 1) + img = data['img'][cav_id][j] + if self.std is not None and self.mean is not None: + img = img * self.std + self.mean + img = img.astype(np.uint8) + if len(data['bboxes2d']) == 0: + bboxes2d = None + else: + bboxes2d = data['bboxes2d'][cav_id][j].reshape(-1, 2, 2) + vislib.draw_2d_bboxes_on_img(img, bboxes2d, ax) + self.draw() diff --git a/cosense3d/agents/viewer/items/__init__.py b/cosense3d/agents/viewer/items/__init__.py new file mode 100644 index 00000000..fd40910d --- /dev/null +++ b/cosense3d/agents/viewer/items/__init__.py @@ -0,0 +1,4 @@ + + + + diff --git a/cosense3d/agents/viewer/items/graph_items.py b/cosense3d/agents/viewer/items/graph_items.py new file mode 100644 index 00000000..b71b0cde --- /dev/null +++ b/cosense3d/agents/viewer/items/graph_items.py @@ -0,0 +1,260 @@ + + +import pyqtgraph.opengl as gl +import pyqtgraph as pg +from PyQt5.QtWidgets import QGraphicsRectItem, QGraphicsLineItem +from PyQt5 import QtCore + +from cosense3d.utils.box_utils import * +from cosense3d.dataset.toolkit.cosense import csColors +from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs + +CSCOLORS = (np.array([csColors[k] for k in cs.OBJ_LIST]) / 255.).tolist() + +BOX_COLORs = { + 'inactive': CSCOLORS, + 'highlight': (0., 1, 1, 1), + 'active': (0.9, 0, 1, 1), + 'local_gt': (1, 1, 0, 1), + 'global_gt': (0, 1, 0, 1), + 'gt': (0, 1, 0, 1), + 'det': (1, 0, 0, 1), + 'pred': (1, 0, 1, 1), + 'successor': (0, 0.5, 1, 1), + 'successor_gt': (0, 1, 1, 1) +} + +pens = { + 
'yellow_dashed': pg.mkPen('y', width=1, style=QtCore.Qt.DashLine), + 'yellow_solid': pg.mkPen('y', width=1, style=QtCore.Qt.SolidLine), + 'virtual': pg.mkPen(color=(0, 0, 0, 0), width=1), +} + + +class MeshBoxItem(gl.GLMeshItem): + def __init__(self, size=(1, 1, 1), color=(0.0, 1.0, 0.0, 0.25)): + l, w, h = size + verts = [ + [0, 0, 0], + [l, 0, 0], + [l, 0, h], + [0, 0, h], + [0, w, 0], + [l, w, 0], + [l, w, h], + [0, w, h] + ] + verts = np.array(verts) + + faces = [ + [0, 1, 2], + [0, 2, 3], + [1, 5, 6], + [1, 6, 2], + [5, 4, 7], + [5, 7, 6], + [4, 0, 3], + [4, 3, 7], + [3, 2, 6], + [3, 6, 7], + [0, 4, 5], + [0, 5, 1] + ] + faces = np.array(faces) + + normals = np.array([ + [0, -1, 0], + [0, 1, 0], + [1, 0, 0], + [-1, 0, 0], + [0, 0, -1], + [0, 0, 1] + ]) + + colors = [color] * len(faces) + + meshdata = gl.MeshData(vertexes=verts, faces=faces, faceColors=colors) + super().__init__(meshdata=meshdata, shader='balloon', glOptions='translucent') + + +class LineBoxItem(gl.GLLinePlotItem): + ids = set() # TODO: need to be initialized by labeled data in the current scenario + id_ptr = 0 + def __init__(self, + box, + status='inactive', + show_direction=False, + last_pose=None, + line_width=1.): + """ + :param box: ([id], type_id, x, y, z, l, w, h, roll, pitch, yaw) + :param color: + + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + """ + id = None + box_score = None + if len(box) == 11: + id = int(box[0]) + type_id = int(box[1]) + box = box[2:] + elif len(box) == 10: + type_id = int(box[0]) + box = box[1:] + elif len(box) == 12: + id = int(box[0]) + type_id = int(box[1]) + box_score = box[-1] + box = box[2:-1] + else: + raise NotImplementedError + vertices = np.zeros((12, 3)) + vertices[:8] = boxes_to_corners_3d(np.array([box]))[0] + if show_direction: + # ----- + # | |---- direction on top + # ----- + top_center = np.mean(vertices[4:], axis=0) + top_front = np.mean(vertices[[4, 5]], axis=0) + top_ff = top_front * 2 - top_center + vertices[8] = top_front + vertices[9] = top_ff + if last_pose is not None: + # ----- + # last pose on bottom of the boxe o----| | + # ----- + assert len(last_pose) == 3 + bottom_center = np.mean(vertices[:4], axis=0) + last_pose[2] = bottom_center[2] # set last pose z to ground + vertices[10] = np.array(last_pose) + vertices[11] = np.array(bottom_center) + + self.vertices = vertices + + # Define the edges of the box + edges = [ + [0, 1], # front-bottom + [1, 5], # front-right + [5, 4], # front-top + [4, 0], # front-left + [0, 3], # left-bottom + [1, 2], # right-bottom + [5, 6], # right-top + [4, 7], # left-top + [3, 2], # back-bottom + [2, 6], # back-right + [6, 7], # back-top + [7, 3], # back-left + ] + if show_direction: + edges.append([8, 9]) + if last_pose is not None: + edges.append([10, 11]) + self.edges = np.array(edges) + + vertices_pairs = self.vertices[self.edges.flatten()] + + while id is None: + if LineBoxItem.id_ptr not in LineBoxItem.ids: + id = LineBoxItem.id_ptr + else: + LineBoxItem.id_ptr += 1 + self.id = id + self.typeid = type_id + LineBoxItem.ids.add(id) + + super().__init__(pos=vertices_pairs, + color=self.color(status), + width=line_width, + mode='lines', + glOptions='opaque') + + def to_center(self): + """Convert box to center format""" + transform = self.transform().matrix() + corners = (transform[:3, :3] @ self.vertices[:8].T) + transform[:3, 3:] + box_center = corners_to_boxes_3d(corners.T[None, :]) + return box_center[0] + + def activate(self): + 
self.setData(color=BOX_COLORs['active'], width=2.0) + + def deactivate(self): + self.setData(color=BOX_COLORs['inactive'][self.typeid] + [0.5]) + + def highlight(self): + self.setData(color=BOX_COLORs['highlight'], width=2.0) + + @property + def isActive(self): + return self.color == BOX_COLORs['active'] + + def color(self, status): + if status in ['inactive']: + return BOX_COLORs[status][self.typeid] + [0.5] + else: + return BOX_COLORs[status] + + +class LineItem(QGraphicsLineItem): + def __init__(self, line, parent=None): + super().__init__(parent) + self.inactive_pen = pens['yellow_dashed'] + self.active_pen = pens['yellow_solid'] + self.setLine(*line) + self.setPen(self.inactive_pen) + self.setZValue(5) + self.active = False + + def hoverEvent(self, event): + if event.isExit(): + self.setPen(self.inactive_pen) + self.active = False + else: + self.setPen(self.active_pen) + self.active = True + + +class RectangleItem(QGraphicsRectItem): + def __init__(self, rect): + super().__init__(*rect) + self.setPen(pens['virtual']) + self.setZValue(0) + self.active = False + + def hoverEvent(self, event): + if event.isExit(): + self.setPen(pens['virtual']) + self.active = False + else: + pos = event.pos() + if abs(pos.x()) < 0.3 and abs(pos.y()) < 0.3: + self.setPen(pens['yellow_solid']) + self.active = True + + + +if __name__ == "__main__": + from PyQt5 import QtWidgets + + app = QtWidgets.QApplication([]) + w = gl.GLViewWidget() + w.opts['distance'] = 20 + w.show() + + boxItem = LineBoxItem( + box=[-5, 8, -1, 4, 3, 2, 0, 0, 0], + show_direction=True + ) + w.addItem(boxItem) + + app.exec_() + + + diff --git a/cosense3d/agents/viewer/output_viewer.py b/cosense3d/agents/viewer/output_viewer.py new file mode 100644 index 00000000..74eadea2 --- /dev/null +++ b/cosense3d/agents/viewer/output_viewer.py @@ -0,0 +1,248 @@ + + +import matplotlib +import numpy as np +from PyQt5 import QtWidgets +from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg +from matplotlib.figure import Figure + +from cosense3d.utils.vislib import draw_points_boxes_plt + +matplotlib.use('Qt5Agg') + + +class MplCanvas(FigureCanvasQTAgg): + + def __init__(self, data_keys, width=5, height=4, dpi=100, title='plot', nrows=1, ncols=1): + fig = Figure(figsize=(width, height), dpi=dpi) + fig.suptitle(title, fontsize=16) + self.axes = fig.subplots(nrows, ncols) + self.data_keys = data_keys + super(MplCanvas, self).__init__(fig) + + def update_title(self, scenario, frame, cav_id): + self.axes.set_title(f"{scenario[cav_id]}.{frame[cav_id]}") + + +class BEVSparseCanvas(MplCanvas): + def __init__(self, lidar_range=None, s=4, **kwargs): + super().__init__(**kwargs) + assert len(self.data_keys) >=1, ('1st key should be pred bev map, ' + '2nd key (optional) should be gt bev map.') + self.lidar_range = lidar_range + self.s = s + self.pred_key = self.data_keys[0] + self.gt_key = None + if len(self.data_keys) > 1: + self.gt_key = self.data_keys[1] + + def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, data_dict in data[self.pred_key].items(): + if 'ctr' in data_dict: + centers = data_dict['ctr'].cpu().numpy() + elif 'ref_pts' in data_dict: + centers = data_dict['ref_pts'].cpu().numpy() + else: + raise NotImplementedError(f'only ctr or ref_pts are supported.') + conf = data_dict['conf'][:, 1:].detach().max(dim=-1).values.cpu().numpy() + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.scatter = self.axes.scatter(centers[:, 0], centers[:, 1], + 
cmap='jet', c=conf, s=self.s, vmin=0, vmax=1) + # self.scatter.set_array(conf) + # self.scatter.set_offsets(centers) + if self.gt_key is not None: + gt_boxes = list(data[self.gt_key][cav_id].values()) + gt_boxes = np.array(gt_boxes)[:, [1, 2, 3, 4, 5, 6, 9]] + self.axes = draw_points_boxes_plt( + self.lidar_range, + boxes_gt=gt_boxes, + ax=self.axes, + return_ax=True + ) + self.draw() + break + + +class DetectionScoreMap(MplCanvas): + def __init__(self, lidar_range=None, s=4, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.s = s + self.pred_key = self.data_keys[0] + # self.gt_key = self.data_keys[1] + + def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, det_dict in data[self.pred_key].items(): + assert 'ctr' in det_dict and 'scr' in det_dict + centers = det_dict['ctr'].cpu().numpy() + conf = det_dict['scr'].cpu().numpy() + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.scatter = self.axes.scatter(centers[:, 0], centers[:, 1], + cmap='jet', c=conf, s=self.s, vmin=0, vmax=1) + # self.scatter.set_array(conf) + # self.scatter.set_offsets(centers) + self.draw() + break + + +class BEVDenseCanvas(MplCanvas): + def __init__(self, lidar_range=None, **kwargs): + super().__init__(**kwargs) + assert len(self.data_keys) == 2, '1st key should be pred bev map, 2nd key should be gt bev map.' + self.lidar_range = lidar_range + self.pred_key = self.data_keys[0] + self.gt_key = self.data_keys[1] + + def refresh(self, data, **kwargs): + if self.pred_key not in data and self.gt_key not in data: + return + gt_bev = data.get(self.gt_key, False) + for cav_id, pred_bev in data[self.pred_key].items(): + self.axes[0].clear() + self.axes[1].clear() + self.axes[0].set_title(f"Pred: {data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.axes[1].set_title(f"GT: {data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.axes[0].imshow(pred_bev[..., 1]) + if gt_bev: + self.axes[1].imshow(gt_bev[cav_id]) + self.draw() + break + + +class SparseDetectionCanvas(MplCanvas): + def __init__(self, lidar_range=None, topk_ctr=0, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.topk_ctr = topk_ctr + self.pred_key = self.data_keys[0] + self.gt_key = self.data_keys[1] + + def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, det_dict in data[self.pred_key].items(): + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + # plot points + for points in data['points'].values(): + draw_points_boxes_plt( + pc_range=self.lidar_range, + points=points, + ax=self.axes, + # return_ax=True + ) + # plot centers + if 'ctr' in det_dict: + centers = det_dict['ctr'].detach().cpu().numpy() + if self.topk_ctr > 0: + topk_inds = det_dict['scr'].topk(self.topk_ctr).indices + conf = det_dict['scr'][topk_inds] + centers = centers[topk_inds] + elif 'conf' in det_dict: + conf = det_dict['conf'][:, 0, 1].detach().cpu().numpy() + mask = conf > 0.5 + centers = centers[mask] + conf = conf[mask] + + self.axes.scatter(centers[:, 0], centers[:, 1], + cmap='jet', c=conf, s=1, vmin=0, vmax=1) + # plot pcds and boxes + gt_boxes = list(data[self.gt_key][cav_id].values()) + gt_boxes = np.array(gt_boxes)[:, [1, 2, 3, 4, 5, 6, 9]] + pred_boxes = det_dict['box'].detach().cpu().numpy() + draw_points_boxes_plt( + pc_range=self.lidar_range, + boxes_pred=pred_boxes, + boxes_gt=gt_boxes, + ax=self.axes, + # return_ax=True + ) + self.draw() + 
break + + +class DetectionCanvas(MplCanvas): + def __init__(self, lidar_range=None, topk_ctr=0, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.topk_ctr = topk_ctr + self.pred_key = self.data_keys[0] + self.gt_key = self.data_keys[1] + + def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, det_dict in data[self.pred_key].items(): + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + # plot points + for points in data['points'].values(): + draw_points_boxes_plt( + pc_range=self.lidar_range, + points=points, + ax=self.axes, + # return_ax=True + ) + + # plot centers + if 'ctr' in det_dict: + if self.topk_ctr > 0: + topk_inds = det_dict['scr'].topk(self.topk_ctr).indices + scr = det_dict['scr'][topk_inds].detach().cpu().numpy() + centers = det_dict['ctr'][topk_inds].detach().cpu().numpy() + else: + centers = det_dict['ctr'].detach().cpu().numpy() + if 'scr' in det_dict: + scr = det_dict['scr'].detach().cpu().numpy() + elif 'conf' in det_dict: + scr = det_dict['conf'][:, 0, 1].detach().cpu().numpy() + else: + break + mask = scr > 0.5 + centers = centers[mask] + scr = scr[mask] + self.axes.scatter(centers[:, 0], centers[:, 1], + cmap='jet', c=scr, s=.1, vmin=0, vmax=1) + # plot pcds and boxes + gt_boxes = list(data[self.gt_key][cav_id].values()) + gt_boxes = np.array(gt_boxes)[:, [1, 2, 3, 4, 5, 6, 9]] + if 'preds' in det_dict: + det_dict = det_dict['preds'] + pred_boxes = det_dict['box'].detach().cpu().numpy() + draw_points_boxes_plt( + pc_range=self.lidar_range, + boxes_pred=pred_boxes, + boxes_gt=gt_boxes, + ax=self.axes, + # return_ax=True + ) + self.draw() + break + + +class OutputViewer(QtWidgets.QWidget): + def __init__(self, plots, parent=None): + super(OutputViewer, self).__init__(parent) + layout = QtWidgets.QVBoxLayout(self) + self.gather_data_keys = [] + self.plots = [] + for p in plots: + plot = globals()[p['title']](**p) + layout.addWidget(plot) + self.plots.append(plot) + self.gather_data_keys = self.gather_data_keys + plot.data_keys + self.gather_data_keys = list(set(self.gather_data_keys)) + + def refresh(self, data, **kwargs): + for plot in self.plots: + plot.refresh(data) + + + + diff --git a/cosense3d/agents/viewer/utils.py b/cosense3d/agents/viewer/utils.py new file mode 100644 index 00000000..77de0a8d --- /dev/null +++ b/cosense3d/agents/viewer/utils.py @@ -0,0 +1,23 @@ + + +import numpy as np + + +# Returns the minimum (closest) depth for a specified radius around the center +def depth_min(depths, center, r=10) -> float: + selected_depths = depths[circular_mask(len(depths), center, r)] + filtered_depths = selected_depths[(0 < selected_depths) & (selected_depths < 1)] + if 0 in depths: # Check if cursor is at widget border + return 1 + if len(filtered_depths) > 0: + return np.min(filtered_depths) + else: + return 1 + + +# Creates a circular mask with radius around center +def circular_mask(arr_length, center, radius): + dx = np.arange(arr_length) + dx2 = (dx[np.newaxis, :] - center) ** 2 + \ + (dx[:, np.newaxis] - center) ** 2 + return dx2 < radius ** 2 \ No newline at end of file diff --git a/cosense3d/carla/__init__.py b/cosense3d/carla/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/carla/assets/map_bounds.json b/cosense3d/carla/assets/map_bounds.json new file mode 100644 index 00000000..73151101 --- /dev/null +++ b/cosense3d/carla/assets/map_bounds.json @@ -0,0 +1,58 @@ +{ + "Town04": [ + -537.019287109375, + 
-417.8961181640625, + 0.0, + 435.8958740234375, + 457.75714111328125, + 11.000946044921875 + ], + "Town06": [ + -392.4933776855469, + -174.14720153808594, + 0.0, + 694.64794921875, + 434.96490478515625, + 0.1244821548461914 + ], + "Town03": [ + -258.726318359375, + -289.6225891113281, + 0.0, + 272.03712463378906, + 229.38528442382812, + 9.008626937866211 + ], + "Town05": [ + -299.779296875, + -229.62591552734375, + 0.0, + 232.8135528564453, + 230.8162841796875, + 10.032529830932617 + ], + "Town07": [ + -226.22705078125, + -270.593017578125, + 0.0, + 107.6220932006836, + 145.85621643066406, + 9.405640602111816 + ], + "Town10HD_Opt": [ + -139.66156768798828, + -93.58621978759766, + 0.0, + 134.9676971435547, + 162.9640350341797, + 3.0597971090173814e-07 + ], + "Town02": [ + -29.459869384765625, + 83.390625, + 0.0, + 215.74081420898438, + 328.5606994628906, + 3.4969110629390343e-07 + ] +} \ No newline at end of file diff --git a/cosense3d/carla/assets/maps/png/Town02.png b/cosense3d/carla/assets/maps/png/Town02.png new file mode 100644 index 00000000..da911bcf Binary files /dev/null and b/cosense3d/carla/assets/maps/png/Town02.png differ diff --git a/cosense3d/carla/assets/maps/png/Town03.png b/cosense3d/carla/assets/maps/png/Town03.png new file mode 100644 index 00000000..10e878e2 Binary files /dev/null and b/cosense3d/carla/assets/maps/png/Town03.png differ diff --git a/cosense3d/carla/assets/maps/png/Town04.png b/cosense3d/carla/assets/maps/png/Town04.png new file mode 100644 index 00000000..f58a12fe Binary files /dev/null and b/cosense3d/carla/assets/maps/png/Town04.png differ diff --git a/cosense3d/carla/assets/maps/png/Town05.png b/cosense3d/carla/assets/maps/png/Town05.png new file mode 100644 index 00000000..f05f2aa2 Binary files /dev/null and b/cosense3d/carla/assets/maps/png/Town05.png differ diff --git a/cosense3d/carla/assets/maps/png/Town06.png b/cosense3d/carla/assets/maps/png/Town06.png new file mode 100644 index 00000000..e4105c2f Binary files /dev/null and b/cosense3d/carla/assets/maps/png/Town06.png differ diff --git a/cosense3d/carla/assets/maps/png/Town07.png b/cosense3d/carla/assets/maps/png/Town07.png new file mode 100644 index 00000000..37ee1be6 Binary files /dev/null and b/cosense3d/carla/assets/maps/png/Town07.png differ diff --git a/cosense3d/carla/assets/maps/png/Town10HD_Opt.png b/cosense3d/carla/assets/maps/png/Town10HD_Opt.png new file mode 100644 index 00000000..96bfa71c Binary files /dev/null and b/cosense3d/carla/assets/maps/png/Town10HD_Opt.png differ diff --git a/cosense3d/carla/assets/maps/roadline/Town02.bin b/cosense3d/carla/assets/maps/roadline/Town02.bin new file mode 100644 index 00000000..ec1a8685 Binary files /dev/null and b/cosense3d/carla/assets/maps/roadline/Town02.bin differ diff --git a/cosense3d/carla/assets/maps/roadline/Town03.bin b/cosense3d/carla/assets/maps/roadline/Town03.bin new file mode 100644 index 00000000..1c29b50d Binary files /dev/null and b/cosense3d/carla/assets/maps/roadline/Town03.bin differ diff --git a/cosense3d/carla/assets/maps/roadline/Town04.bin b/cosense3d/carla/assets/maps/roadline/Town04.bin new file mode 100644 index 00000000..4d2ed4dd Binary files /dev/null and b/cosense3d/carla/assets/maps/roadline/Town04.bin differ diff --git a/cosense3d/carla/assets/maps/roadline/Town05.bin b/cosense3d/carla/assets/maps/roadline/Town05.bin new file mode 100644 index 00000000..ee35262f Binary files /dev/null and b/cosense3d/carla/assets/maps/roadline/Town05.bin differ diff --git a/cosense3d/carla/assets/maps/roadline/Town06.bin 
b/cosense3d/carla/assets/maps/roadline/Town06.bin new file mode 100644 index 00000000..4f249d3b Binary files /dev/null and b/cosense3d/carla/assets/maps/roadline/Town06.bin differ diff --git a/cosense3d/carla/assets/maps/roadline/Town07.bin b/cosense3d/carla/assets/maps/roadline/Town07.bin new file mode 100644 index 00000000..1656daf6 Binary files /dev/null and b/cosense3d/carla/assets/maps/roadline/Town07.bin differ diff --git a/cosense3d/carla/assets/maps/roadline/Town10HD_Opt.bin b/cosense3d/carla/assets/maps/roadline/Town10HD_Opt.bin new file mode 100644 index 00000000..48a24647 Binary files /dev/null and b/cosense3d/carla/assets/maps/roadline/Town10HD_Opt.bin differ diff --git a/cosense3d/carla/assets/maps/xodr/Town02.xodr b/cosense3d/carla/assets/maps/xodr/Town02.xodr new file mode 100644 index 00000000..30af4060 --- /dev/null +++ b/cosense3d/carla/assets/maps/xodr/Town02.xodr @@ -0,0 +1,10021 @@ + + +
+[OpenDRIVE road-network XML for CARLA Town02, 10021 lines: road, lane and junction geometry]
\ No newline at end of file diff --git a/cosense3d/carla/assets/maps/xodr/Town03.xodr b/cosense3d/carla/assets/maps/xodr/Town03.xodr new file mode 100644 index 00000000..c8aabd94 --- /dev/null +++ b/cosense3d/carla/assets/maps/xodr/Town03.xodr @@ -0,0 +1,49275 @@
+[OpenDRIVE road-network XML for CARLA Town03, 49275 lines: road, lane and junction geometry]
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
diff --git a/cosense3d/carla/assets/maps/xodr/Town04.xodr b/cosense3d/carla/assets/maps/xodr/Town04.xodr
new file mode 100644
index 00000000..2269f503
--- /dev/null
+++ b/cosense3d/carla/assets/maps/xodr/Town04.xodr
@@ -0,0 +1,36011 @@
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
diff --git a/cosense3d/carla/assets/maps/xodr/Town05.xodr b/cosense3d/carla/assets/maps/xodr/Town05.xodr
new file mode 100644
index 00000000..88d0a31e
--- /dev/null
+++ b/cosense3d/carla/assets/maps/xodr/Town05.xodr
@@ -0,0 +1,47273 @@
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
\ No newline at end of file diff --git a/cosense3d/carla/assets/maps/xodr/Town06.xodr b/cosense3d/carla/assets/maps/xodr/Town06.xodr new file mode 100644 index 00000000..2cfa0507 --- /dev/null +++ b/cosense3d/carla/assets/maps/xodr/Town06.xodr @@ -0,0 +1,31365 @@
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+
\ No newline at end of file
diff --git a/cosense3d/carla/assets/maps/xodr/Town07.xodr b/cosense3d/carla/assets/maps/xodr/Town07.xodr
new file mode 100644
index 00000000..b9ad15a0
--- /dev/null
+++ b/cosense3d/carla/assets/maps/xodr/Town07.xodr
@@ -0,0 +1,26688 @@
[Town07 OpenDRIVE (.xodr) road-network asset: 26,688 added XML lines; only the diff "+" markers survived text extraction, so the map data is not reproduced here.]
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
\ No newline at end of file
diff --git a/cosense3d/carla/assets/maps/xodr/Town10HD_Opt.xodr b/cosense3d/carla/assets/maps/xodr/Town10HD_Opt.xodr
new file mode 100644
index 00000000..0e75263a
--- /dev/null
+++ b/cosense3d/carla/assets/maps/xodr/Town10HD_Opt.xodr
@@ -0,0 +1,17962 @@
\ No newline at end of file
diff --git a/cosense3d/carla/assets/scenario_town_map.json b/cosense3d/carla/assets/scenario_town_map.json
new file mode 100644
index 00000000..4ebaf9bf
--- /dev/null
+++ b/cosense3d/carla/assets/scenario_town_map.json
@@ -0,0 +1,71 @@
+{
+  "2021_08_20_21_48_35": "Town06",
+  "2021_08_18_19_48_05": "Town06",
+  "2021_08_20_21_10_24": "Town06",
+  "2021_08_21_09_28_12": "Town06",
+  "2021_08_22_07_52_02": "Town05",
+  "2021_08_22_09_08_29": "Town05",
+  "2021_08_22_21_41_24": "Town05",
+  "2021_08_23_12_58_19": "Town05",
+  "2021_08_23_15_19_19": "Town04",
+  "2021_08_23_16_06_26": "Town04",
+  "2021_08_23_17_22_47": "Town04",
+  "2021_08_23_21_07_10": "Town10HD_Opt",
+  "2021_08_23_21_47_19": "Town10HD_Opt",
+  "2021_08_24_07_45_41": "Town10HD_Opt",
+  "2021_08_24_11_37_54": "Town07",
+  "2021_08_24_20_09_18": "Town04",
+  "2021_08_24_20_49_54": "Town04",
+  "2021_08_24_21_29_28": "Town04",
+  "2021_08_16_22_26_54": "Town06",
+  "2021_08_18_09_02_56": "Town06",
+  "2021_08_18_18_33_56": "Town06",
+  "2021_08_18_21_38_28": "Town06",
+  "2021_08_18_22_16_12": "Town06",
+  "2021_08_18_23_23_19": "Town06",
+  "2021_08_19_15_07_39": "Town06",
+  "2021_08_20_16_20_46": "Town06",
+  "2021_08_20_20_39_00": "Town06",
+  "2021_08_20_21_00_19": "Town06",
+  "2021_08_21_09_09_41": "Town06",
+  "2021_08_21_15_41_04": "Town05",
+  "2021_08_21_16_08_42": "Town05",
+  "2021_08_21_17_00_32": "Town05",
+  "2021_08_21_21_35_56": "Town05",
+  "2021_08_21_22_21_37": "Town05",
+  "2021_08_22_06_43_37": "Town05",
+  "2021_08_22_07_24_12": "Town05",
+  "2021_08_22_08_39_02": "Town05",
+  "2021_08_22_09_43_53": "Town05",
+  "2021_08_22_10_10_40": "Town05",
+  "2021_08_22_10_46_58": "Town06",
+  "2021_08_22_11_29_38": "Town06",
+  "2021_08_22_22_30_58": "Town05",
+  "2021_08_23_10_47_16": "Town04",
+  "2021_08_23_11_06_41": "Town05",
+  "2021_08_23_11_22_46": "Town04",
+  "2021_08_23_12_13_48": "Town05",
+  "2021_08_23_13_10_47": "Town05",
+  "2021_08_23_16_42_39": "Town04",
+  "2021_08_23_17_07_55": "Town04",
+  "2021_08_23_19_27_57": "Town10HD_Opt",
+  "2021_08_23_20_47_11": "Town10HD_Opt",
+  "2021_08_23_22_31_01": "Town10HD_Opt",
+  "2021_08_23_23_08_17": "Town10HD_Opt",
+  "2021_08_24_09_25_42": "Town07",
+  "2021_08_24_09_58_32": "Town07",
+  "2021_08_24_12_19_30": "Town07",
+  "2021_09_09_13_20_58": "Town03",
+  "2021_09_09_19_27_35": "Town01",
+  "2021_09_10_12_07_11": "Town04",
+  "2021_09_09_23_21_21": "Town03",
+  "2021_08_21_17_30_41": "Town05",
+  "2021_08_22_13_37_16": "Town06",
+  "2021_08_22_22_01_17": "Town05",
+  "2021_08_23_10_51_24": "Town05",
+  "2021_08_23_13_17_21": "Town05",
+ "2021_08_23_19_42_07": "Town10HD_Opt", + "2021_09_09_22_21_11": "Town02", + "2021_09_11_00_33_16": "Town10HD_Opt", + "2021_08_18_19_11_02": "Town06" +} diff --git a/cosense3d/carla/bev_map.py b/cosense3d/carla/bev_map.py new file mode 100644 index 00000000..ab483162 --- /dev/null +++ b/cosense3d/carla/bev_map.py @@ -0,0 +1,4 @@ +import sys +sys.path.append("/opt/carla-simulator/PythonAPI/carla/dist/carla-0.9.13-py3.7-linux-x86_64.egg") +import carla + diff --git a/cosense3d/carla/map_manager.py b/cosense3d/carla/map_manager.py new file mode 100644 index 00000000..90d36eec --- /dev/null +++ b/cosense3d/carla/map_manager.py @@ -0,0 +1,112 @@ +import uuid + +import numpy as np + +from cosense3d.carla.map_utils import * + + +class CarlaMapManager: + def __init__(self, world, cfgs): + self.world = world + self.cfgs = cfgs + self.carla_map = self.world.get_map() + + def generate_map_mata(self): + # cross walks + crosswalks = self.carla_map.get_crosswalks() + crosswalks_dict = {} + + tmp_list = [] + for key_points in crosswalks: + if (key_points.x, key_points.y, key_points.z) in tmp_list: + crosswalk_id = uuid.uuid4().hex[:6].upper() + cross_marking = np.array(tmp_list) + bound = self.get_bounds(cross_marking, cross_marking) + crosswalks_dict[crosswalk_id] = {'xyz': cross_marking, 'bound': bound} + tmp_list = [] + else: + tmp_list.append((key_points.x, key_points.y, key_points.z)) + + # lanes + lanes_dict = {} + # list of all start waypoints in HD Map + topology = [x[0] for x in self.carla_map.get_topology()] + # sort by altitude + topology = sorted(topology, key=lambda w: w.transform.location.z) + for (i, waypoint) in enumerate(topology): + # unique id for each lane + lane_id = uuid.uuid4().hex[:6].upper() + intersection_flag = True if waypoint.is_intersection else False + + waypoints = [waypoint] + nxt = waypoint.next(self.cfgs['lane_sample_resolution'])[0] + # looping until next lane + while nxt.road_id == waypoint.road_id \ + and nxt.lane_id == waypoint.lane_id: + waypoints.append(nxt) + nxt = nxt.next(self.cfgs['lane_sample_resolution'])[0] + + # waypoint is the centerline, we need to calculate left and right lane mark + left_marking = [lateral_shift(w.transform, -w.lane_width * 0.5) for + w in waypoints] + right_marking = [lateral_shift(w.transform, w.lane_width * 0.5) for + w in waypoints] + # convert the list of carla.Location to np.array + left_marking = list_loc2array(left_marking) + right_marking = list_loc2array(right_marking) + mid_lane = list_wpt2array(waypoints) + bound = self.get_bounds(left_marking, right_marking) + + lanes_dict[lane_id] = { + 'is_intersection': intersection_flag, + 'left': left_marking, + 'middle': mid_lane, + 'right': right_marking, + 'bound': bound + } + + self.crosswalks_dict = crosswalks_dict + self.lanes_dict = lanes_dict + self.global_bounds = self.get_global_bound() + + + @staticmethod + def get_bounds(left_lane, right_lane): + """ + Get boundary information of a lane. 
+ + Parameters + ---------- + left_lane : np.array + shape: (n, 3) + right_lane : np.array + shape: (n,3) + Returns + ------- + bound : np.array + """ + x_min = min(np.min(left_lane[:, 0]), + np.min(right_lane[:, 0])) + y_min = min(np.min(left_lane[:, 1]), + np.min(right_lane[:, 1])) + z_min = min(np.min(left_lane[:, 2]), + np.min(right_lane[:, 2])) + x_max = max(np.max(left_lane[:, 0]), + np.max(right_lane[:, 0])) + y_max = max(np.max(left_lane[:, 1]), + np.max(right_lane[:, 1])) + z_max = max(np.max(left_lane[:, 2]), + np.max(right_lane[:, 2])) + + bounds = np.asarray([[[x_min, y_min], [x_max, y_max], [z_min, z_max]]]) + + return bounds + + def get_global_bound(self): + bounds = np.concatenate([v['bound'] for k, v in self.crosswalks_dict.items()] + + [v['bound'] for k, v in self.lanes_dict.items()], axis=0) + xy_min = np.min(bounds[:, 0, :], axis=0) - 20 + xy_max = np.max(bounds[:, 1, :], axis=0) + 20 + z_max = np.max(bounds[:, 2, 1]) + return xy_min.tolist() + [0.0] + xy_max.tolist() + [z_max] + diff --git a/cosense3d/carla/map_utils.py b/cosense3d/carla/map_utils.py new file mode 100644 index 00000000..55929bfe --- /dev/null +++ b/cosense3d/carla/map_utils.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- + +"""HDMap utilities +""" + +# Author: Runsheng Xu +# License: TDG-Attribution-NonCommercial-NoDistrib +import sys +sys.path.append("/opt/carla-simulator/PythonAPI/carla/dist/carla-0.9.13-py3.7-linux-x86_64.egg") +import carla +import numpy as np +import uuid +import math +from enum import IntEnum + +LABEL_TO_CARLA = {'building': carla.CityObjectLabel.Buildings, + 'terrain': carla.CityObjectLabel.Terrain, + 'sidewalk': carla.CityObjectLabel.Sidewalks, + 'roads': carla.CityObjectLabel.Roads} + + +class InterpolationMethod(IntEnum): + INTER_METER = 0 # fixed interpolation at a given step in meters + INTER_ENSURE_LEN = 1 # ensure we always get the same number of elements + + +def lateral_shift(transform, shift): + transform.rotation.yaw += 90 + return transform.location + shift * transform.get_forward_vector() + + +def list_loc2array(list_location): + """ + Convert list of carla location to np.array + Parameters + ---------- + list_location : list + List of carla locations. + + Returns + ------- + loc_array : np.array + Numpy array of shape (N, 3) + """ + loc_array = np.zeros((len(list_location), 3)) + for (i, carla_location) in enumerate(list_location): + loc_array[i, 0] = carla_location.x + loc_array[i, 1] = carla_location.y + loc_array[i, 2] = carla_location.z + + return loc_array + + +def list_wpt2array(list_wpt): + """ + Convert list of carla transform to np.array + Parameters + ---------- + list_wpt : list + List of carla waypoint. + + Returns + ------- + loc_array : np.array + Numpy array of shape (N, 3) + """ + loc_array = np.zeros((len(list_wpt), 3)) + for (i, carla_wpt) in enumerate(list_wpt): + loc_array[i, 0] = carla_wpt.transform.location.x + loc_array[i, 1] = carla_wpt.transform.location.y + loc_array[i, 2] = carla_wpt.transform.location.z + + return loc_array + + +def convert_tl_status(status): + """ + Convert carla.TrafficLightState to str. 
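As a geometric aside, the lane markings built in `generate_map_mata` rely on `lateral_shift` above: each boundary point is the centerline waypoint pushed half a lane width along the direction yaw + 90 degrees. The pure-NumPy restatement below is only an illustration of that geometry (yaw-only, no CARLA required); `lateral_shift_np` is a hypothetical helper, not a repository function.

```python
# Simplified, yaw-only illustration of the lateral_shift geometry (assumption:
# the forward vector of a yaw angle is (cos yaw, sin yaw, 0)).
import numpy as np

def lateral_shift_np(location, yaw_deg, shift):
    lateral = np.deg2rad(yaw_deg + 90.0)
    lateral_dir = np.array([np.cos(lateral), np.sin(lateral), 0.0])
    return np.asarray(location, dtype=float) + shift * lateral_dir

center = np.array([10.0, 2.0, 0.0])   # centerline waypoint location
lane_width = 3.5
left = lateral_shift_np(center, yaw_deg=0.0, shift=-lane_width * 0.5)
right = lateral_shift_np(center, yaw_deg=0.0, shift=+lane_width * 0.5)
# For a lane heading along +x (yaw = 0), left lands at y - w/2 and right at y + w/2.
```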
+ Parameters + ---------- + status : carla.TrafficLightState + + Returns + ------- + status_str : str + """ + if status == carla.TrafficLightState.Red: + return 'red' + elif status == carla.TrafficLightState.Green: + return 'green' + elif status == carla.TrafficLightState.Yellow: + return 'yellow' + else: + return 'normal' + + +def x_to_world_transformation(transform): + """ + Get the transformation matrix from x(it can be vehicle or sensor) + coordinates to world coordinate. + + Parameters + ---------- + transform : carla.Transform + The transform that contains location and rotation + + Returns + ------- + matrix : np.ndarray + The transformation matrx. + + """ + rotation = transform.rotation + location = transform.location + + # used for rotation matrix + c_y = np.cos(np.radians(rotation.yaw)) + s_y = np.sin(np.radians(rotation.yaw)) + c_r = np.cos(np.radians(rotation.roll)) + s_r = np.sin(np.radians(rotation.roll)) + c_p = np.cos(np.radians(rotation.pitch)) + s_p = np.sin(np.radians(rotation.pitch)) + + matrix = np.identity(4) + # translation matrix + matrix[0, 3] = location.x + matrix[1, 3] = location.y + matrix[2, 3] = location.z + + # rotation matrix + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + + return matrix + + +def world_to_sensor(cords, sensor_transform): + """ + Transform coordinates from world reference to sensor reference. + + Parameters + ---------- + cords : np.ndarray + Coordinates under world reference, shape: (4, n). + + sensor_transform : carla.Transform + Sensor position in the world. + + Returns + ------- + sensor_cords : np.ndarray + Coordinates in the sensor reference. + + """ + sensor_world_matrix = x_to_world_transformation(sensor_transform) + world_sensor_matrix = np.linalg.inv(sensor_world_matrix) + sensor_cords = np.dot(world_sensor_matrix, cords) + + return sensor_cords + + +def exclude_off_road_agents(static_bev, dynamic_bev): + dynamic_bev[static_bev == 0] = 0 + return dynamic_bev + + +def retrieve_city_object_info(world, label_list): + """ + A general function to retrieve object bbx in carla world except vehicle, + lane, crosswalk and road. + + Parameters + ---------- + world : carla.World + Carla world object + label_list : list of str + The label that users want to retrieve. + + Returns + ------- + A dictionary with information of the retrieved objects. + """ + city_object_info = {} + for label_name in label_list: + object_ins = world.get_level_bbs(LABEL_TO_CARLA[label_name]) + + obj_info = {} + + for obj in object_ins: + obj_id = uuid.uuid4().hex[:6].upper() + + obj_transform = carla.Transform(obj.location, + obj.rotation) + obj_loc = [obj.location.x, + obj.location.y, + obj.location.z, ] + obj_yaw = obj.rotation.yaw + + # calculate 4 corners + bb = obj.extent + corners = [ + carla.Location(x=-bb.x, y=-bb.y), + carla.Location(x=-bb.x, y=bb.y), + carla.Location(x=bb.x, y=bb.y), + carla.Location(x=bb.x, y=-bb.y) + ] + + obj_transform.transform(corners) + corners_reformat = [[x.x, x.y, x.z] for x in corners] + + obj_info[obj_id] = {'location': obj_loc, + 'yaw': obj_yaw, + 'corners': corners_reformat} + + city_object_info.update({label_name: obj_info}) + + return city_object_info + + +def obj_in_range(center, radius, obj_info_dict): + """ + Retrieve the object in range. 
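To make the convention of `x_to_world_transformation` concrete, the following sanity check (illustrative only; it needs the CARLA Python API installed) maps one sensor-frame point into the world frame and back with `world_to_sensor`:

```python
import numpy as np
import carla
from cosense3d.carla.map_utils import x_to_world_transformation, world_to_sensor

tf = carla.Transform(carla.Location(x=10.0, y=5.0, z=1.0),
                     carla.Rotation(yaw=90.0, pitch=0.0, roll=0.0))

T = x_to_world_transformation(tf)                  # 4x4 sensor-to-world matrix
p_sensor = np.array([[1.0], [0.0], [0.0], [1.0]])  # homogeneous point, shape (4, 1)
p_world = T @ p_sensor                             # approximately (10, 6, 1): +x maps to +y at yaw=90
p_back = world_to_sensor(p_world, tf)              # inverse transform recovers the input

assert np.allclose(p_back, p_sensor, atol=1e-6)
```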
+ + Parameters + ---------- + center : carla.Transform + The ego position + + radius : float + Valid radius. + + obj_info_dict : dict + + Returns + ------- + A dictionary that contains objects in range. + """ + final_objs = {} + + for obj_category, obj_contents in obj_info_dict.items(): + cur_objs = {} + for obj_id, obj_info in obj_contents.items(): + corners = obj_info['corners'] + for corner in corners: + distance = math.sqrt((corner[0] - center.location.x) ** 2 + \ + (corner[1] - center.location.y) ** 2) + if distance < radius: + cur_objs.update({obj_id: obj_info}) + break + final_objs.update({obj_category: cur_objs}) + + return final_objs diff --git a/cosense3d/carla/scene_manager.py b/cosense3d/carla/scene_manager.py new file mode 100644 index 00000000..2409ef86 --- /dev/null +++ b/cosense3d/carla/scene_manager.py @@ -0,0 +1,15 @@ + + +def get_scene_manager(cfg, **kwargs): + modules = globals() + return None + + +class MapGeneration: + def __init__(self): + pass + + +class SingleSceneSimulation: + def __init__(self): + pass \ No newline at end of file diff --git a/cosense3d/carla/xodr_utils.py b/cosense3d/carla/xodr_utils.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/config/__init__.py b/cosense3d/config/__init__.py new file mode 100644 index 00000000..11a2e1f1 --- /dev/null +++ b/cosense3d/config/__init__.py @@ -0,0 +1,94 @@ +import os +from importlib import import_module + +from cosense3d.utils.misc import load_yaml, save_yaml, update_dict +from cosense3d.config import pycfg + + +def load_config(args): + """ + Load yaml config file, merge additional config in args + and return a dictionary. + + Parameters + ---------- + args : argparse object or str + if is str, it should be the yaml config filename + else args.config indicates config yaml file + + Returns + ------- + params : dict + A dictionary that contains defined parameters. + """ + path = os.path.dirname(os.path.abspath(__file__)) + cfg = {} + if isinstance(args, str): + main_cfg = load_yaml(args) + else: + # load default + # modules_default = load_yaml("./config/defaults/modules.yaml") + # update_dict(cfg, modules_default) + main_cfg = load_yaml(args.config) + if not isinstance(main_cfg['DATASET'], str): + default_file = f"{path}/defaults/{main_cfg['DATASET']['name']}.yaml" + if os.path.exists(default_file): + dataset_default = load_yaml(default_file) + update_dict(cfg, dataset_default) + update_dict(cfg, main_cfg) + parse_pycfg(cfg) + + # update params + if args.mode == 'train': + cfg['TRAIN']['resume_from'] = args.resume_from + cfg['TRAIN']['load_from'] = args.load_from + cfg['TRAIN']['log_dir'] = args.log_dir + cfg['TRAIN']['run_name'] = args.run_name + elif args.mode == 'test': + cfg['TEST']['load_from'] = args.load_from + cfg['TEST']['log_dir'] = args.log_dir + + return cfg + + +def save_config(config_dict, filename): + """ + Save config dictionary into yaml file. 
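A minimal driver for `load_config` could look like the sketch below. The config path and the attributes placed on `args` mirror exactly what the function reads; the real CLI wiring lives elsewhere in the repo, so the `SimpleNamespace` construction is an assumption for illustration.

```python
# Hedged sketch: argparse normally builds `args`; a SimpleNamespace with the
# attributes that load_config reads is enough to demonstrate the flow.
from types import SimpleNamespace
from cosense3d.config import load_config, save_config

args = SimpleNamespace(
    config="cosense3d/config/cood/fcooper.yaml",  # any config with a TRAIN section
    mode="train",
    resume_from=None,
    load_from=None,
    log_dir="logs/fcooper",
    run_name="demo",
)

cfg = load_config(args)           # yaml merged with defaults, 'pycfg.*' strings resolved
print(cfg["DATASET"]["name"])     # e.g. 'opv2v' after parse_pycfg resolution
save_config(cfg, "logs/fcooper")  # writes logs/fcooper/config.yaml
```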
+ + Parameters + ---------- + config_dict : dict + filename : str + + Returns + ------- + """ + config_dict['TRAIN']['save_path'] = filename + filename = os.path.join(filename, "config.yaml") + save_yaml(config_dict, filename) + + +def parse_pycfg(cfg_dict): + for k, v in cfg_dict.items(): + if isinstance(v, str) and 'pycfg' in v: + m, n = v.rsplit('.', 1) + module = import_module(f'cosense3d.config.{m}') + cfg_dict[k] = getattr(module, n) + elif isinstance(v, dict): + parse_pycfg(v) + + +def add_cfg_keys(func): + def wrapper(*args, **kwargs): + interface_keys = ['gather_keys', 'scatter_keys', 'gt_keys'] + interface_dict = {} + for k in interface_keys: + interface_dict[k] = kwargs.pop(k, []) + result = func(*args, **kwargs) + result.update(**interface_dict) + return result + return wrapper + + + + diff --git a/cosense3d/config/carla.yaml b/cosense3d/config/carla.yaml new file mode 100644 index 00000000..e24a71d1 --- /dev/null +++ b/cosense3d/config/carla.yaml @@ -0,0 +1,8 @@ +scene_manager: 'MapGeneration' +map: + lane_sample_resolution: 0.1 + + + + + diff --git a/cosense3d/config/cood/attnfusion.yaml b/cosense3d/config/cood/attnfusion.yaml new file mode 100644 index 00000000..1250acc0 --- /dev/null +++ b/cosense3d/config/cood/attnfusion.yaml @@ -0,0 +1,43 @@ +DATASET: 'pycfg.base.opv2v.seq1_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.002 + weight_decay: 1e-4 + betas: [0.95, 0.999] + milestones: [25, 40] + gamma: 0.1 + lr_scheduler: + policy: 'MultiStepLR' + milestones: [15, 30] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.cood_attnfusion.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager_no_bev_tgt' + shared_modules: 'pycfg.nets.cood_attnfusion.shared_modules_opv2v' + cav_manager: + prototype: cood_collection.CoodCAV + dataset: opv2v + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.cood_attnfusion.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/cood/fcooper.yaml b/cosense3d/config/cood/fcooper.yaml new file mode 100644 index 00000000..c3d332f6 --- /dev/null +++ b/cosense3d/config/cood/fcooper.yaml @@ -0,0 +1,41 @@ +DATASET: 'pycfg.base.opv2v.seq1_vox040460' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0001 + weight_decay: 1e-2 + betas: [0.95, 0.999] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [10, 20] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.cood_fcooper.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager_no_bev_tgt' + shared_modules: 'pycfg.nets.cood_fcooper.shared_modules_opv2v' + cav_manager: + prototype: cood_collection.CoodCAV + dataset: opv2v + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.cood_fcooper.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/cood/fpvrcnn.yaml b/cosense3d/config/cood/fpvrcnn.yaml new file mode 100644 index 00000000..57e6e0e3 --- /dev/null +++ b/cosense3d/config/cood/fpvrcnn.yaml @@ -0,0 +1,41 @@ +DATASET: 'pycfg.base.opv2v.seq1_vox01' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.002 + weight_decay: 1e-4 + betas: [0.95, 0.999] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [15, 30] + gamma: 
0.1 + +TEST: + hooks: 'pycfg.nets.cood_fpvrcnn.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager_no_bev_tgt' + shared_modules: 'pycfg.nets.cood_fpvrcnn.shared_modules_opv2v' + cav_manager: + prototype: cood_collection.FpvrcnnCAV + dataset: opv2v + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.cood_fpvrcnn.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/cood/sp3d.yaml b/cosense3d/config/cood/sp3d.yaml new file mode 100644 index 00000000..ba5d9651 --- /dev/null +++ b/cosense3d/config/cood/sp3d.yaml @@ -0,0 +1,41 @@ +DATASET: 'pycfg.base.opv2v.seq1_vox02' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0001 + weight_decay: 1e-2 + betas: [0.95, 0.999] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [10, 20] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.cood_minkunet.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager_no_bev_tgt' + shared_modules: 'pycfg.nets.cood_minkunet.shared_modules_opv2v' + cav_manager: + prototype: cood_collection.Sp3DCAV + dataset: opv2v + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.cood_minkunet.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/dairv2xt.yaml b/cosense3d/config/dairv2xt.yaml new file mode 100644 index 00000000..4554c3ce --- /dev/null +++ b/cosense3d/config/dairv2xt.yaml @@ -0,0 +1,18 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_lat1' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: {} + cav_manager: + prototype: base_cav.DairV2XCAV + memory_len: 1 + +VISUALIZATION: + output_viewer: + plots: [] + + + + diff --git a/cosense3d/config/gevBEV/evibev_opv2v.yaml b/cosense3d/config/gevBEV/evibev_opv2v.yaml new file mode 100644 index 00000000..11cdf50a --- /dev/null +++ b/cosense3d/config/gevBEV/evibev_opv2v.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.opv2v.seq4_vox02_bevmap' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [1, 2] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.gevBEV.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: 'pycfg.nets.gevBEV.shared_modules_evibev_opv2v' + cav_manager: + prototype: gevBEV_collection.EviBEV + dataset: opv2v + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.gevBEV.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/gevBEV/gevbev_attn_with_det_opv2v.yaml b/cosense3d/config/gevBEV/gevbev_attn_with_det_opv2v.yaml new file mode 100644 index 00000000..3cb6c03c --- /dev/null +++ b/cosense3d/config/gevBEV/gevbev_attn_with_det_opv2v.yaml @@ -0,0 +1,41 @@ +DATASET: 'pycfg.base.opv2v.seq4_vox02_bevmap' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 30 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.00002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [15, 25] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.gevBEV.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 
+ data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: 'pycfg.nets.gevBEV.shared_modules_gevbev_with_det_opv2v_attn' + cav_manager: + prototype: gevBEV_collection.GevBEVwDet + dataset: opv2v + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.gevBEV.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/gevBEV/gevbev_naive_with_det_opv2v.yaml b/cosense3d/config/gevBEV/gevbev_naive_with_det_opv2v.yaml new file mode 100644 index 00000000..1b8764eb --- /dev/null +++ b/cosense3d/config/gevBEV/gevbev_naive_with_det_opv2v.yaml @@ -0,0 +1,41 @@ +DATASET: 'pycfg.base.opv2v.seq4_vox02_bevmap' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 20 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.00002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [15] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.gevBEV.test_hooks_opv2v_bev_det' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: 'pycfg.nets.gevBEV.shared_modules_gevbev_with_det_opv2v' + cav_manager: + prototype: gevBEV_collection.GevBEVwDet + dataset: opv2v + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.gevBEV.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/gevBEV/gevbev_opv2v.yaml b/cosense3d/config/gevBEV/gevbev_opv2v.yaml new file mode 100644 index 00000000..dc55e47a --- /dev/null +++ b/cosense3d/config/gevBEV/gevbev_opv2v.yaml @@ -0,0 +1,41 @@ +DATASET: 'pycfg.base.opv2v.seq4_vox02_bevmap' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [1, 2] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.gevBEV.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: 'pycfg.nets.gevBEV.shared_modules_gevbev_opv2v' + cav_manager: + prototype: gevBEV_collection.GevBEV + dataset: opv2v + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.gevBEV.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/gevBEV/gevbev_opv2v_naive_in3_v4.yaml b/cosense3d/config/gevBEV/gevbev_opv2v_naive_in3_v4.yaml new file mode 100644 index 00000000..90a90bb9 --- /dev/null +++ b/cosense3d/config/gevBEV/gevbev_opv2v_naive_in3_v4.yaml @@ -0,0 +1,41 @@ +DATASET: 'pycfg.base.opv2v.seq1_vox04_bevmap' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 30 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [15, 25] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.gevBEV.test_hooks_opv2v_bev' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: 'pycfg.nets.gevBEV.shared_modules_gevbev_opv2v_naive_in3_v4' + cav_manager: + prototype: gevBEV_collection.GevBEV + dataset: opv2v + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.gevBEV.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/gevBEV/gevbev_opv2v_naive_in7.yaml b/cosense3d/config/gevBEV/gevbev_opv2v_naive_in7.yaml new file mode 100644 index 00000000..77f17e89 --- /dev/null +++ b/cosense3d/config/gevBEV/gevbev_opv2v_naive_in7.yaml @@ -0,0 +1,41 @@ +DATASET: 
'pycfg.base.opv2v.seq4_vox02_bevmap' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 30 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [15, 25] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.gevBEV.test_hooks_opv2v_bev' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: 'pycfg.nets.gevBEV.shared_modules_gevbev_opv2v_naive_in7' + cav_manager: + prototype: gevBEV_collection.GevBEV + dataset: opv2v + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.gevBEV.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/opv2v.yaml b/cosense3d/config/opv2v.yaml new file mode 100644 index 00000000..978c533e --- /dev/null +++ b/cosense3d/config/opv2v.yaml @@ -0,0 +1,18 @@ +DATASET: 'pycfg.base.opv2v.seq4_vox02_bevmap' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: {} + cav_manager: + prototype: base_cav.OPV2VtCAV + memory_len: 1 + +VISUALIZATION: + output_viewer: + plots: [] + + + + diff --git a/cosense3d/config/opv2vt.yaml b/cosense3d/config/opv2vt.yaml new file mode 100644 index 00000000..921b2084 --- /dev/null +++ b/cosense3d/config/opv2vt.yaml @@ -0,0 +1,18 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_lat2' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: {} + cav_manager: + prototype: base_cav.OPV2VtCAV + memory_len: 1 + +VISUALIZATION: + output_viewer: + plots: [] + + + + diff --git a/cosense3d/config/pycfg/__init__.py b/cosense3d/config/pycfg/__init__.py new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/cosense3d/config/pycfg/__init__.py @@ -0,0 +1,2 @@ + + diff --git a/cosense3d/config/pycfg/base/__init__.py b/cosense3d/config/pycfg/base/__init__.py new file mode 100644 index 00000000..0ece69a0 --- /dev/null +++ b/cosense3d/config/pycfg/base/__init__.py @@ -0,0 +1,10 @@ +from cosense3d.utils.train_utils import get_gpu_architecture + + +gpu_arc = get_gpu_architecture() +if gpu_arc >= 75: + use_flash_attn = True + attn = 'MultiheadFlashAttention' +else: + use_flash_attn = True + attn = 'MultiheadAttention' \ No newline at end of file diff --git a/cosense3d/config/pycfg/base/dairv2xt.py b/cosense3d/config/pycfg/base/dairv2xt.py new file mode 100644 index 00000000..481a1fdd --- /dev/null +++ b/cosense3d/config/pycfg/base/dairv2xt.py @@ -0,0 +1,65 @@ +import copy +from collections import OrderedDict + +point_cloud_range = [-102.4, -41.6, -3.0, 102.4, 41.6, 1.0] +point_cloud_range_test = [-100, -38.4, -3.0, 100, 38.4, 1.0] +global_ref_time = 0.0 + +pipeline_cpu = OrderedDict( + LoadLidarPoints=dict(load_attributes=['xyz', 'intensity', 'time'], time_offset=1.6261*1e9), + LoadAnnotations=dict(load3d_global=True, load3d_local=True, + with_velocity=True, min_num_pts=3, load_global_time=True), +) + +inference_pipeline_cpu = OrderedDict( + LoadLidarPoints=dict(load_attributes=['xyz', 'intensity', 'time']), +) + +data_manager = dict( + train=dict( + aug=dict( + rot_range=[-1.57, 1.57], + flip='xy', + scale_ratio_range=[0.95, 1.05], + ), + pre_process=['remove_local_empty_boxes', + 'remove_global_empty_boxes'] + ), + test=dict( + aug=dict() + ) +) + + +def get_dairv2xt_cfg(seq_len, voxel_size, latency=0, load_bevmap=False): + pipeline = copy.deepcopy(pipeline_cpu) + if load_bevmap: + pipeline['LoadOPV2VBevMaps'] = dict() + data_info = 
dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return dict( + name='dairv2xt', + dataset='temporal_cosense', + meta_path='dairv2xt', + data_path='dairv2xt', + enable_split_sub_folder=True, + DetectionBenchmark='Car', + data_info=data_info, + lidar_range=point_cloud_range, + voxel_size=voxel_size, + batch_size_train=2, + batch_size_test=1, + n_workers=4, + max_num_cavs=2, + com_range=200, + latency=latency, + seq_len=seq_len, + train_pipeline=pipeline, + test_pipeline=pipeline, + ) + + +seq4_pillar04 = get_dairv2xt_cfg(4, [0.4, 0.4, 4]) +seq4_vox04 = get_dairv2xt_cfg(4, [0.4, 0.4, 0.4]) +seq4_vox04_lat1 = get_dairv2xt_cfg(4, [0.4, 0.4, 0.4], 1) +seq4_vox01 = get_dairv2xt_cfg(4, [0.1, 0.1, 0.1]) +seq4_vox04_randlat = get_dairv2xt_cfg(4, [0.4, 0.4, 0.4], -1) \ No newline at end of file diff --git a/cosense3d/config/pycfg/base/hooks.py b/cosense3d/config/pycfg/base/hooks.py new file mode 100644 index 00000000..1ce45e83 --- /dev/null +++ b/cosense3d/config/pycfg/base/hooks.py @@ -0,0 +1,23 @@ + +train_hooks = [ + dict(type='MemoryUsageHook'), + dict(type='TrainTimerHook'), + dict(type="CheckPointsHook", epoch_every=10) + ] + + +def get_test_nms_eval_hooks(point_cloud_range_test): + return [ + dict(type="DetectionNMSHook", nms_thr=0.1, pre_max_size=500), + dict(type="EvalDetectionBEVHook", save_result=False, + pc_range=point_cloud_range_test), + ] + + +def get_test_bev_semseg_hooks(point_cloud_range_test): + return [dict(type='EvalBEVSemsegHook', test_range=point_cloud_range_test, save_result=True)] + + +def get_detection_plot(point_cloud_range_test, data_keys=['detection', 'global_labels']): + return dict(title='DetectionCanvas', lidar_range=point_cloud_range_test, + width=10, height=4, nrows=1, ncols=1, data_keys=data_keys) \ No newline at end of file diff --git a/cosense3d/config/pycfg/base/opv2v.py b/cosense3d/config/pycfg/base/opv2v.py new file mode 100644 index 00000000..16adaef7 --- /dev/null +++ b/cosense3d/config/pycfg/base/opv2v.py @@ -0,0 +1,100 @@ +from collections import OrderedDict + +point_cloud_range = [-144, -51.2, -3.0, 144, 51.2, 1.0] +point_cloud_range_bev_test = [-50, -50, -3.0, 50, 50, 1.0] +point_cloud_range_bev = [-51.2, -51.2, -3.0, 51.2, 51.2, 1.0] +point_cloud_range_test = [-140.8, -38.4, -3.0, 140.8, 38.4, 1.0] + + +data_manager = dict( + train=dict( + aug=dict( + rot_range=[-1.57, 1.57], + flip='xy', + scale_ratio_range=[0.95, 1.05], + ), + pre_process=OrderedDict( + remove_local_empty_boxes=dict(), + remove_global_empty_boxes=dict(), + sample_global_bev_tgt_pts=dict(sam_res=0.4, map_res=0.2, range=50, max_num_pts=5000, discrete=False) + ) + ), + test=dict( + aug=dict() + ) +) + + +data_manager_no_bev_tgt = dict( + train=dict( + aug=dict( + rot_range=[-1.57, 1.57], + flip='xy', + scale_ratio_range=[0.95, 1.05], + ), + pre_process=OrderedDict( + remove_local_empty_boxes=dict(), + remove_global_empty_boxes=dict(), + ) + ), + test=dict( + aug=dict() + ) +) + +def get_opv2v_cfg(seq_len, voxel_size, load_attributes=['xyz', 'intensity'], load_bev_map=False, max_num_cavs=7): + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + pipeline_cpu = OrderedDict( + LoadLidarPoints=dict(load_attributes=load_attributes), + LoadAnnotations=dict(load3d_global=True, load3d_local=True, min_num_pts=3), + ) + + pipeline_cpu_test = OrderedDict( + LoadLidarPoints=dict(load_attributes=load_attributes), + LoadAnnotations=dict(load3d_global=True, load3d_local=True, min_num_pts=3), + ) + + inference_pipeline_cpu = OrderedDict( + 
LoadLidarPoints=dict(load_attributes=load_attributes), + ) + if load_bev_map: + pipeline_cpu['LoadOPV2VBevMaps'] = dict(use_global_map=True, range=50, keys=['bev']) + pipeline_cpu_test['LoadOPV2VBevMaps'] = dict(use_global_map=True, range=50, keys=['bev']) + inference_pipeline_cpu['LoadOPV2VBevMaps'] = dict(use_global_map=True) + + return dict( + name='opv2v', + dataset='temporal_cosense', + meta_path='opv2v', + data_path='opv2v', + enable_split_sub_folder=False, + DetectionBenchmark='Car', + data_info=data_info, + lidar_range=point_cloud_range, + voxel_size=voxel_size, + batch_size_train=4, + batch_size_test=1, + n_workers=4, + max_num_cavs=max_num_cavs, + com_range=70, + seq_len=seq_len, + train_pipeline=pipeline_cpu, + test_pipeline=pipeline_cpu_test, + ) + + +seq4_pillar04 = get_opv2v_cfg(4, [0.4, 0.4, 4]) +seq4_vox04 = get_opv2v_cfg(4, [0.4, 0.4, 0.4]) +seq4_vox02 = get_opv2v_cfg(4, [0.4, 0.4, 0.4]) +seq4_vox01 = get_opv2v_cfg(4, [0.1, 0.1, 0.1]) +seq1_vox04 = get_opv2v_cfg(1, [0.4, 0.4, 0.4]) +seq1_vox02 = get_opv2v_cfg(1, [0.2, 0.2, 0.2]) +seq1_vox01 = get_opv2v_cfg(1, [0.1, 0.1, 0.1]) +seq1_vox040460 = get_opv2v_cfg(1, [0.4, 0.4, 6.0]) + +seq4_pillar04_bevmap = get_opv2v_cfg(4, [0.4, 0.4, 4], load_bev_map=True) +seq1_vox02_bevmap = get_opv2v_cfg(1, [0.2, 0.2, 0.2], + load_attributes=['xyz', 'intensity', 'distance', 'cosine', 'sine'], + load_bev_map=True) +seq1_vox04_bevmap = get_opv2v_cfg(1, [0.4, 0.4, 0.4], load_bev_map=True) +seq1_vox04_bevmap_ego_only = get_opv2v_cfg(1, [0.4, 0.4, 0.4], load_bev_map=True, max_num_cavs=1) \ No newline at end of file diff --git a/cosense3d/config/pycfg/base/opv2vt.py b/cosense3d/config/pycfg/base/opv2vt.py new file mode 100644 index 00000000..4ee47c1e --- /dev/null +++ b/cosense3d/config/pycfg/base/opv2vt.py @@ -0,0 +1,99 @@ +import copy +from collections import OrderedDict + +point_cloud_range = [-144, -41.6, -3.0, 144, 41.6, 1.0] +point_cloud_range_test = [-140.8, -38.4, -3.0, 140.8, 38.4, 1.0] +global_ref_time = 0.05 + +pipeline_cpu = OrderedDict( + LoadLidarPoints=dict(load_attributes=['xyz', 'intensity', 'time']), + LoadAnnotations=dict(load3d_global=True, load3d_local=True, + load_global_time=True, with_velocity=True, min_num_pts=0), +) + +inference_pipeline_cpu = OrderedDict( + LoadLidarPoints=dict(load_attributes=['xyz', 'intensity', 'time']), + LoadAnnotations=dict(), +) + +data_manager = dict( + train=dict( + aug=dict( + rot_range=[-1.57, 1.57], + flip='xy', + scale_ratio_range=[0.95, 1.05], + ), + pre_process=['remove_local_empty_boxes', + 'remove_global_empty_boxes'] + ), + test=dict( + aug=dict() + ) +) + + +def get_opv2vt_cfg(seq_len, voxel_size, latency=0, load_bevmap=False, load_roadline=False, loc_err=[0., 0., 0.]): + """ + Examples of setting loc. errors: + + .. highlight:: python + .. code-block:: python + + # introduce loc. 
errors in the dataloader will lead to different errors at different frames + pipeline_cpu['LoadAnnotations']['loc_err'] = loc_err + inference_pipeline_cpu['LoadAnnotations']['loc_err'] = loc_err + + # instead, one can introduce unified errors for a short sequence by setting the data_manager argument loc_err + data_manager['test']['loc_err'] = loc_err + """ + train_pipeline = copy.deepcopy(pipeline_cpu) + inf_pipeline = copy.deepcopy(inference_pipeline_cpu) + if load_bevmap: + train_pipeline['LoadOPV2VBevMaps'] = dict() + if load_roadline: + train_pipeline['LoadCarlaRoadlineMaps'] = dict(ego_only=False) + inf_pipeline['LoadCarlaRoadlineMaps'] = dict(ego_only=False) + + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return dict( + name='opv2vt', + dataset='temporal_cosense', + meta_path='opv2vt', + data_path='opv2vt', + enable_split_sub_folder=False, + DetectionBenchmark='Car', + data_info=data_info, + lidar_range=point_cloud_range, + voxel_size=voxel_size, + batch_size_train=4, + batch_size_test=1, + n_workers=4, + max_num_cavs=7, + com_range=70, + latency=latency, + seq_len=seq_len, + n_loss_frame=1, + train_pipeline=train_pipeline, + test_pipeline=train_pipeline, + inf_pipeline=inf_pipeline, + loc_err=loc_err, + ) + + +seq4_pillar04 = get_opv2vt_cfg(4, [0.4, 0.4, 4]) +seq4_vox04 = get_opv2vt_cfg(4, [0.4, 0.4, 0.4]) +seq4_vox04_lat1 = get_opv2vt_cfg(4, [0.4, 0.4, 0.4], 1) +seq4_vox04_lat2 = get_opv2vt_cfg(4, [0.4, 0.4, 0.4], 2) +seq4_vox04_randlat = get_opv2vt_cfg(4, [0.4, 0.4, 0.4], -1) +seq4_vox04_randlat_rl = get_opv2vt_cfg(4, [0.4, 0.4, 0.4], -1, load_bevmap=True) +seq1_vox04_randlat_rl = get_opv2vt_cfg(1, [0.4, 0.4, 0.4], -1, load_bevmap=True) +seq4_vox01 = get_opv2vt_cfg(4, [0.1, 0.1, 0.1]) + +div = 1 +seq4_vox04_locerr_rl = get_opv2vt_cfg(4, [0.4, 0.4, 0.4], + load_roadline=True, + loc_err=[0.5 / div, 0.5 / div, 0.0174533 / div]) + # loc_err=[0.5, 0.5, 0.0872665]) + +seq4_vox04_locerr = get_opv2vt_cfg(4, [0.4, 0.4, 0.4], + loc_err=[0.5 / div, 0.5 / div, 0.0174533 / div]) \ No newline at end of file diff --git a/cosense3d/config/pycfg/base/v2vreal.py b/cosense3d/config/pycfg/base/v2vreal.py new file mode 100644 index 00000000..f10589db --- /dev/null +++ b/cosense3d/config/pycfg/base/v2vreal.py @@ -0,0 +1,72 @@ +from collections import OrderedDict + +point_cloud_range = [-106, -40, -5.0, 106, 40, 3.0] +point_cloud_range_test = [-102.4, -38.4, -5.0, 102.4, 38.4, 3.0] + + +data_manager = dict( + train=dict( + aug=dict( + rot_range=[-1.57, 1.57], + flip='xy', + scale_ratio_range=[0.95, 1.05], + ), + pre_process=OrderedDict( + remove_local_empty_boxes=dict(), + remove_global_empty_boxes=dict(), + sample_global_bev_tgt_pts=dict(sam_res=0.4, map_res=0.2, range=50, max_num_pts=5000, discrete=False) + ) + ), + test=dict( + aug=dict() + ) +) + + +def get_v2vreal_cfg(seq_len, voxel_size, load_attributes=['xyz', 'intensity'], load_bev_map=False): + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + pipeline_cpu = OrderedDict( + LoadLidarPoints=dict(load_attributes=load_attributes), + LoadAnnotations=dict(load3d_global=True, load3d_local=True, min_num_pts=3), + ) + + pipeline_cpu_test = OrderedDict( + LoadLidarPoints=dict(load_attributes=load_attributes), + LoadAnnotations=dict(load3d_global=True, load3d_local=True, min_num_pts=3), + ) + + inference_pipeline_cpu = OrderedDict( + LoadLidarPoints=dict(load_attributes=load_attributes), + ) + if load_bev_map: + pipeline_cpu['LoadOPV2VBevMaps'] = dict(use_global_map=False, range=50, keys=['bev']) + 
pipeline_cpu_test['LoadOPV2VBevMaps'] = dict(use_global_map=False, range=50, keys=['bev']) + inference_pipeline_cpu['LoadOPV2VBevMaps'] = dict(use_global_map=True) + + return dict( + name='v2vreal', + dataset='temporal_cosense', + meta_path='v2vreal', + data_path='v2vreal', + enable_split_sub_folder=False, + DetectionBenchmark='Car', + data_info=data_info, + lidar_range=point_cloud_range, + voxel_size=voxel_size, + batch_size_train=4, + batch_size_test=1, + n_workers=4, + max_num_cavs=7, + com_range=200, + seq_len=seq_len, + train_pipeline=pipeline_cpu, + test_pipeline=pipeline_cpu_test, + ) + + +seq4_pillar04 = get_v2vreal_cfg(4, [0.4, 0.4, 4]) +seq4_vox04 = get_v2vreal_cfg(4, [0.4, 0.4, 0.4]) +seq4_vox01 = get_v2vreal_cfg(4, [0.1, 0.1, 0.1]) + +seq1_vox02 = get_v2vreal_cfg(1, [0.2, 0.2, 0.2]) +seq1_vox04 = get_v2vreal_cfg(1, [0.4, 0.4, 0.4]) \ No newline at end of file diff --git a/cosense3d/config/pycfg/nets/RLseg.py b/cosense3d/config/pycfg/nets/RLseg.py new file mode 100644 index 00000000..a50b9851 --- /dev/null +++ b/cosense3d/config/pycfg/nets/RLseg.py @@ -0,0 +1,62 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import opv2v, hooks +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 1 + + +def get_shared_modules(point_cloud_range, enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. + """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=5, + cache_strides=[1], + height_compression=[1], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p1=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256) + ) + ), + + rlseg_head = dict( + type='heads.bev_roadline.BEVRoadLine', + gather_keys=['bev_feat'], + scatter_keys=['roadline'], + gt_keys=['roadline_tgts'], + data_info=data_info, + stride=1, + in_dim=256, + target_assigner=dict(type='target_assigners.RoadLineAssigner', res=0.4, range=50), + loss_cls=dict(type='FocalLoss', use_sigmoid=True, bg_idx=0, + gamma=2.0, alpha=0.25, loss_weight=2.0), + ) + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2v = hooks.get_test_nms_eval_hooks(opv2v.point_cloud_range_bev_test) +plots_opv2v = [hooks.get_detection_plot(opv2v.point_cloud_range_bev_test)] +shared_modules_opv2v = get_shared_modules(opv2v.point_cloud_range_bev, enc_dim=64) diff --git a/cosense3d/config/pycfg/nets/__init__.py b/cosense3d/config/pycfg/nets/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/config/pycfg/nets/co_perception.py b/cosense3d/config/pycfg/nets/co_perception.py new file mode 100644 index 00000000..8ef858d9 --- /dev/null +++ b/cosense3d/config/pycfg/nets/co_perception.py @@ -0,0 +1,295 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import 
get_petr_transformer_cfg +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg +from cosense3d.config.pycfg.template.bev_semseg_head import get_bev_semseg_head_cfg +from cosense3d.config.pycfg.template.rlseg_head import get_roadline_head_cfg +from cosense3d.config.pycfg.template.bev_head import get_bev_head_cfg, get_bev_multi_resolution_head_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0, enc_dim=64): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. + """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=5, + height_compression=[1, 2, 8], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p1=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p2=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p8=dict(kernels=[3, 3, 3], in_dim=256, out_dim=256) + ) + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['roadline', 'bev_semseg_local', 'det_local', 'bev_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d', 'roadline_tgts'], + heads=[ + get_roadline_head_cfg(data_info=data_info, stride=1), + get_bev_semseg_head_cfg( + gt_keys=['bev_tgt_pts', 'local_bboxes_3d'], + semseg_head_type='heads.bev_semseg.EviSemsegHead', + data_info=data_info, + stride=1, + in_dim=256, + tgt_assigner_type='target_assigners.DiscreteBEVAssigner', + ), + get_det_center_sparse_cfg( + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss", + ), + get_bev_head_cfg( + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=data_info, out_stride=out_stride, in_dim=256, n_cls=2 + ) + ], + strides=[1, 2, 8], + losses=[True, True, False], + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + gather_keys=['det_local', 'bev_local', 'bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_alignment={}, + + spatial_query_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['query_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + spatial_bev_fusion=dict( + type='fusion.naive_fusion.NaiveFusion', + gather_keys=['bev_feat', 'received_response'], + scatter_keys=['bev_fusion_feat'], + stride=1 + ), + + det1_head = get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 
'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['query_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + semseg_head=get_bev_semseg_head_cfg( + gather_keys=['bev_fusion_feat'], + scatter_keys=['bev_semseg'], + gt_keys=['global_bev_tgt_pts', 'global_bboxes_3d'], + semseg_head_type='heads.bev_semseg.GevSemsegHead', + data_info=data_info, + stride=out_stride, + in_dim=256, + tgt_assigner_type='target_assigners.ContiBEVAssigner', + ), + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, enc_dim=64) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_opv2vt_no_roi_reg = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + ) + +shared_modules_opv2vt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_opv2vt_no_t = copy.deepcopy(shared_modules_opv2vt) + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_opv2vt_no_global_attn = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_opv2vt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_opv2vt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_opv2vt_roi_focal_loss = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) +shared_modules_opv2vt_roi_focal_loss['temporal_fusion']['norm_fusion'] = True + +#--------- Comparative 1 : Pose error ------------ +shared_modules_opv2vt_fcl_locerr = get_shared_modules(opv2vt.point_cloud_range, 
opv2vt.global_ref_time, 32) +shared_modules_opv2vt_fcl_locerr['spatial_alignment'] = dict( + type='fusion.spatial_alignment.SpatialAlignment', + gather_keys=['detection_local', 'received_response'], + scatter_keys=['received_response'], +) + +#--------- Comparative 2 : Latency ------------ +shared_modules_opv2vt_fcl_lat = shared_modules_opv2vt_roi_focal_loss + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_dairv2xt_no_roi_reg = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + ) +shared_modules_dairv2xt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_dairv2xt_no_t = shared_modules_dairv2xt + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_dairv2xt_no_global_attn = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_dairv2xt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_dairv2xt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + +#--------- Ablation 5 : Focal loss and Gaussian GT for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss_gaussian = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss", + use_gaussian=True, + sigma=1.0 +) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', 
one_hot_encoding=False)) + diff --git a/cosense3d/config/pycfg/nets/cood_attnfusion.py b/cosense3d/config/pycfg/nets/cood_attnfusion.py new file mode 100644 index 00000000..ab9e95bb --- /dev/null +++ b/cosense3d/config/pycfg/nets/cood_attnfusion.py @@ -0,0 +1,79 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import opv2v, hooks +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. + """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone = dict( + type='backbone3d.voxelnet.VoxelNet', + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_generator=dict( + type='voxel_generator.VoxelGenerator', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + max_points_per_voxel=32, + max_voxels_train=32000, + max_voxels_test=70000 + ), + voxel_encoder=dict( + type='pillar_encoder.PillarEncoder', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + features=['xyz', 'intensity', 'absolute_xyz'], + channels=[64] + ), + cml=dict(type='voxnet_utils.CML', in_channels=64), + ), + + fusion = dict( + type='fusion.attn_fusion.DenseAttentionFusion', + gather_keys=['bev_feat', 'received_response'], + scatter_keys=['bev_feat_fused'], + feature_dim=128, + neck=dict(type='bev_rpn.RPN', anchor_num=2) + ), + + detection_head = dict( + type='heads.det_anchor_dense.DetAnchorDense', + gather_keys=['bev_feat_fused'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + in_channels=768, + target_assigner=dict( + type='target_assigners.BoxAnchorAssigner', + box_size=[3.9, 1.6, 1.56], + dirs=[0, 90], + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=2, + pos_threshold=0.6, + neg_threshold=0.45, + score_thrshold=0.25, + box_coder=dict(type='ResidualBoxCoder', mode='simple_dist') + ), + loss_cls = dict(type='FocalLoss', use_sigmoid=True, + gamma=2.0, alpha=0.25, loss_weight=1.0), + loss_box = dict(type='SmoothL1Loss', loss_weight=2.0), + ) +) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2v = hooks.get_test_nms_eval_hooks(opv2v.point_cloud_range_test) +plots_opv2v = [hooks.get_detection_plot(opv2v.point_cloud_range_test)] +shared_modules_opv2v = get_shared_modules([-140.8, -41.6, -3, 140.8, 41.6, 1]) diff --git a/cosense3d/config/pycfg/nets/cood_fcooper.py b/cosense3d/config/pycfg/nets/cood_fcooper.py new file mode 100644 index 00000000..082dd979 --- /dev/null +++ b/cosense3d/config/pycfg/nets/cood_fcooper.py @@ -0,0 +1,86 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import opv2v, hooks +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg + + +voxel_size = [0.4, 0.4, 6] +out_stride = 2 + + +def get_shared_modules(point_cloud_range): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
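The gather/scatter convention spelled out in this docstring (and repeated across the pycfg net files) can be summarized with a small pseudo-dispatcher. The real routing is done by the CAV prototype and forward-runner classes, so `run_module` below is only a conceptual illustration, not project code.

```python
# Conceptual sketch of the gather/scatter key convention described above.
def run_module(module, module_cfg, cav_data: dict) -> dict:
    # gather: pull inputs from the CAV data dict in the configured order,
    # which must match the module's forward signature
    inputs = [cav_data[k] for k in module_cfg["gather_keys"]]
    outputs = module(*inputs)
    # scatter: store outputs back into the CAV data dict; the first scatter
    # key receives the first output
    if not isinstance(outputs, (list, tuple)):
        outputs = [outputs]
    for key, out in zip(module_cfg["scatter_keys"], outputs):
        cav_data[key] = out
    return cav_data

# e.g. for the pts_backbone entry: gather 'points', scatter the result to 'bev_feat'.
```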
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone = dict( + type='backbone3d.pillar_bev.PillarBEV', + gather_keys=['points'], + scatter_keys=['bev_feat'], + in_channels=64, + layer_nums=[3, 5, 8], + layer_strides=[2, 2, 2], + downsample_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + upsample_channels=[128, 128, 128], + voxel_generator=dict( + type='voxel_generator.VoxelGenerator', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + max_points_per_voxel=32, + max_voxels_train=32000, + max_voxels_test=70000 + ), + pillar_encoder=dict( + type='pillar_encoder.PillarEncoder', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + features=['xyz', 'intensity', 'absolute_xyz'], + channels=[64] + ), + bev_shrinker=dict( + type='downsample_conv.DownsampleConv', + in_channels=384, # 128 * 3 + dims=[256] + ), + ), + + fusion = dict( + type='fusion.maxout_fusion.BEVMaxoutFusion', + gather_keys=['bev_feat', 'received_response'], + scatter_keys=['bev_feat_fused'], + ), + + detection_head = dict( + type='heads.det_anchor_dense.DetAnchorDense', + gather_keys=['bev_feat_fused'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + in_channels=256, + target_assigner=dict( + type='target_assigners.BoxAnchorAssigner', + box_size=[3.9, 1.6, 1.56], + dirs=[0, 90], + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=2, + pos_threshold=0.6, + neg_threshold=0.45, + box_coder=dict(type='ResidualBoxCoder', mode='simple_dist') + ), + loss_cls = dict(type='FocalLoss', use_sigmoid=True, + gamma=2.0, alpha=0.25, loss_weight=0.25), + loss_box = dict(type='SmoothL1Loss', loss_weight=1.0), + ) +) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2v = hooks.get_test_nms_eval_hooks(opv2v.point_cloud_range_test) +plots_opv2v = [hooks.get_detection_plot(opv2v.point_cloud_range_test)] +shared_modules_opv2v = get_shared_modules([-140.8, -40, -3, 140.8, 40, 1]) diff --git a/cosense3d/config/pycfg/nets/cood_fpvrcnn.py b/cosense3d/config/pycfg/nets/cood_fpvrcnn.py new file mode 100644 index 00000000..2984fb78 --- /dev/null +++ b/cosense3d/config/pycfg/nets/cood_fpvrcnn.py @@ -0,0 +1,119 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import opv2v, hooks +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg + + +voxel_size = [0.1, 0.1, 0.1] +out_stride = 2 + + +def get_shared_modules(point_cloud_range): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
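The `DenseAttentionFusion` entry configured above fuses spatially aligned BEV maps from all CAVs before the anchor-based detection head. A stripped-down sketch of per-location attention fusion in that spirit is shown below; `PixelAttentionFusion` is illustrative only and is not the project's module, which additionally runs an RPN neck on the fused features.

```python
# Illustrative per-pixel attention fusion over CAV BEV feature maps.
import torch
import torch.nn as nn

class PixelAttentionFusion(nn.Module):
    def __init__(self, feature_dim: int):
        super().__init__()
        self.score = nn.Conv2d(feature_dim, 1, kernel_size=1)

    def forward(self, bev_feats: torch.Tensor) -> torch.Tensor:
        # bev_feats: (num_cavs, C, H, W), already warped into the ego frame
        weights = torch.softmax(self.score(bev_feats), dim=0)  # attention over CAVs
        return (weights * bev_feats).sum(dim=0)                # fused map of shape (C, H, W)

fused = PixelAttentionFusion(128)(torch.rand(3, 128, 100, 352))
```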
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone = dict( + type='backbone3d.spconv.Spconv', + gather_keys=['points'], + scatter_keys=['voxel_feat', 'bev_feat'], + in_channels=4, + out_channels=64, + voxel_generator=dict( + type='voxel_generator.VoxelGenerator', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + max_points_per_voxel=32, + max_voxels_train=32000, + max_voxels_test=70000 + ), + voxel_encoder=dict( + type='voxel_encoder.MeanVFE', + num_point_features=4, + ), + bev_neck=dict(type='ssfa.SSFA', in_channels=64, out_channels=128), + ), + + detection_head_local = dict( + type='heads.det_anchor_dense.DetAnchorDense', + gather_keys=['bev_feat'], + scatter_keys=['detection_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + in_channels=128, + get_boxes_when_training=True, + target_assigner=dict( + type='target_assigners.BoxAnchorAssigner', + box_size=[3.9, 1.6, 1.56], + dirs=[0, 90], + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=8, + pos_threshold=0.6, + neg_threshold=0.45, + score_thrshold=0.15, + box_coder=dict(type='ResidualBoxCoder', mode='simple_dist') + ), + loss_cls = dict(type='FocalLoss', use_sigmoid=True, + gamma=2.0, alpha=0.25, loss_weight=1.0), + loss_box = dict(type='SmoothL1Loss', loss_weight=2.0), + ), + + keypoint_composer=dict( + type='necks.cpm_composer.KeypointComposer', + gather_keys=['detection_local', 'bev_feat', "voxel_feat", 'points'], + scatter_keys=['keypoint_feat'], + train_from_epoch=5, + vsa=dict( + type='vsa.VoxelSetAbstraction', + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + num_keypoints=4096, + num_out_features=32, + num_bev_features=128, + num_rawpoint_features=3, + enlarge_selection_boxes=True, + ) + ), + + fusion=dict( + type='fusion.keypoints.KeypointsFusion', + gather_keys=['keypoint_feat', 'received_response'], + scatter_keys=['keypoint_feat_fused'], + train_from_epoch=5, + lidar_range=point_cloud_range, + ), + + detection_head_global = dict( + type='heads.det_roi_refine.KeypointRoIHead', + gather_keys=['keypoint_feat_fused'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d'], + train_from_epoch=5, + num_cls=1, + in_channels=32, + n_fc_channels=256, + dp_ratio=0.3, + roi_grid_pool=dict( + grid_size=6, + mlps=[[64, 64], [64, 64]], + pool_radius=[0.8, 1.6], + n_sample=[16, 16], + pool_method='max_pool', + ), + target_assigner=dict( + type='target_assigners.RoIBox3DAssigner', + box_coder=dict(type='ResidualBoxCoder', mode='simple_dist') + ) + ) +) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2v = hooks.get_test_nms_eval_hooks(opv2v.point_cloud_range_test) +plots_opv2v = [hooks.get_detection_plot(opv2v.point_cloud_range_test)] +shared_modules_opv2v = get_shared_modules([-140.8, -41.6, -3, 140.8, 41.6, 1]) diff --git a/cosense3d/config/pycfg/nets/cood_minkunet.py b/cosense3d/config/pycfg/nets/cood_minkunet.py new file mode 100644 index 00000000..2f16bdf4 --- /dev/null +++ b/cosense3d/config/pycfg/nets/cood_minkunet.py @@ -0,0 +1,106 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import opv2v, hooks +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg + + +voxel_size = [0.2, 0.2, 0.2] +out_stride = 2 + + +def get_shared_modules(point_cloud_range): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments 
order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. + """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone = dict( + type='backbone3d.mink_unet.MinkUnet', + gather_keys=['points'], + scatter_keys=['pts_feat'], + d=3, + cache_strides=[2], + in_dim=4, + stride=out_stride, + floor_height=point_cloud_range[2], + data_info=data_info, + height_compression=OrderedDict(p2=dict(channels=[96, 128, 128], steps=[5, 3])), + enc_dim=32 + ), + + fusion = dict( + type='fusion.naive_fusion.NaiveFusion', + gather_keys=['pts_feat', 'received_response'], + scatter_keys=['fused_feat'], + stride=out_stride, + ), + + fusion_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['fused_feat'], + scatter_keys=['fused_neck_feat'], + data_info=data_info, + d=2, + convs=dict(p2=dict(kernels=[5, 5, 3], in_dim=128, out_dim=128)) + ), + + bev_head = dict( + type='heads.bev.BEV', + gather_keys=['fused_neck_feat'], + scatter_keys=['bev'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + data_info=data_info, + stride=out_stride, + in_dim=128, + num_cls=2, + target_assigner=dict(type='target_assigners.BEVPointAssigner'), + loss_cls=dict(type='EDLLoss', activation='exp', annealing_step=40, n_cls=2, loss_weight=1.0), + ), + + detection_head = dict( + type='heads.det_center_sparse.DetCenterSparse', + gather_keys=['fused_neck_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + data_info=data_info, + input_channels=128, + shared_conv_channel=128, + get_predictions=True, + stride=out_stride, + cls_head_cfg=dict(name='UnitedClsHead', one_hot_encoding=False), + reg_head_cfg=dict(name='UnitedRegHead', combine_channels=True, sigmoid_keys=['scr']), + class_names_each_head=[['vehicle.car']], + reg_channels=['box:6', 'dir:8', 'scr:4'], + cls_assigner=dict( + type='target_assigners.BEVCenternessAssigner', + n_cls=1, + min_radius=1.0, + pos_neg_ratio=0, + max_mining_ratio=0, + ), + box_assigner=dict( + type='target_assigners.BoxCenterAssigner', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=out_stride, + detection_benchmark='Car', + class_names_each_head=[['vehicle.car']], + center_threshold=0.5, + box_coder=dict(type='CenterBoxCoder'), + activation='sigmoid', + edl=False + ), + loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), + loss_box=dict(type='SmoothL1Loss', loss_weight=1.0), + ), +) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2v = hooks.get_test_nms_eval_hooks(opv2v.point_cloud_range_test) +plots_opv2v = [hooks.get_detection_plot(opv2v.point_cloud_range_test)] +shared_modules_opv2v = get_shared_modules(opv2v.point_cloud_range) diff --git a/cosense3d/config/pycfg/nets/gevBEV.py b/cosense3d/config/pycfg/nets/gevBEV.py new file mode 100644 index 00000000..4382bcad --- /dev/null +++ b/cosense3d/config/pycfg/nets/gevBEV.py @@ -0,0 +1,128 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import opv2v, hooks +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.bev_semseg_head import get_bev_semseg_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg + + + +def get_shared_modules(point_cloud_range, in_dim=4, version='gevbev', det=True, vs=0.2, out_stride=2, + 
unet_enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. + """ + voxel_size = [vs, vs, vs] + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + strides = [out_stride] + dconvs = {f"p{out_stride}": dict(kernels=[5, 5, 3], in_dim=384, out_dim=256),} + if 'gevbev' == version: + semseg_head_type = 'heads.bev_semseg.GevSemsegHead' + tgt_assigner_type = 'target_assigners.ContiBEVAssigner' + else: + tgt_assigner_type = 'target_assigners.DiscreteBEVAssigner' + semseg_head_type = 'heads.bev_semseg.EviSemsegHead' + if det: + strides = [out_stride, out_stride * 2] + dconvs = { + f"p{out_stride}": dict(kernels=[5, 5, 3], in_dim=384, out_dim=256), + f"p{out_stride * 2}": dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + } + + cfg = OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + height_compression=strides, + compression_kernel_size_xy=3, + cache_strides=strides, + in_dim=in_dim, + enc_dim=unet_enc_dim, + ), + + semseg_head_local=get_bev_semseg_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bev_semseg_local'], + gt_keys=['bev_tgt_pts', 'local_bboxes_3d'], + semseg_head_type=semseg_head_type, + data_info=data_info, + stride=out_stride, + in_dim=384, + tgt_assigner_type=tgt_assigner_type, + ), + + spatial_fusion=dict( + type='fusion.naive_fusion.NaiveFusion', + gather_keys=['bev_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + stride=strides + ), + + fusion_neck=dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['spatial_fusion_feat'], + scatter_keys=['spatial_fusion_feat'], + data_info=data_info, + d=2, + convs=dconvs, + n_layers=6, + ), + + semseg_head=get_bev_semseg_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['bev_semseg'], + gt_keys=['global_bev_tgt_pts', 'global_bboxes_3d'], + semseg_head_type=semseg_head_type, + data_info=data_info, + stride=out_stride, + in_dim=256, + tgt_assigner_type=tgt_assigner_type, + ), + + det_head=get_det_center_sparse_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + in_channels=256, + stride=out_stride * 2, + cls_loss = "FocalLoss", + ) + ) + + if not det: + cfg.pop('det_head') + return cfg + +###################################################### +# OPV2V +###################################################### +test_hooks_opv2v_bev = (hooks.get_test_bev_semseg_hooks(opv2v.point_cloud_range_bev_test)) +test_hooks_opv2v_bev_det = (hooks.get_test_bev_semseg_hooks(opv2v.point_cloud_range_bev_test) + + hooks.get_test_nms_eval_hooks(opv2v.point_cloud_range_test)) +plots_opv2v = [hooks.get_detection_plot(opv2v.point_cloud_range_test)] +shared_modules_gevbev_opv2v = get_shared_modules(opv2v.point_cloud_range, version='gevbev', det=False) +shared_modules_gevbev_with_det_opv2v = get_shared_modules(opv2v.point_cloud_range, version='gevbev', det=True) +shared_modules_evibev_opv2v = get_shared_modules(opv2v.point_cloud_range, version='evibev') + +#--------- Ablation 1 : Attention fusion ------------- +shared_modules_gevbev_with_det_opv2v_attn = get_shared_modules(opv2v.point_cloud_range, version='gevbev', det=True) 
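# Illustrative aside on the gather_keys/scatter_keys convention documented in
# get_shared_modules above (and used by the 'spatial_fusion' override below).
# This is a minimal sketch with hypothetical names (`module_forward`, `cavs`); the
# actual CoSense3D runner that consumes these keys may differ in detail.
def _demo_gather_scatter(module_cfg, module_forward, cavs):
    """Gather one input list per gather key (in the configured order), run the shared
    module once over all CAVs, and scatter each output back under its scatter key."""
    gathered = [[cav[k] for cav in cavs] for k in module_cfg['gather_keys']]
    outputs = module_forward(*gathered)   # one list of per-CAV results per scatter key
    for key, per_cav_values in zip(module_cfg['scatter_keys'], outputs):
        for cav, value in zip(cavs, per_cav_values):
            cav[key] = value              # the 1st scatter key holds the main output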
+shared_modules_gevbev_with_det_opv2v_attn['spatial_fusion'] = dict( + type='fusion.attn_fusion.SparseAttentionFusion', + gather_keys=['bev_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + stride=[2, 4], + in_channels=348, + ) + +#--------- Ablation 2 : in_dim=7 ------------- +shared_modules_gevbev_opv2v_naive_in7 = get_shared_modules(opv2v.point_cloud_range_bev, version='gevbev', in_dim=7) + +#--------- Ablation 3 : voxel_size = 0.4m ------------- +shared_modules_gevbev_opv2v_naive_in3_v4 = get_shared_modules(opv2v.point_cloud_range_bev, version='gevbev', vs=0.4, out_stride=1, unet_enc_dim=64) \ No newline at end of file diff --git a/cosense3d/config/pycfg/nets/streamLTS.py b/cosense3d/config/pycfg/nets/streamLTS.py new file mode 100644 index 00000000..123f3e69 --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS.py @@ -0,0 +1,284 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg +from cosense3d.config.pycfg.template.bev_head import get_bev_head_cfg, get_bev_multi_resolution_head_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0, enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=3, + height_compression=[2, 8], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p2=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p8=dict(kernels=[3, 3, 3], in_dim=256, out_dim=256) + ) + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local', 'bev_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_assigner='BEVBoxAssigner', + cls_loss="FocalLoss" + ), + get_bev_head_cfg( + data_info, out_stride, in_dim=256, n_cls=2 + ) + ], + strides=[2, 8], + losses=[True, False], + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + gather_keys=['det_local', 'bev_local', 'bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_alignment={}, + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, enc_dim=64) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_opv2vt_no_roi_reg = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + n_cls=1 + ) + +shared_modules_opv2vt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_opv2vt_no_t = copy.deepcopy(shared_modules_opv2vt) + +#--------- Ablation 3 : No 
Global Attention ------------ +shared_modules_opv2vt_no_global_attn = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_opv2vt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_opv2vt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_assigner='BEVBoxAssigner', + cls_loss="FocalLoss" + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_opv2vt_roi_focal_loss = copy.deepcopy(shared_modules_opv2vt) + +#--------- Comparative 1 : Pose error ------------ +shared_modules_opv2vt_fcl_locerr = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, 32) +shared_modules_opv2vt_fcl_locerr['spatial_alignment'] = dict( + type='fusion.spatial_alignment.SpatialAlignment', + gather_keys=['detection_local', 'received_response'], + scatter_keys=['received_response'], +) + +#--------- Comparative 2 : Latency with centerness roi ------------ +shared_modules_opv2vt_fcl_lat = shared_modules_opv2vt_roi_focal_loss + +#--------- Comparative 3 : Latency with box roi ------------ +shared_modules_opv2vt_bev_fcl_lat = copy.deepcopy(shared_modules_opv2vt_roi_focal_loss) + +#--------- Ablation 5 : No dilconv ------------ +shared_modules_opv2vt_no_dilconv = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, 32) +shared_modules_opv2vt_no_dilconv['backbone_neck'] = dict( + type='necks.dilation_spconv.DilationSpconvAblation', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + d=2, + convs=dict( + p2=dict(kernels=[1], in_dim=384, out_dim=256), + p8=dict(kernels=[1], in_dim=256, out_dim=256), + ) + ) + + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_dairv2xt_no_roi_reg = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + n_cls=1 + ) +shared_modules_dairv2xt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_dairv2xt_no_t = shared_modules_dairv2xt + +#--------- Ablation 3 : No Global Attention ------------ 
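# Editorial note (illustrative, hypothetical helper): the ablation variants in this file
# are built by deep-copying the baseline OrderedDict and overriding individual entries,
# which leaves `shared_modules_dairv2xt` untouched; a plain assignment (Ablation 2 above)
# merely aliases the baseline and is used when the module config itself does not change.
# The derive-and-override step could be factored out as below (relies on the
# module-level `import copy`):
def _derive_variant(baseline, **overrides):
    """Return a deep copy of `baseline` with the given top-level entries replaced."""
    variant = copy.deepcopy(baseline)
    variant.update(overrides)
    return variant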
+shared_modules_dairv2xt_no_global_attn = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_dairv2xt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_dairv2xt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_assigner='BEVBoxAssigner', + cls_loss="FocalLoss" + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=dairv2xt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + + +#--------- Comparative 3 : Latency with box roi ------------ +shared_modules_dairv2xt_bev_fcl_lat = copy.deepcopy(shared_modules_dairv2xt) + +#--------- Ablation 5 : No dilconv ------------ +shared_modules_dairv2xt_no_dilconv = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_dilconv['backbone_neck'] = dict( + type='necks.dilation_spconv.DilationSpconvAblation', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + d=2, + convs=dict( + p2=dict(kernels=[1], in_dim=384, out_dim=256), + p8=dict(kernels=[1], in_dim=256, out_dim=256), + ) + ) + + + diff --git a/cosense3d/config/pycfg/nets/streamLTS_attnfusion.py b/cosense3d/config/pycfg/nets/streamLTS_attnfusion.py new file mode 100644 index 00000000..35b9a846 --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_attnfusion.py @@ -0,0 +1,120 @@ +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.voxnet import get_voxnet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_anchor_dense import get_det_anchor_dense_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0.0): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_voxnet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat', 'multi_scale_bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + sparse_cml=True, + neck=dict(type='bev_rpn.CustomRPN', out_channels=256, num_layers=2), + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['multi_scale_bev_feat'], + scatter_keys=['det_local_dense', 'bev_local_dense'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_anchor_dense_cfg( + gather_keys=['bev_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + pos_threshold=0.3, + neg_threshold=0.1, + ), + dict( + type='heads.bev_dense.BevRoIDenseHead', + in_dim=256, + ) + ], + strides=[2, 8], + losses=[True, False], + ), + + formatting = dict( + type='necks.formatting.DenseToSparse', + gather_keys=['multi_scale_bev_feat', 'det_local_dense', 'bev_local_dense'], + scatter_keys=['multi_scale_bev_feat', 'det_local_sparse', 'bev_local_sparse'], + data_info=data_info, + strides=[2, 8] + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + gather_keys=['det_local_sparse', 'bev_local_sparse', 'multi_scale_bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time) + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) \ No newline at end of file diff --git a/cosense3d/config/pycfg/nets/streamLTS_fcooper.py b/cosense3d/config/pycfg/nets/streamLTS_fcooper.py new file mode 100644 index 00000000..8348d309 --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_fcooper.py @@ -0,0 +1,106 @@ +from collections import OrderedDict +from 
cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.pillar_bev import get_pillar_bev_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_anchor_dense import get_det_anchor_dense_cfg + +voxel_size = [0.4, 0.4, 4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0.0): + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone = get_pillar_bev_cfg( + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + gather_keys=['points'], + scatter_keys=['bev_feat', 'multi_scale_bev_feat'], + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['multi_scale_bev_feat'], + scatter_keys=['det_local_dense', 'bev_local_dense'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_anchor_dense_cfg( + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + pos_threshold=0.3, neg_threshold=0.1, + ), + dict( + type='heads.bev_dense.BevRoIDenseHead', + in_dim=256, + ) + ], + strides=[2, 8], + losses=[True, False], + ), + + formatting = dict( + type='necks.formatting.DenseToSparse', + gather_keys=['multi_scale_bev_feat', 'det_local_dense', 'bev_local_dense', 'points'], + scatter_keys=['multi_scale_bev_feat', 'det_local_sparse', 'bev_local_sparse'], + data_info=data_info, + strides=[2, 8] + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + gather_keys=['det_local_sparse', 'bev_local_sparse', 'multi_scale_bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_fusion = dict( + type='fusion.maxout_fusion.SparseBEVMaxoutFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time) + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) \ 
No newline at end of file diff --git a/cosense3d/config/pycfg/nets/streamLTS_fpvrcnn.py b/cosense3d/config/pycfg/nets/streamLTS_fpvrcnn.py new file mode 100644 index 00000000..74253cd1 --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_fpvrcnn.py @@ -0,0 +1,140 @@ +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.spconv import get_spconv_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_anchor_dense import get_det_anchor_dense_cfg + +voxel_size = [0.1, 0.1, 0.1] +out_stride = 8 + + +def get_shared_modules(point_cloud_range, global_ref_time=0): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. + """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_spconv_cfg( + gather_keys=['points'], + scatter_keys=['voxel_feat', 'bev_feat', 'multi_scale_bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + bev_neck=dict(type='ssfa.SSFA', in_channels=64, out_channels=128, + shrink_strides=[2, 2], shrink_channels=[128, 256]) + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['multi_scale_bev_feat'], + scatter_keys=['det_local_dense', 'bev_local_dense'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_anchor_dense_cfg( + gather_keys=['bev_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + in_channels=128, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + stride=8, + pos_threshold=0.3, + neg_threshold=0.1, + score_thrshold=0.1, + get_boxes_when_training=True, + ), + dict( + type='heads.bev_dense.BevRoIDenseHead', + in_dim=256, + ) + ], + strides=[8, 32], + losses=[True, False], + ), + + keypoint_composer=dict( + type='necks.cpm_composer.KeypointComposer', + gather_keys=['det_local_dense', 'bev_feat', "voxel_feat", 'points'], + scatter_keys=['keypoint_feat'], + train_from_epoch=1, + vsa=dict( + type='vsa.VoxelSetAbstraction', + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + num_keypoints=2048, + num_out_features=256, + num_bev_features=128, + num_rawpoint_features=3, + enlarge_selection_boxes=True, + ) + ), + + formatting = dict( + type='necks.formatting.FPVRCNNToLTS', + gather_keys=['multi_scale_bev_feat', 'keypoint_feat', 'bev_local_dense'], + scatter_keys=['roi_local', 'roi_global', 'multi_scale_feat'], + data_info=data_info, + strides=[8, 32] + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + gather_keys=['roi_local', 'roi_global', 'multi_scale_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = 
get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time) + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) \ No newline at end of file diff --git a/cosense3d/config/pycfg/nets/streamLTS_gevbev.py b/cosense3d/config/pycfg/nets/streamLTS_gevbev.py new file mode 100644 index 00000000..58a75e9a --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_gevbev.py @@ -0,0 +1,262 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg +from cosense3d.config.pycfg.template.bev_head import get_bev_head_cfg, get_bev_multi_resolution_head_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0, enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=3, + height_compression=[1, 2, 8], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p1=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p2=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p8=dict(kernels=[3, 3, 3], in_dim=256, out_dim=256) + ) + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local', 'bev_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + get_bev_head_cfg( + data_info, out_stride, in_dim=256, n_cls=2 + ) + ], + strides=[2, 8], + losses=[True, False], + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + gather_keys=['det_local', 'bev_local', 'bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_alignment={}, + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, enc_dim=64) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_opv2vt_no_roi_reg = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + ) + +shared_modules_opv2vt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_opv2vt_no_t = copy.deepcopy(shared_modules_opv2vt) + +#--------- Ablation 3 : No Global 
Attention ------------ +shared_modules_opv2vt_no_global_attn = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_opv2vt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_opv2vt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_opv2vt_roi_focal_loss = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + +#--------- Comparative 1 : Pose error ------------ +shared_modules_opv2vt_fcl_locerr = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, 32) +shared_modules_opv2vt_fcl_locerr['spatial_alignment'] = dict( + type='fusion.spatial_alignment.SpatialAlignment', + gather_keys=['detection_local', 'received_response'], + scatter_keys=['received_response'], +) + +#--------- Comparative 2 : Latency ------------ +shared_modules_opv2vt_fcl_lat = shared_modules_opv2vt_roi_focal_loss + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_dairv2xt_no_roi_reg = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + ) +shared_modules_dairv2xt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_dairv2xt_no_t = shared_modules_dairv2xt + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_dairv2xt_no_global_attn = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_dairv2xt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] 
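# Illustrative aside (hypothetical check, not part of the original config): dropping the
# second attention config removes the global/cross attention, so the operation order set
# on the next line must list exactly one '*_attn' step. A small consistency check like
# this could catch a mismatch between attn_cfgs and operation_order before the
# transformer layer is built; the real layer builder may do its own validation.
def _check_attn_consistency(layer_cfg):
    n_attn_ops = sum(op.endswith('attn') for op in layer_cfg['operation_order'])
    assert n_attn_ops == len(layer_cfg['attn_cfgs']), (
        f"{n_attn_ops} attention step(s) in operation_order but "
        f"{len(layer_cfg['attn_cfgs'])} attn_cfgs")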
+Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_dairv2xt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + +#--------- Ablation 5 : Focal loss and Gaussian GT for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss_gaussian = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss", + use_gaussian=True, + sigma=1.0 +) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + diff --git a/cosense3d/config/pycfg/nets/streamLTS_lat.py b/cosense3d/config/pycfg/nets/streamLTS_lat.py new file mode 100644 index 00000000..cf41b39a --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_lat.py @@ -0,0 +1,264 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_pred_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg +from cosense3d.config.pycfg.template.bev_head import get_bev_head_cfg, get_bev_multi_resolution_head_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0, enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=3, + height_compression=[2, 8], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p2=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p8=dict(kernels=[3, 3, 3], in_dim=256, out_dim=256) + ) + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local', 'bev_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d', 'local_bboxes_mask'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + get_bev_head_cfg( + data_info, out_stride, in_dim=256, n_cls=2 + ) + ], + strides=[2, 8], + losses=[True, False], + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusionV3', + gather_keys=['det_local', 'bev_local', 'bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_alignment={}, + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = get_query_guided_pred_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d', 'global_bboxes_id', + 'local_bboxes_3d', 'local_labels_3d', 'local_bboxes_mask', 'local_bboxes_id'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_pred_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d', 'global_bboxes_mask'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, enc_dim=64) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_opv2vt_no_roi_reg = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + ) + +shared_modules_opv2vt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype 
+shared_modules_opv2vt_no_t = copy.deepcopy(shared_modules_opv2vt) + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_opv2vt_no_global_attn = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_opv2vt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_opv2vt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_opv2vt_roi_focal_loss = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) +shared_modules_opv2vt_roi_focal_loss['temporal_fusion']['norm_fusion'] = True + +#--------- Comparative 1 : Pose error ------------ +shared_modules_opv2vt_fcl_locerr = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, 32) +shared_modules_opv2vt_fcl_locerr['spatial_alignment'] = dict( + type='fusion.spatial_alignment.SpatialAlignment', + gather_keys=['detection_local', 'received_response'], + scatter_keys=['received_response'], +) + +#--------- Comparative 2 : Latency ------------ +shared_modules_opv2vt_fcl_lat = shared_modules_opv2vt_roi_focal_loss +shared_modules_opv2vt_roi_focal_loss['temporal_fusion']['norm_fusion'] = False + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_dairv2xt_no_roi_reg = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + ) +shared_modules_dairv2xt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_dairv2xt_no_t = shared_modules_dairv2xt + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_dairv2xt_no_global_attn = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_global_attn['temporal_fusion'].update( + 
type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_dairv2xt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_dairv2xt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + +#--------- Ablation 5 : Focal loss and Gaussian GT for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss_gaussian = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss", + use_gaussian=True, + sigma=1.0 +) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + diff --git a/cosense3d/config/pycfg/nets/streamLTS_locerr.py b/cosense3d/config/pycfg/nets/streamLTS_locerr.py new file mode 100644 index 00000000..f27c60ea --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_locerr.py @@ -0,0 +1,183 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg +from cosense3d.config.pycfg.template.bev_head import get_bev_head_cfg, get_bev_multi_resolution_head_cfg +from cosense3d.config.pycfg.template.rlseg_head import get_roadline_head_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0, enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + freeze=False, + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=3, + height_compression=[2, 8], + enc_dim=enc_dim, + ), + + rl_backbone=get_minkunet_cfg( + freeze=True, + gather_keys=['points_rl'], + scatter_keys=['bev_feat_rl'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=5, + cache_strides=[1], + height_compression=[1], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + freeze=False, + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p2=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p8=dict(kernels=[3, 3, 3], in_dim=256, out_dim=256) + ) + ), + + rl_neck=dict( + type='necks.dilation_spconv.DilationSpconv', + freeze=True, + gather_keys=['bev_feat_rl'], + scatter_keys=['bev_feat_rl'], + data_info=data_info, + d=2, + convs=dict( + p1=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256) + ) + ), + + rlseg_head=dict( + type='heads.bev_roadline.BEVRoadLine', + freeze=True, + gather_keys=['bev_feat_rl'], + scatter_keys=['roadline_pred'], + gt_keys=['roadline_tgts'], + data_info=data_info, + stride=1, + in_dim=256, + target_assigner=dict(type='target_assigners.RoadLineAssigner', res=0.4, range=50), + loss_cls=dict(type='FocalLoss', use_sigmoid=True, bg_idx=0, + gamma=2.0, alpha=0.25, loss_weight=2.0), + ), + + localization=dict( + type='necks.spatial_alignment.MapRegistration', + freeze=True, + gather_keys=['roadline_pred', 'roadline', 'lidar_poses', 'lidar_poses_gt'], + scatter_keys=['lidar_poses_corrected', 'roadline_pred'], + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + freeze=False, + gather_keys=['bev_feat'], + scatter_keys=['det_local', 'bev_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_assigner='BEVBoxAssigner', + cls_loss="FocalLoss" + ), + get_bev_head_cfg( + data_info, out_stride, in_dim=256, n_cls=2 + ) + ], + strides=[2, 8], + losses=[True, False], + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + freeze=False, + gather_keys=['det_local', 'bev_local', 'bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn), + norm_fusion=False, + ), + + det1_head = get_query_guided_petr_head_cfg( + freeze=False, + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + pred_while_training=True + ), + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryAlignFusionRL', + gather_keys=['detection_local', 'roadline', 'roadline_pred', 'temp_fusion_feat', + 'lidar_poses_corrected', 'lidar_poses', 'lidar_pose_aug', + 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det2_head = get_query_guided_petr_head_cfg( + 
gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, enc_dim=64) + + + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) + + diff --git a/cosense3d/config/pycfg/nets/streamLTS_rl.py b/cosense3d/config/pycfg/nets/streamLTS_rl.py new file mode 100644 index 00000000..11b34343 --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_rl.py @@ -0,0 +1,273 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg +from cosense3d.config.pycfg.template.bev_head import get_bev_head_cfg, get_bev_multi_resolution_head_cfg +from cosense3d.config.pycfg.template.rlseg_head import get_roadline_head_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0, enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
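+    gt_keys:
+        (where present) ground-truth entries, e.g. ['local_bboxes_3d',
+        'local_labels_3d'], gathered per CAV as loss targets for that head
+        (convention inferred from the head configs below).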
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=3, + height_compression=[1, 2, 8], + cache_strides=[1, 2, 8], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p1=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p2=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p8=dict(kernels=[3, 3, 3], in_dim=256, out_dim=256) + ) + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['roadline', 'det_local', 'bev_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_roadline_head_cfg( + gt_keys=['roadline_tgts'], + data_info=data_info, + stride=1, + in_dim=256, + ), + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_assigner='BEVBoxAssigner', + cls_loss="FocalLoss" + ), + get_bev_head_cfg( + data_info, out_stride, in_dim=256, n_cls=2 + ) + ], + strides=[1, 2, 8], + losses=[True, True, False], + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusion', + gather_keys=['det_local', 'bev_local', 'bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn), + norm_fusion=False, + ), + + spatial_alignment={}, + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, enc_dim=64) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_opv2vt_no_roi_reg = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + ) + +shared_modules_opv2vt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 
'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_opv2vt_no_t = copy.deepcopy(shared_modules_opv2vt) + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_opv2vt_no_global_attn = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_opv2vt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_opv2vt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_opv2vt_roi_focal_loss = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_opv2vt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + +#--------- Comparative 1 : Pose error ------------ +shared_modules_opv2vt_fcl_locerr = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, 32) +shared_modules_opv2vt_fcl_locerr['spatial_alignment'] = dict( + type='fusion.spatial_alignment.SpatialAlignment', + gather_keys=['detection_local', 'received_response'], + scatter_keys=['received_response'], +) + +#--------- Comparative 2 : Latency ------------ +shared_modules_opv2vt_fcl_lat = shared_modules_opv2vt_roi_focal_loss + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_dairv2xt_no_roi_reg = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + ) +shared_modules_dairv2xt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_dairv2xt_no_t = shared_modules_dairv2xt + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_dairv2xt_no_global_attn = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + 
gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_dairv2xt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_dairv2xt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + +#--------- Ablation 5 : Focal loss and Gaussian GT for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss_gaussian = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss", + use_gaussian=True, + sigma=1.0 +) +shared_modules_dairv2xt_roi_focal_loss_gaussian['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + diff --git a/cosense3d/config/pycfg/nets/streamLTS_v2.py b/cosense3d/config/pycfg/nets/streamLTS_v2.py new file mode 100644 index 00000000..5042ab6d --- /dev/null +++ b/cosense3d/config/pycfg/nets/streamLTS_v2.py @@ -0,0 +1,236 @@ +import copy +from collections import OrderedDict +from cosense3d.config.pycfg.base import use_flash_attn, opv2vt, dairv2xt, hooks +from cosense3d.config.pycfg.template.petr_transformer import get_petr_transformer_cfg +from cosense3d.config.pycfg.template.minkunet import get_minkunet_cfg +from cosense3d.config.pycfg.template.query_guided_petr_head import get_query_guided_petr_head_cfg +from cosense3d.config.pycfg.template.det_center_sparse import get_det_center_sparse_cfg +from cosense3d.config.pycfg.template.bev_head import get_bev_head_cfg, get_bev_multi_resolution_head_cfg + + +voxel_size = [0.4, 0.4, 0.4] +out_stride = 2 + + +def get_shared_modules(point_cloud_range, global_ref_time=0, enc_dim=32): + """ + gather_keys: + keys to gather data from cavs, key order is important, should match the forward input arguments order. + scatter_keys: + 1st key in the list is used as the key for scattering and storing module output data to cav. 
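+    enc_dim:
+        base channel width passed to get_minkunet_cfg for the point-cloud
+        backbone; the OPV2Vt configs below use enc_dim=64 while the DairV2Xt
+        configs keep the default of 32.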
+ """ + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return OrderedDict( + pts_backbone=get_minkunet_cfg( + gather_keys=['points'], + scatter_keys=['bev_feat'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + kernel_size_layer1=3, + height_compression=[2, 8], + enc_dim=enc_dim, + ), + + backbone_neck = dict( + type='necks.dilation_spconv.DilationSpconv', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=data_info, + d=2, + convs=dict( + p2=dict(kernels=[3, 3, 3], in_dim=384, out_dim=256), + p8=dict(kernels=[3, 3, 3], in_dim=256, out_dim=256) + ) + ), + + roi_head = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_assigner='BEVBoxAssigner', + cls_loss="FocalLoss" + ), + ], + strides=[2], + losses=[True], + ), + + temporal_fusion = dict( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], + scatter_keys=['temp_fusion_feat'], + in_channels=256, + ref_pts_stride=2, + feature_stride=8, + transformer_itrs=1, + global_ref_time=global_ref_time, + lidar_range=point_cloud_range, + transformer=get_petr_transformer_cfg(use_flash_attn) + ), + + spatial_alignment={}, + + spatial_fusion=dict( + type='fusion.spatial_query_fusion.SpatialQueryFusion', + gather_keys=['temp_fusion_feat', 'received_response'], + scatter_keys=['spatial_fusion_feat'], + in_channels=256, + pc_range=point_cloud_range, + resolution=0.8 + ), + + det1_head = get_query_guided_petr_head_cfg( + gather_keys=['temp_fusion_feat'], + scatter_keys=['detection_local'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=False, + ), + + det2_head = get_query_guided_petr_head_cfg( + gather_keys=['spatial_fusion_feat'], + scatter_keys=['detection'], + gt_keys=['global_bboxes_3d', 'global_labels_3d'], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_stride=out_stride, + sparse=True, + ), + + ) + +###################################################### +# OPV2Vt +###################################################### +test_hooks_opv2vt = hooks.get_test_nms_eval_hooks(opv2vt.point_cloud_range_test) +plots_opv2vt = [hooks.get_detection_plot(opv2vt.point_cloud_range_test)] +shared_modules_opv2vt = get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, enc_dim=64) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_opv2vt_no_roi_reg = copy.deepcopy(shared_modules_opv2vt) +shared_modules_opv2vt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + n_cls=1 + ) + +shared_modules_opv2vt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_opv2vt_no_t = copy.deepcopy(shared_modules_opv2vt) + +#--------- Ablation 5 : No dilconv ------------ +shared_modules_opv2vt_no_dilconv = 
get_shared_modules(opv2vt.point_cloud_range, opv2vt.global_ref_time, 32) +shared_modules_opv2vt_no_dilconv['backbone_neck'] = dict( + type='necks.dilation_spconv.DilationSpconvAblation', + gather_keys=['bev_feat'], + scatter_keys=['bev_feat'], + data_info=dict(lidar_range=opv2vt.point_cloud_range, voxel_size=voxel_size), + d=2, + convs=dict( + p2=dict(kernels=[1], in_dim=384, out_dim=256), + p8=dict(kernels=[1], in_dim=256, out_dim=256), + ) + ) + + +###################################################### +# DairV2Xt +###################################################### +test_hooks_dairv2xt = hooks.get_test_nms_eval_hooks(dairv2xt.point_cloud_range_test) +plots_dairv2xt = [hooks.get_detection_plot(dairv2xt.point_cloud_range_test)] +shared_modules_dairv2xt = get_shared_modules(dairv2xt.point_cloud_range, dairv2xt.global_ref_time) + +#--------- Ablation 1 : No RoI regression------------- +shared_modules_dairv2xt_no_roi_reg = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_roi_reg['roi_head'] = get_bev_multi_resolution_head_cfg( + gather_keys=['bev_feat'], + scatter_keys=['bevseg_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + n_cls=1 + ) +shared_modules_dairv2xt_no_roi_reg['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV1', + gather_keys=['bevseg_local', 'bev_feat', 'memory'], +) + +#--------- Ablation 2 : No Timestamps for boxes -------- +# see changes in yaml cfg of cav_prototype +shared_modules_dairv2xt_no_t = shared_modules_dairv2xt + +#--------- Ablation 3 : No Global Attention ------------ +shared_modules_dairv2xt_no_global_attn = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_global_attn['temporal_fusion'].update( + type='fusion.temporal_fusion.LocalTemporalFusionV2', + gather_keys=['det_local', 'bev_feat', 'memory'], +) +Tlayer = shared_modules_dairv2xt_no_global_attn['temporal_fusion']['transformer']['decoder']['transformerlayers'] +Tlayer['attn_cfgs'] = Tlayer['attn_cfgs'][:1] +Tlayer['operation_order'] = ('self_attn', 'norm', 'ffn', 'norm') +shared_modules_dairv2xt_no_global_attn['roi_head'] = dict( + type='heads.multitask_head.MultiTaskHead', + gather_keys=['bev_feat'], + scatter_keys=['det_local'], + gt_keys=['local_bboxes_3d', 'local_labels_3d'], + heads=[ + get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=opv2vt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_assigner='BEVBoxAssigner', + cls_loss="FocalLoss" + ), + ], + strides=[2], + losses=[True], + ) + +#--------- Ablation 4 : Focal loss for RoI ------------ +shared_modules_dairv2xt_roi_focal_loss = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0] = get_det_center_sparse_cfg( + voxel_size=voxel_size, + point_cloud_range=dairv2xt.point_cloud_range, + in_channels=256, + generate_roi_scr=True, + cls_loss="FocalLoss" +) +shared_modules_dairv2xt_roi_focal_loss['roi_head']['heads'][0]['cls_head_cfg'] = ( + dict(name='UnitedClsHead', one_hot_encoding=False)) + + +#--------- Comparative 3 : Latency with box roi ------------ +shared_modules_dairv2xt_bev_fcl_lat = copy.deepcopy(shared_modules_dairv2xt) + +#--------- Ablation 5 : No dilconv ------------ +shared_modules_dairv2xt_no_dilconv = copy.deepcopy(shared_modules_dairv2xt) +shared_modules_dairv2xt_no_dilconv['backbone_neck'] = dict( + type='necks.dilation_spconv.DilationSpconvAblation', + gather_keys=['bev_feat'], + 
scatter_keys=['bev_feat'], + data_info=dict(lidar_range=dairv2xt.point_cloud_range, voxel_size=voxel_size), + d=2, + convs=dict( + p2=dict(kernels=[1], in_dim=384, out_dim=256), + p8=dict(kernels=[1], in_dim=256, out_dim=256), + ) + ) + + + diff --git a/cosense3d/config/pycfg/template/__init__.py b/cosense3d/config/pycfg/template/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/config/pycfg/template/bev_head.py b/cosense3d/config/pycfg/template/bev_head.py new file mode 100644 index 00000000..d8d471b2 --- /dev/null +++ b/cosense3d/config/pycfg/template/bev_head.py @@ -0,0 +1,29 @@ +from cosense3d.config import add_cfg_keys + +def get_bev_head_cfg(data_info, out_stride, in_dim=256, n_cls=2): + return dict( + type='heads.bev.BEV', + data_info=data_info, + stride=out_stride, + in_dim=in_dim, + target_assigner=dict(type='target_assigners.BEVPointAssigner'), + loss_cls=dict(type='EDLLoss', activation='relu', annealing_step=50, + n_cls=n_cls, loss_weight=1.0), + ) + + +@add_cfg_keys +def get_bev_multi_resolution_head_cfg(data_info, in_dim=256, n_cls=1): + cfg = dict( + type='heads.bev.BEVMultiResolution', + data_info=data_info, + strides=[2, 8], + strides_for_loss=[2], + down_sample_tgt=False, + in_dim=in_dim, + num_cls=n_cls, + target_assigner=dict(type='target_assigners.BEVPointAssigner', down_sample=False), + loss_cls=dict(type='FocalLoss', use_sigmoid=True, bg_idx=0, + gamma=2.0, alpha=0.25, loss_weight=2.0), + ) + return cfg \ No newline at end of file diff --git a/cosense3d/config/pycfg/template/bev_semseg_head.py b/cosense3d/config/pycfg/template/bev_semseg_head.py new file mode 100644 index 00000000..897b4e1d --- /dev/null +++ b/cosense3d/config/pycfg/template/bev_semseg_head.py @@ -0,0 +1,28 @@ +from cosense3d.config import add_cfg_keys + +@add_cfg_keys +def get_bev_semseg_head_cfg( + semseg_head_type, + in_dim, + data_info, + stride, + tgt_assigner_type, +): + return dict( + type=semseg_head_type, + data_info=data_info, + in_dim=in_dim, + stride=stride, + # dynamic_head=False, + target_assigner=dict( + type=tgt_assigner_type, + down_sample=True, + data_info=data_info, + stride=stride, + tgt_range=50 + ), + loss_cls=dict(type='EDLLoss', activation='relu', annealing_step=50, + n_cls=2, loss_weight=1.0), + ) + + diff --git a/cosense3d/config/pycfg/template/det_anchor_dense.py b/cosense3d/config/pycfg/template/det_anchor_dense.py new file mode 100644 index 00000000..51238e7a --- /dev/null +++ b/cosense3d/config/pycfg/template/det_anchor_dense.py @@ -0,0 +1,27 @@ +from cosense3d.config import add_cfg_keys + +@add_cfg_keys +def get_det_anchor_dense_cfg(voxel_size, point_cloud_range, in_channels=256, stride=2, + pos_threshold=0.6, neg_threshold=0.45, score_thrshold=0.25, + get_boxes_when_training=False, + ): + return dict( + type='heads.det_anchor_dense.DetAnchorDense', + in_channels=in_channels, + get_boxes_when_training=get_boxes_when_training, + target_assigner=dict( + type='target_assigners.BoxAnchorAssigner', + box_size=[3.9, 1.6, 1.56], + dirs=[0, 90], + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=stride, + pos_threshold=pos_threshold, + neg_threshold=neg_threshold, + score_thrshold=score_thrshold, + box_coder=dict(type='ResidualBoxCoder', mode='simple_dist') + ), + loss_cls = dict(type='FocalLoss', use_sigmoid=True, + gamma=2.0, alpha=0.25, loss_weight=0.25), + loss_box = dict(type='SmoothL1Loss', loss_weight=1.0), + ) \ No newline at end of file diff --git a/cosense3d/config/pycfg/template/det_center_sparse.py 
b/cosense3d/config/pycfg/template/det_center_sparse.py new file mode 100644 index 00000000..d33f62b8 --- /dev/null +++ b/cosense3d/config/pycfg/template/det_center_sparse.py @@ -0,0 +1,71 @@ +from cosense3d.config import add_cfg_keys +from cosense3d.modules.plugin.target_assigners import BEVCenternessAssigner + + +@add_cfg_keys +def get_det_center_sparse_cfg(voxel_size, point_cloud_range, + in_channels=256, stride=2, + generate_roi_scr=False, + cls_assigner="BEVCenternessAssigner", + cls_loss="EDLLoss", + use_gaussian=False, sigma=1.0): + if cls_assigner == "BEVCenternessAssigner": + cls_assigner = dict( + type='target_assigners.BEVCenternessAssigner', + n_cls=1, + min_radius=1.0, + pos_neg_ratio=0, + max_mining_ratio=0, + use_gaussian=use_gaussian, + sigma=sigma + ) + elif cls_assigner == "BEVBoxAssigner": + cls_assigner = dict( + type='target_assigners.BEVBoxAssigner', + n_cls=1, + pos_neg_ratio=0, + max_mining_ratio=0, + ) + else: + raise NotImplementedError + scr_activation = "relu" # default + edl = True + if cls_loss == "EDLLoss": + cls_loss = dict(type='EDLLoss', activation='exp', annealing_step=20, n_cls=2, loss_weight=5.0) + scr_activation = "exp" + one_hot_encoding = True + elif cls_loss == "FocalLoss": + cls_loss = dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0) + scr_activation = "sigmoid" + edl = False + one_hot_encoding = False + + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + return dict( + type='heads.det_center_sparse.DetCenterSparse', + data_info=data_info, + generate_roi_scr=generate_roi_scr, + input_channels=in_channels, + shared_conv_channel=256, + get_predictions=True, + stride=stride, + cls_head_cfg=dict(name='UnitedClsHead', one_hot_encoding=one_hot_encoding), + reg_head_cfg=dict(name='UnitedRegHead', combine_channels=True, sigmoid_keys=['scr']), + class_names_each_head=[['vehicle.car']], + reg_channels=['box:6', 'dir:8', 'scr:4'], + cls_assigner=cls_assigner, + box_assigner=dict( + type='target_assigners.BoxCenterAssigner', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=stride, + detection_benchmark='Car', + class_names_each_head=[['vehicle.car']], + center_threshold=0.5, + box_coder=dict(type='CenterBoxCoder'), + activation=scr_activation, + edl=edl + ), + loss_cls=cls_loss, + loss_box=dict(type='SmoothL1Loss', loss_weight=1.0), + ) \ No newline at end of file diff --git a/cosense3d/config/pycfg/template/minkunet.py b/cosense3d/config/pycfg/template/minkunet.py new file mode 100644 index 00000000..72dc56ce --- /dev/null +++ b/cosense3d/config/pycfg/template/minkunet.py @@ -0,0 +1,39 @@ +import math +from collections import OrderedDict + + +def get_minkunet_cfg(gather_keys, scatter_keys, voxel_size, point_cloud_range, + in_dim=4, dim=3, out_stride=2, height_compression=[2, 8], + compression_kernel_size_xy=1, cache_strides=[2, 8], enc_dim=32, + kernel_size_layer1=5, freeze=False): + data_info = dict(lidar_range=point_cloud_range, voxel_size=voxel_size) + if len(height_compression) > 0: + hc = OrderedDict() + height = (point_cloud_range[5] - point_cloud_range[2]) / voxel_size[2] + dims = {1: enc_dim, 2: enc_dim * 3, 4: enc_dim * 4, 8: enc_dim * 4} + # dims = {1: enc_dim, 2: enc_dim * 4, 4: enc_dim * 4, 8: enc_dim * 4} + for stride in height_compression: + downx = math.ceil(height / stride) + dim = dims[stride] + if downx > 4: + hc[f'p{stride}'] = dict(channels=[dim, 256, 384], steps=[5, max(downx // 5, 2)]) + else: + hc[f'p{stride}'] = dict(channels=[dim, 256], steps=[downx]) + else: + hc 
= None + return dict( + type='backbone3d.mink_unet.MinkUnet', + freeze=freeze, + gather_keys=gather_keys, + scatter_keys=scatter_keys, + d=3, + cache_strides=cache_strides, + kernel_size_layer1=kernel_size_layer1, + in_dim=in_dim, + stride=out_stride, + floor_height=point_cloud_range[2], + data_info=data_info, + height_compression=hc, + compression_kernel_size_xy=compression_kernel_size_xy, + enc_dim=enc_dim, + ) \ No newline at end of file diff --git a/cosense3d/config/pycfg/template/petr_transformer.py b/cosense3d/config/pycfg/template/petr_transformer.py new file mode 100644 index 00000000..73975d78 --- /dev/null +++ b/cosense3d/config/pycfg/template/petr_transformer.py @@ -0,0 +1,76 @@ + +def get_petr_transformer_cfg(flash_attn=True, embed_dims=256): + return dict( + type='transformer.PETRTemporalTransformer', + decoder=dict( + type='TransformerDecoder', + return_intermediate=True, + num_layers=1, + transformerlayers=dict( + type='TransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', #fp16 for 2080Ti training (save GPU memory). + embed_dims=embed_dims, + num_heads=8, + dropout=0.1, + fp16=False), + dict( + type='MultiheadFlashAttention' if flash_attn else 'MultiheadAttention', + embed_dims=embed_dims, + num_heads=8, + dropout=0.1, + fp16=flash_attn + ), + ], + ffn_cfgs=dict( + type='FFN', + embed_dims=embed_dims, + feedforward_channels=1024, + num_fcs=2, + dropout=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + feedforward_channels=1024, + ffn_dropout=0.1, + with_cp=False, ###use checkpoint to save memory + operation_order=('self_attn', 'norm', + 'cross_attn', 'norm', + 'ffn', 'norm')), + ) + ) + + +def get_transformer_cfg(flash_attn=True, embed_dims=256): + return dict( + type='transformer.PETRTemporalTransformer', + decoder=dict( + type='TransformerDecoder', + return_intermediate=True, + num_layers=1, + transformerlayers=dict( + type='TransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', #fp16 for 2080Ti training (save GPU memory). 
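+                        # Note (inferred from the ablation configs that keep only
+                        # attn_cfgs[:1] and drop 'cross_attn'): attn_cfgs[0] pairs
+                        # with 'self_attn' and attn_cfgs[1] with 'cross_attn' in
+                        # operation_order; only the cross-attention is swapped to
+                        # MultiheadFlashAttention with fp16 when flash_attn=True.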
+ embed_dims=embed_dims, + num_heads=8, + dropout=0.1, + fp16=False), + ], + ffn_cfgs=dict( + type='FFN', + embed_dims=embed_dims, + feedforward_channels=1024, + num_fcs=2, + dropout=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + feedforward_channels=1024, + ffn_dropout=0.1, + with_cp=False, ###use checkpoint to save memory + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + ) + ) + + diff --git a/cosense3d/config/pycfg/template/pillar_bev.py b/cosense3d/config/pycfg/template/pillar_bev.py new file mode 100644 index 00000000..4eb3cfc9 --- /dev/null +++ b/cosense3d/config/pycfg/template/pillar_bev.py @@ -0,0 +1,35 @@ +from collections import OrderedDict +from cosense3d.config import add_cfg_keys + + +@add_cfg_keys +def get_pillar_bev_cfg(voxel_size, point_cloud_range, **kwargs): + return dict( + type='backbone3d.pillar_bev.PillarBEV', + in_channels=64, + layer_nums=[3, 5, 8], + layer_strides=[2, 2, 2], + downsample_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + upsample_channels=[128, 128, 128], + voxel_generator=dict( + type='voxel_generator.VoxelGenerator', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + max_points_per_voxel=32, + max_voxels_train=32000, + max_voxels_test=70000 + ), + pillar_encoder=dict( + type='pillar_encoder.PillarEncoder', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + features=['xyz', 'intensity', 'absolute_xyz'], + channels=[64] + ), + bev_shrinker=dict( + type='downsample_conv.DownsampleConv', + in_channels=384, # 128 * 3 + dims=[256] + ), + ) \ No newline at end of file diff --git a/cosense3d/config/pycfg/template/query_guided_petr_head.py b/cosense3d/config/pycfg/template/query_guided_petr_head.py new file mode 100644 index 00000000..b2188e4f --- /dev/null +++ b/cosense3d/config/pycfg/template/query_guided_petr_head.py @@ -0,0 +1,80 @@ + + +def get_query_guided_petr_head_cfg(gather_keys, scatter_keys, gt_keys, + voxel_size, point_cloud_range, out_stride, + embed_dims=256, sparse=False, freeze=False, + pred_while_training=False): + return dict( + type='heads.query_guided_petr_head.QueryGuidedPETRHead', + freeze=freeze, + gather_keys=gather_keys, + scatter_keys=scatter_keys, + gt_keys=gt_keys, + sparse=sparse, + pred_while_training=pred_while_training, + embed_dims=embed_dims, + num_reg_fcs=1, + num_pred=1, + pc_range=point_cloud_range, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + num_classes=1, + reg_channels=['box:6', 'dir:8', 'scr:4', 'vel:2'], + cls_assigner=dict( + type='target_assigners.BEVBoxAssigner', + n_cls=1, + # min_radius=1.0, + pos_neg_ratio=0, + mining_thr=0, + ), + box_assigner=dict( + type='target_assigners.BoxCenterAssigner', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=out_stride, + detection_benchmark='Car', + class_names_each_head=[['vehicle.car']], + center_threshold=0.5, + box_coder=dict(type='CenterBoxCoder', with_velo=True), + ), + loss_cls=dict(type='FocalLoss', use_sigmoid=True, bg_idx=0, + gamma=2.0, alpha=0.25, loss_weight=2.0), + loss_box=dict(type='SmoothL1Loss', loss_weight=1.0), + ) + + +def get_query_guided_pred_head_cfg(gather_keys, scatter_keys, gt_keys, + voxel_size, point_cloud_range, out_stride, embed_dims=256, sparse=False): + return dict( + type='heads.query_guided_petr_head.QueryGuidedPredHead', + gather_keys=gather_keys, + scatter_keys=scatter_keys, + gt_keys=gt_keys, + sparse=sparse, + embed_dims=embed_dims, + num_reg_fcs=1, + num_pred=1, + pc_range=point_cloud_range, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 
0.2], + num_classes=1, + reg_channels=['box:6', 'dir:8', 'scr:4', 'vel:2'], + cls_assigner=dict( + type='target_assigners.BEVCenternessAssigner', + n_cls=1, + min_radius=1.0, + pos_neg_ratio=0, + mining_thr=0, + ), + box_assigner=dict( + type='target_assigners.BoxCenterAssigner', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + stride=out_stride, + detection_benchmark='Car', + class_names_each_head=[['vehicle.car']], + center_threshold=0.5, + box_coder=dict(type='BoxPredCoder', with_velo=True), + ), + loss_cls=dict(type='FocalLoss', use_sigmoid=True, bg_idx=0, + gamma=2.0, alpha=0.25, loss_weight=2.0), + loss_box=dict(type='SmoothL1Loss', loss_weight=1.0), + ) \ No newline at end of file diff --git a/cosense3d/config/pycfg/template/rlseg_head.py b/cosense3d/config/pycfg/template/rlseg_head.py new file mode 100644 index 00000000..34d62067 --- /dev/null +++ b/cosense3d/config/pycfg/template/rlseg_head.py @@ -0,0 +1,16 @@ +from cosense3d.config import add_cfg_keys + +@add_cfg_keys +def get_roadline_head_cfg(data_info, stride, in_dim=256, range=50): + res = data_info['voxel_size'][0] * stride + return dict( + type='heads.bev_roadline.BEVRoadLine', + data_info=data_info, + stride=stride, + in_dim=in_dim, + target_assigner=dict(type='target_assigners.RoadLineAssigner', res=res, range=range), + loss_cls=dict(type='FocalLoss', use_sigmoid=True, bg_idx=0, + gamma=2.0, alpha=0.25, loss_weight=2.0), + ) + + diff --git a/cosense3d/config/pycfg/template/spconv.py b/cosense3d/config/pycfg/template/spconv.py new file mode 100644 index 00000000..1cffeab1 --- /dev/null +++ b/cosense3d/config/pycfg/template/spconv.py @@ -0,0 +1,24 @@ + +def get_spconv_cfg(gather_keys, scatter_keys, voxel_size, point_cloud_range, + in_channels=4, out_channels=64, bev_neck=None, bev_compressor=None): + return dict( + type='backbone3d.spconv.Spconv', + gather_keys=gather_keys, + scatter_keys=scatter_keys, + in_channels=in_channels, + out_channels=out_channels, + voxel_generator=dict( + type='voxel_generator.VoxelGenerator', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + max_points_per_voxel=32, + max_voxels_train=32000, + max_voxels_test=70000 + ), + voxel_encoder=dict( + type='voxel_encoder.MeanVFE', + num_point_features=in_channels, + ), + bev_neck=bev_neck, + bev_compressor=bev_compressor + ) \ No newline at end of file diff --git a/cosense3d/config/pycfg/template/voxnet.py b/cosense3d/config/pycfg/template/voxnet.py new file mode 100644 index 00000000..b0c4f258 --- /dev/null +++ b/cosense3d/config/pycfg/template/voxnet.py @@ -0,0 +1,26 @@ + +def get_voxnet_cfg(gather_keys, scatter_keys, voxel_size, point_cloud_range, + neck=None, bev_compressor=None, sparse_cml=False): + return dict( + type='backbone3d.voxelnet.VoxelNet', + gather_keys=gather_keys, + scatter_keys=scatter_keys, + voxel_generator=dict( + type='voxel_generator.VoxelGenerator', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + max_points_per_voxel=32, + max_voxels_train=32000, + max_voxels_test=70000 + ), + voxel_encoder=dict( + type='pillar_encoder.PillarEncoder', + voxel_size=voxel_size, + lidar_range=point_cloud_range, + features=['xyz', 'intensity', 'absolute_xyz'], + channels=[64] + ), + cml=dict(type='voxnet_utils.CMLSparse' if sparse_cml else 'voxnet_utils.CML', in_channels=64), + neck=neck, + bev_compressor=bev_compressor + ) \ No newline at end of file diff --git a/cosense3d/config/rlSeg/rl_seg.yaml b/cosense3d/config/rlSeg/rl_seg.yaml new file mode 100644 index 00000000..6b104f38 --- /dev/null +++ 
b/cosense3d/config/rlSeg/rl_seg.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.opv2v.seq1_vox04_bevmap_ego_only' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 51 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'MultiStepLR' + milestones: [25, 40] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.RLseg.test_hooks_opv2v' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2v.data_manager' + shared_modules: 'pycfg.nets.RLseg.shared_modules_opv2v' + cav_manager: + prototype: RLseg.RLsegCAV + dataset: opv2v +# memory_len: 4 +# memory_emb_dims: 256 +# memory_num_propagated: 256 +# ref_pts_dim: 3 + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.RLseg.plots_opv2v' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_attnfusion_dairv2xt.yaml b/cosense3d/config/streamLTS/streamLTS_attnfusion_dairv2xt.yaml new file mode 100644 index 00000000..c6725874 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_attnfusion_dairv2xt.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS_attnfusion.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_attnfusion.shared_modules_dairv2xt' + cav_manager: + prototype: streamLTS_collection.slcAttnFusion + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS_attnfusion.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_attnfusion_opv2vt.yaml b/cosense3d/config/streamLTS/streamLTS_attnfusion_opv2vt.yaml new file mode 100644 index 00000000..f7002556 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_attnfusion_opv2vt.yaml @@ -0,0 +1,46 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS_attnfusion.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_attnfusion.shared_modules_opv2vt' + cav_manager: + prototype: streamLTS_collection.slcAttnFusion + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS_attnfusion.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt.yaml new file mode 100644 index 00000000..72acdd55 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + 
wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [50] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_bev_fcl_randlat.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_bev_fcl_randlat.yaml new file mode 100644 index 00000000..4016e63b --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt_bev_fcl_randlat.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_bev_fcl_lat' + cav_manager: + prototype: streamLTS_collection.LTSDairV2X + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_fcl_lat.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_fcl_lat.yaml new file mode 100644 index 00000000..f4ab38e8 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt_fcl_lat.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_lat1' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [50] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_fcl_lat' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_fcl_locerr.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_fcl_locerr.yaml new file mode 100644 index 00000000..e60256aa --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt_fcl_locerr.yaml @@ -0,0 +1,48 @@ +DATASET: 
'pycfg.base.dairv2xt.seq4_vox04_lat1' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [50] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_fcl_locerr' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAVLocCorr + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_dilconv.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_dilconv.yaml new file mode 100644 index 00000000..9b27e60f --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_dilconv.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_no_dilconv' + cav_manager: + prototype: streamLTS_collection.LTSDairV2X + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_global_attn.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_global_attn.yaml new file mode 100644 index 00000000..f9f20f3f --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_global_attn.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_no_global_attn' + cav_manager: + prototype: streamLTS_collection.LTSDairV2X + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_roi_reg.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_roi_reg.yaml new file mode 100644 index 00000000..31499f1a --- /dev/null +++ 
b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_roi_reg.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_no_roi_reg' + cav_manager: + prototype: streamLTS_collection.LTSDairV2X + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_t.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_t.yaml new file mode 100644 index 00000000..45365267 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt_no_t.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_no_t' + cav_manager: + prototype: streamLTS_collection.slcNoBoxTimeDairV2X + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_roi_focal_loss.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_roi_focal_loss.yaml new file mode 100644 index 00000000..e7cccb38 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_dairv2xt_roi_focal_loss.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [50] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_roi_focal_loss' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_dairv2xt_roi_focal_loss_gaussian.yaml b/cosense3d/config/streamLTS/streamLTS_dairv2xt_roi_focal_loss_gaussian.yaml new file mode 100644 index 00000000..35ba2d59 --- /dev/null +++ 
b/cosense3d/config/streamLTS/streamLTS_dairv2xt_roi_focal_loss_gaussian.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [50] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_dairv2xt_roi_focal_loss_gaussian' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_fcooper_dairv2xt.yaml b/cosense3d/config/streamLTS/streamLTS_fcooper_dairv2xt.yaml new file mode 100644 index 00000000..40a8283a --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_fcooper_dairv2xt.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS_fcooper.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_fcooper.shared_modules_dairv2xt' + cav_manager: + prototype: streamLTS_collection.slcFcooper + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS_fcooper.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_fcooper_opv2vt.yaml b/cosense3d/config/streamLTS/streamLTS_fcooper_opv2vt.yaml new file mode 100644 index 00000000..d10c2dbd --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_fcooper_opv2vt.yaml @@ -0,0 +1,46 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS_fcooper.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_fcooper.shared_modules_opv2vt' + cav_manager: + prototype: streamLTS_collection.slcFcooper + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS_fcooper.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_fpvrcnn_dairv2xt.yaml b/cosense3d/config/streamLTS/streamLTS_fpvrcnn_dairv2xt.yaml new file mode 100644 index 00000000..b0c93eee --- /dev/null +++ 
b/cosense3d/config/streamLTS/streamLTS_fpvrcnn_dairv2xt.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.dairv2xt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS_fpvrcnn.test_hooks_dairv2xt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.dairv2xt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_fpvrcnn.shared_modules_dairv2xt' + cav_manager: + prototype: streamLTS_collection.slcFPVRCNN + dataset: dairv2xt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: true + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS_fpvrcnn.plots_dairv2xt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_fpvrcnn_opv2vt.yaml b/cosense3d/config/streamLTS/streamLTS_fpvrcnn_opv2vt.yaml new file mode 100644 index 00000000..8d4b01b6 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_fpvrcnn_opv2vt.yaml @@ -0,0 +1,45 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 1 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS_fpvrcnn.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_fpvrcnn.shared_modules_opv2vt' + cav_manager: + prototype: streamLTS_collection.slcFPVRCNN + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS_fpvrcnn.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt.yaml new file mode 100644 index 00000000..604df9df --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_bev_fcl_lat.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_bev_fcl_lat.yaml new file mode 100644 index 00000000..97ee8783 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_bev_fcl_lat.yaml @@ -0,0 +1,49 @@ +DATASET: 
'pycfg.base.opv2vt.seq4_vox04_lat1' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_bev_fcl_lat' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_bev_fcl_randlat_rl.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_bev_fcl_randlat_rl.yaml new file mode 100644 index 00000000..08c71e6c --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_bev_fcl_randlat_rl.yaml @@ -0,0 +1,49 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_randlat_rl' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_rl.shared_modules_opv2vt' + cav_manager: + prototype: streamLTS_collection.LTSCAVLocCorr + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_fcl.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_fcl.yaml new file mode 100644 index 00000000..d74f4c87 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_fcl.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_roi_focal_loss' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_fcl_lat.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_fcl_lat.yaml new file mode 100644 index 00000000..5aebee53 --- /dev/null +++ 
b/cosense3d/config/streamLTS/streamLTS_opv2vt_fcl_lat.yaml @@ -0,0 +1,49 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_lat1' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_fcl_lat' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_gevbev.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_gevbev.yaml new file mode 100644 index 00000000..d74f4c87 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_gevbev.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_roi_focal_loss' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_lat.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_lat.yaml new file mode 100644 index 00000000..42ae0991 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_lat.yaml @@ -0,0 +1,49 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_lat1' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 +# policy: 'MultiStepLR' +# milestones: [1, 2] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS_lat.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_lat.shared_modules_opv2vt_fcl_lat' + cav_manager: + prototype: streamLTS_lat_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_no_dilconv.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_dilconv.yaml new file mode 
100644 index 00000000..2694bfad --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_dilconv.yaml @@ -0,0 +1,49 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 55 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: +# policy: 'TransformerAdaptiveScheduler' +# dim_embed: 256 +# warmup_steps: 4000 + policy: 'MultiStepLR' + milestones: [3] + gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_no_dilconv' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false +# num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_no_global_attn.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_global_attn.yaml new file mode 100644 index 00000000..d455da6c --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_global_attn.yaml @@ -0,0 +1,46 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_no_global_attn' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_no_roi_reg.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_roi_reg.yaml new file mode 100644 index 00000000..331df497 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_roi_reg.yaml @@ -0,0 +1,46 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_no_roi_reg' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_no_t.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_t.yaml new file mode 100644 index 
00000000..ced31434 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_no_t.yaml @@ -0,0 +1,46 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS.shared_modules_opv2vt_no_t' + cav_manager: + prototype: streamLTS_collection.slcNoBoxTime + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_randlat.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_randlat.yaml new file mode 100644 index 00000000..642483e1 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_randlat.yaml @@ -0,0 +1,46 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_randlat' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 4000 + +TEST: + hooks: 'pycfg.nets.streamLTS_v2.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_v2.shared_modules_opv2vt_bev_fcl_lat' + cav_manager: + prototype: streamLTS_collection.StreamLidarCAV + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + num_grad_cav: 2 + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS_v2.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/streamLTS/streamLTS_opv2vt_randlat_locerr.yaml b/cosense3d/config/streamLTS/streamLTS_opv2vt_randlat_locerr.yaml new file mode 100644 index 00000000..9876bdf2 --- /dev/null +++ b/cosense3d/config/streamLTS/streamLTS_opv2vt_randlat_locerr.yaml @@ -0,0 +1,48 @@ +DATASET: 'pycfg.base.opv2vt.seq4_vox04_locerr_rl' + +TRAIN: +# project_name: 'centernet' + wandb_account: 'opheliayuan' + log_every: 10 + max_epoch: 50 + gpus: 0 + hooks: 'pycfg.base.hooks.train_hooks' + optimizer: + lr: 0.0002 + weight_decay: 1e-2 + betas: [0.9, 0.98] + lr_scheduler: + policy: 'TransformerAdaptiveScheduler' + dim_embed: 256 + warmup_steps: 2000 +# policy: 'MultiStepLR' +# milestones: [10, 15] +# gamma: 0.1 + +TEST: + hooks: 'pycfg.nets.streamLTS.test_hooks_opv2vt' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.opv2vt.data_manager' + shared_modules: 'pycfg.nets.streamLTS_locerr.shared_modules_opv2vt' + cav_manager: + prototype: streamLTS_collection.LTSCAVLocCorr + dataset: opv2vt + memory_len: 4 + memory_emb_dims: 256 + memory_num_propagated: 256 + ref_pts_dim: 3 + all_grad: false + +VISUALIZATION: + output_viewer: + plots: 'pycfg.nets.streamLTS.plots_opv2vt' + + + + + + + diff --git a/cosense3d/config/v2vreal.yaml b/cosense3d/config/v2vreal.yaml new file mode 100644 index 00000000..8793ab1b --- /dev/null +++ b/cosense3d/config/v2vreal.yaml @@ -0,0 +1,18 @@ +DATASET: 
'pycfg.base.v2vreal.seq1_vox04' + + +CONTROLLER: + num_loss_frame: 1 + data_manager: 'pycfg.base.v2vreal.data_manager' + shared_modules: {} + cav_manager: + prototype: base_cav.OPV2VtCAV + memory_len: 1 + +VISUALIZATION: + output_viewer: + plots: [] + + + + diff --git a/cosense3d/dataset/__init__.py b/cosense3d/dataset/__init__.py new file mode 100644 index 00000000..cb5e8111 --- /dev/null +++ b/cosense3d/dataset/__init__.py @@ -0,0 +1,33 @@ +import logging +import torch +import importlib + +from torch.utils.data.distributed import DistributedSampler + + +def get_dataloader(cfgs, mode='train', distributed=False): + name = cfgs['dataset'] + module = importlib.import_module(f'cosense3d.dataset.{name.lower()}_dataset') + dataset_full_name = ''.join([n[:1].upper() + n[1:] for n in name.split('_')]) + 'Dataset' + assert hasattr(module, dataset_full_name), "Invalid dataset." + module_class = getattr(module, dataset_full_name) + dataset = module_class(cfgs, mode) + shuffle = cfgs.get('shuffle', True) if mode=='train' else False + if distributed: + shuffle = False + sampler = DistributedSampler(dataset) + else: + sampler = None + dataloader = torch.utils.data.DataLoader(dataset, + batch_size=cfgs[f'batch_size_{mode}'], + sampler=sampler, + num_workers=cfgs['n_workers'], + shuffle=shuffle, + collate_fn=dataset.collate_batch, + pin_memory=True, + drop_last=True) + return dataloader + + + + diff --git a/cosense3d/dataset/const.py b/cosense3d/dataset/const.py new file mode 100644 index 00000000..0f868329 --- /dev/null +++ b/cosense3d/dataset/const.py @@ -0,0 +1,183 @@ + +LABEL_COLORS = { + 'Unlabeled': (0, 0, 0), # 0 Unlabeled + 'Buildings': (70, 70, 70), # 1 Buildings + 'Fences': (100, 40, 40), # 2 Fences + 'Other': (55, 90, 80), # 3 Other + 'Pedestrians': (220, 20, 60), # 4 Pedestrians + 'Poles': (153, 153, 153), # 5 Poles + 'RoadLines': (157, 234, 50), # 6 RoadLines + 'Roads': (128, 64, 128), # 7 Roads + 'Sidewalks': (244, 35, 232), # 8 Sidewalks + 'Vegetation': (107, 142, 35), # 9 Vegetation + 'Vehicles': (0, 0, 142), # 10 Vehicles + 'Walls': (102, 102, 156), # 11 Walls + 'TrafficSign': (220, 220, 0), # 12 TrafficSign + 'Sky': (70, 130, 180), # 13 Sky + 'Ground': (81, 0, 81), # 14 Ground + 'Bridge': (150, 100, 100), # 15 Bridge + 'Railtrack': (230, 150, 140), # 16 Railtrack + 'GuardRail': (180, 165, 180), # 17 GuardRail + 'TrafficLight': (250, 170, 30), # 18 TrafficLight + 'Static': (110, 190, 160), # 19 Static + 'Dynamic': (170, 120, 50), # 20 Dynamic + 'Water': (45, 60, 150), # 21 Water + 'Terrain': (145, 170, 100) # 22 Terrain +} + + +VALID_CLS_nuscenes = [ + [24], # 1 drivable surface + [17, 19, 20], # 2 car + [15, 16], # 3 bus + [18], # 4 construction_vehicle + [21], # 5 motorcycle + [14], # 6 bicycle + [22], # 7 trailer + [23], # 8 truck + [2, 3, 4, 5, 6, 7, 8], # 9 pedestrian + [12], # 10 traffic_cone + [25], # 11 other_flat + [26], # 12 sidewalk + [27], # 13 terrain + [28], # 14 manmade + [30], # 15 vegetation + [9], # 16 barrier +] + +CoSenseBenchmarks = { + 'CenterPoints': { + 0: [ + 'vehicle.car', + ], + 1: [ + 'vehicle.truck', + ], + 2: [ + 'vehicle.bus', + ], + 3: [ + 'vehicle.motorcycle', + ], + 4: [ + 'vehicle.cyclist' + ], + 5: [ + 'human.pedestrian', + ] + }, + 'Car': { + 0: ['vehicle.car'] + }, + 'Detection3Dpseudo4WheelVehicle': { + 0: [ + 'vehicle.car', + 'vehicle.van', + # 'vehicle.truck', + # 'vehicle.bus', + ] # four-wheel-vehicle + }, + 'Detection3DpseudoVehicle': { + 0: [ + 'vehicle.car', + 'vehicle.van', + 'vehicle.truck', + 'vehicle.bus', + ], # four-wheel-vehicle + 1: [ 
+ 'vehicle.motorcycle', + 'vehicle.cyclist', + 'vehicle.scooter' + ] # two-wheel-vehicle + }, + 'Detection3DpseudoAll': { + 0: [ + 'vehicle.car', + 'vehicle.van', + 'vehicle.truck', + # 'vehicle.bus', + ], # four-wheel-vehicle + 1: [ + 'vehicle.motorcycle', + 'vehicle.cyclist', + 'vehicle.scooter' + ], # two-wheel-vehicle + 2: [ + 'human.pedestrian', + 'human.wheelchair', + 'human.sitting' + ] # human + } +} + +OPV2V_TOWN_DICTIONARY = { + '2021_08_20_21_48_35': 'Town06', + '2021_08_18_19_48_05': 'Town06', + '2021_08_20_21_10_24': 'Town06', + '2021_08_21_09_28_12': 'Town06', + '2021_08_22_07_52_02': 'Town05', + '2021_08_22_09_08_29': 'Town05', + '2021_08_22_21_41_24': 'Town05', + '2021_08_23_12_58_19': 'Town05', + '2021_08_23_15_19_19': 'Town04', + '2021_08_23_16_06_26': 'Town04', + '2021_08_23_17_22_47': 'Town04', + '2021_08_23_21_07_10': 'Town10HD', + '2021_08_23_21_47_19': 'Town10HD', + '2021_08_24_07_45_41': 'Town10HD', + '2021_08_24_11_37_54': 'Town07', + '2021_08_24_20_09_18': 'Town04', + '2021_08_24_20_49_54': 'Town04', + '2021_08_24_21_29_28': 'Town04', + '2021_08_16_22_26_54': 'Town06', + '2021_08_18_09_02_56': 'Town06', + '2021_08_18_18_33_56': 'Town06', + '2021_08_18_21_38_28': 'Town06', + '2021_08_18_22_16_12': 'Town06', + '2021_08_18_23_23_19': 'Town06', + '2021_08_19_15_07_39': 'Town06', + '2021_08_20_16_20_46': 'Town06', + '2021_08_20_20_39_00': 'Town06', + '2021_08_20_21_00_19': 'Town06', + '2021_08_21_09_09_41': 'Town06', + '2021_08_21_15_41_04': 'Town05', + '2021_08_21_16_08_42': 'Town05', + '2021_08_21_17_00_32': 'Town05', + '2021_08_21_21_35_56': 'Town05', + '2021_08_21_22_21_37': 'Town05', + '2021_08_22_06_43_37': 'Town05', + '2021_08_22_07_24_12': 'Town05', + '2021_08_22_08_39_02': 'Town05', + '2021_08_22_09_43_53': 'Town05', + '2021_08_22_10_10_40': 'Town05', + '2021_08_22_10_46_58': 'Town06', + '2021_08_22_11_29_38': 'Town06', + '2021_08_22_22_30_58': 'Town05', + '2021_08_23_10_47_16': 'Town04', + '2021_08_23_11_06_41': 'Town05', + '2021_08_23_11_22_46': 'Town04', + '2021_08_23_12_13_48': 'Town05', + '2021_08_23_13_10_47': 'Town05', + '2021_08_23_16_42_39': 'Town04', + '2021_08_23_17_07_55': 'Town04', + '2021_08_23_19_27_57': 'Town10HD', + '2021_08_23_20_47_11': 'Town10HD', + '2021_08_23_22_31_01': 'Town10HD', + '2021_08_23_23_08_17': 'Town10HD', + '2021_08_24_09_25_42': 'Town07', + '2021_08_24_09_58_32': 'Town07', + '2021_08_24_12_19_30': 'Town07', + '2021_09_09_13_20_58': 'Town03', + '2021_09_09_19_27_35': 'Town01', + '2021_09_10_12_07_11': 'Town04', + '2021_09_09_23_21_21': 'Town03', + '2021_08_21_17_30_41': 'Town05', + '2021_08_22_13_37_16': 'Town06', + '2021_08_22_22_01_17': 'Town05', + '2021_08_23_10_51_24': 'Town05', + '2021_08_23_13_17_21': 'Town05', + '2021_08_23_19_42_07': 'Town10HD', + '2021_09_09_22_21_11': 'Town02', + '2021_09_11_00_33_16': 'Town10HD', + '2021_08_18_19_11_02': 'Town06' +} \ No newline at end of file diff --git a/cosense3d/dataset/cosense_dataset.py b/cosense3d/dataset/cosense_dataset.py new file mode 100644 index 00000000..e395606c --- /dev/null +++ b/cosense3d/dataset/cosense_dataset.py @@ -0,0 +1,244 @@ +import copy +import glob +import os +import logging +import time +import random +from typing import List, Optional, Union + +import open3d as o3d +import cv2 +from PIL import Image +import numpy as np +import torch +import torch.nn.functional as F +from torch.utils.data import Dataset + +from cosense3d.dataset.pipeline import Pipeline +from cosense3d.utils.misc import load_json +from cosense3d.dataset.const import CoSenseBenchmarks as 
csb +from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs + + +class CosenseDataset(Dataset): + LABEL_COLORS = {} + VALID_CLS = [] + + def __init__(self, cfgs, mode): + self.cfgs = cfgs + self.mode = mode + self.COM_RANGE = self.cfgs.get('com_range', 70) + self.latency = cfgs.get('latency', 0) + self.loc_err = np.array(cfgs.get('loc_err', [0, 0, 0])) + if cfgs.get('enable_split_sub_folder', True): + self.data_path = os.path.join(self.cfgs['data_path'], self.mode) + else: + self.data_path = self.cfgs['data_path'] + + self.max_num_cavs = cfgs['max_num_cavs'] + + self.init_dataset() + + self.pipeline = Pipeline(cfgs[f'{mode}_pipeline']) + # for frames that do not need loss calculation, omit gt-loading to save time + if 'inf_pipeline' in cfgs: + self.inf_pipeline = Pipeline(cfgs['inf_pipeline']) + else: + self.inf_pipeline = self.pipeline + + def __len__(self): + return len(self.samples) + + def __getitem__(self, item): + return self.load_frame_data(item) + + def load_frame_data(self, + item: int, + prev_agents: Optional[List] = None, + prev_item: Optional[int] = None, + omit_gt: Optional[bool] = False, + loc_err: Union[list, None] = None) -> dict: + """ + Load all data and annotations from one frame to standard CoSense format. + + :param item: sample index. + :param prev_agents: only load data the previous agents if given, this is used for temporal data loading. + :param prev_item: the index of the previous loaded sample. + :param omit_gt: whether to omit loading the ground truth annotations. + :param loc_err: localization error. + :return: data_dict + """ + sample_info = self.load_sample_info(item, prev_agents, prev_item) + sample_info['loc_err'] = loc_err + if omit_gt: + data_dict = self.inf_pipeline(sample_info) + else: + data_dict = self.pipeline(sample_info) + data_dict.pop('sample_info') + data_dict.pop('data_path') + return data_dict + + def init_dataset(self): + """Load all necessary meta information""" + self.load_meta() + self.parse_samples() + + def parse_samples(self): + """List all frame-wise instances""" + # list all frames, each frame as a sample + self.samples = [] + drop_scenarios = self.cfgs.get('drop_scenarios', []) + for scenario, scontent in self.meta_dict.items(): + if scenario in drop_scenarios: + continue + self.samples.extend(sorted([[scenario, frame] for frame in scontent.keys()])) + self.samples = sorted(self.samples) + + print(f"{self.mode} : {len(self.samples)} samples.") + + def load_meta(self): + """Load meta data from CoSense json files""" + self.meta_dict = {} + meta_dir = self.cfgs['meta_path'] + if meta_dir == '': + return + if 'split' in self.cfgs: + scenarios = self.cfgs['split'][self.mode] + elif os.path.exists(os.path.join(self.cfgs['meta_path'], f"{self.mode}.txt")): + with open(os.path.join(self.cfgs['meta_path'], f"{self.mode}.txt"), 'r') as fh: + scenarios = [l.strip() for l in fh.readlines() if len(l.strip()) > 0] + else: + scenarios = [d[:-5] for d in os.listdir(meta_dir) if 'json' in d] + + for scenario in scenarios: + meta_file = os.path.join(meta_dir, f"{scenario}.json") + scenario_dict = load_json(meta_file) + # scenario_dict = {s: scenario_dict[s] for s in list(scenario_dict.keys())[:1]} + self.meta_dict[scenario] = scenario_dict + + def load_sample_info(self, item: int, prev_agents: Optional[List] = None, prev_item: Optional[int] = None) -> dict: + """ + Load meta info of the ```item```'th sample. + + :param item: sample index. 
+ :param prev_agents: only load data the previous agents if given, this is used for temporal data loading. + :param prev_item: the index of the previous loaded sample. + :return: batch_dict: dict(scenario: str, frame: str, sample_info: dict) + """ + # load meta info + scenario, frame = self.samples[item] + sample_info = copy.deepcopy(self.meta_dict[scenario][frame]) + + if prev_item is None: + prev_item = max(item - 1, 0) + prev_scenario, prev_frame = self.samples[prev_item] + prev_idx = f'{prev_scenario}.{prev_frame}' + next_item = min(item + 1, self.__len__() - 1) + next_scenario, next_frame = self.samples[next_item] + next_idx = f'{next_scenario}.{next_frame}' + + if prev_scenario != scenario: + prev_agents = None + valid_agent_ids = self.get_valid_agents(sample_info, prev_agents) + + # previous agents might not in current frame when load sequential data + scenario_tokens = [f'{scenario}.{ai}' for ai in valid_agent_ids if ai in sample_info['agents']] + + # if latency > 0, set the sample info of coop. cavs to previous frame at -latency + if self.latency != 0: + # get random latency if latency flag is -1 + latency = np.random.randint(3) if self.latency == -1 else self.latency + latent_item = max(item - latency, 0) + latent_scenario, latent_frame = self.samples[latent_item] + if latent_scenario != scenario: + # make sure the scenario is the same as the current frame + latent_scenario = scenario + latent_frame = frame + latent_info = copy.deepcopy(self.meta_dict[latent_scenario][latent_frame]) + # update coop agent info to latent frame + for cav_id in valid_agent_ids: + if cav_id == sample_info['meta']['ego_id']: + continue + if cav_id in latent_info['agents']: + sample_info['agents'][cav_id] = latent_info['agents'][cav_id] + + return { + 'scenario': scenario, + 'frame': frame, + 'data_path': self.data_path, + 'sample_info': sample_info, + 'valid_agent_ids': valid_agent_ids, + 'scene_tokens': scenario_tokens, + } + + def get_valid_agents(self, sample_info: dict, prev_agents: Optional[List] = None) -> List: + """ + Return prev_agents if given else select the given number of agents in the communication range + which includes the ego agent. + + Parameters + ---------- + sample_info: meta info the one sample. + prev_agents: list of the agent ids loader last time. 
+ + Returns + ------- + agents_ids: list of valid agent for the current sample + """ + if prev_agents is not None: + return prev_agents + else: + agents = sample_info['agents'] + ego_id = str(sample_info['meta']['ego_id']) + agents_ids = [ego_id] + # filter cavs in communication range + ego_pose_vec = agents[ego_id]['pose'] + in_range_cavs = [] + for ai, adict in agents.items(): + if ai == ego_id: + continue + if ((adict['pose'][0] - ego_pose_vec[0])**2 + (adict['pose'][1] - ego_pose_vec[1])**2 + < self.COM_RANGE**2): + in_range_cavs.append(ai) + if self.max_num_cavs > 1: + agents_ids += random.sample(in_range_cavs, k=min(self.max_num_cavs - 1, len(in_range_cavs))) + return agents_ids + + @staticmethod + def collate_batch(batch_list): + keys = batch_list[0].keys() + batch_dict = {k:[] for k in keys} + + def list_np_to_tensor(ls): + ls_tensor = [] + for i, l in enumerate(ls): + if isinstance(l, list): + l_tensor = list_np_to_tensor(l) + ls_tensor.append(l_tensor) + elif isinstance(l, np.ndarray): + tensor = torch.from_numpy(l) + if l.dtype == np.float64: + tensor = tensor.float() + ls_tensor.append(tensor) + else: + ls_tensor.append(l) + return ls_tensor + + for k in keys: + if isinstance(batch_list[0][k], np.ndarray): + batch_dict[k] = [torch.from_numpy(batch[k]) for batch in batch_list] + elif isinstance(batch_list[0][k], list): + batch_dict[k] = [list_np_to_tensor(batch[k]) for batch in batch_list] + else: + batch_dict[k] = [batch[k] for batch in batch_list] + return batch_dict + + +if __name__=="__main__": + from cosense3d.utils.misc import load_yaml + from torch.utils.data import DataLoader + cfgs = load_yaml("/mars/projects20/CoSense3D/cosense3d/config/petr.yaml") + cosense_dataset = CosenseDataset(cfgs['DATASET'], 'train') + cosense_dataloader = DataLoader(dataset=cosense_dataset, collate_fn=cosense_dataset.collate_batch) + for data in cosense_dataloader: + print(data.keys()) \ No newline at end of file diff --git a/cosense3d/dataset/pipeline/__init__.py b/cosense3d/dataset/pipeline/__init__.py new file mode 100644 index 00000000..6436bec3 --- /dev/null +++ b/cosense3d/dataset/pipeline/__init__.py @@ -0,0 +1,32 @@ +from cosense3d.dataset.pipeline.loading import * +from cosense3d.dataset.pipeline.transform import * + + +class Pipeline(object): + """Composes several processing modules together. + Take care that these functions modify the input data directly. + """ + + def __init__(self, cfgs): + self.processes = [] + if isinstance(cfgs, list): + for cfg in cfgs: + for k, v in cfg.items(): + self.build_process(k, v) + elif isinstance(cfgs, OrderedDict): + for k, v in cfgs.items(): + self.build_process(k, v) + else: + raise NotImplementedError + + def build_process(self, k, v): + cls = globals().get(k, None) + assert cls is not None, f"Pipeline process node {k} not found." 
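# Usage sketch (illustrative): the config keys are resolved to the processing
# classes imported above via globals(), and Pipeline.__call__ applies the
# instances in order, each one mutating data_dict in place. The node arguments
# shown here are example values taken from the loaders in pipeline/loading.py.
from collections import OrderedDict
from cosense3d.dataset.pipeline import Pipeline

example_cfg = OrderedDict(
    LoadLidarPoints=dict(coop_mode=True, load_attributes=['xyz', 'intensity', 'time']),
    LoadAnnotations=dict(load3d_local=True, load3d_global=True, min_num_pts=5),
)
example_pipeline = Pipeline(example_cfg)
# data_dict = example_pipeline(sample_info)  # sample_info as returned by CosenseDataset.load_sample_info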
+ self.processes.append(cls(**v)) + + def __call__(self, data_dict): + for p in self.processes: + p(data_dict) + return data_dict + + diff --git a/cosense3d/dataset/pipeline/loading.py b/cosense3d/dataset/pipeline/loading.py new file mode 100644 index 00000000..9cca040c --- /dev/null +++ b/cosense3d/dataset/pipeline/loading.py @@ -0,0 +1,679 @@ +import os, random, copy +import glob +from collections import OrderedDict + +import matplotlib.pyplot as plt +import numpy as np +import torch +from plyfile import PlyData +import cv2 + +from cosense3d.utils.pclib import pose_to_transformation +from cosense3d.utils.pcdio import point_cloud_from_path +from cosense3d.utils.misc import load_json + + +class LoadLidarPoints: + + def __init__(self, + coop_mode=True, + load_attributes=['xyz', 'intensity'], + time_offset=0): + self.coop_mode = coop_mode + self.load_attributes = load_attributes + self.time_offset = time_offset + + def read_pcd(self, pts_filename): + pcd = point_cloud_from_path(pts_filename) + points = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1) + lidar_dict = {'xyz': points} + if 'intensity' in pcd.fields: + lidar_dict['intensity'] = pcd.pc_data['intensity'] + if 'timestamp' in pcd.fields: + lidar_dict['time'] = pcd.pc_data['timestamp'] + return lidar_dict + + def _load_points(self, pts_filename): + """ + Load point clouds data form file. + + Parameters + ---------- + pcd_file : str + The pcd file that contains the point cloud. + return_o3d: bool + Default returns numpy array, set True to return pcd as o3d PointCloud object + + Returns + ------- + lidar_dict: + xyz: pcd_np | pcd : np.ndarray | o3d.geometry.PointCloud + The lidar xyz coordinates in numpy format, shape:(n, 3); + intensity: (optional) np.ndarray, (n,); + label: (optional) np.ndarray, (n,); + time: (optional) np.ndarray, (n,); + ray: (optional) np.ndarray, (n,); + """ + lidar_dict = {} + ext = os.path.splitext(pts_filename)[-1] + if ext == '.pcd': + # we do not use to avoid conflict with PyQt5 + lidar_dict = self.read_pcd(pts_filename) + + # pcd = o3d.io.read_point_cloud(pts_filename) + # xyz = np.asarray(pcd.points, dtype=np.float32) + # lidar_dict['xyz'] = xyz + # # we save the intensity in the first channel + # intensity = np.expand_dims(np.asarray(pcd.colors)[:, 0], -1) + # if len(intensity) == len(xyz): + # lidar_dict['intensity'] = intensity + + elif ext == '.bin': + pcd_np = np.fromfile(pts_filename, dtype=np.float32).reshape(-1, 4) + lidar_dict['xyz'] = pcd_np[:, :3] + # check attribute of last column, + # num of unique labels for the datasets in this projects is less than 50, + # unique intensities is normally larger then 50 + if len(np.unique(pcd_np[:, -1])) < 50: + lidar_dict['label'] = pcd_np[:, -1] + elif pcd_np[:, -1].max() > 1: + lidar_dict['intensity'] = pcd_np[:, -1] / 255 + else: + lidar_dict['intensity'] = pcd_np[:, -1] + + elif ext == '.ply': + ply = PlyData.read(pts_filename) + data = ply['vertex'] + properties = [prop.name for prop in data.properties] + data = {name: np.array(data[name]) for name in properties} + xyz = np.stack([data.pop(x) for x in 'xyz'], axis=1) + lidar_dict['xyz'] = xyz + lidar_dict.update(data) + else: + raise NotImplementedError + # reshape for cat + for k, v in lidar_dict.items(): + if v.ndim == 1: + lidar_dict[k] = v.reshape(-1, 1) + return lidar_dict + + def _load_single(self, pts_filename, timestamp=0): + lidar_dict = self._load_points(pts_filename) + if 'intensity' in self.load_attributes and 'intensity' not in lidar_dict: + lidar_dict['intensity'] = 
np.ones_like(lidar_dict['xyz'][:, :1]) + if 'time' in self.load_attributes: + if 'time' in lidar_dict: + lidar_dict['time'] -= self.time_offset + else: + lidar_dict['time'] = np.zeros_like(lidar_dict['xyz'][:, :1]) + (timestamp - self.time_offset) + if 'distance' in self.load_attributes: + lidar_dict['distance'] = np.linalg.norm(lidar_dict['xyz'][:, :2], axis=1, keepdims=True) + if 'cosine' in self.load_attributes: + lidar_dict['cosine'] = np.cos(np.arctan2(lidar_dict['xyz'][:, 1:2], lidar_dict['xyz'][:, 0:1])) + if 'sine' in self.load_attributes: + lidar_dict['sine'] = np.sin(np.arctan2(lidar_dict['xyz'][:, 1:2], lidar_dict['xyz'][:, 0:1])) + + points = np.concatenate( + [lidar_dict[attri] for attri in self.load_attributes], axis=-1) + + return points + + def __call__(self, data_dict): + if self.coop_mode: + points = [] + for ai in data_dict['valid_agent_ids']: + adict = data_dict['sample_info']['agents'][ai] + filename = os.path.join(data_dict['data_path'], adict['lidar']['0']['filename']) + points.append(self._load_single(filename, adict['lidar']['0']['time'])) + else: + ego_id = data_dict['sample_info']['meta']['ego_id'] + ego_dict = data_dict['sample_info']['agents'][ego_id] + filename = os.path.join(data_dict['data_path'], ego_dict['lidar']['0']['filename']) + points = self._load_single(filename) + + data_dict['points'] = points + data_dict['points_attributes'] = self.load_attributes + + return data_dict + + +class LoadMultiViewImg: + def __init__(self, bgr2rgb=False, to_float32=False, max_num_img=None, img_filter_keys=None): + self.bgr2rgb = bgr2rgb + self.to_float32 = to_float32 + self.max_num_img = max_num_img + self.img_filter_keys = img_filter_keys + + def __call__(self, data_dict): + agents = data_dict['sample_info']['agents'] + chosen_cams = OrderedDict() + + img = [] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + chosen_cams[ai] = [] + # get image info + num_cam = 0 + if self.max_num_img is not None and self.max_num_img < len(adict['camera']): + selected = random.sample(list(adict['camera'].keys()), k=self.max_num_img) + cam_dicts = {ci: adict['camera'][ci] for ci in selected} + else: + cam_dicts = copy.copy(adict['camera']) + for ci, cdict in cam_dicts.items(): + # One lidar frame might have several images, only take the 1st one + filename = cdict['filenames'][0] + if self.img_filter_keys is not None and \ + len([1 for k in self.img_filter_keys if k in filename]) == 0: + continue + num_cam += 1 + chosen_cams[ai].append(ci) + img_file = os.path.join(data_dict['data_path'], filename) + I = cv2.imread(img_file) + if self.bgr2rgb: + I = cv2.cvtColor(I, cv2.COLOR_BGR2RGB) + img.append(I) + # img is of shape (h, w, c, num_views) + img = np.stack(img, axis=0) + if self.to_float32: + img = img.astype(np.float32) + + data_dict['img'] = img + data_dict['chosen_cams'] = chosen_cams + return data_dict + + +class LoadAnnotations: + def __init__(self, + load2d=False, load_cam_param=False, + load3d_local=False, load3d_global=False, + load_global_time=False, load3d_pred=False, + min_num_pts=0, with_velocity=False, + class_agnostic_3d=True, time_offset=0, + loc_err=None): + self.load2d = load2d + self.load_cam_param = load_cam_param + self.load3d_local = load3d_local + self.load3d_global = load3d_global + self.load3d_pred = load3d_pred + self.load_global_time = load_global_time + self.min_num_pts = min_num_pts + self.with_velocity = with_velocity + 
self.class_agnostic_3d = class_agnostic_3d + self.time_offset = time_offset + self.loc_err = np.array(loc_err) if loc_err is not None else None # x, y, r + + def __call__(self, data_dict): + self._load_essential(data_dict) + if self.load2d: + data_dict = self._load_anno2d(data_dict) + elif self.load_cam_param: + data_dict = self._load_cam_param(data_dict) + + if self.load3d_local: + data_dict = self._load_anno3d_local(data_dict) + if self.load3d_global: + data_dict = self._load_anno3d_global(data_dict) + if self.load_global_time: + data_dict = self._load_global_time(data_dict) + if self.load3d_pred: + data_dict = self._load_anno3d_pred(data_dict) + + return data_dict + + def _add_loc_err(self, pose, loc_err): + pose_ = copy.deepcopy(pose) + if self.loc_err is not None: + loc_err = np.random.randn(3) * self.loc_err + + pose_[0] = pose[0] + loc_err[0] + pose_[1] = pose[1] + loc_err[1] + pose_[5] = pose[5] + loc_err[2] + return pose_ + + def _load_essential(self, data_dict): + lidar_poses = [] + lidar_poses_gt = [] + vehicle_poses = [] + timestampes = [] + agents = data_dict['sample_info']['agents'] + loc_err = data_dict['loc_err'] + ego_pose = agents[data_dict['sample_info']['meta']['ego_id']]['lidar']['0']['pose'] + ego_pose = self._add_loc_err(ego_pose, loc_err[0]) + ego_pose = pose_to_transformation(ego_pose) + for i, ai in enumerate(data_dict['valid_agent_ids']): + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + lidar_pose = self._add_loc_err(adict['lidar']['0']['pose'], loc_err=loc_err[i]) + lidar_pose = pose_to_transformation(lidar_pose) + lidar_poses.append(lidar_pose) + if self.loc_err is not None or loc_err is not None: + lidar_poses_gt.append(pose_to_transformation(adict['lidar']['0']['pose'])) + vehicle_poses.append(adict['pose']) + if adict['lidar']['0']['time'] is not None: + # dairv2x + timestampes.append(adict['lidar']['0']['time'] - self.time_offset) + else: + # opv2v + # TODO update opv2v meta files with lidar timestamps + timestampes.append(int(data_dict['frame']) * 0.1 - self.time_offset) + + data_dict.update({ + 'lidar_poses': lidar_poses, + 'ego_poses': ego_pose, + 'timestamp': timestampes, + 'vehicle_poses': vehicle_poses + }) + + if len(lidar_poses) == len(lidar_poses_gt): + data_dict['lidar_poses_gt'] = lidar_poses_gt + + return data_dict + + def _load_cam_param(self, data_dict): + intrinsics = [] + extrinsics = [] + lidar2img = [] + + agents = data_dict['sample_info']['agents'] + chosen_cams = data_dict['chosen_cams'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + cam_ids = chosen_cams[ai] + for ci in cam_ids: + cdict = adict['camera'][ci] + I4x4 = np.eye(4) + I4x4[:3, :3] = np.array(cdict['intrinsic']) + intrinsics.append(I4x4.astype(np.float32)) + extrinsics.append(np.array(cdict['lidar2cam']).astype(np.float32)) + lidar2img.append(self.get_lidar2img_transform( + cdict['lidar2cam'], cdict['intrinsic']).astype(np.float32)) + + data_dict.update({ + 'intrinsics': intrinsics, + 'extrinsics': extrinsics, + 'lidar2img': lidar2img, + }) + return data_dict + + def _load_anno2d(self, data_dict): + intrinsics = [] + extrinsics = [] + lidar2img = [] + bboxes2d = [] + centers2d = [] + depths = [] + labels = [] + + agents = data_dict['sample_info']['agents'] + chosen_cams = data_dict['chosen_cams'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous 
agents might not in current frame when load sequential data + continue + adict = agents[ai] + cam_ids = chosen_cams[ai] + for ci in cam_ids: + cdict = adict['camera'][ci] + I4x4 = np.eye(4) + I4x4[:3, :3] = np.array(cdict['intrinsic']) + intrinsics.append(I4x4.astype(np.float32)) + extrinsics.append(np.array(cdict['lidar2cam']).astype(np.float32)) + lidar2img.append(self.get_lidar2img_transform( + cdict['lidar2cam'], cdict['intrinsic']).astype(np.float32)) + cam_info = adict['camera'][ci] + # num_lidar_pts = np.ones(len(gt_names)).astype(int) + # valid_flag = np.ones(len(gt_names)).astype(bool) + mask = np.array(cam_info['num_pts']) > self.min_num_pts + bboxes2d.append(np.array(cam_info['bboxes2d']).astype(np.float32)[mask]) + centers2d.append(np.array(cam_info['centers2d']).astype(np.float32)[mask]) + depths.append(np.array(cam_info['depths']).astype(np.float32)[mask]) + labels.append(np.zeros(mask.sum(), dtype=int)) + + data_dict.update({ + 'intrinsics': intrinsics, + 'extrinsics': extrinsics, + 'lidar2img': lidar2img, + 'bboxes2d': bboxes2d, + 'centers2d': centers2d, + 'depths2d': depths, + 'labels2d': labels + }) + return data_dict + + def _load_anno3d_local(self, data_dict): + local_bboxes_3d = [] + local_labels_3d = [] + local_boxes_3d_id = [] + local_names = [] + agents = data_dict['sample_info']['agents'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + boxes = np.array(adict['gt_boxes']).reshape(-1, 11) + if 'num_pts' not in adict: + mask = np.ones_like(boxes[:, 0]).astype(bool) + else: + mask = np.array(adict['num_pts']) > self.min_num_pts + if len(boxes) != len(mask): + # TODO: update num pts in meta + mask = np.ones_like(boxes[..., 0]).astype(bool) + boxes = boxes[mask] + local_boxes = boxes[:, [2, 3, 4, 5, 6, 7, 10]].astype(np.float32) + local_boxes_id = boxes[:, 0].astype(int) + if self.class_agnostic_3d: + local_labels = np.zeros(len(boxes), dtype=int) + else: + local_labels = boxes[:, 1].astype(int) + if self.with_velocity: + if 'velos' in adict: + velos = np.array(adict['velos']).reshape(-1, 2).astype(np.float32) / 3.6 + local_boxes = np.concatenate([local_boxes, velos[mask]], axis=-1) + else: + velos = np.zeros_like(local_boxes[:, :2]) + local_boxes = np.concatenate([local_boxes, velos], axis=-1) + local_bboxes_3d.append(local_boxes) + local_labels_3d.append(local_labels) + local_boxes_3d_id.append(local_boxes_id) + assert np.all(local_labels == 0), "Num. cls > 1 not implemented." 
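# Column layout of one 'gt_boxes' row as consumed above (inferred from the
# indexing; the roles of the unused columns 8-9 are an assumption about the
# metadata format): [0] object id, [1] class label, [2:5] box center x/y/z,
# [5:8] box dimensions, [8:10] unused by this loader, [10] heading angle.
# Hence boxes[:, [2, 3, 4, 5, 6, 7, 10]] yields the 7-DoF box passed downstream.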
+ local_names.append(['car' for _ in local_labels]) + + data_dict.update({ + 'local_bboxes_3d': local_bboxes_3d, + 'local_labels_3d': local_labels_3d, + 'local_bboxes_id': local_boxes_3d_id, + 'local_names': local_names, + }) + + return data_dict + + def _load_anno3d_global(self, data_dict): + frame_meta = data_dict['sample_info']['meta'] + boxes = np.array(frame_meta['bbx_center_global']) + global_bboxes_3d = boxes[:, [2, 3, 4, 5, 6, 7, 10]].astype(np.float32) + global_bboxes_id = boxes[:, 0].astype(int) + if self.class_agnostic_3d: + global_labels_3d = np.zeros(len(boxes), dtype=int) + else: + global_labels_3d = boxes[:, 1].astype(int) + + if self.with_velocity: + if 'bbx_velo_global' in frame_meta: + global_velocity = np.array(frame_meta['bbx_velo_global']).astype(np.float32) / 3.6 + else: + global_velocity = np.zeros_like(global_bboxes_3d[:, :2]) + global_bboxes_3d = np.concatenate([global_bboxes_3d, global_velocity], axis=-1) + + if 'num_pts' in frame_meta and self.min_num_pts > 0: + global_box_num_pts = np.array(frame_meta['num_pts']) + mask = global_box_num_pts > self.min_num_pts + global_bboxes_3d = global_bboxes_3d[mask] + global_labels_3d = global_labels_3d[mask] + global_bboxes_id = global_bboxes_id[mask] + + # TODO: currently only support car + global_names = ['car' for _ in global_labels_3d] + data_dict.update({ + 'global_bboxes_3d': global_bboxes_3d, + 'global_labels_3d': global_labels_3d, + 'global_bboxes_id': global_bboxes_id, + 'global_names': global_names, + }) + return data_dict + + def _load_global_time(self, data_dict): + frame_meta = data_dict['sample_info']['meta'] + if 'global_bbox_time' in frame_meta: + data_dict['global_time'] = frame_meta['global_bbox_time'][0] + else: + data_dict['global_time'] = data_dict['points'][0][:, -1].max() + return data_dict + + def _load_anno3d_pred(self, data_dict): + frames = sorted(list(data_dict['sample_info']['meta']['boxes_pred'].keys())) + boxes_preds = [data_dict['sample_info']['meta']['boxes_pred'][f] for f in frames] + data_dict['bboxes_3d_pred'] = np.array(boxes_preds) + return data_dict + + def get_lidar2img_transform(self, lidar2cam, intrinsic): + if isinstance(lidar2cam, list): + intrinsic = np.array(intrinsic) + try: + P = intrinsic @ lidar2cam[:3] + except: + print(intrinsic) + print(lidar2cam) + lidar2img = np.concatenate([P, lidar2cam[3:]], axis=0) + return lidar2img + + +class LoadOPV2VBevMaps: + def __init__(self, keys=None, use_global_map=True, ego_only=True, range=75): + self.keys = keys + self.use_global_map = use_global_map + self.ego_only = ego_only + self.range = range + self.map_res = 0.2 + self.map_size = int(self.range * 2 / self.map_res) + pad = int(range / self.map_res) + if self.use_global_map: + self.keys = ['bevmap', 'bevmap_coor'] + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps/png" + map_files = glob.glob(os.path.join(map_path, '*.png')) + self.scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + self.map_bounds = load_json(os.path.join(assets_path, 'map_bounds.json')) + self.bevmaps = {} + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + bevmap = cv2.imread(mf) / 255. 
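# Worked example for the grid coordinate template built below: with the default
# range of 75 m and a map resolution of 0.2 m, the template spans a 750 x 750
# grid whose points sit at cell centers (the 0.1 offset is half a cell).
import numpy as np
rng, res = 75.0, 0.2
map_size = int(rng * 2 / res)                      # 750 cells per axis
inds = np.stack(np.where(np.ones((map_size, map_size))))
xy = inds * res - rng + res / 2                    # cell centers from -74.9 m to 74.9 m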
+ # bevmap = np.pad(bevmap, ((pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0) + self.bevmaps[town] = bevmap + + # grid coor template + grid = np.ones((self.map_size, self.map_size)) + inds = np.stack(np.where(grid)) + xy = inds * 0.2 - self.range + 0.1 + self.xy_pad = np.concatenate([xy, np.zeros_like(xy[:1]), np.ones_like(xy[:1])], axis=0) + # carla has different coor system as cosense3d, T_corr: carla -> cosense3d + self.T_corr = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]]) + else: + assert keys is not None and len(keys) > 0 + + def __call__(self, data_dict): + path = os.path.join(data_dict['data_path'], data_dict['scenario']) + ego_id = data_dict['sample_info']['meta']['ego_id'] + load_dict = {} + + agents = data_dict['valid_agent_ids'] + for ai in agents: + if self.ego_only and ego_id != ai: + for k in load_dict.keys(): + load_dict[k].append(None) + continue + else: + out = self.load_single(path, ai, data_dict) + for k in out.keys(): + if k not in load_dict: + load_dict[k] = [] + load_dict[k].append(out[k]) + + data_dict.update(load_dict) + return data_dict + + def load_single(self, path, ai, data_dict): + # map1 = self.crop_map_for_pose(data_dict, ai)[0] + # map2 = cv2.imread(os.path.join(path, ai, f"{data_dict['frame']}_bev.png"))[..., 0] + # map2 = np.array(map2, dtype=float) / 255. + # map2[map2 > 0] = 1 + # map2 = np.flip(map2, 0).copy() + # bevmap = np.zeros((500, 500, 3)) + # bevmap[..., 0] = map1 + # bevmap[..., 1] = map2 + # import matplotlib.pyplot as plt + # plt.imshow(bevmap) + # plt.show() + # plt.close() + + out = {} + if self.use_global_map: + out['bevmap'], out['bevmap_coor'] = self.crop_map_for_pose(data_dict, ai) + else: + frame = data_dict['frame'] + for k in self.keys: + filename = os.path.join(path, ai, f"{frame}_{k}.png") + bev_map = cv2.imread(filename)[..., 0] + # bev_map = cv2.cvtColor(bev_map, cv2.COLOR_BGR2GRAY) + bev_map = np.array(bev_map, dtype=float) / 255. 
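# The global-map lookup in crop_map_for_pose below reduces to the world-to-pixel
# conversion sketched here; the extra 1.0 m subtracted from the town bounds is
# assumed to match how the map PNGs were rasterised, and indices are afterwards
# clipped to the map extent.
import numpy as np

def world_to_map_idx(xy_world, bound_xy, res=0.2):
    # xy_world: (2, N) points in world coordinates; bound_xy: lower map bounds (x, y)
    offset = np.asarray(bound_xy, dtype=float).reshape(2, 1) + 1.0
    return np.floor((xy_world - offset) / res).astype(int)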
+ bev_map[bev_map > 0] = 1 + out[f'{k}map'] = np.flip(bev_map, 0).copy() + out[f'{k}map_coor'] = [-self.range, - self.range] + return out + + def crop_map_for_pose(self, data_dict, ai): + scenario = data_dict['scenario'] + town = self.scene_maps[scenario] + # lidar_pose = pose_to_transformation(data_dict['sample_info']['agents'][ai]['lidar']['0']['pose']) + lidar_pose = data_dict['lidar_poses'][data_dict['valid_agent_ids'].index(ai)] + cur_map = self.bevmaps[town] + sx, sy = cur_map.shape[:2] + bound = self.map_bounds[town] + + # transform template bev points to world coordinates + transform = self.T_corr @ lidar_pose + xy_tf = transform @ self.xy_pad + # calculate map indices of bev points + xy_tf[0] -= bound[0] + 1.0 + xy_tf[1] -= bound[1] + 1.0 + map_inds = np.floor(xy_tf[:2] / 0.2) + xs = np.clip(map_inds[0], 0, sx - 1).astype(int) + ys = np.clip(map_inds[1], 0, sy - 1).astype(int) + # retrieve cropped bev map from global map + bevmap = cur_map[xs, ys].reshape(self.map_size, self.map_size, 3) # [::-1, ::-1] + + # # bound[0] -= self.range + # # bound[1] -= self.range + # offset_x = int((lidar_pose[0] - self.range - bound[0]) / self.map_res) + # offset_y = int((lidar_pose[1] - self.range - bound[1]) / self.map_res) + # + # xmin = max(offset_x, 0) + # xmax = min(offset_x + size, bevmap.shape[0] - 1) + # ymin = max(offset_y, 0) + # ymax = min(offset_y + size, bevmap.shape[1] - 1) + # bevmap_crop = bevmap[xmin:xmax, ymin:ymax] + # bevmap_coor = [bound[0] + xmin * self.map_res, bound[1] + ymin * self.map_res] + + if data_dict['sample_info']['agents'][ai]['pose'][2] > 2: + bevmap = bevmap[..., 1].astype(bool) + else: + bevmap = bevmap[..., :2].any(-1) + + # import matplotlib.pyplot as plt + # points = data_dict['points'][0] + # mask = bevmap.reshape(-1).astype(bool) + # plt.plot(self.xy_pad[0, mask], self.xy_pad[1, mask], 'g.', markersize=1) + # plt.plot(points[:, 0], points[:, 1], 'b.', markersize=1) + # plt.show() + # plt.close() + + return bevmap, [-self.range, - self.range] + + +class LoadCarlaRoadlineMaps: + def __init__(self, ego_only=True, range=75): + self.ego_only = ego_only + self.range = range + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps/roadline" + map_files = glob.glob(os.path.join(map_path, '*.bin')) + self.scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + self.maps = {} + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + rlmap = np.fromfile(mf, dtype=float).reshape(-1, 2) + # bevmap = np.pad(bevmap, ((pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0) + self.maps[town] = rlmap + + def __call__(self, data_dict): + path = os.path.join(data_dict['data_path'], data_dict['scenario']) + ego_id = data_dict['sample_info']['meta']['ego_id'] + roadline = [] + + agents = data_dict['valid_agent_ids'] + for ai in agents: + if self.ego_only and ego_id != ai: + out = None + else: + out = self.load_single(path, ai, data_dict) + roadline.append(out) + + data_dict['roadline'] = roadline + return data_dict + + def load_single(self, path, ai, data_dict): + scenario = data_dict['scenario'] + town = self.scene_maps[scenario] + # lidar_pose = pose_to_transformation(data_dict['sample_info']['agents'][ai]['lidar']['0']['pose']) + lidar_pose = data_dict['lidar_poses'][data_dict['valid_agent_ids'].index(ai)] + cur_map = self.maps[town] + + mask = (cur_map[:, 0] > (lidar_pose[0, 3] - self.range)) & \ + (cur_map[:, 0] < (lidar_pose[0, 3] + self.range)) & \ + (cur_map[:, 1] > (lidar_pose[1, 3] - 
self.range)) & \ + (cur_map[:, 1] < (lidar_pose[1, 3] + self.range)) + + roadline = cur_map[mask] + + # # visualize + # lidar_file = data_dict['sample_info']['agents'][ai]['lidar']['0']['filename'] + # lidar_file = os.path.join(os.path.dirname(path), lidar_file) + # ply = PlyData.read(lidar_file) + # data = ply['vertex'] + # properties = [prop.name for prop in data.properties] + # data = {name: np.array(data[name]) for name in properties} + # pcd = np.stack([data.pop(x) for x in 'xyz'], axis=1) + # pcd = lidar_pose @ np.concatenate([pcd, np.ones_like(pcd[:, :1])], axis=-1).T + # + # import matplotlib.pyplot as plt + # plt.plot(roadline[:, 0], roadline[:, 1], 'g.', markersize=1) + # plt.plot(pcd[0], pcd[1], 'r.', markersize=1) + # plt.show() + # plt.close() + + return roadline + + +class LoadSparseBevTargetPoints: + def __init__(self, num_points=3000, ego_only=False): + self.num_points = num_points + self.ego_only = ego_only + + def __call__(self, data_dict): + bev_pts = [] + agents = data_dict['sample_info']['agents'] + ego_id = data_dict['sample_info']['meta']['ego_id'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + if self.ego_only and ai != ego_id: + bev_pts.append(np.empty((0, 3))) + else: + pass + + def generate_sparse_bev_pts(self, pcd): + pass + + + + + diff --git a/cosense3d/dataset/pipeline/transform.py b/cosense3d/dataset/pipeline/transform.py new file mode 100644 index 00000000..b706c9cb --- /dev/null +++ b/cosense3d/dataset/pipeline/transform.py @@ -0,0 +1,214 @@ +import numpy as np +from PIL import Image +import torch + + +class ResizeCropFlipRotImage: + """ + Augment images with random resize, crop, flip and rotation. Modified from StreamPETR. 
+ """ + def __init__(self, data_aug_conf=None, with_2d=True, filter_invisible=True, training=True): + self.data_aug_conf = data_aug_conf + self.training = training + self.min_size = 2.0 + self.with_2d = with_2d + self.filter_invisible = filter_invisible + + def __call__(self, data_dict): + imgs = data_dict['img'] + N = len(imgs) + new_imgs = [] + new_gt_bboxes = [] + new_centers2d = [] + new_gt_labels = [] + new_depths = [] + assert self.data_aug_conf['rot_lim'] == [0.0, 0.0], "Rotation is not currently supported" + + resize, resize_dims, crop, flip, rotate = self._sample_augmentation() + + for i in range(N): + img = Image.fromarray(np.uint8(imgs[i])) + img, ida_mat = self._img_transform( + img, + resize=resize, + resize_dims=resize_dims, + crop=crop, + flip=flip, + rotate=rotate, + ) + if self.with_2d: # sync_2d bbox labels + gt_bboxes = data_dict['bboxes2d'][i] + centers2d = data_dict['centers2d'][i] + gt_labels = data_dict['labels2d'][i] + depths = data_dict['depths2d'][i] + if len(gt_bboxes) != 0: + gt_bboxes, centers2d, gt_labels, depths = self._bboxes_transform( + gt_bboxes, + centers2d, + gt_labels, + depths, + resize=resize, + crop=crop, + flip=flip, + ) + if len(gt_bboxes) != 0 and self.filter_invisible: + gt_bboxes, centers2d, gt_labels, depths = self._filter_invisible(gt_bboxes, centers2d, gt_labels, depths) + + new_gt_bboxes.append(gt_bboxes) + new_centers2d.append(centers2d) + new_gt_labels.append(gt_labels) + new_depths.append(depths) + + new_imgs.append(np.array(img).astype(np.float32)) + data_dict['intrinsics'][i][:3, :3] = ida_mat @ data_dict['intrinsics'][i][:3, :3] + data_dict['bboxes2d'] = new_gt_bboxes + data_dict['centers2d'] = new_centers2d + data_dict['labels2d'] = new_gt_labels + data_dict['depths2d'] = new_depths + data_dict['img'] = new_imgs + data_dict['lidar2img'] = [data_dict['intrinsics'][i] @ data_dict['extrinsics'][i] + for i in range(len(data_dict['extrinsics']))] + + return data_dict + + def _bboxes_transform(self, bboxes, centers2d, gt_labels, depths,resize, crop, flip): + assert len(bboxes) == len(centers2d) == len(gt_labels) == len(depths) + fH, fW = self.data_aug_conf["final_dim"] + bboxes = bboxes * resize + bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop[0] + bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop[1] + bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]], 0, fW) + bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]], 0, fH) + keep = ((bboxes[:, 2] - bboxes[:, 0]) >= self.min_size) & ((bboxes[:, 3] - bboxes[:, 1]) >= self.min_size) + + if flip: + x0 = bboxes[:, 0].copy() + x1 = bboxes[:, 2].copy() + bboxes[:, 2] = fW - x0 + bboxes[:, 0] = fW - x1 + bboxes = bboxes[keep] + + centers2d = centers2d * resize + centers2d[:, 0] = centers2d[:, 0] - crop[0] + centers2d[:, 1] = centers2d[:, 1] - crop[1] + centers2d[:, 0] = np.clip(centers2d[:, 0], 0, fW) + centers2d[:, 1] = np.clip(centers2d[:, 1], 0, fH) + if flip: + centers2d[:, 0] = fW - centers2d[:, 0] + + centers2d = centers2d[keep] + gt_labels = gt_labels[keep] + depths = depths[keep] + + return bboxes, centers2d, gt_labels, depths + + def _filter_invisible(self, bboxes, centers2d, gt_labels, depths): + # filter invisible 2d bboxes + assert len(bboxes) == len(centers2d) == len(gt_labels) == len(depths) + fH, fW = self.data_aug_conf["final_dim"] + indices_maps = np.zeros((fH,fW)) + tmp_bboxes = np.zeros_like(bboxes) + tmp_bboxes[:, :2] = np.ceil(bboxes[:, :2]) + tmp_bboxes[:, 2:] = np.floor(bboxes[:, 2:]) + tmp_bboxes = tmp_bboxes.astype(np.int64) + sort_idx = np.argsort(-depths, axis=0, kind='stable') + tmp_bboxes 
= tmp_bboxes[sort_idx] + bboxes = bboxes[sort_idx] + depths = depths[sort_idx] + centers2d = centers2d[sort_idx] + gt_labels = gt_labels[sort_idx] + for i in range(bboxes.shape[0]): + u1, v1, u2, v2 = tmp_bboxes[i] + indices_maps[v1:v2, u1:u2] = i + indices_res = np.unique(indices_maps).astype(np.int64) + bboxes = bboxes[indices_res] + depths = depths[indices_res] + centers2d = centers2d[indices_res] + gt_labels = gt_labels[indices_res] + + return bboxes, centers2d, gt_labels, depths + + def _get_rot(self, h): + return torch.Tensor( + [ + [np.cos(h), np.sin(h)], + [-np.sin(h), np.cos(h)], + ] + ) + + def _img_transform(self, img, resize, resize_dims, crop, flip, rotate): + ida_rot = torch.eye(2) + ida_tran = torch.zeros(2) + # adjust image + img = img.resize(resize_dims) + img = img.crop(crop) + if flip: + img = img.transpose(method=Image.FLIP_LEFT_RIGHT) + img = img.rotate(rotate) + + # post-homography transformation + ida_rot *= resize + ida_tran -= torch.Tensor(crop[:2]) + if flip: + A = torch.Tensor([[-1, 0], [0, 1]]) + b = torch.Tensor([crop[2] - crop[0], 0]) + ida_rot = A.matmul(ida_rot) + ida_tran = A.matmul(ida_tran) + b + A = self._get_rot(rotate / 180 * np.pi) + b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2 + b = A.matmul(-b) + b + ida_rot = A.matmul(ida_rot) + ida_tran = A.matmul(ida_tran) + b + ida_mat = torch.eye(3) + ida_mat[:2, :2] = ida_rot + ida_mat[:2, 2] = ida_tran + return img, ida_mat + + def _sample_augmentation(self): + H, W = self.data_aug_conf["H"], self.data_aug_conf["W"] + fH, fW = self.data_aug_conf["final_dim"] + if self.training: + resize = np.random.uniform(*self.data_aug_conf["resize_lim"]) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int((1 - np.random.uniform(*self.data_aug_conf["bot_pct_lim"])) * newH) - fH + crop_w = int(np.random.uniform(0, max(0, newW - fW))) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + if self.data_aug_conf["rand_flip"] and np.random.choice([0, 1]): + flip = True + rotate = np.random.uniform(*self.data_aug_conf["rot_lim"]) + else: + resize = max(fH / H, fW / W) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int((1 - np.mean(self.data_aug_conf["bot_pct_lim"])) * newH) - fH + crop_w = int(max(0, newW - fW) / 2) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + rotate = 0 + return resize, resize_dims, crop, flip, rotate + + +class ResizeImage: + """ + Resize images. 
+ """ + def __init__(self, img_size): + self.img_size = img_size + + def __call__(self, data_dict): + imgs = data_dict['img'] + imgs_out = [] + for i, img in enumerate(imgs): + img = Image.fromarray(np.uint8(img)) + W, H = img.size + img = img.resize(self.img_size) + imgs_out.append(np.array(img).astype(np.float32)) + + data_dict['intrinsics'][i][0, 0] = self.img_size[0] / W * data_dict['intrinsics'][i][0, 0] + data_dict['intrinsics'][i][1, 1] = self.img_size[1] / H * data_dict['intrinsics'][i][1, 1] + + # todo convert 2d annotations + data_dict['img'] = imgs_out + return data_dict \ No newline at end of file diff --git a/cosense3d/dataset/temporal_cosense_dataset.py b/cosense3d/dataset/temporal_cosense_dataset.py new file mode 100644 index 00000000..feea0ba9 --- /dev/null +++ b/cosense3d/dataset/temporal_cosense_dataset.py @@ -0,0 +1,68 @@ +import random +import numpy as np +from cosense3d.dataset.cosense_dataset import CosenseDataset + + +class TemporalCosenseDataset(CosenseDataset): + """Sequential Cosense data loader.""" + def __init__(self, cfgs, mode): + super().__init__(cfgs, mode) + self.seq_len = cfgs['seq_len'] + self.n_loss_frame = cfgs.get('n_loss_frame', 1) + self.rand_len = cfgs.get('rand_len', 0) + self.seq_mode = cfgs.get('seq_mode', False) + self.clean_seq = cfgs.get('clean_seq', False) + + def __getitem__(self, index): + queue = [] + index_list = list(range(index - self.seq_len - self.rand_len + 1, index)) + random.shuffle(index_list) + index_list = sorted(index_list[self.rand_len:]) + index_list.append(index) + prev_scene_token = None + prev_agents = None + prev_i = None + num_cav = None + omit_gt = [True] * (len(index_list) - self.n_loss_frame) + [False] * self.n_loss_frame + loc_err = np.random.randn(self.max_num_cavs, 3) * self.loc_err.reshape(-1, 3) + + for i, idx in enumerate(index_list): + idx = max(0, idx) + input_dict = self.load_frame_data( + idx, prev_agents, prev_i, omit_gt=omit_gt[i], loc_err=loc_err) + prev_i = idx + + if not self.seq_mode: # for sliding window only + prev_exists = [] + prev_agents = [] + for tk in input_dict['scene_tokens']: + prev_agents.append(tk.split('.')[-1]) + if prev_scene_token is not None and tk in prev_scene_token: + prev_exists.append(np.array([True])) + else: + prev_exists.append(np.array([False])) + input_dict.update(dict(prev_exists=prev_exists)) + prev_scene_token = input_dict['scene_tokens'] + + queue.append(input_dict) + + # remove frames not belong to the current sequence + # and ensure all frames have the same ego id + valid_idx_start = 0 + if self.clean_seq: + ego_id = queue[-1]['valid_agent_ids'][0] + for idx in range(len(queue)): + if queue[idx]['valid_agent_ids'][0] != ego_id: + valid_idx_start = idx + 1 + queue = {k: [q[k] if k in q else None for q in queue[valid_idx_start:]] for k in queue[-1].keys()} + return queue + + +if __name__=="__main__": + from cosense3d.utils.misc import load_yaml + from torch.utils.data import DataLoader + cfgs = load_yaml("/mars/projects20/CoSense3D/cosense3d/config/petr.yaml") + cosense_dataset = TemporalCosenseDataset(cfgs['DATASET'], 'train') + cosense_dataloader = DataLoader(dataset=cosense_dataset, collate_fn=cosense_dataset.collate_batch) + for data in cosense_dataloader: + print(data.keys()) \ No newline at end of file diff --git a/cosense3d/dataset/toolkit/__init__.py b/cosense3d/dataset/toolkit/__init__.py new file mode 100644 index 00000000..b3e6c8a4 --- /dev/null +++ b/cosense3d/dataset/toolkit/__init__.py @@ -0,0 +1,113 @@ +import open3d as o3d +import copy +import numpy as np 
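# Usage sketch for register_pcds defined below: it refines an initial transform
# with point-to-point ICP and, when a list of thresholds is given, re-runs ICP
# coarse-to-fine. The file names here are placeholders.
import numpy as np
from cosense3d.dataset.toolkit import register_pcds

T_init = np.eye(4)                        # e.g. relative pose from GPS/IMU
T_refined = register_pcds(
    "source.pcd", "target.pcd",           # paths or o3d point clouds
    initial_transf=T_init,
    thr=[1.0, 0.5, 0.2],                  # decreasing correspondence distances
    visualize=False)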
+ + +def register_pcds(source_cloud, target_cloud, initial_transf, thr=0.2, visualize=False, title="PCD"): + # Load point clouds + if isinstance(source_cloud, str): + source_cloud = o3d.io.read_point_cloud(source_cloud) + if isinstance(target_cloud, str): + target_cloud = o3d.io.read_point_cloud(target_cloud) + + # source_cloud.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=2, max_nn=50)) + # target_cloud.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=2, max_nn=50)) + + # Perform ICP registration + icp_result = initial_transf + if not isinstance(thr, list): + thr = [thr] + + icp_result = o3d.pipelines.registration.registration_icp( + source_cloud, target_cloud, thr[0], initial_transf, + o3d.pipelines.registration.TransformationEstimationPointToPoint()) + + if len(thr) > 1: + for x in thr[1:]: + icp_result = o3d.pipelines.registration.registration_icp( + source_cloud, target_cloud, x, icp_result.transformation, + o3d.pipelines.registration.TransformationEstimationPointToPoint()) + + # Obtain the final transformation matrix + # transformation_matrix = initial_transf + transformation_matrix = icp_result.transformation + + if visualize: + # Apply the final transformation to the source point cloud + source_aligned0 = copy.deepcopy(source_cloud).transform(initial_transf) + source_aligned = copy.deepcopy(source_cloud).transform(transformation_matrix) + # + # src_pts = np.array(source_cloud.points) + # src_pts_aligned = np.array(source_aligned.points) + # tgt_pts = np.array(target_cloud.points) + # src_angles = (np.arctan2(src_pts[:, 1], src_pts[:, 0]) + np.pi * 3 - np.deg2rad(100)) % ( 2 * np.pi) + # tgt_angles = (np.arctan2(tgt_pts[:, 1], tgt_pts[:, 0]) + np.pi * 3 - np.deg2rad(255)) % ( 2 * np.pi) + # steps = 10 + # res = 1 / steps + # pcds = [] + # for i in range(steps): + # mask_src = (src_angles >= np.pi * 2 * i * res) & (src_angles < np.pi * 2 * (i + 1) * res) + # mask_tgt = (tgt_angles >= np.pi * 2 * i * res) & (tgt_angles < np.pi * 2 * (i + 1) * res) + # + # cur_src_cloud = o3d.geometry.PointCloud() + # cur_tgt_cloud = o3d.geometry.PointCloud() + # cur_src_cloud.points = o3d.utility.Vector3dVector(src_pts[mask_src]) + # cur_tgt_cloud.points = o3d.utility.Vector3dVector(tgt_pts[mask_tgt]) + # cur_src_cloud.paint_uniform_color([0, 0.0 + i / steps * 1.0, 0]) + # cur_tgt_cloud.paint_uniform_color([0, 0, 0.2 + i / steps * 0.8]) + # pcds += [cur_src_cloud] + # o3d.visualization.draw_geometries(pcds) + + # Visualize the aligned point clouds + source_aligned0.paint_uniform_color([1, 0, 0]) + source_aligned.paint_uniform_color([1, 0.706, 0]) + target_cloud.paint_uniform_color([0, 0.651, 0.929]) + o3d.visualization.draw_geometries([source_aligned0, target_cloud], window_name=title) + o3d.visualization.draw_geometries([source_aligned, target_cloud], window_name=title) + + return copy.deepcopy(transformation_matrix) + + +def callback_registrations(source, target, source_points, target_points): + """ + Callback function for point picking. Registers two point clouds using selected corresponding points. 
+ """ + print("Point picking callback called!") + + # Corresponding points + correspondences = np.asarray([source_points, target_points]) + + # Create Open3D point cloud from numpy arrays + source_pc = o3d.geometry.PointCloud() + source_pc.points = o3d.utility.Vector3dVector(source.points[source_points]) + target_pc = o3d.geometry.PointCloud() + target_pc.points = o3d.utility.Vector3dVector(target.points[target_points]) + + # Perform registration + transformation = o3d.pipelines.registration.registration_ransac_based_on_feature_matching( + source_pc, target_pc, correspondences, + o3d.pipelines.registration.TransformationEstimationPointToPoint(), + o3d.pipelines.registration.RANSACConvergenceCriteria(4000000, 500) + ) + + # Apply the transformation to the source point cloud + source.transform(transformation.transformation) + + # Visualize the result + o3d.visualization.draw_geometries([source, target]) + return transformation + + +def click_register(source, target): + # Visualize the two point clouds + o3d.visualization.draw_geometries([source, target]) + + # Register point clouds by picking corresponding points + print("Pick corresponding points in both point clouds. Press 'Q' to finish picking.") + source_points = o3d.visualization.PointCloudPickPoints() + target_points = o3d.visualization.PointCloudPickPoints() + transformation = o3d.visualization.draw_geometries_with_editing( + [source, target, source_points, target_points], + callback=callback_registrations, + window_name="Pick corresponding points") + return transformation diff --git a/cosense3d/dataset/toolkit/cosense.py b/cosense3d/dataset/toolkit/cosense.py new file mode 100644 index 00000000..1e7428d5 --- /dev/null +++ b/cosense3d/dataset/toolkit/cosense.py @@ -0,0 +1,616 @@ +import copy +import glob +import os +import pickle +import random + +import torch +import tqdm +import yaml + +import numpy as np +from cosense3d.utils.misc import load_json, save_json +from cosense3d.utils import pclib +from cosense3d.ops.utils import points_in_boxes_cpu +from cosense3d.dataset.toolkit import register_pcds + + +type_sustech2cosense = { + 'Car': 'vehicle.car', + 'Van': 'vehicle.van', + 'Truck': 'vehicle.truck', + 'Bus': 'vehicle.bus', + 'Tram': 'vehicle.tram', + 'Unknown': 'unknown', + 'BicycleRider': 'vehicle.cyclist', + 'Bicyclerider': 'vehicle.cyclist', + 'MotorcyleRider': 'vehicle.motorcycle', + 'Pedestrian': 'human.pedestrian', + 'HumanSitting': 'human.sitting', + 'Scooterrider': 'vehicle.scooter' +} +type_cosense2sustech = { + v: k for k, v in type_sustech2cosense.items() +} + +csColors = { + 'vehicle.car': [0, 215, 255], #0 + 'vehicle.van': [246, 250, 112], #1 + 'vehicle.truck': [255, 132, 0], #2 + 'vehicle.bus': [0, 223, 162], #3 + 'vehicle.tram': [0, 121, 255], #4 + 'vehicle.motorcycle': [255, 0, 96], #5 + 'vehicle.cyclist': [244, 35, 232], #6 + 'vehicle.scooter': [227, 132, 255], #7 + 'vehicle.other': [180, 254, 152], #8 + 'human.pedestrian': [220, 20, 60], #9 + 'human.wheelchair': [134, 93, 255], #10 + 'human.sitting': [56, 229, 77], #11 + 'static.trafficcone': [255, 0, 0], #12 + 'static.barrowlist': [255, 50, 0], #13 + 'vehicle.tricyclist': [255, 50, 50], #14 + 'unknown': [255, 255, 255], +} + + +class CoSenseDataConverter: + OBJ_LIST = [ + 'vehicle.car', #0 + 'vehicle.van', #1 + 'vehicle.truck', #2 + 'vehicle.bus', #3 + 'vehicle.tram', #4 + 'vehicle.motorcycle', #5 + 'vehicle.cyclist', #6 + 'vehicle.scooter', #7 + 'vehicle.other', #8 + 'human.pedestrian', #9 + 'human.wheelchair', #10 + 'human.sitting', #11 + 'static.trafficcone', 
#12 + 'static.barrowlist', #13 + 'vehicle.tricyclist', #13 + 'unknown', #14 + ] + OBJ_ID2NAME = {i: n for i, n in enumerate(OBJ_LIST)} + OBJ_NAME2ID = {n: i for i, n in enumerate(OBJ_LIST)} + + def __init__(self, data_path, meta_path, mode='all'): + self.data_path = data_path + self.meta_path = meta_path + self.meta = self.load_meta(meta_path, mode) + + def update_from_sustech(self, sustech_path): + for scenario, sdict in self.meta.items(): + for frame, fdict in sdict.items(): + new_label_file = os.path.join( + sustech_path, + scenario, 'label', + frame + '.json' + ) + objects = self.obj_from_sustech(new_label_file) + # TODO the transformation from local to global + self.meta[scenario][frame]['meta']['bbx_center_global'] = objects + + save_json(sdict, os.path.join(self.meta_path, f"{scenario}.json")) + + def to_sustech(self, out_dir=None): + # make out dirs + out_dir = os.path.join(self.data_path, '..', 'sustech_fmt') \ + if out_dir is None else out_dir + for s, sdict in self.meta.items(): + scenario_dir = os.path.join(out_dir, s) + os.makedirs(os.path.join(scenario_dir, 'lidar'), exist_ok=True) + os.makedirs(os.path.join(scenario_dir, 'label'), exist_ok=True) + for f, fdict in tqdm.tqdm(sdict.items()): + bbx_global_center = np.array(fdict['meta']['bbx_center_global']) + # bbx_global_corner = boxes_to_corners_3d(bbx_global_center[:, 2:]) + lidars = [] + for a, adict in fdict['agents'].items(): + for l, ldict in adict['lidar'].items(): + lidar_pose = ldict['pose'] + filename = ldict['filename'].replace('\\', '/') + # TODO rotate points and bbxs + pcd = pclib.load_pcd(os.path.join(self.data_path, filename)) + points = np.concatenate([pcd['xyz'], pcd['intensity'].reshape(-1, 1)], axis=-1) + lidars.append(points.astype(np.float32)) + lidars = np.concatenate(lidars, axis=0) + lidars.tofile(os.path.join(out_dir, scenario_dir, 'lidar', f"{f}.bin")) + # write label file + self.obj_to_sustech( + bbx_global_center, + os.path.join(out_dir, scenario_dir, 'label', f"{f}.json") + ) + + def to_opv2v(self, out_dir=None): + # make out dirs + out_dir = os.path.join(self.data_path, '..', 'opv2v_fmt') \ + if out_dir is None else out_dir + os.makedirs(out_dir, exist_ok=True) + for s, sdict in self.meta.items(): + scenario_dir = os.path.join(out_dir, s) + os.makedirs(scenario_dir, exist_ok=True) + for f, fdict in tqdm.tqdm(sdict.items()): + bbx_global_center = np.array(fdict['meta']['bbx_center_global']) + # bbx_global_corner = boxes_to_corners_3d(bbx_global_center[:, 2:]) + for a, adict in fdict['agents'].items(): + agent_dir = os.path.join(scenario_dir, a) + if not os.path.exists(agent_dir): + os.makedirs(agent_dir) + for l, ldict in adict['lidar'].items(): + lidar_pose = ldict['pose'] + filename = ldict['filename'].replace('\\', '/') + # TODO rotate points and bbxs + pclib.lidar_bin2bin( + os.path.join(self.data_path, filename), + os.path.join(agent_dir, f + '.bin') + ) + # write label file + self.obj_to_opv2v(bbx_global_center, lidar_pose, + os.path.join(agent_dir, f + '.yaml')) + + def to_kitti(self, out_dir=None): + from cosense3d.dataset.toolkit.kitti import type_cosense2kitti + split = { + # 'train': ['measurement4_0'], + # 'val': ['measurement4_1'], + 'test': sorted(self.meta.keys()), + } + # make out dirs + out_dir = os.path.join(self.data_path, '..', 'kitti_test') \ + if out_dir is None else out_dir + os.makedirs(os.path.join(out_dir, 'ImageSets'), exist_ok=True) + for dir_name in ['velodyne', 'image_2', 'label_2', 'calib']: + os.makedirs(os.path.join(out_dir, 'training', dir_name), exist_ok=True) + 
os.makedirs(os.path.join(out_dir, 'validating', dir_name), exist_ok=True) + os.makedirs(os.path.join(out_dir, 'testing', dir_name), exist_ok=True) + # create split files + for sp, seqs in split.items(): + with open(os.path.join(out_dir, 'ImageSets', f"{sp}.txt"), 'w') as fh: + frames = [] + for seq in seqs: + cur_frames = sorted(self.meta[seq].keys()) + cur_frames = [seq.split('_')[0][-1] + f[1:] for f in cur_frames] + frames.extend(cur_frames) + fh.write("\n".join(sorted(frames))) + for s, sdict in self.meta.items(): + if s not in split[sp] or int(s.split('_')[1]) < 10: + continue + print(sp, s) + scenario_dir = os.path.join(out_dir, s) + cur_split = {'train': 'training', 'val': 'validating', 'test': 'testing'}[sp] + # os.makedirs(scenario_dir, exist_ok=True) + # sdict = {k: sdict[k] for k in sorted(list(sdict.keys()))[:10]} + for f, fdict in tqdm.tqdm(sdict.items()): + ##### save lidar ###### + points = [] + for ai, adict in fdict['agents'].items(): + for li, ldict in adict['lidar'].items(): + lidar_file = os.path.join(self.data_path, ldict['filename']) + points.append( + np.fromfile(lidar_file, np.float32).reshape(-1, 4) + ) + points = np.concatenate(points, axis=0) + lidar_out_file = os.path.join( + out_dir, cur_split, 'velodyne', f"{s.split('_')[0][-1] + f[1:]}.bin" + ) + points.tofile(lidar_out_file) + ######## save label ####### + label = fdict['meta']['bbx_center_global'] + label_out_file = os.path.join( + out_dir, cur_split, 'label_2', f"{s.split('_')[0][-1] + f[1:]}.txt" + ) + with open(label_out_file, 'w') as fh: + for l in label: + # kitti label format + cosense_type = self.OBJ_ID2NAME[l[1]] + type = [type_cosense2kitti[cosense_type]] + + trancated = ['0'] + occluded = ['0'] + alpha = [f"{np.arctan2(l[3], l[2]):.2f}"] + bbox = ['0'] * 4 + dimensions = [f"{l[x]:.2f}" for x in [7, 6, 5]] # hwl + l[4] -= l[7] / 2 + location = [f"{l[x]:.2f}" for x in [2, 3, 4]] # in cam coor + rotation_y = [f"{-l[10] - np.pi/2:.2f}"] + ls = type + trancated + occluded + alpha + bbox + dimensions +\ + location + rotation_y + line = " ".join(ls) + fh.write(line) + fh.write('\n') + + def obj_from_sustech(self, label_file): + if not os.path.exists(label_file): + return [] + objs = load_json(label_file) + bboxes = [] + for obj_dict in objs: + obj_id = obj_dict['obj_id'] + obj_type = obj_dict['obj_type'] + position = obj_dict['psr']['position'] + rotation = obj_dict['psr']['rotation'] + scale = obj_dict['psr']['scale'] + + cosense_type_name = type_sustech2cosense[obj_type] + obj_type_id = self.OBJ_NAME2ID[cosense_type_name] + bbx_center = [ + float(obj_id), + float(obj_type_id), + position['x'], + position['y'], + position['z'], + scale['x'], + scale['y'], + scale['z'], + rotation['x'], + rotation['y'], + rotation['z'], + ] + bboxes.append(bbx_center) + return bboxes + + def obj_to_sustech(self, cosense_objs, sustech_file): + sustech_objs = [] + if len(cosense_objs.shape) == 0: + save_json(sustech_objs, sustech_file) + return + for obj in cosense_objs: + obj_type = type_cosense2sustech[ + self.OBJ_ID2NAME[int(obj[1])] + ] + sustech_objs.append( + { + 'obj_id': obj[0], + 'obj_type': obj_type, + 'psr': { + 'position': { + 'x': obj[2], + 'y': obj[3], + 'z': obj[4] + }, + 'scale': { + 'x': obj[5], + 'y': obj[6], + 'z': obj[7] + }, + 'rotation': { + 'x': obj[8], + 'y': obj[9], + 'z': obj[10] + } + } + } + ) + save_json(sustech_objs, sustech_file) + + def obj_to_opv2v(self, bbxs, pose, out_file, timestamp=None): + vehicles = {} + # only keep car, van, bus, truck + bbxs = bbxs[bbxs[:, 1] < 4] + for bbx in bbxs: 
+ obj_id = int(bbx[0]) + obj_type = int(bbx[1]) + # process the information to the opv2v format + location = bbx[2:5] + angle = bbx[[8, 10, 9]] / np.pi * 180 + angle[[0, 2]] *= -1 + extent = bbx[5:8] / 2 + + vehicles[int(obj_id)] = { + 'angle': angle.tolist(), + 'center': [0.0] * 3, + 'extent': extent.tolist(), + 'location': location.tolist(), + 'speed': 0, + 'type': obj_type + } + if isinstance(pose, np.ndarray): + pose = pose.tolist() + yaml_dict = { + 'lidar_pose': pose, + 'true_ego_pos': pose, + 'ego_speed': 0, + 'vehicles': vehicles + } + if timestamp is not None: + # timestamp for ouster is corrected by subtracting a systematic time offset (0.35s) + yaml_dict['timestamp'] = float(timestamp) + with open(out_file, 'w') as fh: + yaml.dump(yaml_dict, fh, default_flow_style=False) + + + @staticmethod + def load_meta(meta_path, mode): + if mode == 'all': + scenario_meta_files = sorted(glob.glob(meta_path + "/*.json")) + else: + scenario_meta_files = [] + with open(os.path.join(meta_path, f'{mode}.txt'), 'r') as fh: + for line in fh.readlines(): + scenario_meta_files.append(os.path.join(meta_path, f'{line.strip()}.json')) + + meta_dict = {} + + for f in scenario_meta_files: + scenario = os.path.basename(f)[:-5] + meta_dict[scenario] = load_json(f) + + return meta_dict + + @staticmethod + def cal_vbbx_mean_dim(meta): + """Calculate mean dimensions of four-wheel vehicles""" + dimensions = [] + for s, sdict in meta.items(): + for f, fdict in sdict.items(): + bbx = np.array(fdict['meta']['bbx_center_global']) + dimensions.append(bbx[bbx[:, 5] > 2, 5:8]) + print(np.concatenate(dimensions, axis=0).mean(axis=0)) + + @staticmethod + def fdict_template(): + return { + 'agents': { + '0': { + 'type': None, + 'pose': [0.0] * 6, + 'time': None, # timestamp for the current vehicle pose + 'lidar': { + '0': { + 'pose': [0.0] * 6, + 'time': None, # timestamp for the current lidar triggering round + 'filename': None + } + }, + 'camera': {}, # TODO API for cameras + } + }, + # no cooperation needed, take lidar as global for each frame + 'meta': {'bbx_center_global': []} + } + + @staticmethod + def add_cam_to_fdict(fdict, agent_id, cam_id, filenames, intrinsic, extrinsic, **kwargs): + if agent_id not in fdict['agents']: + adict = CoSenseDataConverter.fdict_template()['agents'][0] + fdict['agents'][agent_id] = adict + kwargs.update({ + 'filenames': filenames, + 'intrinsic': intrinsic, + 'extrinsic': extrinsic + }) + fdict['agents'][agent_id]['camera'][cam_id] = kwargs + + @staticmethod + def update_frame_bbx(fdict, bbx): + fdict['meta']['bbx_center_global'] = bbx + + @staticmethod + def update_agent(fdict, + agent_id, + agent_type=None, + agent_pose=None, + agent_time=None, + **kwargs): + if agent_id not in fdict['agents']: + fdict['agents'][agent_id] = CoSenseDataConverter.fdict_template()['agents']['0'] + if agent_type is not None: + fdict['agents'][agent_id]['type'] = agent_type + if agent_pose is not None: + fdict['agents'][agent_id]['pose'] = agent_pose + if agent_time is not None: + fdict['agents'][agent_id]['time'] = agent_time + for k, v in kwargs.items(): + fdict['agents'][agent_id][k] = v + + @staticmethod + def update_agent_lidar(fdict, + agent_id, + lidar_id, + lidar_pose=None, + lidar_time=None, + lidar_file=None): + if agent_id not in fdict['agents']: + fdict['agents'][agent_id] = CoSenseDataConverter.fdict_template()['agents']['0'] + if lidar_pose is not None: + fdict['agents'][agent_id]['lidar'][lidar_id]['pose'] = lidar_pose + if lidar_time is not None: + 
fdict['agents'][agent_id]['lidar'][lidar_id]['time'] = lidar_time + if lidar_file is not None: + fdict['agents'][agent_id]['lidar'][lidar_id]['filename'] = lidar_file + + @staticmethod + def update_agent_gt_boxes(fdict, + agent_id, + gt_boxes): + if agent_id not in fdict['agents']: + fdict['agents'][agent_id] = CoSenseDataConverter.fdict_template()['agents']['0'] + fdict['agents'][agent_id]['gt_boxes'] = gt_boxes + + @staticmethod + def remove_lidar_info(fdict, agent_id): + fdict['agents'][agent_id]['lidar'] = {} + + @staticmethod + def supervison_full_to_sparse(meta_dict, out_path, lidar_range=None, det_r=None, + num_box_per_frame=None, num_box_total=None, label_ratio=None): + def select_box(bboxes, cls_idx, num): + bboxes = np.array(bboxes) + bboxes_car = bboxes[bboxes[:, 1] == cls_idx] + if lidar_range is not None: + mask = (bboxes_car[:, 2] > lidar_range[0]) & (bboxes_car[:, 2] < lidar_range[3]) & \ + (bboxes_car[:, 3] > lidar_range[1]) & (bboxes_car[:, 3] < lidar_range[4]) & \ + (bboxes_car[:, 4] > lidar_range[2]) & (bboxes_car[:, 4] < lidar_range[5]) + else: + mask = np.linalg.norm(bboxes_car[:, 2:4], axis=1) < det_r + bboxes_car = bboxes_car[mask] + if len(bboxes_car) == 0: + return None + choice = np.random.choice(np.array(len(bboxes_car)), num) + bboxes_car = bboxes_car[choice].reshape(num, 11).tolist() + return bboxes_car + + if num_box_per_frame is not None: + for s, sdict in meta_dict.items(): + sdict_out = copy.deepcopy(sdict) + for f, fdict in sdict.items(): + bboxes = fdict['meta']['bbx_center_global'] + choice = select_box(bboxes, 0, 1) + if choice is None: + sdict_out.pop(f) + else: + sdict_out[f]['meta']['bbx_center_global'] = choice + save_json(sdict_out, os.path.join(out_path, f'{s}.json')) + elif num_box_total is not None: + samples = [] + # find frames with car labels + for s, sdict in meta_dict.items(): + for f, fdict in sdict.items(): + bboxes = fdict['meta']['bbx_center_global'] + classes = [int(b[1]) for b in bboxes] + if 0 in classes: + samples.append((s, f)) + # select given number of frames + samples = random.choices(samples, k=num_box_total) + sdict_out = {} + for sample in samples: + fdict = copy.deepcopy(meta_dict[sample[0]][sample[1]]) + bboxes = fdict['meta']['bbx_center_global'] + fdict['meta']['bbx_center_global'] = select_box(bboxes, 0, 1) + sdict_out[sample[1]] = fdict + save_json(sdict_out, os.path.join(out_path, 'train.json')) + with open(os.path.join(out_path, 'train.txt'), 'w') as fh: + fh.write('train') + + @staticmethod + def global_boxes_to_local(meta_dict, data_path, meta_path): + samples = {i: {'box': [], 'points': []} for i in CoSenseDataConverter.OBJ_ID2NAME.keys()} + for s, sdict in meta_dict.items(): + for f, fdict in tqdm.tqdm(meta_dict[s].items()): + global_boxes = fdict['meta']['bbx_center_global'] + global_boxes = np.array(global_boxes) + for a, adict in fdict['agents'].items(): + for l, ldict in adict['lidar'].items(): + lidar = pclib.load_pcd(os.path.join(data_path, ldict['filename'])) + box_cls = global_boxes[:, 1] + res = points_in_boxes_cpu(lidar['xyz'], global_boxes[:, [2, 3, 4, 5, 6, 7, 10]]) + box_n_pts = res.sum(axis=1) + valid = box_n_pts > 10 + boxes = global_boxes[valid] + box_cls = box_cls[valid] + pts_idx_of_boxes = res[valid] + CoSenseDataConverter.update_agent_gt_boxes(fdict, a, boxes.tolist()) + + for i, box in enumerate(boxes): + cls = box[1] + points = lidar['xyz'][pts_idx_of_boxes[i].astype(bool)] + intensity = lidar['intensity'][pts_idx_of_boxes[i].astype(bool)] + # transform box and points to box coodiante + 
points = points - box[2:5].reshape(1, 3) + # points will be modified during transformation, so make a copy here + new_points = np.copy(points) + st = np.sin(-box[-1]) + ct = np.cos(-box[-1]) + points[:, 0] = new_points[:, 0] * ct - new_points[:, 1] * st + points[:, 1] = new_points[:, 0] * st + new_points[:, 1] * ct + points = np.concatenate([points, intensity[:, None]], axis=1) + samples[cls]['box'].append(box[5:8]) + samples[cls]['points'].append(points) + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # box_vis = np.array([[0]*3 + box[5:8].tolist() + [0]]) + # ax = plt.figure(figsize=(10, 10)).add_subplot(1, 1, 1) + # draw_points_boxes_plt( + # ax=ax, + # pc_range=5, + # points=points, + # boxes_gt=box_vis, + # filename='/home/yuan/Downloads/tmp.png' + # ) + + + save_json(sdict, os.path.join(meta_path, f'{s}.json')) + for sample_id, content in samples.items(): + if len(content['box']) == 0: + continue + sample_name = CoSenseDataConverter.OBJ_ID2NAME[sample_id] + with open(os.path.join(meta_path, f'{sample_name}.pickle'), 'wb') as file: + pickle.dump(content, file) + + @staticmethod + def parse_global_bbox_velo(meta_dict, data_path, meta_path): + for s, sdict in meta_dict.items(): + for f, fdict in sdict.items(): + cur_global_boxes = fdict['meta']['bbx_center_global'] + # cur_global_boxes = {box[0]: box[1:] for box in cur_global_boxes} + velos = [] + next_frame = f'{int(f) + 1:06d}' + last_frame = f'{int(f) - 1:06d}' + next_global_boxes = {} + prev_global_boxes = {} + if next_frame in sdict: + next_global_boxes = sdict[next_frame]['meta']['bbx_center_global'] + next_global_boxes = {box[0]: box[1:] for box in next_global_boxes} + if last_frame in sdict: + prev_global_boxes = sdict[last_frame]['meta']['bbx_center_global'] + prev_global_boxes = {box[0]: box[1:] for box in prev_global_boxes} + + for box_ in cur_global_boxes: + box_id = box_[0] + box = box_[1:] + if box_id in next_global_boxes: + velo = [(next_global_boxes[box_id][1] - box[1]) * 10, # m/s + (next_global_boxes[box_id][2] - box[2]) * 10,] + elif box_id in prev_global_boxes: + velo = [(box[1] - prev_global_boxes[box_id][1]) * 10, + (box[2] - prev_global_boxes[box_id][2]) * 10] + else: + velo = [0., 0.] + + velos.append(velo) + fdict['meta']['bbx_velo_global'] = velos + + save_json(sdict, os.path.join(meta_path, f'{s}.json')) + + + @staticmethod + def draw_sample_distributions(meta_path): + """ + Draw distribution of the number of observation points for each sample category. 
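For each '<name>.pickle' written by ``global_boxes_to_local``, the per-object point counts are clipped at 500, histogrammed into 10 density-normalized bins, and the resulting figure is saved as '<name>.png' in the same meta directory.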
+ + :param meta_path: path contains pickle files of object samples + :return: + """ + import matplotlib.pyplot as plt + files = glob.glob(os.path.join(meta_path, '*.pickle')) + for f in files: + with open(f, 'rb') as file: + samples = pickle.load(file) + n_points = np.array([min(len(points), 500) for points in samples['points']]) + plt.hist(n_points, bins=10, density=True, alpha=0.6, label=os.path.basename(f)[:-7]) + plt.title(os.path.basename(f)[:-7]) + # plt.legend() + plt.savefig(os.path.join(meta_path, f'{os.path.basename(f)[:-7]}.png')) + plt.close() + + + +if __name__=="__main__": + cosense3d = CoSenseDataConverter( + "/koko/LUMPI/lumpi_selected/data", + "/koko/LUMPI/lumpi_selected/meta", + 'all' + ) + # cosense3d.to_kitti("/koko/LUMPI/kitti_test") + # cosense3d.to_sustech("/koko/LUMPI/lumpi_selected_sustech") + # cosense3d.to_opv2v("/media/hdd/yuan/koko/data/LUMPI/opv2v_fmt") + # cosense3d.update_from_sustech("/koko/LUMPI/sustech_fmt") + # cosense.supervison_full_to_sparse(cosense.meta, + # '/koko/cosense3d/kitti-sparse-num534', + # lidar_range=[-100, -40, -3.5, 100, 40, 3], + # num_box_total=534) + # cosense.global_boxes_to_local(cosense.meta, cosense.data_path, cosense.meta_path) + # cosense.update_from_sustech('/koko/LUMPI/sustech_fmt') + # cosense.parse_global_bbox_velo(cosense.meta, cosense.data_path, cosense.meta_path) + # cosense.draw_sample_distributions(cosense.meta_path) diff --git a/cosense3d/dataset/toolkit/dairv2x.py b/cosense3d/dataset/toolkit/dairv2x.py new file mode 100644 index 00000000..f20439a7 --- /dev/null +++ b/cosense3d/dataset/toolkit/dairv2x.py @@ -0,0 +1,792 @@ +import copy +import glob +import math +import os + +import matplotlib.pyplot as plt +import tqdm +import numpy as np +import open3d as o3d +from scipy.optimize import linear_sum_assignment + +from cosense3d.utils import pclib, vislib, box_utils +from cosense3d.utils.misc import load_json, save_json +from cosense3d.utils.box_utils import corners_to_boxes_3d, transform_boxes_3d +from cosense3d.dataset.toolkit import register_pcds +from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs +from cosense3d.ops.utils import points_in_boxes_cpu +from cosense3d.utils.pcdio import point_cloud_from_path +from cosense3d.utils.vislib import o3d_draw_frame_data, \ + o3d_draw_agent_data, o3d_draw_pcds_bbxs + +global_time_offset = 1.62616 * 1e9 + + +def calib_to_tf_matrix(calib_file): + calib = load_json(calib_file) + if 'transform' in calib: + tf = calib['transform'] + else: + tf = calib + tf_matrix = np.eye(4) + tf_matrix[:3, :3] = np.array(tf['rotation']) + tf_matrix[:3, 3:] = np.array(tf['translation']) + if 'relative_error' in calib: + tf_matrix[0, 3] += calib['relative_error']['delta_x'] + tf_matrix[1, 3] += calib['relative_error']['delta_y'] + return tf_matrix + + +def load_label(label_file): + labels = load_json(label_file) + bbxs_center = [] + bbxs_corner = [] + for l in labels: + obj_type = { + 'car': 'vehicle.car', + 'van': 'vehicle.van', + 'truck': 'vehicle.truck', + 'bus': 'vehicle.bus', + 'pedestrian': 'human.pedestrian', + 'trafficcone': 'static.trafficcone', + 'motorcyclist': 'vehicle.motorcycle', + 'cyclist': 'vehicle.cyclist', + 'tricyclist': 'vehicle.tricyclist', + 'barrowlist': 'static.barrowlist', + }[l.get('type', "car").lower()] + track_id = l.get('track_id', -1) + bbx = [ + int(track_id), + cs.OBJ_NAME2ID[obj_type], + l['3d_location']['x'], + l['3d_location']['y'], + l['3d_location']['z'], + l['3d_dimensions']['l'], + l['3d_dimensions']['w'], + l['3d_dimensions']['h'], + 0, + 
0, + l['rotation'] + ] + bbxs_center.append([float(x) for x in bbx]) + if 'world_8_points' in l: + bbx_corner = np.array(l['world_8_points']) + bbx_corner = [bbx.tolist() for bbx in bbx_corner] + bbxs_corner.append(bbx_corner) + return bbxs_center, bbxs_corner + + +def load_info_to_dict(info_file): + infos = load_json(info_file) + info_dict = {} + for info in infos: + frame = os.path.basename(info['pointcloud_path'][:-4]) + info_dict[frame] = info + return info_dict + + +def convert_v2x_c(root_dir, meta_out_dir): + cvi_path = "cooperative-vehicle-infrastructure" + infra_path = "infrastructure-side" + cav_path = "vehicle-side" + coop_path = "cooperative" + info_file = "data_info.json" + inf_lidar_path = "cooperative-vehicle-infrastructure-infrastructure-side-velodyne" + cav_lidar_path = "cooperative-vehicle-infrastructure-vehicle-side-velodyne" + new_label_path = "DAIR-V2X-C_Complemented_Anno" + inf_info_file = os.path.join(root_dir, cvi_path, infra_path, info_file) + inf_info = load_info_to_dict(inf_info_file) + veh_info_file = os.path.join(root_dir, cvi_path, cav_path, info_file) + veh_info = load_info_to_dict(veh_info_file) + frame_pairs = load_json(os.path.join(root_dir, cvi_path, coop_path, info_file)) + + meta_dict = {} + veh_frames = [] + inf_frames = [] + offsets = [] + for pair in frame_pairs: + veh_frame = os.path.basename(pair['vehicle_pointcloud_path'][:-4]) + inf_frame = os.path.basename(pair['infrastructure_pointcloud_path'][:-4]) + label_frame = os.path.basename(pair['cooperative_label_path'][:-5]) + assert veh_frame == label_frame + veh_frames.append(veh_frame) + inf_frames.append(inf_frame) + offsets.append(pair['system_error_offset']) + + # load all re-annotated samples + train = load_json(os.path.join(root_dir, new_label_path, 'train.json')) + val = load_json(os.path.join(root_dir, new_label_path, 'val.json')) + split = { + 'train': train, + # 'test': val + } + + for sp, frames in split.items(): + for frame in tqdm.tqdm(frames): + cur_veh_info = veh_info[frame] + scenario = cur_veh_info['batch_id'] + # processing vehicle meta + tf_novatel2world = calib_to_tf_matrix( + os.path.join(root_dir, cvi_path, cav_path, + cur_veh_info['calib_novatel_to_world_path']) + ) + tf_lidar2novatel = calib_to_tf_matrix( + os.path.join(root_dir, cvi_path, cav_path, + cur_veh_info['calib_lidar_to_novatel_path']) + ) + tf_lidar2world = tf_novatel2world @ tf_lidar2novatel + veh_lidar_pose = pclib.tf2pose(tf_lidar2world) + veh_pose = pclib.tf2pose(tf_novatel2world) + veh_lidar_time = float(cur_veh_info['pointcloud_timestamp']) * 1e-6 + veh_lidar_file = os.path.join(cav_lidar_path, frame + '.pcd') + veh_bbxs_center, _ = load_label( + os.path.join(root_dir, + f"{new_label_path}/new_labels/vehicle-side_label/lidar", + frame + '.json' + ) + ) + + # process infra info + cur_inf_frame = inf_frames[veh_frames.index(frame)] + cur_inf_info = inf_info[cur_inf_frame] + tf_virtuallidar2world = calib_to_tf_matrix( + os.path.join(root_dir, cvi_path, infra_path, + cur_inf_info['calib_virtuallidar_to_world_path']) + ) + + inf_lidar_time = float(cur_inf_info['pointcloud_timestamp']) * 1e-6 + inf_lidar_file = os.path.join(inf_lidar_path, cur_inf_frame + ".pcd") + + # inf_lidar_pose = pclib.tf2pose(tf_infra2ego) + inf_lidar_pose = pclib.tf2pose(tf_virtuallidar2world) + inf_label_path = os.path.join(root_dir, + f"{cvi_path}/infrastructure-side/label/virtuallidar", + cur_inf_frame + '.json') + inf_bbxs_center, _ = load_label(inf_label_path) + + # process global meta + coop_label_path = os.path.join(root_dir, + 
f"{new_label_path}/new_labels/cooperative_label/label_world", + frame + '.json' + ) + world_bbxs_center, world_bbxs_corner = load_label(coop_label_path) + coop_bbxs_corner = pclib.rotate_box_corners_with_tf_np( + np.array(world_bbxs_corner), np.linalg.inv(tf_lidar2world) + ) + coop_bbxs_center = np.concatenate( + [np.array(world_bbxs_center)[:, :2], + corners_to_boxes_3d(coop_bbxs_corner)], + axis=1 + ).tolist() + + # if not os.path.exists(inf_label_path): + # print('infra label not found.') + # inf_bbxs_center = pclib.rotate_box_corners_with_tf_np( + # np.array(world_bbxs_corner), np.linalg.inv(tf_virtuallidar2world) + # ) + # inf_bbxs_center = np.concatenate( + # [np.array(world_bbxs_center)[:, :2], + # corners_to_boxes_3d(inf_bbxs_center)], + # axis=1 + # ).tolist() + + + + # pcd = point_cloud_from_path(os.path.join(root_dir, veh_lidar_file)) + # points = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1) + # o3d_draw_pcds_bbxs([points], [np.array(veh_bbxs_center)]) + + # construct meta dict + fdict = cs.fdict_template() + # add cav lidar meta + cs.update_agent(fdict, + agent_id='0', + agent_type='cav', + agent_pose=veh_pose, + gt_boxes=veh_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='0', + lidar_id='0', + lidar_pose=veh_lidar_pose, + lidar_time=veh_lidar_time, + lidar_file=veh_lidar_file) + # add infra lidar meta + cs.update_agent(fdict, + agent_id='1', + agent_type='infra', + agent_pose=inf_lidar_pose, + gt_boxes=inf_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='1', + lidar_id='0', + lidar_pose=inf_lidar_pose, + lidar_time=inf_lidar_time, + lidar_file=inf_lidar_file) + cs.update_frame_bbx(fdict, + coop_bbxs_center + )# in global coords + fdict['meta']['ego_id'] = '0' + fdict['meta']['ego_lidar_pose'] = veh_lidar_pose + if scenario not in meta_dict: + meta_dict[scenario] = {} + meta_dict[scenario][frame] = fdict + # save meta + os.makedirs(meta_out_dir, exist_ok=True) + for scenario, meta in meta_dict.items(): + meta_file = os.path.join(meta_out_dir, f'{scenario}.json') + save_json(meta, meta_file) + with open(os.path.join(meta_out_dir, f'{sp}.txt'), 'w') as fh: + fh.write('\n'.join(list(meta_dict.keys()))) + + +def convert_v2x_seq(root_dir, meta_out_dir): + split = "test" + inf_info_file = os.path.join(root_dir, "infrastructure-side/data_info.json") + inf_info = load_info_to_dict(inf_info_file) + veh_info_file = os.path.join(root_dir, "vehicle-side/data_info.json") + veh_info = load_info_to_dict(veh_info_file) + frame_pairs = load_json(os.path.join(root_dir, "cooperative/data_info.json")) + + meta_dict = {} + for pdict in frame_pairs: + scenario = pdict['vehicle_sequence'] + ############################################################# + # processing vehicle meta + cur_veh_info = veh_info[pdict['vehicle_frame']] + tf_novatel2world = calib_to_tf_matrix( + os.path.join(root_dir, "vehicle-side", cur_veh_info['calib_novatel_to_world_path']) + ) + tf_lidar2novatel = calib_to_tf_matrix( + os.path.join(root_dir, "vehicle-side", cur_veh_info['calib_lidar_to_novatel_path']) + ) + tf_lidar2world = tf_novatel2world @ tf_lidar2novatel + veh_lidar_pose = pclib.tf2pose(tf_lidar2world) + veh_pose = pclib.tf2pose(tf_novatel2world) + + veh_lidar_time = float(cur_veh_info['pointcloud_timestamp']) * 1e-6 + veh_lidar_file = os.path.join("vehicle-side", cur_veh_info['pointcloud_path']) + veh_bbxs_center, _ = load_label( + os.path.join(root_dir, "vehicle-side", cur_veh_info['label_lidar_std_path']) + ) + + ############################################################### + # process 
infra info + cur_inf_info = inf_info[pdict['infrastructure_frame']] + tf_virtuallidar2world = calib_to_tf_matrix( + os.path.join(root_dir, "infrastructure-side", cur_inf_info['calib_virtuallidar_to_world_path']) + ) + inf_lidar_pose = pclib.tf2pose(tf_virtuallidar2world) + inf_lidar_time = float(cur_inf_info['pointcloud_timestamp']) * 1e-6 + inf_lidar_file = os.path.join("infrastructure-side", cur_inf_info['pointcloud_path']) + inf_bbxs_center, _ = load_label( + os.path.join(root_dir, "infrastructure-side", cur_inf_info['label_lidar_std_path']) + ) + inf_bbxs_center = [] + + ############################################################### + # process global meta + coop_bbxs_center, _ = load_label( + os.path.join(root_dir, "cooperative", "label", f"{pdict['vehicle_frame']}.json") + ) + + ############################################################### + # construct meta dict + fdict = cs.fdict_template() + # add cav lidar meta + cs.update_agent(fdict, + agent_id='0', + agent_type='cav', + agent_pose=veh_pose, + gt_boxes=veh_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='0', + lidar_id='0', + lidar_pose=veh_lidar_pose, + lidar_time=veh_lidar_time, + lidar_file=veh_lidar_file) + # add infra lidar meta + cs.update_agent(fdict, + agent_id='1', + agent_type='infra', + agent_pose=inf_lidar_pose, + gt_boxes=inf_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='1', + lidar_id='0', + lidar_pose=inf_lidar_pose, + lidar_time=inf_lidar_time, + lidar_file=inf_lidar_file) + cs.update_frame_bbx(fdict, + coop_bbxs_center + )# in global coords + fdict['meta']['ego_id'] = '0' + fdict['meta']['ego_lidar_pose'] = veh_lidar_pose + if scenario not in meta_dict: + meta_dict[scenario] = {} + meta_dict[scenario][pdict['vehicle_frame']] = fdict + # save meta + os.makedirs(meta_out_dir, exist_ok=True) + for scenario, meta in meta_dict.items(): + meta_file = os.path.join(meta_out_dir, f'{scenario}.json') + save_json(meta, meta_file) + with open(os.path.join(meta_out_dir, f'{split}.txt'), 'w') as fh: + fh.write('\n'.join(list(meta_dict.keys()))) + + +def parse_static_pcd(adict, root_dir): + pose = pclib.pose_to_transformation(adict['lidar']['0']['pose']) + pcd = o3d.io.read_point_cloud(os.path.join(root_dir, adict['lidar']['0']['filename'])) + points = np.array(pcd.points) + boxes = np.array(adict['gt_boxes'])[:, [2, 3, 4, 5, 6, 7, 10]] + in_box_mask = points_in_boxes_cpu(points, boxes).any(axis=0) + pcd.points = o3d.utility.Vector3dVector(points[np.logical_not(in_box_mask)]) + return pcd, pose + + +def register_sequence(sdict, frames, root_dir, ignore_ids=[], vis=False): + agents_reg = {} + for f in tqdm.tqdm(frames): + # print(f) + fdict = sdict[f] + for ai, adict in fdict['agents'].items(): + if ai in ignore_ids: + continue + pcd, pose = parse_static_pcd(adict, root_dir) + if ai not in agents_reg: + agents_reg[ai] = { + 'init_pose': pose, + 'last_pose_old': pose, + 'last_pose_new': pose, + 'last_pcd': pcd, + 'pcd_merged': copy.copy(pcd).transform(pose), + 'last_frame': f, + 'sequence_info': {f: {'lidar_pose': pose}}} + else: + source_pcd = pcd + target_pcd = agents_reg[ai]['last_pcd'] + tf_init = np.linalg.inv(agents_reg[ai]['last_pose_old']) @ pose + tf_out = register_pcds(source_pcd, target_pcd, tf_init, [0.2], visualize=vis) + pose_new = agents_reg[ai]['last_pose_new'] @ tf_out + pcd_merged = agents_reg[ai]['pcd_merged'] + pcd_transformed = copy.copy(source_pcd).transform(pose_new) + # if vis: + # pcd_transformed.paint_uniform_color([1, 0.706, 0]) + # pcd_merged.paint_uniform_color([0, 0.651, 0.929]) 
+ # o3d.visualization.draw_geometries([pcd_merged, pcd_transformed]) + pcd_merged = pcd_merged + pcd_transformed + pcd_merged = pcd_merged.voxel_down_sample(voxel_size=0.1) + + agents_reg[ai]['last_pose_old'] = pose + agents_reg[ai]['last_pose_new'] = pose_new + agents_reg[ai]['last_pcd'] = pcd + agents_reg[ai]['pcd_merged'] = pcd_merged + agents_reg[ai]['sequence_info'][f] = {'lidar_pose': pose} + + return agents_reg + + +def register_pcds_to_blocks(seq, sdict, root_dir, idx=0): + frames = sorted(sdict.keys()) + sub_seq = frames[:1] + cnt = 0 + for i, f in enumerate(frames[1:]): + if (i == len(frames) - 2 or int(f) - int(sub_seq[-1]) > 2): + if i == len(frames) - 2: + sub_seq.append(f) + if len(sub_seq) >= 8: + vis = False + agents_reg = register_sequence(sdict, sub_seq, root_dir, ['1'], vis) + pcd_merged = agents_reg['0']['pcd_merged'] + o3d.visualization.draw_geometries([pcd_merged]) + o3d.io.write_point_cloud(f"{root_dir}/agent0_seq{seq}_{cnt}.pcd", pcd_merged) + info_file = f"{root_dir}/agent0_seq{seq}_{cnt}.npy" + np.save(info_file, {k: v for k, v in agents_reg['0'].items() if 'pcd' not in k}, allow_pickle=True) + cnt += 1 + if not i == len(frames) - 2: + sub_seq = [f] + else: + sub_seq.append(f) + + +def optimize_trajectory(seq, sdict, root_dir, out_meta_dir, ego_agent_id, idx, sub_idx): + """ + This function iterates over scenarios, for each scenario it does the following steps: + 1. register point clouds sequentially for each agent to get accurate trajectory of agents. + Before registration, the points belonging to the labeled objets with high dynamics are removed. + After registration of each sequence pair, the merged point cloud is down-sampled to save space. + 2. match the registered point clouds of different agents to get optimized relative poses. + 3. recover the relative pose to the world pose. 
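A rough sketch of the per-frame update used in step 1 (illustrative only; variable
names are simplified, see ``register_sequence`` above for the actual bookkeeping)::

    T_init    = np.linalg.inv(T_prev_raw) @ T_cur_raw       # odometry prior from raw lidar poses
    T_icp     = register_pcds(cur_pcd, prev_pcd, T_init)    # ICP refinement of the prior
    T_cur_new = T_prev_new @ T_icp                          # chain onto the refined trajectory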
+ + Parameters + ---------- + meta_path: directory of meta files + root_dir: root dir of data + + Returns + ------- + meta: meta information with updated poses of agents + """ + info_file = f"{root_dir}/agent0_seq{seq}_{sub_idx}.npy" + ego_info = np.load(info_file, allow_pickle=True).item() + pcd_merged = o3d.io.read_point_cloud(f"{root_dir}/agent0_seq{seq}_{sub_idx}.pcd") + frames = sorted(ego_info['sequence_info'].keys()) + sub_seq_dict = {} + + infra_info = sdict[frames[0]]['agents']['1'] + pcd, pose = parse_static_pcd(infra_info, root_dir) + tf_init = pose + # o3d.visualization.draw_geometries([pcd_merged]) + tf_out = register_pcds(pcd, pcd_merged, tf_init, [1, 0.2], visualize=True) + pose = pclib.tf2pose(tf_out) + + for f in tqdm.tqdm(frames): + fdict = sdict[f] + fdict['agents']['1']['lidar']['0']['pose'] = pose + fdict['agents']['1']['pose'] = pose + + lidar_pose_new = ego_info['sequence_info'][f]['lidar_pose'] + lidar_pose_old = pclib.pose_to_transformation(fdict['agents'][ego_agent_id]['lidar']['0']['pose']) + # lidar_old2new = np.linalg.inv(lidar_pose_new) @ lidar_pose_old + vpose_to_lpose = np.linalg.inv(lidar_pose_old) @ pclib.pose_to_transformation(fdict['agents'][ego_agent_id]['pose']) + vpose_new = lidar_pose_new @ vpose_to_lpose + fdict['agents'][ego_agent_id]['pose'] = pclib.tf2pose(vpose_new) + fdict['agents'][ego_agent_id]['lidar']['0']['pose'] = pclib.tf2pose(lidar_pose_new) + sub_seq_dict[f] = fdict + if int(f) > 1002: + vis_pcd, vis_pose = parse_static_pcd(fdict['agents'][ego_agent_id], root_dir) + vis_pcd2, vis_pose2 = parse_static_pcd(fdict['agents']['1'], root_dir) + vis_pcd = vis_pcd.transform(lidar_pose_new) + vis_pcd2 = vis_pcd2.transform(vis_pose2) + + # o3d.visualization.draw_geometries([pcd_merged]) + corr = register_pcds(vis_pcd2, vis_pcd, np.eye(4), [1, 0.2], visualize=True) + vis_pose2 = corr @ vis_pcd2 + vis_pose2 = pclib.tf2pose(vis_pose2) + fdict['agents']['1']['lidar']['0']['pose'] = vis_pose2 + fdict['agents']['1']['pose'] = vis_pose2 + + # vis_pcd.paint_uniform_color([1, 0.706, 0]) + # vis_pcd2.paint_uniform_color([0, 0.651, 0.929]) + # o3d.visualization.draw_geometries([vis_pcd, vis_pcd2.transform(corr)]) + + save_json(sub_seq_dict, os.path.join(out_meta_dir, f"{seq}_{sub_idx}.json")) + + +def optimize_poses(meta_path): + mfiles = glob.glob(os.path.join(meta_path, '*.json'))[3:] + mfiles = ["/koko/cosense3d/dairv2x/45.json"] + for idx, mf in enumerate(mfiles): + sdict = load_json(mf) + seq = os.path.basename(mf)[:-5] + print('###########################', seq, len(sdict)) + + # register_pcds_to_blocks( + # seq, + # sdict, + # "/home/data/DAIR-V2X", + # idx + # ) + files = glob.glob(f"/home/data/DAIR-V2X/agent0_seq{seq}_*.npy") + for sub_idx in range(len(files)): + optimize_trajectory(seq, sdict, + "/home/data/DAIR-V2X", + "/home/data/DAIR-V2X/meta", + '0', + idx, + sub_idx=sub_idx + ) + + +def register_step_one(mf): + """Find vehicle that is most close to infra""" + sdict = load_json(mf) + seq = os.path.basename(mf)[:-5] + frames = sorted(sdict.keys()) + min_dist = 1000 + min_dist_frame = frames[0] + for f in frames: + fdict = sdict[f] + veh_pose = fdict['agents']['0']['lidar']['0']['pose'] + inf_pose = fdict['agents']['1']['lidar']['0']['pose'] + dist = np.sqrt((veh_pose[0] - inf_pose[0]) ** 2 + (inf_pose[1] - veh_pose[1]) ** 2) + if dist < min_dist: + min_dist = dist + min_dist_frame = f + print(f"Step1: registration starts from frame {min_dist_frame}") + return min_dist_frame, min_dist + + +def register_step_two(start_frame, mf, meta_out_dir): + 
"""Register point clouds""" + sdict = load_json(mf) + seq = os.path.basename(mf)[:-5] + frames = sorted(sdict.keys()) + total_frames = len(frames) + start_idx = frames.index(start_frame) + ref_pcd, ref_tf = parse_static_pcd(sdict[start_frame]['agents']['1'], root_dir) + ref_pose = pclib.tf2pose(ref_tf) + ref_pcd = ref_pcd.transform(ref_tf) + idx_l = start_idx + idx_r = start_idx + 1 + vis = False + cnt = 0 + while True: + if idx_l < 0 and idx_r >= len(frames): + break + if idx_l >= 0: + cur_frame = frames[idx_l] + pcd, tf = parse_static_pcd(sdict[cur_frame]['agents']['0'], root_dir) + if cnt == -1: + # tf = registration.manual_registration(pcd.transform(tf), ref_pcd) + + tf_corr = np.array([ [ 9.98532892e-01, 5.34621722e-02, 8.59413959e-03, -1.22072297e+02], + [-5.34946946e-02, 9.98561645e-01, 3.59984429e-03, 2.15912680e+02], + [-8.38932267e-03, -4.05430380e-03, 9.99956590e-01, 4.32884527e+01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + tf = tf_corr @ tf + + else: + tf = register_pcds(pcd, ref_pcd, tf, [1.6, 0.5], vis, cur_frame) + pose = pclib.tf2pose(tf) + sdict[cur_frame]['agents']['0']['lidar']['0']['pose'] = pose + sdict[cur_frame]['agents']['0']['pose'] = pose + sdict[cur_frame]['agents']['1']['lidar']['0']['pose'] = ref_pose + sdict[cur_frame]['agents']['1']['pose'] = ref_pose + ref_pcd = ref_pcd + pcd.transform(tf) + idx_l -= 1 + cnt += 1 + if idx_r < len(frames): + cur_frame = frames[idx_r] + pcd, tf = parse_static_pcd(sdict[cur_frame]['agents']['0'], root_dir) + tf = register_pcds(pcd, ref_pcd, tf, [1.6, 0.5], vis, cur_frame) + pose = pclib.tf2pose(tf) + sdict[cur_frame]['agents']['0']['lidar']['0']['pose'] = pose + sdict[cur_frame]['agents']['0']['pose'] = pose + sdict[cur_frame]['agents']['1']['lidar']['0']['pose'] = ref_pose + sdict[cur_frame]['agents']['1']['pose'] = ref_pose + ref_pcd = ref_pcd + pcd.transform(tf) + idx_r += 1 + cnt += 1 + + ref_pcd = ref_pcd.voxel_down_sample(voxel_size=0.1) + print(f"\rStep2: registered [{cnt}/{total_frames}] frames",end='',flush=True) + + save_json(sdict, os.path.join(meta_out_dir, f"{seq}.json")) + print('\n') + + +def select_sub_scenes(meta_in, root_dir, meta_out, split): + with open(os.path.join(meta_in, f"{split}.txt"), 'r') as f: + scenes = sorted(f.read().splitlines()) + + sub_scenes = [] + for s in tqdm.tqdm(scenes): + sdict = load_json(os.path.join(meta_in, f"{s}.json")) + frames = sorted(sdict.keys()) + sub_seq = frames[:1] + cnt = 0 + for i, f in enumerate(frames[1:]): + if (i == len(frames) - 2 or int(f) - int(sub_seq[-1]) > 1): + if i == len(frames) - 2: + # reach the end + sub_seq.append(f) + if len(sub_seq) >= 6: + # find one valid sub sequence + new_sdict = parse_global_bboxes(sdict, sub_seq, root_dir) + save_json(new_sdict, os.path.join(meta_out, f"{s}_{cnt}.json")) + sub_scenes.append(f"{s}_{cnt}") + cnt += 1 + if not i == len(frames) - 2: + # sequence breaks, add the current frame to the new seq + sub_seq = [f] + else: + sub_seq.append(f) + + with open(os.path.join(meta_out, f"{split}.txt"), 'w') as f: + f.writelines('\n'.join(sub_scenes)) + + +def parse_timestamped_boxes(adict, root_dir, four_wheel_only=True): + lf = os.path.join(root_dir, adict['lidar']['0']['filename']) + pcd = point_cloud_from_path(lf) + boxes = np.array(adict['gt_boxes']) + if four_wheel_only: + boxes = boxes[boxes[:, 1] < 4] + if 'timestamp' in pcd.fields: + points = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1) + points_inds = points_in_boxes_cpu(points, boxes[:, [2, 3, 4, 5, 6, 7, 10]]).astype(bool) + times = 
pcd.pc_data['timestamp'] + timestamps = [] + for i, inds in enumerate(points_inds): + if inds.sum() == 0: + nearst_angle_idx = np.abs(np.arctan2(boxes[i, 3], boxes[i, 2]) - + np.arctan2(points[:, 1], points[:, 0])).argmin() + timestamps.append(times[nearst_angle_idx]) + else: + ts = times[inds] + timestamps.append(ts.mean()) + timestamps = np.array(timestamps) + else: + timestamps = np.zeros_like(boxes[:, 0]) + adict['lidar']['0']['time'] + + return timestamps, boxes + + +def parse_global_bboxes(sdict, frames, root_dir): + """Step three""" + new_sdict = {} + tracklets = {} + id_counter = 1 + last_track_ids = set() + for fi, f in enumerate(frames): + fdict = sdict[f] + new_fdict = copy.deepcopy(fdict) + matched_track_ids = set() + matched_inds = [] + for ai, adict in fdict['agents'].items(): + timestamps, boxes = parse_timestamped_boxes(adict, root_dir) + tf = pclib.pose_to_transformation(adict['lidar']['0']['pose']) + boxes_global = transform_boxes_3d(boxes, tf, mode=11) + if len(tracklets) == 0: + for i, (t, box) in enumerate(zip(timestamps, boxes_global)): + tracklets[id_counter] = [[t] + box[1:].tolist()] + boxes[i, 0] = id_counter + id_counter += 1 + else: + tracked_boxes = [] + tracked_ids = [] + for k, v in tracklets.items(): + tracked_ids.append(k) + tracked_boxes.append(v[-1]) + tracked_boxes = np.array(tracked_boxes) + tracked_ids = np.array(tracked_ids) + dist_cost = np.linalg.norm(tracked_boxes[:, [2, 3]][:, None] - boxes_global[:, [2, 3]][None], axis=-1) + thr = 3 + min_dist = dist_cost.min(axis=0) + min_idx = dist_cost.argmin(axis=0) + match_inds = [] + for i, box in enumerate(boxes_global): + cur_box = [timestamps[i]] + box[1:].tolist() + if min_dist[i] < thr: + tracklets[tracked_ids[min_idx[i]]].append(cur_box) + match_inds.append([tracked_ids[min_idx[i]], i]) + boxes[i, 0] = tracked_ids[min_idx[i]] + else: + tracklets[id_counter] = [cur_box] + boxes[i, 0] = id_counter + id_counter += 1 + matched_inds.extend(match_inds) + + new_fdict['agents'][ai]['gt_boxes'] = boxes.tolist() + new_sdict[f] = new_fdict + + object_size_type = {} + for ti, tracklet in tracklets.items(): + tracklets[ti] = np.array(sorted(tracklet)) + object_size_type[ti] = { + 'size': np.median(tracklets[ti][:, 5:8], axis=0), + 'type': np.median(tracklets[ti][:, 1], axis=0), + } + + # remove last two frames + new_sdict.pop(frames[-1]) + new_sdict.pop(frames[-2]) + for f, fdict in new_sdict.items(): + object_ids = [] + for ai, adict in fdict['agents'].items(): + object_ids.extend([int(box[0]) for box in adict['gt_boxes']]) + object_ids = set(object_ids) + aligned_time = math.ceil(fdict['agents']['0']['lidar']['0']['time'] * 10) / 10 + aligned_boxes = [[], [], []] + for object_id in object_ids: + if object_id in tracklets: + tracklet = tracklets[object_id] + if len(tracklet) == 0: + continue + for i in range(3): + cur_time = aligned_time + 0.1 * i + time_diff = tracklet[:, 0] - cur_time + try: + prev_idx = np.where(time_diff < 0)[0].max() + next_idx = np.where(time_diff > 0)[0].min() + prev_t = tracklet[prev_idx][0] + next_t = tracklet[next_idx][0] + dxyz = tracklet[next_idx][[2, 3, 4]] - tracklet[prev_idx][[2, 3, 4]] + xyz = tracklet[prev_idx][[2, 3, 4]] + dxyz * (cur_time - prev_t) / (next_t - prev_t) + prev_rot = tracklet[next_idx][10] + object_param = [object_id , object_size_type[object_id]['type']] + xyz.tolist() + \ + object_size_type[object_id]['size'].tolist() + [0, 0, prev_rot] + aligned_boxes[i].append(object_param) + except: + aligned_boxes[i].append([0] * 11) + else: + print('d') + aligned_boxes = 
np.array(aligned_boxes) + tf = pclib.pose_to_transformation(fdict['agents']['0']['lidar']['0']['pose']) + aligned_boxes = box_utils.transform_boxes_3d( + aligned_boxes.reshape(-1, 11), np.linalg.inv(tf), mode=11).reshape(aligned_boxes.shape) + fdict['meta']['bbx_center_global'] = aligned_boxes[0].tolist() + fdict['meta']['boxes_pred'] = {f"{int(f) + i + 1:06d}": x[:, [2, 3, 4, 10]].tolist() \ + for i, x in enumerate(aligned_boxes[1:])} + + return new_sdict + + +def remove_ego_boxes(meta_in): + mfs = glob.glob(os.path.join(meta_in, '*.json')) + for mf in mfs: + sdict = load_json(mf) + for f, fdict in sdict.items(): + gt_boxes = np.array(fdict['agents']['0']['gt_boxes']) + depth = np.linalg.norm(gt_boxes[:, 2:4], axis=-1) + gt_boxes = gt_boxes[depth > 2] + fdict['agents']['0']['gt_boxes'] = gt_boxes.tolist() + + global_boxes = np.array(fdict['meta']['bbx_center_global']) + mask = np.linalg.norm(global_boxes[:, 2:4], axis=-1) > 2 + fdict['meta']['bbx_center_global'] = global_boxes[mask].tolist() + boxes_pred = fdict['meta']['boxes_pred'] + fdict['meta']['boxes_pred'] = {k: np.array(v)[mask].tolist() for k, v in boxes_pred.items()} + + save_json(sdict, mf) + + +if __name__=="__main__": + root_dir = "/home/data/DAIR-V2X" + meta_out_dir = "/home/data/DAIR-V2X/meta-sub-scenes" + meta_path = "/home/data/cosense3d/dairv2x" + # root_dir = "/home/data/DAIR-V2X-Seq/SPD-Example" + # meta_out_dir = "/home/data/cosense3d/dairv2x_seq" + # convert_v2x_c(root_dir, meta_path) + # meta_dict = load_meta(os.path.join(meta_out_dir, 'dairv2x')) + # o3d_play_sequence(meta_dict, root_dir) + # optimize_poses(meta_path) + + # with open("/home/data/DAIR-V2X/meta/test.txt", 'w') as fh: + # files = glob.glob("/home/data/DAIR-V2X/meta/*.json") + # for f in files: + # fh.writelines(os.path.basename(f)[:-5] + '\n') + + # mfs = sorted(glob.glob("/home/yuan/data/DAIR-V2X/meta-loc-correct/*.json"))[:1] + # # mf = "/home/data/cosense3d/dairv2x/11.json" + # for mf in mfs: + # if int(os.path.basename(mf)[:-5]) <= 10: + # continue + # min_dist_frame, min_dist = register_step_one(mf) + # sdict = register_step_two(min_dist_frame, mf, meta_out_dir) + # parse_global_bboxes(mf, meta_out_dir, root_dir) + + # select_sub_scenes( + # "/home/yuan/data/DAIR-V2X/meta-loc-correct", + # "/home/yuan/data/DAIR-V2X", + # "/home/yuan/data/DAIR-V2X/meta-sub-scenes", + # "test" + # ) + + remove_ego_boxes("/home/yuan/data/DAIR-V2X/meta_with_pred") + + + + + + diff --git a/cosense3d/dataset/toolkit/opv2v.py b/cosense3d/dataset/toolkit/opv2v.py new file mode 100644 index 00000000..8b73b5bb --- /dev/null +++ b/cosense3d/dataset/toolkit/opv2v.py @@ -0,0 +1,703 @@ +import copy +import json +import math +import os +from glob import glob + +import matplotlib.pyplot as plt +import numpy as np +import torch +import tqdm +import open3d as o3d +import os.path as osp + +import cv2 +from collections import OrderedDict +from torch.utils.data import Dataset + + +from scipy.spatial.transform import Rotation as R +from cosense3d.utils.misc import load_yaml, save_json, load_json +from cosense3d.dataset.toolkit import register_pcds +from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs +from cosense3d.utils.box_utils import boxes_to_corners_3d +from cosense3d.utils.pclib import load_pcd +from cosense3d.utils.vislib import draw_points_boxes_plt, draw_2d_bboxes_on_img +from cosense3d.ops.utils import points_in_boxes_cpu + + +def x_to_world(pose: list) -> np.ndarray: + """ + The transformation matrix from x-coordinate system to carla world system + 
Parameters + + :param pose: [x, y, z, roll, yaw, pitch] + :return: The transformation matrix. + """ + x, y, z, roll, yaw, pitch = pose[:] + + # used for rotation matrix + c_y = np.cos(np.radians(yaw)) + s_y = np.sin(np.radians(yaw)) + c_r = np.cos(np.radians(roll)) + s_r = np.sin(np.radians(roll)) + c_p = np.cos(np.radians(pitch)) + s_p = np.sin(np.radians(pitch)) + + matrix = np.identity(4) + # translation matrix + matrix[0, 3] = x + matrix[1, 3] = y + matrix[2, 3] = z + + # rotation matrix + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + + return matrix + + +def x1_to_x2(x1, x2): + """ + Transformation matrix from x1 to x2. + + Parameters + ---------- + x1 : list or np.ndarray + The pose of x1 under world coordinates or + transformation matrix x1->world + x2 : list or np.ndarray + The pose of x2 under world coordinates or + transformation matrix x2->world + + Returns + ------- + transformation_matrix : np.ndarray + The transformation matrix. + + """ + if isinstance(x1, list) and isinstance(x2, list): + x1_to_world = x_to_world(x1) + x2_to_world = x_to_world(x2) + world_to_x2 = np.linalg.inv(x2_to_world) + transformation_matrix = np.dot(world_to_x2, x1_to_world) + + # object pose is list while lidar pose is transformation matrix + elif isinstance(x1, list) and not isinstance(x2, list): + x1_to_world = x_to_world(x1) + world_to_x2 = x2 + transformation_matrix = np.dot(world_to_x2, x1_to_world) + # both are numpy matrix + else: + world_to_x2 = np.linalg.inv(x2) + transformation_matrix = np.dot(world_to_x2, x1) + + return transformation_matrix + + +def create_bbx(extent): + """ + Create bounding box with 8 corners under obstacle vehicle reference. + + Parameters + ---------- + extent : list + Width, height, length of the bbx. + + Returns + ------- + bbx : np.array + The bounding box with 8 corners, shape: (8, 3) + """ + + bbx = np.array([[extent[0], -extent[1], -extent[2]], + [extent[0], extent[1], -extent[2]], + [-extent[0], extent[1], -extent[2]], + [-extent[0], -extent[1], -extent[2]], + [extent[0], -extent[1], extent[2]], + [extent[0], extent[1], extent[2]], + [-extent[0], extent[1], extent[2]], + [-extent[0], -extent[1], extent[2]]]) + + return bbx + + +def corner_to_center(corner3d, order='lwh'): + """ + Convert 8 corners to x, y, z, dx, dy, dz, yaw. 
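With the corner ordering produced by ``create_bbx``, the centre is the mean of corners 0, 3, 5 and 6 (the endpoints of two diagonally opposite box edges); h is the mean top-to-bottom height, while l, w and yaw are each averaged over four redundant edge measurements, which keeps the conversion stable for slightly non-rectangular corner sets.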
+ + Parameters + ---------- + corner3d : np.ndarray + (N, 8, 3) + + order : str + 'lwh' or 'hwl' + + Returns + ------- + box3d : np.ndarray + (N, 7) + """ + assert corner3d.ndim == 3 + batch_size = corner3d.shape[0] + + xyz = np.mean(corner3d[:, [0, 3, 5, 6], :], axis=1) + h = abs(np.mean(corner3d[:, 4:, 2] - corner3d[:, :4, 2], axis=1, + keepdims=True)) + l = (np.sqrt(np.sum((corner3d[:, 0, [0, 1]] - corner3d[:, 3, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 2, [0, 1]] - corner3d[:, 1, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 4, [0, 1]] - corner3d[:, 7, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 5, [0, 1]] - corner3d[:, 6, [0, 1]]) ** 2, + axis=1, keepdims=True))) / 4 + + w = (np.sqrt( + np.sum((corner3d[:, 0, [0, 1]] - corner3d[:, 1, [0, 1]]) ** 2, axis=1, + keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 2, [0, 1]] - corner3d[:, 3, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 4, [0, 1]] - corner3d[:, 5, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 6, [0, 1]] - corner3d[:, 7, [0, 1]]) ** 2, + axis=1, keepdims=True))) / 4 + + theta = (np.arctan2(corner3d[:, 1, 1] - corner3d[:, 2, 1], + corner3d[:, 1, 0] - corner3d[:, 2, 0]) + + np.arctan2(corner3d[:, 0, 1] - corner3d[:, 3, 1], + corner3d[:, 0, 0] - corner3d[:, 3, 0]) + + np.arctan2(corner3d[:, 5, 1] - corner3d[:, 6, 1], + corner3d[:, 5, 0] - corner3d[:, 6, 0]) + + np.arctan2(corner3d[:, 4, 1] - corner3d[:, 7, 1], + corner3d[:, 4, 0] - corner3d[:, 7, 0]))[:, + np.newaxis] / 4 + + if order == 'lwh': + return np.concatenate([xyz, l, w, h, theta], axis=1).reshape( + batch_size, 7) + elif order == 'hwl': + return np.concatenate([xyz, h, w, l, theta], axis=1).reshape( + batch_size, 7) + else: + raise NotImplementedError + + +def project_world_objects(object_dict, + output_dict, + lidar_pose, + order): + """ + Project the objects under world coordinates into another coordinate + based on the provided extrinsic. + + Parameters + ---------- + object_dict : dict + The dictionary contains all objects surrounding a certain cav. + + output_dict : dict + key: object id, value: object bbx (xyzlwhyaw). + + lidar_pose : list + (6, ), lidar pose under world coordinate, [x, y, z, roll, yaw, pitch]. 
+ + order : str + 'lwh' or 'hwl' + """ + for object_id, object_content in object_dict.items(): + location = object_content['location'] + rotation = object_content['angle'] + center = object_content['center'] + extent = object_content['extent'] + + if 'ass_id' not in object_content or object_content['ass_id'] == -1: + ass_id = object_id + else: + ass_id = object_content['ass_id'] + if 'obj_type' not in object_content: + obj_type = 'Car' + else: + obj_type = object_content['obj_type'] + + # todo: pedestrain is not consdered yet + # todo: only single class now + if obj_type == 'Pedestrian': + continue + + object_pose = [location[0] + center[0], + location[1] + center[1], + location[2] + center[2], + rotation[0], rotation[1], rotation[2]] + object2lidar = x1_to_x2(object_pose, lidar_pose) + + # shape (3, 8) + bbx = create_bbx(extent).T + # bounding box under ego coordinate shape (4, 8) + bbx = np.r_[bbx, [np.ones(bbx.shape[1])]] + + # project the 8 corners to lidar coordinate + bbx_lidar = np.dot(object2lidar, bbx).T + bbx_lidar = np.expand_dims(bbx_lidar[:, :3], 0) + bbx_lidar = corner_to_center(bbx_lidar, order=order) + + # get velocity + if 'speed' in object_content: + speed = object_content['speed'] + theta = bbx_lidar[0, -1] + velo = np.array([speed * np.cos(theta), speed * np.sin(theta)]) + else: + velo = None + + if bbx_lidar.shape[0] > 0: + output_dict.update({object_id: {'coord': bbx_lidar, + 'ass_id': ass_id, + 'velo': velo}}) + + +def update_local_boxes3d(fdict, objects_dict, ref_pose, order, data_dir, cav_id): + output_dict = {} + # add ground truth boxes at cav local coordinate + project_world_objects(objects_dict, + output_dict, + ref_pose, + order) + boxes_local = [] + velos = [] + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id = object_content['ass_id'] + else: + object_id = object_id + object_bbx = object_content['coord'] + if order == 'hwl': + object_bbx = object_bbx[:, [0, 1, 2, 5, 4, 3, 6]] + boxes_local.append( + [object_id, 0, ] + + object_bbx[0, :6].tolist() + + [0, 0, object_bbx[0, 6]] + ) + if 'velo' in object_content and object_content['velo'] is not None: + velos.append(object_content['velo'].tolist()) + + cs.update_agent(fdict, cav_id, gt_boxes=boxes_local) + if len(velos) == len(boxes_local): + cs.update_agent(fdict, cav_id, velos=velos) + + # get visibility of local boxes + lidar = load_pcd(os.path.join(data_dir, fdict['agents'][cav_id]['lidar']['0']['filename']))['xyz'] + if len(boxes_local) > 0: + boxes = np.array(boxes_local)[:, [2, 3, 4, 5, 6, 7, 10]] + res = points_in_boxes_cpu(lidar, boxes) + num_pts = res.sum(axis=1) + cs.update_agent(fdict, cav_id, num_pts=num_pts.tolist()) + else: + cs.update_agent(fdict, cav_id, num_pts=[]) + + +def opv2v_pose_to_cosense(pose): + if len(pose) == 6: + transformation = x_to_world(pose) + else: + transformation = pose + rot = R.from_matrix(transformation[:3, :3]).as_euler('xyz', degrees=False) + tl = transformation[:3, 3] + pose = tl.tolist() + rot.tolist() + return pose + + +def update_cam_params(opv2v_params, cosense_fdict, agent_id, scenario, frame): + for k, v in opv2v_params.items(): + if 'camera' in k: + cam_id = int(k[-1:]) + cs.add_cam_to_fdict( + cosense_fdict, + agent_id, + cam_id, + [os.path.join(scenario, agent_id, f'{frame}_{k}.png')], + v['intrinsic'], + v['extrinsic'], + pose=v['cords'], + ) + + +def project_points(points, lidar2cam, I): + """Project 3d points to image planes""" + points_homo = np.concatenate([points[:, :3], np.ones_like(points[:, :1])], 
axis=1).T + points_homo = lidar2cam @ points_homo + pixels = I @ points_homo[:3] + pixels[:2] = pixels[:2] / pixels[2:] + depths = points_homo[2] + return pixels, depths + + +def boxes_3d_to_2d(boxes3d, num_pts, lidar2cam, I, img_size): + n_box = len(boxes3d) + box_center = boxes3d.mean(axis=1) + box_points = boxes3d.reshape(-1, 3) + + box_pixels, _ = project_points(box_points, lidar2cam, I) + center_pixels, depths = project_points(box_center, lidar2cam, I) + + box_pixels = box_pixels.T.reshape(n_box, 8, 3) + mask = (box_pixels[:, :, 2] > 0).all(axis=1) + box_pixels = box_pixels[mask] + center_pixels = center_pixels[:2].T[mask] + depths = depths[mask] + num_pts = num_pts[mask] + x_min = np.clip(box_pixels[..., 0].min(axis=1), a_min=0, a_max=img_size[1]) + y_min = np.clip(box_pixels[..., 1].min(axis=1), a_min=0, a_max=img_size[0]) + x_max = np.clip(box_pixels[..., 0].max(axis=1), a_min=0, a_max=img_size[1]) + y_max = np.clip(box_pixels[..., 1].max(axis=1), a_min=0, a_max=img_size[0]) + mask = (x_min < img_size[1]) & (x_max > 0) & (y_min < img_size[0]) & (y_max > 0) + bbox_2d = np.stack([x_min[mask], y_min[mask], x_max[mask], y_max[mask]], axis=-1) + return bbox_2d, center_pixels[mask], depths[mask], num_pts[mask] + + +def update_2d_bboxes(fdict, cav_id, lidar_pose, data_dir): + local_boxes = np.array(fdict['agents'][cav_id]['gt_boxes']) + if len(local_boxes) > 0: + local_boxes = local_boxes[:, 2:] + num_pts = np.array(fdict['agents'][cav_id]['num_pts']) + boxes_corners = boxes_to_corners_3d(local_boxes) + # lidar = load_pcd(os.path.join(data_dir, fdict['agents'][cav_id]['lidar'][0]['filename'])) + # lidar = np.concatenate([lidar['xyz'], np.ones_like(lidar['intensity'])], axis=1) + # draw_points_boxes_plt(pc_range=100, points=lidar, filename="/home/yuan/Downloads/tmp.png") + cam_UE2pinhole = np.array([[0, 1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]) + for cam_id, cam_params in fdict['agents'][cav_id]['camera'].items(): + img = cv2.imread(os.path.join(data_dir, cam_params['filenames'][0]))[..., ::-1] + lidar2cam_UE = x1_to_x2(lidar_pose, cam_params['pose']) + lidar2cam_pinhole = cam_UE2pinhole @ lidar2cam_UE + I = np.array(cam_params['intrinsic']) + # draw_3d_points_boxes_on_img(img, lidar2cam_pinhole, I, lidar, boxes_corners) + bboxes2d, centers2d, depths, num_pts_2d = boxes_3d_to_2d( + boxes_corners, num_pts, lidar2cam_pinhole, I, img_size=img.shape) + # draw_2d_bboxes_on_img(img, bboxes2d) + cam_params['bboxes2d'] = bboxes2d.tolist() + cam_params['centers2d'] = centers2d.tolist() + cam_params['depths'] = depths.tolist() + cam_params['num_pts'] = num_pts_2d.tolist() + cam_params['lidar2cam'] = lidar2cam_pinhole.tolist() + else: + cam_UE2pinhole = np.array([[0, 1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]) + for cam_id, cam_params in fdict['agents'][cav_id]['camera'].items(): + lidar2cam_UE = x1_to_x2(lidar_pose, cam_params['pose']) + lidar2cam_pinhole = cam_UE2pinhole @ lidar2cam_UE + cam_params['lidar2cam'] = lidar2cam_pinhole.tolist() + cam_params['bboxes2d'] = [] + cam_params['centers2d'] = [] + cam_params['depths'] = [] + cam_params['num_pts'] = [] + + +def opv2v_to_cosense(path_in, path_out, isSim=True, correct_transf=False, pcd_ext='pcd'): + if isSim: + order = 'lwh' + else: + order = 'hwl' + flag = False + for split in ['train', 'test']: + scenarios = sorted(os.listdir(os.path.join(path_in, split))) + with open(os.path.join(path_out, f'{split}.txt'), 'w') as fh: + fh.write('\n'.join(scenarios)) + for s in scenarios: + print(s) + # if s == "2021_08_22_09_43_53": + # 
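# Minimal usage sketch for project_points() above (not part of the original patch;
# the camera parameters are made up): points are lifted to homogeneous coordinates,
# moved into the camera frame by lidar2cam, multiplied with the intrinsic matrix I,
# and the pixel coordinates are normalised by the camera-frame depth.
import numpy as np

I = np.array([[800.,   0., 400.],       # fx,  0, cx  (made-up pinhole intrinsics)
              [  0., 800., 300.],       #  0, fy, cy
              [  0.,   0.,   1.]])
lidar2cam = np.eye(4)                   # assume lidar and camera frames coincide
points = np.array([[0.0, 0.0, 10.0]])   # one point 10 m along the camera +z axis
pixels, depths = project_points(points, lidar2cam, I)
# pixels[:2, 0] -> (400., 300.), the principal point; depths[0] -> 10.0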
flag = True + # if not flag: + # continue + visualize = False + sdict = {} + spath = os.path.join(path_in, split, s) + cavs = sorted([x for x in os.listdir(spath) if os.path.isdir(os.path.join(spath, x))]) + ego_id = cavs[0] + frames = sorted([x[:-5] + for x in os.listdir(os.path.join(spath, ego_id)) if + x.endswith('.yaml') and 'sparse_gt' not in x]) + for f in tqdm.tqdm(frames): + fdict = cs.fdict_template() + ego_lidar_pose = None + object_id_stack = [] + object_velo_stack = [] + object_stack = [] + for i, cav_id in enumerate(cavs): + yaml_file = os.path.join(spath, cav_id, f'{f}.yaml') + params = load_yaml(yaml_file) + cs.update_agent(fdict, cav_id, agent_type='cav', + agent_pose=opv2v_pose_to_cosense(params['true_ego_pos'])) + update_cam_params(params, fdict, cav_id, s, f) + + if cav_id == ego_id: + ego_lidar_pose = params['lidar_pose'] + + # get transformation from ego to cav, correct transformation if necessary + transformation = x1_to_x2(params['lidar_pose'], ego_lidar_pose) + if not isSim and correct_transf and cav_id != ego_id: + ego_lidar_file = os.path.join(path_in, split, s, ego_id, f'{f}.pcd') + cav_lidar_file = os.path.join(path_in, split, s, cav_id, f'{f}.pcd') + transformation = register_pcds(cav_lidar_file, ego_lidar_file, transformation, visualize) + visualize = False + # cav_lidar_pose2ego = opv2v_pose_to_cosense(transformation) + + # get cav lidar pose in cosense format + cs.update_agent(fdict, cav_id, 'cav') + cs.update_agent_lidar(fdict, cav_id, '0', + lidar_pose=opv2v_pose_to_cosense(params['lidar_pose']), + lidar_file=os.path.join(s, cav_id, f'{f}.{pcd_ext}')) + + objects_dict = params['vehicles'] + output_dict = {} + if isSim: + glob_ref_pose = ego_lidar_pose + local_ref_pose = params['lidar_pose'] + else: + glob_ref_pose = transformation + local_ref_pose = [0,] * 6 + + data_dir = os.path.join(path_in, split) + update_local_boxes3d(fdict, objects_dict, local_ref_pose, order, data_dir, cav_id) + if isSim: + # v2vreal has no camera data + update_2d_bboxes(fdict, cav_id, params['lidar_pose'], data_dir) + + # add gt boxes in ego coordinates as global boxes of cosense3d format + project_world_objects(objects_dict, + output_dict, + glob_ref_pose, + order) + + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id_stack.append(object_content['ass_id']) + else: + object_id_stack.append(object_id + 100 * int(cav_id)) + if object_content['velo'] is not None: + object_velo_stack.append(object_content['velo']) + object_stack.append(object_content['coord']) + + # exclude all repetitive objects + unique_indices = \ + [object_id_stack.index(x) for x in set(object_id_stack)] + object_stack = np.vstack(object_stack) + object_stack = object_stack[unique_indices] + if len(object_velo_stack) == len(object_stack): + object_velo_stack = np.vstack(object_velo_stack) + object_velo_stack = object_velo_stack[unique_indices] + if order == 'hwl': + object_stack = object_stack[:, [0, 1, 2, 5, 4, 3, 6]] + + cosense_bbx_center = np.zeros((len(object_stack), 11)) + cosense_bbx_center[:, 0] = np.array(object_id_stack)[unique_indices] + cosense_bbx_center[:, 2:8] = object_stack[:, :6] + cosense_bbx_center[:, 10] = object_stack[:, 6] + cs.update_frame_bbx(fdict, cosense_bbx_center.tolist()) + if '0' not in cavs: + fdict['agents'].pop('0') # remove template agent + + fdict['meta']['ego_id'] = ego_id + fdict['meta']['ego_lidar_pose'] = opv2v_pose_to_cosense(ego_lidar_pose) + if len(object_velo_stack) == len(object_stack): + 
fdict['meta']['bbx_velo_global'] = object_velo_stack.tolist() + + boxes_num_pts = {int(i): 0 for i in cosense_bbx_center[:, 0]} + for adict in fdict['agents'].values(): + for box, num_pts in zip(adict['gt_boxes'], adict['num_pts']): + boxes_num_pts[int(box[0])] += num_pts + fdict['meta']['num_pts'] = [boxes_num_pts[int(i)] for i in cosense_bbx_center[:, 0]] + + sdict[f] = fdict + + # plot + # ego_pose = pose_to_transformation(fdict['meta']['ego_lidar_pose']) + # ax = None + # for ai, adict in fdict['agents'].items(): + # cav_pose = pose_to_transformation(adict['lidar'][0]['pose']) + # T_cav2ego = np.linalg.inv(ego_pose) @ cav_pose + # lidar_file = os.path.join(path_in, split, adict['lidar'][0]['filename']) + # points = load_pcd(lidar_file)['xyz'] + # points = np.concatenate([points, np.ones_like(points[:, :1])], axis=-1) + # points = (T_cav2ego @ points.T).T + # color = 'g' if ai == ego_id else 'r' + # ax = draw_points_boxes_plt( + # pc_range=100, + # points=points[:, :3], + # points_c=color, + # ax=ax, + # return_ax=True + # ) + # plt.show() + # plt.close() + # pass + save_json(sdict, os.path.join(path_out, f'{s}.json')) + + +def pose_to_transformation(pose): + """ + + Args: + pose: list, [x, y, z, roll, pitch, yaw] + + Returns: + transformation: np.ndarray, (4, 4) + """ + transformation = np.eye(4) + r = R.from_euler('xyz', pose[3:]).as_matrix() + transformation[:3, :3] = r + transformation[:3, 3] = np.array(pose[:3]) + return transformation + + +def update_global_bboxes_num_pts(data_dir, meta_path): + json_files = glob(meta_path + '/*.json') + for jf in tqdm.tqdm(json_files): + # tmp = os.path.join(data_dir, 'train', os.path.basename(jf)[:-5]) + # data_dir_split = os.path.join(data_dir, 'train') if os.path.exists(tmp) else os.path.join(data_dir, 'test') + with open(jf, 'r') as fh: + meta = json.load(fh) + for f, fdict in meta.items(): + # lidar_files = [ldict['filename'] for adict in fdict['agents'].values() for ldict in adict['lidar'].values()] + # lidar_files = [os.path.join(data_dir_split, lf) for lf in lidar_files] + # pcds = [load_pcd(lf)['xyz'] for lf in lidar_files] + # pcds = np.concatenate(pcds, axis=0) + boxes = np.array(fdict['meta']['bbx_center_global']) + boxes_num_pts = {int(i): 0 for i in boxes[:, 0]} + for adict in fdict['agents'].values(): + for box, num_pts in zip(adict['gt_boxes'], adict['num_pts']): + boxes_num_pts[int(box[0])] += num_pts + fdict['meta']['num_pts'] = [boxes_num_pts[int(i)] for i in boxes[:, 0]] + + save_json(meta, jf.replace('opv2v', 'opv2v_full_')) + + +def generate_bevmaps(data_dir, meta_path): + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps" + map_files = glob(os.path.join(map_path, '*.png')) + scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + map_bounds = load_json(os.path.join(assets_path, 'map_bounds.json')) + bevmaps = {} + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + bevmap = cv2.imread(mf) + # bevmap = np.pad(bevmap, ((pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0) + bevmaps[town] = bevmap + + T_corr = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]]) + + json_files = glob(meta_path + '/*.json') + grid = np.ones((500, 500)) + inds = np.stack(np.where(grid)) + xy = inds * 0.2 - 50 + 0.1 + xy_pad = np.concatenate([xy, np.zeros_like(xy[:1]), np.ones_like(xy[:1])], axis=0) + for jf in tqdm.tqdm(json_files): + scene = os.path.basename(jf).split('.')[0] + town = scene_maps[scene] + cur_map = bevmaps[town] + sx, sy 
= cur_map.shape[:2] + meta = load_json(jf) + for f, fdict in meta.items(): + for ai, adict in fdict['agents'].items(): + lidar_pose = adict['lidar']['0']['pose'] + transform = T_corr @ pose_to_transformation(lidar_pose) + xy_tf = transform @ xy_pad + # xy_tf = xy_pad + # xy_tf[0] = xy_tf[0] - lidar_pose[0] + # xy_tf[1] = xy_tf[1] - lidar_pose[1] + xy_tf[0] -= map_bounds[town][0] + xy_tf[1] -= map_bounds[town][1] + map_inds = np.floor(xy_tf[:2] / 0.2) + xs = np.clip(map_inds[0], 0, sx - 1).astype(int) + ys = np.clip(map_inds[1], 0, sy - 1).astype(int) + bevmap = cur_map[xs, ys].reshape(500, 500, 3)[::-1, ::-1] + + filename = os.path.join(data_dir, 'train', scene, ai, f'{f}_bev.png') + if not os.path.exists(filename): + filename = os.path.join(data_dir, 'test', scene, ai, f'{f}_bev.png') + gt_bev = cv2.imread(filename) + + img = np.zeros((500, 1050, 3)) + img[:, :500] = bevmap[:, ::-1] + img[:, 550:] = gt_bev + cv2.imwrite('/home/yuan/Downloads/tmp.png', img) + print(filename) + + +def generate_roadline(map_dir, map_bounds_file): + """ + Convert global BEV semantic maps to 2d road line points. + + :param map_dir: directory for images of BEV semantic maps + :param map_bounds_file: json file that describe the world coordinates of the BEV map origin (image[0, 0]) + :return: Nx2 array, 2d world coordinates of road line points in meters. + """ + bounds = load_json(map_bounds_file) + map_files = glob(map_dir) + for mf in map_files: + roadmap = cv2.imread(mf) + # TODO + + +def convert_bev_semantic_map_to_road_height_map(map_dir, map_bounds_file, scenario_town_map_file, meta_dir): + import torch + bounds = load_json(map_bounds_file) + scenario_town_map = load_json(scenario_town_map_file) + map_files = os.listdir(map_dir) + bevmaps = {mf.split('.')[0]: cv2.imread(os.path.join(map_dir, mf))[..., :2] for mf in map_files} + trajectory = {mf.split('.')[0]: [] for mf in map_files} + meta_files = glob(os.path.join(meta_dir, "*.json")) + for mf in meta_files: + scenario = os.path.basename(mf).split('.')[0] + sdict = load_json(mf) + ego_poses = [] + for f, fdict in sdict.items(): + # gt_boxes = {f"{int(x[0]):d}": x[1:] for x in ego_dict['gt_boxes']} + # ego_box = gt_boxes[fdict['meta']['ego_id']] + ego_poses.append(fdict['agents'][fdict['meta']['ego_id']]['pose'][:3]) + trajectory[scenario_town_map[scenario]].extend(ego_poses) + + for town, bevmap in bevmaps.items(): + inds = np.where(bevmap[..., 1]) + coords = np.stack(inds, axis=1) * 0.2 + coords = torch.from_numpy(coords).cuda() + bound = bounds[town] + coords[:, 0] += bound[0] + coords[:, 1] += bound[1] + traj_pts = torch.tensor(trajectory[town]).cuda() + + for i in range(0, len(coords), 10000): + i1 = i*10000 + i2 = (i+1)*10000 + dists = torch.norm(coords[i1:i2, None, :2] - traj_pts[None, :, :2], dim=-1) + min_dist, min_idx = dists.min(dim=-1) + heights = traj_pts[min_idx][:, -1] + # TODO + + +if __name__=="__main__": + # opv2v_to_cosense( + # "/home/data/v2vreal", + # "/home/data/v2vreal/meta", + # isSim=False, + # pcd_ext='pcd' + # ) + + # generate_bevmaps( + # "/home/yuan/data/OPV2Va", + # "/home/yuan/data/OPV2Va/meta", + # ) + + convert_bev_semantic_map_to_road_height_map( + "/code/CoSense3d/cosense3d/carla/assets/maps", + "/code/CoSense3d/cosense3d/carla/assets/map_bounds.json", + "/code/CoSense3d/cosense3d/carla/assets/scenario_town_map.json", + "/home/data/OPV2Va/meta" + ) + diff --git a/cosense3d/dataset/toolkit/opv2v_t.py b/cosense3d/dataset/toolkit/opv2v_t.py new file mode 100644 index 00000000..49c9f96a --- /dev/null +++ 
b/cosense3d/dataset/toolkit/opv2v_t.py @@ -0,0 +1,722 @@ +import glob +import os.path +import random + +import numpy as np +import torch +from plyfile import PlyData +from matplotlib import colormaps +from multiprocessing import Pool +import torch_scatter +from functools import partial + +from cosense3d.dataset.toolkit.opv2v import * +from cosense3d.utils.vislib import o3d_draw_pcds_bbxs +from cosense3d.utils.pclib import save_cosense_ply, pose_to_transformation +from cosense3d.utils.box_utils import transform_boxes_3d +from cosense3d.utils.misc import load_json, update_dict +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.utils.common import cat_coor_with_idx + + +# jet = cm.get_cmap('jet') +jet = colormaps['jet'] + + +def read_ply(filename, properties=None): + ply = PlyData.read(filename) + data = ply['vertex'] + properties_from_file = [p.name for p in ply.elements[0].properties] + if properties is None: + properties = properties_from_file + else: + for p in properties: + assert p in properties_from_file, f"Property '{p}' not found." + data_dict = {} + for p in properties: + data_dict[p] = np.array(data[p]) + + return data_dict + + +def get_local_boxes3d(objects_dict, ref_pose, order): + output_dict = {} + # add ground truth boxes at cav local coordinate + project_world_objects(objects_dict, + output_dict, + ref_pose, + order) + boxes_local = [] + velos = [] + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id = object_content['ass_id'] + else: + object_id = object_id + object_bbx = object_content['coord'] + if order == 'hwl': + object_bbx = object_bbx[:, [0, 1, 2, 5, 4, 3, 6]] + boxes_local.append( + [object_id, 0, ] + + object_bbx[0, :6].tolist() + + [0, 0, object_bbx[0, 6]] + ) + if 'velo' in object_content and object_content['velo'] is not None: + velos.append(object_content['velo'].tolist()) + # TODO adapt velos + else: + velos.append([0., 0.]) + + return boxes_local, velos + + +def read_ply_to_dict(f): + data = read_ply(f) + timestamp = os.path.basename(f).split('.')[:-1] + timestamp = int(timestamp[0]) * 0.05 + int(timestamp[1]) * 0.01 + timestamp = np.ones_like(data['x']) * timestamp + data['time'] = timestamp.astype(np.float32) + return data + + +def read_sub_frame(f): + pcd_dict = read_ply_to_dict(f + '.ply') + params = load_yaml(f + '_objects.yaml', cloader=True) + # params = load_yaml(f + '.yaml') + # update_dict(params, params_) + gt_boxes, velos = get_local_boxes3d(params['vehicles'], + params['lidar_pose'], 'lwh') + gt_boxes = np.array(gt_boxes) + # velos = np.array(velos) + points = np.stack([pcd_dict[x] for x in 'xyz'], axis=-1) + points = points[pcd_dict['ObjTag'] == 10] + return gt_boxes, pcd_dict, points + + +def get_box_velo(box, speeds, frame): + box_id = str(int(box[0])) + try: + speed = speeds[box_id][frame] + except: + if box_id not in speeds: + speed = 0.0 + elif frame not in speeds[box_id]: + frames = list(speeds[box_id].keys()) + nearst_frame_idx = (np.array(frames).astype(int) - int(frame)).argmax() + speed = speeds[box_id][frames[nearst_frame_idx]] + else: + raise NotImplementedError + return speed + + +def get_velos(boxes, speeds, frame): + with Pool(16) as pool: + out_speeds = pool.map( + partial(get_box_velo, speeds=speeds, frame=frame), + boxes + ) + out_speeds = np.array(out_speeds) + + theta = boxes[:, -1] + velos = np.stack([out_speeds * np.cos(theta), + out_speeds * np.sin(theta)], axis=-1) + return velos + + +def pad_box_result(res, out_len): + if len(res[0]) == out_len: 
+ return res + box = np.zeros((out_len,) + res[0].shape[1:], dtype=res[0].dtype) + box[:res[0].shape[0]] = res[0] + # set id index to -1 to indicate it is padded + box[res[0].shape[0]:, 0] = -1 + box[res[0].shape[0]:, 4] = 100 + return box, res[1], res[2] + + +def parse_sub_frame(f): + pcd_dict = read_ply_to_dict(f + '.ply') + params = load_yaml(f + '.yaml') + gt_boxes, velos = get_local_boxes3d(params['vehicles'], + params['lidar_pose'], 'lwh') + gt_boxes = np.array(gt_boxes) + velos = np.array(velos) + points = np.stack([pcd_dict[x] for x in 'xyz'], axis=-1) + pts_mask = points_in_boxes_cpu(torch.from_numpy(points), + torch.from_numpy(gt_boxes)[:, [2, 3, 4, 5, 6, 7, 10]]) + num_pts = pts_mask.sum(dim=-1).numpy() + box_mask = num_pts > 0 + gt_boxes = gt_boxes[box_mask].tolist() + velos = velos[box_mask].tolist() + num_pts = num_pts[box_mask].tolist() + + # update boxes dict + # for i, box in enumerate(gt_boxes): + # id = int(box[0]) + # if id not in boxes: + # boxes[id] = { + # 'box': box, + # 'velo': velos[i], + # 'num_pts': num_pts[i] + # } + # else: + # if boxes[id]['num_pts'] < num_pts[i]: + # boxes[id] = { + # 'box': box, + # 'velo': velos[i], + # 'num_pts': num_pts[i] + boxes[id]['num_pts'] + # } + # else: + # boxes[id]['num_pts'] += num_pts[i] + + return (gt_boxes, velos, num_pts, pcd_dict) + + +def read_frame_plys_boxes(path, frame, prev_frame=None, time_offset=0, parse_boxes=True): + data_list = [] + files = [] + if prev_frame is not None: + files_prev_frame = [f'{prev_frame}.{i}' for i in range(10 - time_offset, 10)] + files.extend(files_prev_frame) + files_cur_frame = [f'{frame}.{i}' for i in range(0, 10 - time_offset)] + files.extend(files_cur_frame) + files = [os.path.join(path, f) for f in files] + boxes = {} + + with Pool(10) as pool: + res = pool.map(read_sub_frame, files) + max_len = max([len(x[0]) for x in res]) + res = pool.starmap(pad_box_result, zip(res, [max_len] * len(res))) + + pcd_dict = {k: np.concatenate([d[1][k] for d in res], axis=0) for k in res[0][1]} + boxes_tensor = cat_coor_with_idx([torch.from_numpy(x[0]) for x in res]).float() + points_tensor = cat_coor_with_idx([torch.from_numpy(x[2]) for x in res]).float() + + _, pts_idx_of_box = points_in_boxes_gpu(points_tensor.cuda(), + boxes_tensor[:, [0, 3, 4, 5, 6, 7, 8, 11]].cuda(), + batch_size=len(res)) + + pts_idx_of_box = pts_idx_of_box[pts_idx_of_box >= 0] + cnt = torch.ones_like(pts_idx_of_box) + num_pts_in_box = cnt.new_zeros(len(boxes_tensor)) + torch_scatter.scatter_add(cnt, pts_idx_of_box, out=num_pts_in_box, dim=0) + num_pts_in_box = num_pts_in_box.reshape(10, -1).cpu() + num_pts = num_pts_in_box.sum(dim=0) + boxes_tensor = boxes_tensor.view(10, -1, boxes_tensor.shape[-1])[..., 1:] + max_inds = num_pts_in_box.max(dim=0).indices + boxes_selected = boxes_tensor[max_inds, torch.arange(len(max_inds))].numpy() + boxes_selected = boxes_selected[boxes_selected[:, 0] >= 0] + + # o3d_draw_pcds_bbxs([points_tensor[:, 1:].numpy()], [boxes_selected]) + + return pcd_dict, boxes_selected, num_pts + + +def load_frame_data(scene_dir, cavs, frame): + ego_id = cavs[0] + yaml_file = os.path.join(scene_dir, ego_id, f'{frame}.5.yaml') + meta = load_yaml(yaml_file) + gt_boxes, velos = get_local_boxes3d(meta['vehicles'], + meta['lidar_pose'], 'lwh') + ego_pose = meta['lidar_pose'] + + points_list = [] + time_list = [] + for cav in cavs: + cav_dir = os.path.join(scene_dir, cav) + data = read_frame_plys_boxes(cav_dir, frame, parse_boxes=False)[0] + points = np.stack([data[k] for k in 'xyz'], axis=-1) + times = (data['time'] 
- data['time'].min()) * 10 + lidar_pose = load_yaml( + os.path.join(scene_dir, cav, f'{frame}.5.yaml'))['lidar_pose'] + transform = x1_to_x2(lidar_pose, ego_pose) + points = (transform[:3, :3] @ points.T + transform[:3, 3].reshape(3, 1)).T + points_list.append(points) + time_list.append(times) + points = np.concatenate(points_list, axis=0) + times = np.concatenate(time_list, axis=0) + return points, times, gt_boxes, velos + + +def opv2vt_to_cosense(data_dir, split, data_out_dir, meta_out_dir): + order = 'lwh' + time_offsets = load_json(os.path.join(data_out_dir, 'time_offsets.json')) + split_dir = os.path.join(data_dir, split) + scenes = sorted(os.listdir(split_dir))[:2] + with open(os.path.join(meta_out_dir, f'{split}.txt'), 'w') as fh: + fh.write('\n'.join(scenes)) + for s in scenes: + print(s) + scene_dir = os.path.join(split_dir, s) + sdict = {} + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + if os.path.exists(os.path.join(scene_dir, 'speeds.json')): + speeds = load_json(os.path.join(scene_dir, 'speeds.json')) + else: + speeds = parse_speed_from_yamls(scene_dir) + ego_id = cavs[0] + frames = sorted([x.split(".")[0] for x in os.listdir( + os.path.join(scene_dir, cavs[0])) if '.0.ply' in x]) + for i, f in tqdm.tqdm(enumerate(frames[1:-1])): + frame_mid_time = int(f) * 0.05 + 0.05 + fdict = cs.fdict_template() + ego_lidar_pose = None + object_id_stack = [] + object_velo_stack = [] + object_stack = [] + for j, cav_id in enumerate(cavs): + cur_data_out_dir = os.path.join(data_out_dir, split, s, cav_id) + os.makedirs(cur_data_out_dir, exist_ok=True) + yaml_file = os.path.join(scene_dir, cav_id, f'{f}.5.yaml') + params = load_yaml(yaml_file, cloader=True) + cs.update_agent(fdict, cav_id, agent_type='cav', agent_pose=params['true_ego_pos']) + # update_cam_params(params, fdict, cav_id, s, f) + + if cav_id == ego_id: + ego_lidar_pose = params['lidar_pose'] + + # get cav lidar pose in cosense format + cs.update_agent(fdict, cav_id, 'cav') + cs.update_agent_lidar(fdict, cav_id, 0, + lidar_pose=opv2v_pose_to_cosense(params['lidar_pose']), + lidar_file=os.path.join(s, cav_id, f'{f}.ply')) + # save lidar files + data, local_boxes, num_pts = read_frame_plys_boxes(os.path.join(scene_dir, cav_id), f, + prev_frame=frames[i], time_offset=time_offsets[s][cav_id]) + velos = get_velos(local_boxes, speeds, f) + # save_cosense_ply(data, os.path.join(cur_data_out_dir, f'{f}.ply')) + + objects_dict = params.get('vehicles', {}) + output_dict = {} + glob_ref_pose = ego_lidar_pose + local_ref_pose = params['lidar_pose'] + + # update_local_boxes + cs.update_agent(fdict, cav_id, gt_boxes=local_boxes.tolist()) + cs.update_agent(fdict, cav_id, velos=velos.tolist()) + cs.update_agent(fdict, cav_id, num_pts=num_pts.tolist()) + # update_2d_bboxes(fdict, cav_id, params['lidar_pose'], data_dir) + + # add gt boxes in ego coordinates as global boxes of cosense3d format + project_world_objects(objects_dict, + output_dict, + glob_ref_pose, + order) + + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id_stack.append(object_content['ass_id']) + else: + object_id_stack.append(object_id + 100 * int(cav_id)) + if object_content['velo'] is not None: + object_velo_stack.append(object_content['velo']) + object_stack.append(object_content['coord']) + + # exclude all repetitive objects + unique_indices = \ + [object_id_stack.index(x) for x in set(object_id_stack)] + object_stack = np.vstack(object_stack) + object_stack = 
object_stack[unique_indices] + if len(object_velo_stack) > 0: + object_velo_stack = np.vstack(object_velo_stack) + object_velo_stack = object_velo_stack[unique_indices] + if order == 'hwl': + object_stack = object_stack[:, [0, 1, 2, 5, 4, 3, 6]] + + cosense_bbx_center = np.zeros((len(object_stack), 11)) + cosense_bbx_center[:, 0] = np.array(object_id_stack)[unique_indices] + cosense_bbx_center[:, 2:8] = object_stack[:, :6] + cosense_bbx_center[:, 10] = object_stack[:, 6] + cs.update_frame_bbx(fdict, cosense_bbx_center.tolist()) + fdict['agents'].pop(0) # remove template agent + + fdict['meta']['ego_id'] = ego_id + fdict['meta']['ego_lidar_pose'] = opv2v_pose_to_cosense(ego_lidar_pose) + fdict['meta']['global_bbox_time'] = np.full(len(cosense_bbx_center), frame_mid_time).tolist() + fdict['meta']['bbx_velo_global'] = get_velos(cosense_bbx_center, speeds, f).tolist() + + sdict[f] = fdict + + save_json(sdict, os.path.join(meta_out_dir, f'{s}.json')) + del sdict + + +def vis_frame_data(): + scene_dir = "/koko/OPV2V/temporal_dump/test/2021_08_18_19_48_05" + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + frames = sorted([x.split(".")[0] for x in os.listdir( + os.path.join(scene_dir, cavs[0])) if '.0.ply' in x]) + for f in frames[::10]: + points, times, local_boxes, velos = load_frame_data(scene_dir, cavs, f) + pcd = o3d.geometry.PointCloud() + # color_inds = np.round(times).astype(int) + colors = jet(times)[:, :3] + o3d_draw_pcds_bbxs([points], [np.array(local_boxes)], + pcds_colors=[colors]) + + +def parse_speed_from_yamls(scene_dir): + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + vehicle_dict = {} + for cav in cavs: + cav_dir = os.path.join(scene_dir, cav) + yamls = sorted(glob(os.path.join(cav_dir, '*5_objects.yaml'))) + for yaml in tqdm.tqdm(yamls): + frame = int(os.path.basename(yaml).split('.')[0]) + params = load_yaml(yaml, cloader=True) + for k, v in params['vehicles'].items(): + if k not in vehicle_dict: + vehicle_dict[k] = {'frames': [], 'locations': []} + if frame not in vehicle_dict[k]['frames']: + vehicle_dict[k]['frames'].append(frame) + vehicle_dict[k]['locations'].append(v['location']) + + # vehicle_dict = load_json(os.path.join(scene_dir, 'vehicles.json')) + velo_dict = {} + for veh_id, veh_info in vehicle_dict.items(): + times = np.array(veh_info['frames']) * 0.05 + sort_inds = np.argsort(times) + times = times[sort_inds] + locations = np.array(veh_info['locations']) + locations = locations[sort_inds] + time_offsets = times[1:] - times[:-1] + interp_inds = np.where(time_offsets > 0.15)[0] + loc_offsets = np.linalg.norm(locations[1:] - locations[:-1], axis=-1) + speeds = loc_offsets / time_offsets + + # interpolate missed frames + speeds_interp = [] + times_interp = [] + last_idx = 0 + for idx in interp_inds: + speeds_interp.extend(speeds[last_idx:idx]) + times_interp.extend(times[last_idx:idx]) + steps = int(round(time_offsets[idx] * 10)) + if idx == 0: + interp_s = [speeds[0]] * (steps - 1) + interp_t = [times[0]] * (steps - 1) + else: + interp_s = np.linspace(speeds[idx-1], speeds[idx], steps + 1)[1:-1].tolist() + interp_t = np.linspace(times[idx-1], times[idx], steps + 1)[1:-1].tolist() + speeds_interp.extend(interp_s) + times_interp.extend(interp_t) + last_idx = idx + speeds_interp.extend(speeds[last_idx:]) + times_interp.extend(times[last_idx:]) + + velo_dict[veh_id] = {f'{round(t*20):06d}': speed for t, speed in zip(times_interp, speeds_interp)} + save_json(velo_dict, 
os.path.join(scene_dir, 'speeds.json')) + return velo_dict + + +def update_velo(scenario_meta_file): + meta = load_json(scenario_meta_file) + frames = sorted(list(meta.keys())) + objects = {} + + # find all global objects + for f in frames: + fdict = meta[f] + boxes = fdict['meta']['bbx_center_global'] + for box in boxes: + box_id = int(box[0]) + if box_id not in objects: + objects[box_id] = {'frames': [], 'box': []} + objects[box_id]['frames'].append(int(f)) + objects[box_id]['boxes'].append(box) + + def cal_velos(cur_gt_boxes, next_gt_boxes, cur_pose, next_pose, meta_last): + cur_gt_boxes_dict = {int(box[0]): box for box in cur_gt_boxes} + next_gt_boxes_np = np.array(next_gt_boxes) + cur_pose = pose_to_transformation(cur_pose) + next_pose = pose_to_transformation(next_pose) + transf_next_to_cur = np.linalg.inv(cur_pose) @ next_pose + next_gt_boxes_np = transform_boxes_3d(next_gt_boxes_np, transf_next_to_cur) + next_gt_boxes_dict = {int(box[0]): box.tolist() for box in next_gt_boxes_np} + velos = {} + for k, v in cur_gt_boxes_dict.items(): + if k not in next_gt_boxes_dict: + if k in meta_last: + velos[k] = meta_last[k] + else: + velos[k] = [0, 0] + continue + velo = [(next_gt_boxes_dict[k][2] - v[2]) * 10, (next_gt_boxes_dict[k][3] - v[3]) * 10] # m/s + velos[k] = velo + velos = [velos[int(box[0])] for box in cur_gt_boxes] + return velos + + for i, f in enumerate(frames[:-1]): + fdict = meta[f] + global_ids = sorted([int(box[0]) for box in fdict['meta']['bbx_center_global']]) + global_ids = set(global_ids) + local_ids = [] + for a, adict in fdict['agents'].items(): + local_ids.extend([int(box[0]) for box in adict['gt_boxes']]) + local_ids = set(local_ids) + next_fdict = meta[frames[i + 1]] + last_fdict = meta[frames[max(i-1, 0)]] + + if i == 0: + meta_last = {} + else: + meta_last = {int(box[0]): last_fdict['meta']['bbx_velo_global'][i] \ + for i, box in enumerate(last_fdict['meta']['bbx_center_global'])} + meta[f]['meta']['bbx_velo_global'] = cal_velos( + fdict['meta']['bbx_center_global'], + next_fdict['meta']['bbx_center_global'], + fdict['meta']['ego_lidar_pose'], + next_fdict['meta']['ego_lidar_pose'], + meta_last + ) + for a, adict in fdict['agents'].items(): + if i == 0: + meta_last = {} + else: + meta_last = {int(box[0]): last_fdict['agents'][a]['velos'][i] \ + for i, box in enumerate(last_fdict['agents'][a]['gt_boxes'])} + velos = cal_velos( + adict['gt_boxes'], next_fdict['agents'][a]['gt_boxes'], + adict['lidar']['0']['pose'], next_fdict['agents'][a]['lidar']['0']['pose'], + meta_last + ) + meta[f]['agents'][a]['velos'] = velos + save_json(meta, scenario_meta_file) + + +def vis_cosense_scenario(scenario_meta_file, data_dir): + meta = load_json(scenario_meta_file) + for f, fdict in meta.items(): + global_boxes = np.array(fdict['meta']['bbx_center_global']) + for a, adict in fdict['agents'].items(): + lidar_file = os.path.join(data_dir, adict['lidar']['0']['filename']) + pcd_dict = read_ply(lidar_file) + points = np.stack([pcd_dict[x] for x in 'xyz'], axis=-1) + boxes = np.array(adict['gt_boxes']) + + o3d_draw_pcds_bbxs([points], [boxes, global_boxes], + bbxs_colors=[[0, 255, 0], [255, 0, 0]]) + + +def gen_time_offsets(data_dir): + out_dict = {} + for split in ['train', 'test']: + split_dir = os.path.join(data_dir, split) + scenes = os.listdir(split_dir) + for s in scenes: + out_dict[s] = {} + scene_dir = os.path.join(split_dir, s) + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + for i, cav in enumerate(cavs): + if i == 0: + 
out_dict[s][cav] = 0 + else: + out_dict[s][cav] = random.randint(0, 5) + save_json(out_dict, os.path.join(data_dir, f'time_offsets.json')) + + +def load_vehicles_gframe(params): + """Load vehicles in global coordinate system.""" + object_dict = params['vehicles'] + object_out = {} + for object_id, object_content in object_dict.items(): + location = object_content['location'] + rotation = object_content['angle'] + center = object_content['center'] + extent = object_content['extent'] + + object_pose = [location[0] + center[0], + location[1] + center[1], + location[2] + center[2], + rotation[0], rotation[1], rotation[2]] + + object_out[object_id] = [0,] + object_pose[:3] + extent + object_pose[3:] + return object_out + + +def transform_boxes_global_to_ref(boxes, ref_pose): + pass + + +def update_global_boxes(root_dir, meta_in, meta_out, split): + split_dir = os.path.join(root_dir, split) + scenes = os.listdir(split_dir) + for s in scenes: + scene_dir = os.path.join(split_dir, s) + sdict = load_json(os.path.join(meta_in, f"{s}.json")) + cavs = sorted([x for x in os.listdir(scene_dir) if os.path.isdir(os.path.join(scene_dir, x))]) + + ego_files = sorted(glob(os.path.join(scene_dir, cavs[0], '*.0_objects.yaml'))) + sim_frames = [os.path.basename(x)[:6] for x in ego_files] + global_objects = {x: {} for x in sim_frames} + ego_poses = {} + + for cav in cavs[1:]: + yaml_files = sorted(glob(os.path.join(scene_dir, cav, '*.0_objects.yaml'))) + for yf in yaml_files: + frame = os.path.basename(yf)[:6] + objects = load_yaml(yf)['vehicles'] + global_objects[frame].update(objects) + for yf in ego_files: + frame = os.path.basename(yf)[:6] + params = load_yaml(yf) + ego_poses[frame] = params['lidar_pose'] + global_objects[frame].update(params['vehicles']) + + frames = sorted(list(sdict.keys())) + for f in frames[:-1]: + lidar_pose = ego_poses[f] + sdict[f]['meta']['boxes_pred'] = {} + box_ids = [int(box[0]) for box in sdict[f]['meta']['bbx_center_global']] + for i in range(1, 3): + cur_frame = f"{int(f) + i * 2:06d}" + boxes_global = global_objects[cur_frame] + boxes_ref = {} + project_world_objects(boxes_global, boxes_ref, lidar_pose, 'lwh') + boxes_pred = [] + for box_id in box_ids: + if box_id in boxes_global: + pred = boxes_ref[box_id]['coord'].reshape(7)[[0, 1, 2, 6]].tolist() + else: + pred = [0,] * 4 + boxes_pred.append(pred) + sdict[f]['meta']['boxes_pred'][cur_frame] = boxes_pred + sdict.pop(frames[-1]) + save_json(sdict, os.path.join(meta_out, f"{s}.json")) + + +def update_bev_map(root_dir, meta_in, meta_out, split): + from cosense3d.dataset.const import OPV2V_TOWN_DICTIONARY + resolution = 0.2 + pixels_per_meter = 1 / resolution + radius = 100 + map_bounds = load_json(f'../../carla/assets/map_bounds.json') + split_dir = os.path.join(root_dir, split) + scenes = os.listdir(split_dir)[3:] + x = np.linspace(- radius + 0.5 * resolution, radius, + int(radius * 2 / resolution) - 1) + bev_points = np.stack(np.meshgrid(x, x), axis=0) + bev_points = np.r_[bev_points, [np.zeros(bev_points.shape[1:]), + np.ones(bev_points.shape[1:])]].reshape(4, -1) + + for s in scenes: + town = OPV2V_TOWN_DICTIONARY[s] + bev_map = cv2.imread(f'../../carla/assets/maps/{town}.png') + sx, sy, _ = bev_map.shape + map_bound = map_bounds[town] + scene_dir = os.path.join(split_dir, s) + sdict = load_json(os.path.join(meta_in, f"{s}.json")) + for f, fdict in sdict.items(): + adict = fdict['agents'][fdict['meta']['ego_id']] + lidar_pose = adict['lidar']['0']['pose'] + lidar_file = os.path.join(split_dir, 
adict['lidar']['0']['filename']) + pcd = load_pcd(lidar_file)['xyz'] + transform = pose_to_transformation(lidar_pose) + cords = np.dot(transform, bev_points).T + xs = np.floor((cords[:, 0] - map_bound[0]) * pixels_per_meter).astype(int) + ys = np.floor((cords[:, 1] - map_bound[1]) * pixels_per_meter).astype(int) + xs = np.maximum(np.minimum(xs, sx - 1), 0) + ys = np.maximum(np.minimum(ys, sy - 1), 0) + road_mask = bev_map[xs, ys] / 255. + mask = road_mask[:, :2].any(axis=1) + + import matplotlib.pyplot as plt + plt.plot(bev_points[0][mask], bev_points[1][mask], '.g') + plt.plot(pcd[:, 0], pcd[:, 1], '.r', markersize=1) + plt.show() + plt.close() + break + + +def generate_roadline_reference_points(root_dir, meta_file): + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps/png" + roadline_path = f"{assets_path}/maps/roadline" + map_files = glob(os.path.join(map_path, '*.png')) + map_bounds = load_json(os.path.join(assets_path, 'map_bounds.json')) + + kernel = 3 + map_res = 0.2 + + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + bound = map_bounds[town] + bevmap = cv2.imread(mf) / 255. + bevmap = torch.from_numpy(bevmap[..., :2]).any(dim=-1).float() + bevmap[bevmap == 0] = -1 + filters = torch.ones(1, 1, kernel, kernel, device=bevmap.device) / (kernel ** 2 * 2) + road = torch.conv2d(bevmap[None, None], filters).squeeze() + mask = (road < 0.5) & (road > -0.5) + inds = torch.where(mask) + # scores = 1 - road[mask].abs() + coords = torch.stack(inds).T * map_res + 0.3 + coords[:, 0] = coords[:, 0] + bound[0] + coords[:, 1] = coords[:, 1] + bound[1] + coords = coords.numpy().astype(float) + coords.tofile(os.path.join(roadline_path, f'{town}.bin')) + + + # sdict = load_json(meta_file) + # scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + # scenario = os.path.basename(meta_file).split('.')[0] + # town = scene_maps[scenario] + # for fi, fdict in sdict.items(): + # if int(fi) % 10 != 1: + # continue + # for ai, adict in fdict['agents'].items(): + # lidar_pose = adict['lidar']['0']['pose'] + # lidar_file = os.path.join(root_dir, 'test', adict['lidar']['0']['filename']) + # pcd = load_pcd(lidar_file)['xyz'] + # transform = pose_to_transformation(lidar_pose) + # pcd = (transform @ np.concatenate([pcd, np.ones_like(pcd[:, :1])], axis=1).T).T + # + # fig = plt.figure(figsize=(16, 12)) + # ax = fig.add_subplot() + # ax.plot(coords[:, 0], coords[:, 1], '.g', markersize=1) + # ax.scatter(pcd[:, 0], pcd[:, 1], s=1, c=np.clip(pcd[:, 2], a_min=-3, a_max=1), cmap='jet') + # plt.savefig("/home/yys/Downloads/tmp.jpg") + # plt.close() + # continue + + +if __name__=="__main__": + generate_roadline_reference_points( + "/home/data/OPV2Va", + "/home/data/OPV2Va/meta/2021_08_23_17_22_47.json" + ) + + # gen_time_offsets("/media/yuan/luna/data/OPV2Vt") + # parse_speed_from_yamls("/home/data/OPV2V/temporal_dump/train/2021_08_16_22_26_54") + # opv2vt_to_cosense( + # "/media/yuan/luna/data/OPV2Vt/temporal_dump", + # "train", + # "/koko/OPV2V/temporal", + # "/koko/cosense3d/opv2v_temporal" + # ) + # opv2vt_to_cosense( + # "/home/data/OPV2V/temporal_dump", + # "test", + # "/home/data/OPV2V/temporal", + # "/home/data/cosense3d/opv2v_temporal" + # ) + # vis_frame_data() + # vis_cosense_scenario( + # "/home/data/cosense3d/opv2v_temporal/2021_08_16_22_26_54.json", + # "/home/data/OPV2V/temporal/train" + # ) + # update_velo( + # "/media/yuan/luna/data/OPV2Vt/meta/2021_08_16_22_26_54.json", + # ) + # update_bev_map( + # 
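# The boundary-extraction trick used in generate_roadline_reference_points above,
# shown in isolation on a toy 6x6 occupancy map (illustrative sketch, not part of
# the original patch): road cells are set to +1, free space to -1, and a 3x3 mean
# filter scaled by 1/(3*3*2) yields |response| < 0.5 only where the kernel covers
# both classes, i.e. along road boundaries.
import torch

occ = torch.zeros(6, 6)
occ[:, 3:] = 1.0                          # right half of the toy map is road
occ[occ == 0] = -1.0                      # free space -> -1, road stays +1
kernel = torch.ones(1, 1, 3, 3) / (3 ** 2 * 2)
resp = torch.conv2d(occ[None, None], kernel).squeeze()
boundary = (resp > -0.5) & (resp < 0.5)   # True only near the road/free-space border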
"/koko/OPV2V/temporal", + # "/koko/cosense3d/opv2vt", + # "/koko/cosense3d/opv2vt_bev", + # "train" + # ) + + + diff --git a/cosense3d/modules/__init__.py b/cosense3d/modules/__init__.py new file mode 100644 index 00000000..fd02566d --- /dev/null +++ b/cosense3d/modules/__init__.py @@ -0,0 +1,172 @@ +import torch +from torch import nn +from typing import List, Dict, Optional +import importlib + +from cosense3d.modules.utils.common import cat_coor_with_idx +from cosense3d.modules.utils.me_utils import ME + + +def build_module(module_cfg): + module_full_path=module_cfg['type'] + package, module_name = module_full_path.rsplit('.', 1) + module = importlib.import_module(f'cosense3d.modules.{package}') + cls_obj = getattr(module, module_name, None) + assert cls_obj is not None, f'Class \'{module_name}\' not found.' + try: + inst = cls_obj(**module_cfg) + except Exception as e: + raise Exception(f"{module_name}:{e.__repr__()}") + return inst + + +class BaseModule(nn.Module): + def __init__(self, gather_keys, scatter_keys, gt_keys=[], freeze=False, **kwargs): + super(BaseModule, self).__init__() + self.gather_keys = gather_keys + self.scatter_keys = scatter_keys + self.gt_keys = gt_keys + self.freeze = freeze + + def to_gpu(self, gpu_id): + self.to(gpu_id) + addtional_sync_func = nn.SyncBatchNorm.convert_sync_batchnorm + return None + + def freeze_parameters(self): + for param in self.parameters(): + param.requires_grad = False + + def forward(self, *args, **kwargs): + raise NotImplementedError + + def loss(self, *args, **kwargs): + """This must be implemented in head module.""" + # TODO: Create Head base module. + pass + + def prepare_vis_data(self): + pass + + def format_input(self, input: List): + pass + + def format_output(self, output, B): + pass + + def cat_data_from_list(self, input, key=None, pad_idx=False): + if key is not None: + data = [x[key] for x in input] + else: + data = input + if isinstance(data[0], torch.Tensor): + if pad_idx: + return cat_coor_with_idx(data) + else: + return torch.cat(data, dim=0) + else: + return data + + def stack_data_from_list(self, input, key=None): + if key is not None: + data = [x[key] for x in input] + else: + data = input + if isinstance(data[0], torch.Tensor): + return torch.stack(data, dim=0) + else: + return data + + + def cat_list(self, x_list, recursive=False): + """Concatenate sub_lists to one list""" + if len(x_list) > 0 and isinstance(x_list[0], list): + out = [] + for x in x_list: + out.extend(self.cat_list(x) if recursive else x) + return out + else: + return x_list + + def cat_dict_list(self, d_list: List[Dict]): + out_dict = {k:[] for k in d_list[0].keys()} + for k in d_list[0].keys(): + for d in d_list: + out_dict[k].extend(d[k]) + return out_dict + + def stack_dict_list(self, d_list: List[Dict]): + out_dict = {k:[] for k in d_list[0].keys()} + for k in d_list[0].keys(): + for d in d_list: + out_dict[k].append(d[k]) + out_dict[k] = torch.stack(out_dict[k], dim=0) + return out_dict + + def compose_imgs(self, img_list): + imgs = [img for x in img_list for img in x] + return torch.stack(imgs, dim=0) + + def compose_stensor(self, stensor_list, stride): + coor = [stensor[f'p{stride}']['coor'] for stensor in stensor_list] + coor = cat_coor_with_idx(coor) + feat = [stensor[f'p{stride}']['feat'] for stensor in stensor_list] + feat = torch.cat(feat, dim=0) + if 'ctr' in stensor_list[0][f'p{stride}']: + ctr = [stensor[f'p{stride}']['ctr'] for stensor in stensor_list] + ctr = torch.cat(ctr, dim=0) + else: + ctr = None + return coor, feat, ctr + + def 
decompose_stensor(self, res, N): + # decompose batch + for k, v in res.items(): + if isinstance(v, ME.SparseTensor): + coor, feat = v.decomposed_coordinates_and_features + ctr = None + elif isinstance(v, dict): + coor, feat, ctr = [], [], [] + for i in range(N): + mask = v['coor'][:, 0] == i + coor.append(v['coor'][mask, 1:]) + feat.append(v['feat'][mask]) + ctr.append(v['ctr'][mask]) + else: + raise NotImplementedError + res[k] = {'coor': coor, 'feat': feat, 'ctr': ctr} + + # compose result list + res_list = self.compose_result_list(res, N) + return res_list + + def compose_result_list(self, res, N): + """ + + :param res: dict(k:list) + :param N: + :return: + """ + keys = res.keys() + res_list = [] + for i in range(N): + cur_res = {} + for k, v in res.items(): + if isinstance(v, dict): + cur_res[k] = { + 'coor': v['coor'][i], + 'feat': v['feat'][i], + 'ctr': v['ctr'][i] + } + elif isinstance(v, list) or isinstance(v, torch.Tensor): + cur_res[k] = v[i] + else: + raise NotImplementedError + res_list.append(cur_res) + return res_list + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(gather_keys={self.gather_keys}, ' + repr_str += f'scatter_keys={self.scatter_keys})' + return repr_str \ No newline at end of file diff --git a/cosense3d/modules/backbone2d/__init__.py b/cosense3d/modules/backbone2d/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/modules/backbone2d/resnet.py b/cosense3d/modules/backbone2d/resnet.py new file mode 100644 index 00000000..1e95590a --- /dev/null +++ b/cosense3d/modules/backbone2d/resnet.py @@ -0,0 +1,758 @@ +# Modified from OpenMMLab. +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import List + +from torch import nn +import torch.utils.checkpoint as cp + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.norm import build_norm_layer +from cosense3d.modules.utils.conv import build_conv_layer +from cosense3d.modules.plugin import build_plugin_layer + + +class ResLayer(nn.Sequential): + """ + ResLayer to build ResNet style backbone. + """ + def __init__(self, + block: nn.Module, + inplanes: int, + planes: int, + num_blocks: int, + stride: int = 1, + avg_down: bool = False, + conv_cfg: dict = None, + norm_cfg: dict = dict(type='BN'), + downsample_first: bool = True, + **kwargs): + """ + + :param block: block used to build ResLayer. + :param inplanes: inplanes of block. + :param planes: planes of block. + :param num_blocks: number of blocks. + :param stride: stride of the first block. Default: 1 + :param avg_down: Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + :param conv_cfg: dictionary to construct and config conv layer. + Default: None + :param norm_cfg: dictionary to construct and config norm layer. + Default: dict(type='BN') + :param downsample_first: Downsample at the first block or last block. + False for Hourglass, True for ResNet. 
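# The batch-index pattern behind BaseModule.cat_data_from_list(pad_idx=True) and
# compose_stensor() above, written out as an explicit stand-in (the real helper is
# cat_coor_with_idx in cosense3d.modules.utils.common and may differ in details):
# each per-sample tensor gets a leading column holding its sample index before
# concatenation, so decompose_stensor() can split the batch again by masking on
# that column.
import torch

def cat_coor_with_idx_sketch(tensor_list):
    out = []
    for i, t in enumerate(tensor_list):
        idx = torch.full((len(t), 1), float(i), dtype=t.dtype)
        out.append(torch.cat([idx, t], dim=1))
    return torch.cat(out, dim=0)

a = torch.tensor([[1.0, 2.0, 3.0]])
b = torch.tensor([[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
batched = cat_coor_with_idx_sketch([a, b])   # (3, 4); column 0 is the sample index
sample_b = batched[batched[:, 0] == 1, 1:]   # recovers b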
Default: True + :param kwargs: + """ + self.block = block + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + if avg_down: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + inplanes = planes * block.expansion + for _ in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + else: # downsample_first=False is for HourglassModule + for _ in range(num_blocks - 1): + layers.append( + block( + inplanes=inplanes, + planes=inplanes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg) + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + """Bottleneck block for ResNet. 
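# Simplified stand-in for the residual path of BasicBlock above (illustration only;
# the real block builds its layers through build_conv_layer/build_norm_layer and can
# wrap the inner forward in gradient checkpointing): two 3x3 convs plus an identity
# shortcut, with a 1x1 projection whenever the stride or channel width changes.
import torch
from torch import nn

class TinyBasicBlock(nn.Module):
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        if stride != 1 or inplanes != planes:
            self.downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False),
                nn.BatchNorm2d(planes))

    def forward(self, x):
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        return self.relu(out + identity)

y = TinyBasicBlock(64, 128, stride=2)(torch.rand(1, 64, 32, 32))   # -> (1, 128, 16, 16)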
+ + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. + """ + super(Bottleneck, self).__init__(init_cfg) + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert plugins is None or isinstance(plugins, list) + if plugins is not None: + allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] + assert all(p['position'] in allowed_position for p in plugins) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.plugins = plugins + self.with_plugins = plugins is not None + + if self.with_plugins: + # collect plugins for conv1/conv2/conv3 + self.after_conv1_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv1' + ] + self.after_conv2_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv2' + ] + self.after_conv3_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv3' + ] + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + dcn, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_plugins: + self.after_conv1_plugin_names = self.make_block_plugins( + planes, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + planes, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + planes * self.expansion, self.after_conv3_plugins) + + def make_block_plugins(self, in_channels, plugins): + """make plugins for block. + + Args: + in_channels (int): Input channels of plugin. + plugins (list[dict]): List of plugins cfg to build. + + Returns: + list[str]: List of the names of plugin. 
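# Illustrative note (numbers are only examples): how the 'style' option of the
# Bottleneck above places the stride, and how the 4x expansion shows up in the
# output channel width.
style = 'pytorch'
stride = 2
conv1_stride, conv2_stride = (1, stride) if style == 'pytorch' else (stride, 1)
# 'pytorch': the 3x3 conv (conv2) carries the stride; 'caffe': the first 1x1 conv does.

planes = 128
out_channels = planes * 4   # Bottleneck.expansion == 4 -> 512 output channels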
+ """ + assert isinstance(plugins, list) + plugin_names = [] + for plugin in plugins: + plugin = plugin.copy() + name, layer = build_plugin_layer( + plugin, + in_channels=in_channels, + postfix=plugin.pop('postfix', '')) + assert not hasattr(self, name), f'duplicate plugin {name}' + self.add_module(name, layer) + plugin_names.append(name) + return plugin_names + + def forward_plugin(self, x, plugin_names): + out = x + for name in plugin_names: + out = getattr(self, name)(out) + return out + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: normalization layer after the third convolution layer""" + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class ResNet(BaseModule): + """ResNet backbone. Modified from mmdet. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + stem_channels (int | None): Number of stem channels. If not specified, + it will be the same as `base_channels`. Default: None. + base_channels (int): Number of base channels of res layer. Default: 64. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Resnet stages. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - position (str, required): Position inside block to insert + plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
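# What the with_cp branches in the block forwards above do, in isolation
# (illustrative sketch, not part of the original patch): torch.utils.checkpoint
# re-runs the wrapped function during the backward pass instead of storing its
# intermediate activations, trading extra compute for lower peak memory.
import torch
import torch.utils.checkpoint as cp

def _inner(x):
    return torch.relu(x * 2.0)

x = torch.rand(4, 8, requires_grad=True)
out = cp.checkpoint(_inner, x)   # same values as _inner(x), lower peak memory
out.sum().backward()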
+ zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=None, + base_channels=64, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + with_cp=False, + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + self.zero_init_residual = zero_init_residual + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + block = self.arch_settings[depth][0] + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + + self.depth = depth + if stem_channels is None: + stem_channels = base_channels + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = stem_channels + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + if plugins is not None: + stage_plugins = self.make_stage_plugins(plugins, i) + else: 
+ stage_plugins = None + planes = base_channels * 2**i + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=planes, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=stage_plugins, + init_cfg=block_init_cfg) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * base_channels * 2**( + len(self.stage_blocks) - 1) + + def make_stage_plugins(self, plugins: List[dict], stage_idx) -> List[dict]: + """ + Make plugins for ResNet ``stage_idx`` th stage. + + Currently we support to insert ``context_block``, + ``empirical_attention_block``, ``nonlocal_block`` into the backbone + like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of + Bottleneck. + + An example of plugins format could be: + + Examples: + >>> plugins=[ + ... dict(cfg=dict(type='xxx', arg1='xxx'), + ... stages=(False, True, True, True), + ... position='after_conv2'), + ... dict(cfg=dict(type='yyy'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='1'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='2'), + ... stages=(True, True, True, True), + ... position='after_conv3') + ... ] + >>> self = ResNet(depth=18) + >>> stage_plugins = self.make_stage_plugins(plugins, 0) + >>> assert len(stage_plugins) == 3 + + Suppose ``stage_idx=0``, the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->conv3->yyy->zzz1->zzz2 + + Suppose 'stage_idx=1', the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 + + If stages is missing, the plugin would be applied to all stages. + + :param plugins: List of plugins cfg to build. The postfix is + required if multiple same type plugins are inserted. + :param stage_idx: Index of stage to build. + :return: Plugins for current stage. 
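+
+        Note that only the ``stages`` entry is stripped here; every returned dict
+        keeps its ``cfg`` and ``position`` fields, which are consumed later by the
+        block's ``make_block_plugins``.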
+ """ + stage_plugins = [] + for plugin in plugins: + plugin = plugin.copy() + stages = plugin.pop('stages', None) + assert stages is None or len(stages) == self.num_stages + # whether to insert plugin into current stage + if stages is None or stages[stage_idx]: + stage_plugins.append(plugin) + + return stage_plugins + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels)[1], + nn.ReLU(inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + x = self.maxpool(x) + outs = [] + + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() \ No newline at end of file diff --git a/cosense3d/modules/backbone2d/resnet_encoder.py b/cosense3d/modules/backbone2d/resnet_encoder.py new file mode 100644 index 00000000..862a82a4 --- /dev/null +++ b/cosense3d/modules/backbone2d/resnet_encoder.py @@ -0,0 +1,91 @@ +import torch +import torch.nn as nn +import torchvision.models as models + +from einops import rearrange + +from cosense3d.modules import BaseModule +from cosense3d.modules.plugin import build_plugin_module +from cosense3d.modules.utils.positional_encoding import img_locations + + +class ResnetEncoder(BaseModule): + """Resnet family to encode image.""" + def __init__(self, num_layers, feat_indices, out_index, img_size, + 
neck=None, **kwargs): + super(ResnetEncoder, self).__init__(**kwargs) + + self.num_layers = num_layers + self.feat_indices = sorted(feat_indices) + self.out_index = out_index + self.img_size = img_size + indices = (out_index, ) if isinstance(out_index, int) else out_index + self.strides = [2 ** (idx + 1) for idx in indices] + self.feat_sizes = [(img_size[0] // stride, img_size[1] // stride) + for stride in self.strides] + if 'img_coor' in self.scatter_keys: + self.img_locations = [nn.Parameter( + img_locations(img_size, feat_size), requires_grad=False) + for feat_size in self.feat_sizes] + self.img_locations = nn.ParameterList(self.img_locations) + + resnet = getattr(models, f'resnet{self.num_layers}', None) + + if resnet is None: + raise ValueError(f"{self.num_layers} is not a valid number of resnet ""layers") + + resnet_weights = getattr(models, f"ResNet{self.num_layers}_Weights") + self.encoder = resnet(weights=resnet_weights.DEFAULT) + self.neck = build_plugin_module(neck) if neck is not None else None + + def forward(self, input_images, **kwargs): + num_imgs = [len(x) for x in input_images] + imgs = self.compose_imgs(input_images) + b, h, w, c = imgs.shape + + # b, h, w, c -> b, c, h, w + imgs = imgs.permute(0, 3, 1, 2).contiguous() + + x = self.encoder.conv1(imgs) + x = self.encoder.bn1(x) + x = self.encoder.relu(x) + x = self.encoder.maxpool(x) + + out = [] + for i in range(1, 5): + x = getattr(self.encoder, f'layer{i}')(x) + if i in self.feat_indices: + out.append(x) + + if self.neck is not None: + out = self.neck(out) + if isinstance(self.out_index, tuple): + out = [out[self.feat_indices.index(i)] for i in self.out_index] + else: + out = out[self.feat_indices.index(self.out_index)] + return self.format_output(out, num_imgs) + + def format_output(self, output, num_imgs): + ptr = 0 + output_list = [] + coor_list = [] + for n in num_imgs: + if isinstance(output, (tuple, list)): + output_list.append(tuple(out[ptr:ptr+n] for out in output)) + else: + output_list.append(output[ptr:ptr + n]) + if 'img_coor' in self.scatter_keys: + assert hasattr(self, 'img_locations') + img_locs = [locs.unsqueeze(0).repeat(n, 1, 1, 1) + for locs in self.img_locations] + if isinstance(self.out_index, int): + img_locs = img_locs[0] + coor_list.append(img_locs) + ptr += n + out_dict = {} + if 'img_feat' in self.scatter_keys: + out_dict['img_feat'] = output_list + if 'img_coor' in self.scatter_keys: + out_dict['img_coor'] = coor_list + + return out_dict \ No newline at end of file diff --git a/cosense3d/modules/backbone3d/__init__.py b/cosense3d/modules/backbone3d/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/modules/backbone3d/mink_unet.py b/cosense3d/modules/backbone3d/mink_unet.py new file mode 100644 index 00000000..2801a4a7 --- /dev/null +++ b/cosense3d/modules/backbone3d/mink_unet.py @@ -0,0 +1,206 @@ +import torch +from torch import nn +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.common import * +from cosense3d.modules.utils.me_utils import * + + +class MinkUnet(BaseModule): + QMODE = ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE + def __init__(self, + data_info, + stride, + in_dim, + d=3, + kernel_size_layer1=3, + enc_dim=32, + cache_strides=None, + floor_height=0, + height_compression=None, + compression_kernel_size_xy=1, + to_dense=False, + dist=False, + **kwargs): + super(MinkUnet, self).__init__(**kwargs) + update_me_essentials(self, data_info) + self.stride = stride + self.in_dim = in_dim + self.enc_dim = enc_dim + 
self.floor_height = floor_height + self.to_dense = to_dense + self.height_compression = height_compression + self.compression_kernel_size_xy = compression_kernel_size_xy + self.d = d + self.lidar_range_tensor = nn.Parameter(torch.Tensor(self.lidar_range), requires_grad=False) + # For determine batchnorm type: if the model is trained on multiple GPUs with ME.MinkowskiBatchNorm, + # the BN would perform differently in eval mode because the running_mean and running_var would be + # different to training mode, this is caused by different number of tracked batches, therefore if + # ditributed training is used for this model, either ME.MinkowskiSyncBatchNorm should be used, or + # the running mean and var should be adapted. + # TODO: adapt running mean and var in inference mode if model is trained with DDP + self.dist = dist + if cache_strides is None: + self.cache_strides = [stride] + self.max_resolution = stride + else: + self.max_resolution = min(cache_strides) + self.cache_strides = cache_strides + self._init_unet_layers(kernel_size_layer1) + if height_compression is not None: + self._init_height_compression_layers(height_compression) + self.init_weights() + + def _init_unet_layers(self, kernel_size_layer1=3): + self.enc_mlp = linear_layers([self.in_dim * 2, 16, self.enc_dim], norm='LN') + kernel_conv1 = [kernel_size_layer1,] * min(self.d, 3) + kernel = [3,] * min(self.d, 3) + if self.d == 4: + kernel = kernel + [1,] + kernel_conv1 = kernel + [1,] + + kwargs = {'d': self.d, 'bn_momentum': 0.1} + self.conv1 = minkconv_conv_block(self.enc_dim, self.enc_dim, kernel_conv1, + 1, **kwargs) + self.conv2 = get_conv_block([self.enc_dim, self.enc_dim, self.enc_dim], kernel, **kwargs) + self.conv3 = get_conv_block([self.enc_dim, self.enc_dim * 2, self.enc_dim * 2], kernel, **kwargs) + self.conv4 = get_conv_block([self.enc_dim * 2, self.enc_dim * 4, self.enc_dim * 4], kernel, **kwargs) + + if self.max_resolution <= 4: + self.trconv4 = get_conv_block([self.enc_dim * 4, self.enc_dim * 2, self.enc_dim * 2], kernel, tr=True, **kwargs) + if self.max_resolution <= 2: + self.trconv3 = get_conv_block([self.enc_dim * 4, self.enc_dim * 2, self.enc_dim * 2], kernel, tr=True, **kwargs) + if self.max_resolution <= 1: + self.trconv2 = get_conv_block([self.enc_dim * 3, self.enc_dim * 2, self.enc_dim], kernel, tr=True, **kwargs) + self.out_layer = minkconv_layer(self.enc_dim * 2, self.enc_dim, kernel, 1, d=self.d) + + def _init_height_compression_layers(self, planes): + self.stride_size_dict = {} + for k, v in planes.items(): + self.stride_size_dict[int(k[1])] = self.grid_size(int(k[1])) + layers = [] + steps = v['steps'] + channels = v['channels'] + for i, s in enumerate(steps): + kernel = [self.compression_kernel_size_xy] * 2 + [s] + stride = [1] * 2 + [s] + layers.append( + minkconv_conv_block(channels[i], channels[i+1], + kernel, stride, self.d, 0.1) + ) + layers = nn.Sequential(*layers) + setattr(self, f'{k}_compression', layers) + + def init_weights(self): + for n, p in self.named_parameters(): + if ('mlp' in n and 'weight' in n) or 'kernel' in n: + if p.ndim == 1: + continue + nn.init.xavier_uniform_(p) + + def to_gpu(self, gpu_id): + self.to(gpu_id) + return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm + + def forward(self, points: list, **kwargs): + res = self.forward_unet(points, **kwargs) + + if self.height_compression is not None: + res = self.forward_height_compression(res) + + res = self.format_output(res, len(points)) + return res + + def forward_unet(self, points, **kwargs): + N = len(points) + 
points = [torch.cat([torch.ones_like(pts[:, :1]) * i, pts], dim=-1 + ) for i, pts in enumerate(points)] + x = prepare_input_data(points, self.voxel_size, self.QMODE, self.floor_height, + self.d, self.in_dim) + x1, norm_points_p1, points_p1, count_p1, pos_embs = voxelize_with_centroids( + x, self.enc_mlp, self.lidar_range_tensor) + + # convs + x1 = self.conv1(x1) + x2 = self.conv2(x1) + x4 = self.conv3(x2) + p8 = self.conv4(x4) + p8_cat = p8 + + # transposed convs + if self.max_resolution <= 4: + p4 = self.trconv4(p8) + p4_cat = ME.cat(x4, p4) + if self.max_resolution <= 2: + p2 = self.trconv3(p4_cat) + p2_cat = ME.cat(x2, p2) + if self.max_resolution <= 1: + p1 = self.trconv2(p2_cat) + p1_cat = self.out_layer(ME.cat(x1, p1)) + if self.max_resolution == 0: + p0 = devoxelize_with_centroids(p1, x, pos_embs) + p0_cat = {'coor': torch.cat(points, dim=0), 'feat': p0} + + vars = locals() + res = {f'p{k}': vars[f'p{k}_cat'] for k in self.cache_strides} + + tmp = x4.F.max(dim=0).values + return res + + def forward_height_compression(self, res): + for stride in self.stride_size_dict.keys(): + out_tensor = getattr(self, f'p{stride}_compression')(res[f'p{stride}']) + assert len(out_tensor.C[:, 3].unique()) == 1, \ + (f"height is not fully compressed. " + f"Unique z coords: {','.join([str(x.item()) for x in out_tensor.C[:, 3].unique()])}") + if self.to_dense: + out_tensor = self.stensor_to_dense(out_tensor).permute(0, 3, 1, 2) + res[f'p{stride}'] = out_tensor + else: + ctr = indices2metric(out_tensor.C, self.voxel_size) + res[f'p{stride}'] = {'coor': out_tensor.C[:, :3], 'feat': out_tensor.F, 'ctr': ctr[:, 1:3]} + return res + + def format_output(self, res, N): + out_dict = {self.scatter_keys[0]: self.decompose_stensor(res, N)} + return out_dict + + def stensor_to_dense(self, stensor): + mask, indices = self.valid_coords(stensor) + b = int(stensor.C[:, 0].max()) + 1 + d = stensor.F.shape[1] + features = stensor.F[mask].view(-1, d) + s = self.stride_size_dict[stensor.tensor_stride[0]] + dtensor = features.new_zeros((b, s[0], s[1], d)) + dtensor[indices[0], indices[1], indices[2]] = features + return dtensor + + def valid_coords(self, stensor): + stride = stensor.tensor_stride + s = self.stride_size_dict[stride[0]] + # remove voxels that are outside range + xi = torch.div(stensor.C[:, 1], stride[0], rounding_mode='floor') + s[0] / 2 + yi = torch.div(stensor.C[:, 2], stride[1], rounding_mode='floor') + s[1] / 2 + + mask = (xi >= 0) * (xi < s[0]) * (yi >= 0) * (yi < s[1]) + indices = (stensor.C[:, 0][mask].long(), + xi[mask].long(), + yi[mask].long() + ) + # if the backbone uses 4d convs, last dim is time + if stensor.C.shape[1] == 5: + ti = stensor.C[:, 4] + mask = mask * (ti >= 0) * (ti < self.seq_len) + indices = indices + ti[mask].long() + return mask, indices + + def grid_size(self, stride): + x_range = self.lidar_range[3] - self.lidar_range[0] + y_range = self.lidar_range[4] - self.lidar_range[1] + x_size = int(x_range / self.voxel_size[0]) // stride + y_size = int(y_range / self.voxel_size[1]) // stride + return (x_size, y_size) + + + + + diff --git a/cosense3d/modules/backbone3d/pillar_bev.py b/cosense3d/modules/backbone3d/pillar_bev.py new file mode 100644 index 00000000..a68ab004 --- /dev/null +++ b/cosense3d/modules/backbone3d/pillar_bev.py @@ -0,0 +1,156 @@ +import torch +from torch import nn +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.utils.common import * +from cosense3d.modules.utils.me_utils import * + + +class PillarBEV(BaseModule): + def __init__(self, + 
in_channels, + layer_nums, + layer_strides, + downsample_channels, + upsample_channels, + upsample_strides, + voxel_generator, + pillar_encoder, + bev_shrinker=None, + bev_compressor=None, + **kwargs): + super(PillarBEV, self).__init__(**kwargs) + self.pillar_encoder = plugin.build_plugin_module(pillar_encoder) + self.voxel_generator = plugin.build_plugin_module(voxel_generator) + self.grid_size = self.voxel_generator.grid_size + + if bev_shrinker is not None: + self.bev_shrinker = plugin.build_plugin_module(bev_shrinker) + if bev_compressor is not None: + self.bev_compressor = plugin.build_plugin_module(bev_compressor) + + num_levels = len(layer_nums) + c_in_list = [in_channels, *downsample_channels[:-1]] + + self.blocks = nn.ModuleList() + self.deblocks = nn.ModuleList() + + for idx in range(num_levels): + cur_layers = [ + nn.ZeroPad2d(1), + nn.Conv2d( + c_in_list[idx], downsample_channels[idx], kernel_size=3, + stride=layer_strides[idx], padding=0, bias=False + ), + nn.BatchNorm2d(downsample_channels[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ] + for k in range(layer_nums[idx]): + cur_layers.extend([ + nn.Conv2d(downsample_channels[idx], downsample_channels[idx], + kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(downsample_channels[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ]) + + self.blocks.append(nn.Sequential(*cur_layers)) + if len(upsample_strides) > 0: + stride = upsample_strides[idx] + if stride >= 1: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d( + downsample_channels[idx], upsample_channels[idx], + upsample_strides[idx], + stride=upsample_strides[idx], bias=False + ), + nn.BatchNorm2d(upsample_channels[idx], + eps=1e-3, momentum=0.01), + nn.ReLU() + )) + else: + stride = np.round(1 / stride).astype(np.int) + self.deblocks.append(nn.Sequential( + nn.Conv2d( + downsample_channels[idx], upsample_channels[idx], + stride, + stride=stride, bias=False + ), + nn.BatchNorm2d(upsample_channels[idx], eps=1e-3, + momentum=0.01), + nn.ReLU() + )) + + c_in = sum(upsample_channels) + if len(upsample_strides) > num_levels: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], + stride=upsample_strides[-1], bias=False), + nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01), + nn.ReLU(), + )) + + self.num_bev_features = c_in + + def forward(self, points: list, **kwargs): + N = len(points) + voxels, coords, num_points = self.voxel_generator([x[:, :4] for x in points]) + coords = self.cat_data_from_list(coords, pad_idx=True) + voxels = self.cat_data_from_list(voxels) + num_points = self.cat_data_from_list(num_points) + pillar_features = self.pillar_encoder(voxels, coords, num_points) + bev_feat = self.to_dense_bev(coords, pillar_features, N) + + ups = [] + ret_dict = {} + x = bev_feat + + for i in range(len(self.blocks)): + x = self.blocks[i](x) + + stride = int(bev_feat.shape[2] / x.shape[2]) + ret_dict[f'p{stride}'] = x + + if len(self.deblocks) > 0: + ups.append(self.deblocks[i](x)) + else: + ups.append(x) + + if len(ups) > 1: + x = torch.cat(ups, dim=1) + elif len(ups) == 1: + x = ups[0] + + if len(self.deblocks) > len(self.blocks): + x = self.deblocks[-1](x) + + if hasattr(self, 'bev_shrinker'): + x = self.bev_shrinker(x) + if hasattr(self, 'bev_compressor'): + x = self.bev_compressor(x) + + out = {self.scatter_keys[0]: x} + if 'multi_scale_bev_feat' in self.scatter_keys: + stride = int(bev_feat.shape[2] / x.shape[2]) + ret_dict[f'p{stride}'] = x + out['multi_scale_bev_feat'] = [{k: v[i] for k, v in ret_dict.items()} for i in 
range(N)] + return out + + def format_output(self, res, N): + out_dict = {self.scatter_keys[0]: self.decompose_stensor(res, N)} + return out_dict + + def to_dense_bev(self, coor, feat, N): + bev_feat = torch.zeros(N, + self.grid_size[2], + self.grid_size[1], + self.grid_size[0], + feat.shape[-1], + dtype=feat.dtype, + device=feat.device) + coor = coor.long() + bev_feat[coor[:, 0], coor[:, 1], coor[:, 2], coor[:, 3]] = feat + bev_feat = bev_feat.permute(0, 4, 1, 2, 3) + assert bev_feat.shape[2] == 1 + return bev_feat.squeeze(dim=2) + + + diff --git a/cosense3d/modules/backbone3d/spconv.py b/cosense3d/modules/backbone3d/spconv.py new file mode 100644 index 00000000..04c49b5f --- /dev/null +++ b/cosense3d/modules/backbone3d/spconv.py @@ -0,0 +1,194 @@ +from functools import partial +from typing import List + +import spconv +import torch +import torch.nn as nn + +from spconv.pytorch import SparseSequential, SubMConv3d, SparseConv3d, SparseInverseConv3d, SparseConvTensor +from cosense3d.modules import BaseModule, plugin + + +def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, + conv_type='subm', norm_fn=None): + + if conv_type == 'subm': + conv = SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key) + elif conv_type == 'spconv': + conv = SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key) + elif conv_type == 'inverseconv': + conv = SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False) + else: + raise NotImplementedError + + m = SparseSequential( + conv, + norm_fn(out_channels), + nn.ReLU(), + ) + + return m + + +class Spconv(BaseModule): + def __init__(self, + in_channels, + out_channels, + voxel_generator, + voxel_encoder, + bev_neck=None, + bev_compressor=None, + cache_coords=True, + cache_strides=[1, 2, 4, 8], + **kwargs): + super(Spconv, self).__init__(**kwargs) + self.num_point_features = out_channels + self.cache_keys = [] + if cache_coords: + self.cache_keys.append('coords') + for s in cache_strides: + self.cache_keys.append(f'p{s}') + self.voxel_generator = plugin.build_plugin_module(voxel_generator) + self.voxel_encoder = plugin.build_plugin_module(voxel_encoder) + self.grid_size = self.voxel_generator.grid_size + if bev_neck is not None: + self.bev_neck = plugin.build_plugin_module(bev_neck) + if bev_compressor is not None: + self.bev_compressor = plugin.build_plugin_module(bev_compressor) + self._init_layers(in_channels, out_channels) + + def _init_layers(self, in_channels, out_channels): + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + + self.sparse_shape = self.grid_size.tolist()[::-1] + self.sparse_shape[0] += 1 + + self.conv_input = SparseSequential( + SubMConv3d(in_channels, 16, 3, + padding=1, bias=False, indice_key='subm1'), + norm_fn(16), + nn.ReLU(), + ) + block = post_act_block + + self.conv1 = SparseSequential( + block(16, 16, 3, + norm_fn=norm_fn, padding=1, indice_key='subm1'), + ) + + self.conv2 = SparseSequential( + # [1600, 1408, 41] <- [800, 704, 21] + block(16, 32, 3, + norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + ) + + self.conv3 = SparseSequential( + # [800, 704, 21] <- [400, 352, 11] + block(32, 64, 3, + norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), + 
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + ) + + self.conv4 = SparseSequential( + # [400, 352, 11] <- [200, 176, 5] + block(64, 64, 3, + norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + ) + + last_pad = 0 + self.conv_out = SparseSequential( + # [200, 150, 5] -> [200, 150, 2] + SparseConv3d(64, out_channels, (3, 1, 1), + stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), + norm_fn(out_channels), + nn.ReLU(), + ) + + self.backbone_channels = { + 'x_conv1': 16, + 'x_conv2': 32, + 'x_conv3': 64, + 'x_conv4': 64, + 'out': out_channels + } + + def forward(self, points: list, **kwargs): + B = len(points) + res_dict = {} + voxels, coords, num_points = self.voxel_generator(x[:, :4] for x in points) + res_dict['coords'] = coords + coords = self.cat_data_from_list(coords, pad_idx=True) + voxels = self.cat_data_from_list(voxels) + num_points = self.cat_data_from_list(num_points) + voxel_features = self.voxel_encoder(voxels, num_points) + + input_sp_tensor = SparseConvTensor( + features=voxel_features, + indices=coords.int(), + spatial_shape=self.sparse_shape, + batch_size=B + ) + + x = self.conv_input(input_sp_tensor) + + res_dict['p1'] = self.conv1(x) + res_dict['p2'] = self.conv2(res_dict['p1']) + res_dict['p4'] = self.conv3(res_dict['p2'] ) + res_dict['p8'] = self.conv4(res_dict['p4'] ) + + res_dict['p8_out'] = self.conv_out(res_dict['p8']) + res_dict['bev'] = self.to_dense(res_dict['p8_out']) + + multi_scale_bev_feat = {} + if hasattr(self, 'bev_neck'): + res = self.bev_neck(res_dict['bev']) + if isinstance(res, tuple): + res_dict['bev'] = res[0] + multi_scale_bev_feat = res[1] + else: + res_dict['bev'] = res + if hasattr(self, 'bev_compressor'): + res_dict['bev'] = self.bev_compressor(res_dict['bev']) + + out_dict = {} + if 'voxel_feat' in self.scatter_keys: + out_dict['voxel_feat'] = self.format_output( + {k: res_dict[k] for k in self.cache_keys}, B) + if 'bev_feat' in self.scatter_keys: + out_dict['bev_feat'] = res_dict['bev'] + if 'multi_scale_bev_feat' in self.scatter_keys: + multi_scale_bev_feat[1] = res_dict['bev'] + out_dict['multi_scale_bev_feat'] = \ + [{f'p{k * 8}': v[i] for k, v in multi_scale_bev_feat.items()} for i in range(B)] + return out_dict + + def format_output(self, out_dict, B): + out_list = [] + for i in range(B): + new_dict = {} + for k, v in out_dict.items(): + if isinstance(v, list) or isinstance(v, torch.Tensor): + new_dict[k] = v[i] + else: + coor = v.indices + feat = v.features.contiguous() + mask = coor[:, 0] == i + new_dict[k] = {'coor': coor[mask, 1:], 'feat': feat[mask]} + out_list.append(new_dict) + + return out_list + + def to_dense(self, stensor): + spatial_features = stensor.dense() + N, C, D, H, W = spatial_features.shape + bev_featrues = spatial_features.reshape(N, C * D, H, W) + return bev_featrues.contiguous() + + + diff --git a/cosense3d/modules/backbone3d/voxelnet.py b/cosense3d/modules/backbone3d/voxelnet.py new file mode 100644 index 00000000..5e417190 --- /dev/null +++ b/cosense3d/modules/backbone3d/voxelnet.py @@ -0,0 +1,85 @@ +import torch +from torch import nn +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.utils.common import * +from cosense3d.modules.utils.me_utils import * + + +class VoxelNet(BaseModule): + def __init__(self, + 
voxel_generator, + voxel_encoder, + cml, + neck=None, + bev_compressor=None, + **kwargs): + super(VoxelNet, self).__init__(**kwargs) + self.voxel_generator = plugin.build_plugin_module(voxel_generator) + self.voxel_encoder = plugin.build_plugin_module(voxel_encoder) + self.grid_size = self.voxel_generator.grid_size + self.cml = plugin.build_plugin_module(cml) + + if neck is not None: + self.neck = plugin.build_plugin_module(neck) + if bev_compressor is not None: + self.bev_compressor = plugin.build_plugin_module(bev_compressor) + + def forward(self, points: list, **kwargs): + N = len(points) + voxels, coords, num_points = self.voxel_generator(points) + coords = self.cat_data_from_list(coords, pad_idx=True) + voxels = self.cat_data_from_list(voxels) + num_points = self.cat_data_from_list(num_points) + voxel_features = self.voxel_encoder(voxels, coords, num_points) + if self.cml.dense: + voxel_features = self.to_dense(coords, voxel_features, N) + voxel_features = self.cml(voxel_features) + else: + voxel_features, voxel_coords = self.cml(voxel_features, coords) + voxel_features = self.to_dense(voxel_coords, voxel_features, N, filter_range=True) + # 3d to 2d feature + bev_feat = voxel_features.flatten(1, 2) + x = bev_feat + ret_dict = {} + if hasattr(self, 'neck'): + res = self.neck(x) + if isinstance(res, torch.Tensor): + x = res + else: + x = res[0] + ret_dict = res[1] + if hasattr(self, 'bev_compressor'): + x = self.bev_compressor(x) + + out = {self.scatter_keys[0]: x} + if 'multi_scale_bev_feat' in self.scatter_keys: + stride = int(bev_feat.shape[2] / x.shape[2]) + ret_dict[f'p{stride}'] = x + out['multi_scale_bev_feat'] = [{k: v[i] for k, v in ret_dict.items()} for i in range(N)] + + return out + + def to_dense(self, coor, feat, N, filter_range=False): + if filter_range: + strides = self.cml.out_strides.cpu() + grid_size = torch.ceil(self.grid_size[[2, 1, 0]] / strides).int().tolist() + mask = (coor[:, 1] >= 0) & (coor[:, 1] < grid_size[0]) & \ + (coor[:, 2] >= 0) & (coor[:, 2] < grid_size[1]) & \ + (coor[:, 3] >= 0) & (coor[:, 3] < grid_size[2]) + coor, feat = coor[mask], feat[mask] + else: + grid_size = self.grid_size[[2, 1, 0]].tolist() + bev_feat = torch.zeros(N, + grid_size[0], + grid_size[1], + grid_size[2], + feat.shape[-1], + dtype=feat.dtype, + device=feat.device) + coor = coor.long() + bev_feat[coor[:, 0], coor[:, 1], coor[:, 2], coor[:, 3]] = feat + + return bev_feat.permute(0, 4, 1, 2, 3) + + + diff --git a/cosense3d/modules/fusion/__init__.py b/cosense3d/modules/fusion/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/modules/fusion/attn_fusion.py b/cosense3d/modules/fusion/attn_fusion.py new file mode 100644 index 00000000..7524fa05 --- /dev/null +++ b/cosense3d/modules/fusion/attn_fusion.py @@ -0,0 +1,94 @@ +import warnings +from typing import Dict + +import torch + +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.plugin.attn import ScaledDotProductAttention +from cosense3d.modules.utils.me_utils import update_me_essentials +from cosense3d.modules.utils.common import cat_coor_with_idx + + +class SparseAttentionFusion(BaseModule): + def __init__(self, stride, in_channels, **kwargs): + super(SparseAttentionFusion, self).__init__(**kwargs) + if isinstance(stride, int): + self.stride = [stride] + else: + self.stride = stride + self.attn = ScaledDotProductAttention(in_channels) + + def forward(self, ego_feats, coop_feats=None, **kwargs): + fused_feat = [] + fuse_key = self.gather_keys[0] + for ego_feat, coop_feat in 
zip(ego_feats, coop_feats): + batch_feat = {} + for stride in self.stride: + coor, feat, ctr = self.fuse_feature_at_stride(ego_feat, coop_feat, stride, fuse_key) + batch_feat[f'p{stride}'] = {'coor': coor, 'feat': feat, 'ctr': ctr} + fused_feat.append(batch_feat) + return self.format_output(fused_feat) + + def format_output(self, output): + return {self.scatter_keys[0]: output} + + def fuse_feature_at_stride(self, ego_feat, coop_feat, stride, fuse_key): + coor = [ego_feat[f'p{stride}']['coor']] + feat = [ego_feat[f'p{stride}']['feat']] + ctr = [ego_feat[f'p{stride}']['ctr']] + if len(coop_feat) == 0: + return coor[0], feat[0], ctr[0] + else: + # fuse coop to ego + for cpfeat in coop_feat.values(): + if fuse_key not in cpfeat: + continue + cpm = cpfeat[fuse_key][f'p{stride}'] + coor.append(cpm['coor']) + feat.append(cpm['feat']) + ctr.append(cpm['ctr']) + + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + ctr_cat = torch.cat(ctr, dim=0) + uniq_coor, reverse_inds = torch.unique(coor_cat[:, 1:], dim=0, + return_inverse=True) + uniq_ctr = ctr_cat[reverse_inds.unique()] + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_zeros(len(uniq_coor), feat_cat.shape[-1]) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + q = feats_pad[0].unsqueeze(1) # num_pts, 1, d + kv = torch.stack(feats_pad[1:], dim=1) # num_pts, num_coop_cav, d + feat_out = self.attn(q, kv, kv).squeeze(1) + return uniq_coor, feat_out, uniq_ctr + + +class DenseAttentionFusion(BaseModule): + def __init__(self, feature_dim, neck=None, **kwargs): + super(DenseAttentionFusion, self).__init__(**kwargs) + self.attn = ScaledDotProductAttention(feature_dim) + if neck is not None: + self.neck = plugin.build_plugin_module(neck) + + def forward(self, ego_feats, coop_feats=None, **kwargs): + out = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + feat = [ego_feat] + for cpfeat in coop_feat.values(): + if 'bev_feat' not in cpfeat: + continue + feat.append(cpfeat['bev_feat']) + xx = torch.stack(feat, dim=0) + N, C, H, W = xx.shape + xx = xx.view(N, C, -1).permute(2, 0, 1) + h = self.attn(xx, xx, xx) + h = h.permute(1, 2, 0).view(N, C, H, W)[0, ...] 
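+            # xx was flattened to (H*W, N, C), so the attention above mixes the N
+            # stacked agent features of every BEV cell; only the ego slice
+            # (index 0) is kept as the fused BEV map of this sample.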
+ out.append(h) + out = torch.stack(out) + if hasattr(self, 'neck'): + out = self.neck(out) + return {self.scatter_keys[0]: out} + diff --git a/cosense3d/modules/fusion/box_fusion.py b/cosense3d/modules/fusion/box_fusion.py new file mode 100644 index 00000000..7fbc96be --- /dev/null +++ b/cosense3d/modules/fusion/box_fusion.py @@ -0,0 +1,184 @@ +import torch +import torch.nn as nn + +from cosense3d.modules import BaseModule +from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu +pi = 3.141592653 + + +def limit_period(val, offset=0.5, period=2 * pi): + return val - torch.floor(val / period + offset) * period + + +class BoxFusion(BaseModule): + def __init__(self, lidar_range, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + + def forward(self, ego_preds, coop_preds, memory, global_times, **kwargs): + out_dict = {'box': [], 'scr': [], 'lbl': [], 'time': [], 'idx': []} + for ego_pred, coop_pred, mem, global_time in zip(ego_preds, coop_preds, memory, global_times): + boxes = [ego_pred['preds']['box']] + scores = [ego_pred['preds']['scr']] + labels = [ego_pred['preds']['lbl']] + times = [ego_pred['preds']['time']] + if len(mem) > 0: + boxes.append(mem['preds']['box']) + scores.append(mem['preds']['scr']) + labels.append(mem['preds']['lbl']) + times.append(mem['preds']['time']) + for cppred in coop_pred.values(): + boxes.append(cppred['detection_local']['preds']['box']) + scores.append(cppred['detection_local']['preds']['scr']) + labels.append(cppred['detection_local']['preds']['lbl']) + times.append(cppred['detection_local']['preds']['time']) + clusters_boxes, clusters_scores, cluster_labels, cluster_times = \ + self.clustering(boxes, scores, labels, times, global_time) + boxes_fused, scores_fused, labels_fused, times_fused = self.cluster_fusion( + clusters_boxes, clusters_scores, cluster_labels, cluster_times, global_time) + out_dict['box'].append(boxes_fused) + out_dict['scr'].append(scores_fused) + out_dict['lbl'].append(labels_fused) + out_dict['time'].append(times_fused) + out_dict['idx'].append(torch.zeros_like(labels_fused)) + + out_list = self.compose_result_list(out_dict, len(ego_preds)) + return {self.scatter_keys[0]: [{'preds': x} for x in out_list]} + + def clustering(self, boxes, scores, labels, times, global_time): + times_cat = torch.cat(times, dim=0) + # remove boxes outside the maximum time length + mask = (global_time - times_cat) < 0.15 + pred_boxes_cat = torch.cat(boxes, dim=0)[mask] + pred_boxes_cat[:, -1] = limit_period(pred_boxes_cat[:, -1]) + pred_scores_cat = torch.cat(scores, dim=0)[mask] + pred_labels_cat = torch.cat(labels, dim=0)[mask] + times_cat = times_cat[mask] + + if len(pred_scores_cat) == 0: + clusters = [torch.Tensor([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.57]). 
+ to(boxes[0].device).view(1, 7)] + scores= [torch.Tensor([0.01]).to(boxes[0].device).view(-1)] + labels = [torch.Tensor([-1]).to(boxes[0].device).view(-1)] + times = [torch.Tensor([-1]).to(boxes[0].device).view(-1)] + return clusters, scores, labels, times + + ious = boxes_iou3d_gpu(pred_boxes_cat, pred_boxes_cat) + cluster_indices = torch.zeros(len(ious)).int() # gt assignments of preds + cur_cluster_id = 1 + while torch.any(cluster_indices == 0): + cur_idx = torch.where(cluster_indices == 0)[0][ + 0] # find the idx of the first pred which is not assigned yet + cluster_indices[torch.where(ious[cur_idx] > 0.1)[0]] = cur_cluster_id + cur_cluster_id += 1 + clusters = [] + scores = [] + labels = [] + times = [] + for j in range(1, cur_cluster_id): + clusters.append(pred_boxes_cat[cluster_indices == j]) + scores.append(pred_scores_cat[cluster_indices == j]) + labels.append(pred_labels_cat[cluster_indices == j]) + times.append(times_cat[cluster_indices == j]) + + return clusters, scores, labels, times + + @torch.no_grad() + def cluster_fusion(self, clusters, scores, labels, times, global_time): + """ + Merge boxes in each cluster with scores as weights for merging + """ + for i, (c, s, l, t) in enumerate(zip(clusters, scores, labels, times)): + assert len(c) == len(s) + if len(c) == 1: + labels[i] = l[0] + times[i] = t[0] + continue + uniq_lbls, cnt = l.mode(keepdim=True) + labels[i] = uniq_lbls[cnt.argmax()] + + + box_fused, s_fused = self.merge_sync_boxes(c, s) + scores[i] = s_fused + clusters[i] = box_fused + times[i] = t.mean() + + return torch.cat(clusters, dim=0), torch.cat(scores), torch.stack(labels), torch.stack(times) + + @torch.no_grad() + def temporal_cluster_fusion(self, clusters, scores, labels, times, global_time): + """ + Merge boxes in each cluster with scores as weights for merging + """ + for i, (c, s, l, t) in enumerate(zip(clusters, scores, labels, times)): + assert len(c) == len(s) + if len(c) == 1: + labels[i] = l[0] + times[i] = t[0] + continue + uniq_lbls, cnt = l.mode(keepdim=True) + labels[i] = uniq_lbls[cnt.argmax()] + + t_idx = (t * 100).round().int() + uniq_ts = torch.unique(t_idx) + ts = [] + boxes = [] + scrs = [] + for idx in uniq_ts: + mask = t_idx == idx + cur_cluster = c[mask] + cur_scores = s[mask] + box_fused, s_fused = self.merge_sync_boxes(cur_cluster, cur_scores) + ts.append(t[mask].mean()) + boxes.append(box_fused) + scrs.append(s_fused) + + if len(ts) == 1: + scores[i] = scrs[0] + clusters[i] = boxes[0] + times[i] = ts[0] + else: + # interpolate to global time + ts = torch.stack(ts) + sort_inds = torch.argsort(ts) + ts = ts[sort_inds] + boxes = torch.cat(boxes, dim=0)[sort_inds] + scrs = torch.cat(scrs)[sort_inds] + velo = (boxes[-1, :2] - boxes[-2, :2]) / (ts[-1] - ts[-2]) + out_box = boxes[scrs.argmax()] + out_box[:2] += velo * (global_time - ts[-1]) + scores[i] = torch.mean(scrs, dim=0, keepdim=True) + clusters[i] = out_box.reshape(1, -1) + times[i] = torch.tensor(global_time, device=ts.device) + + return torch.cat(clusters, dim=0), torch.cat(scores), torch.stack(labels), torch.stack(times) + + def merge_sync_boxes(self, c, s): + # reverse direction for non-dominant direction of boxes + dirs = c[:, -1] + max_score_idx = torch.argmax(s) + dirs_diff = torch.abs(dirs - dirs[max_score_idx].item()) + lt_pi = (dirs_diff > pi).int() + dirs_diff = dirs_diff * (1 - lt_pi) + ( + 2 * pi - dirs_diff) * lt_pi + score_lt_half_pi = s[dirs_diff > pi / 2].sum() # larger than + score_set_half_pi = s[ + dirs_diff <= pi / 2].sum() # small equal than + # select larger 
scored direction as final direction + if score_lt_half_pi <= score_set_half_pi: + dirs[dirs_diff > pi / 2] += pi + else: + dirs[dirs_diff <= pi / 2] += pi + dirs = limit_period(dirs) + s_normalized = s / s.sum() + sint = torch.sin(dirs) * s_normalized + cost = torch.cos(dirs) * s_normalized + theta = torch.atan2(sint.sum(), cost.sum()).view(1, ) + center_dim = c[:, :-1] * s_normalized[:, None] + box_fused = torch.cat([center_dim.sum(dim=0), theta]).unsqueeze(0) + s_sorted = torch.sort(s, descending=True).values + s_fused = 0 + for j, ss in enumerate(s_sorted): + s_fused += ss ** (j + 1) + s_fused = torch.tensor([min(s_fused, 1.0)], device=s.device) + return box_fused, s_fused \ No newline at end of file diff --git a/cosense3d/modules/fusion/fax.py b/cosense3d/modules/fusion/fax.py new file mode 100644 index 00000000..a28bcc37 --- /dev/null +++ b/cosense3d/modules/fusion/fax.py @@ -0,0 +1,352 @@ +""" +This class is about swap fusion applications +""" +import torch +from einops import rearrange +from torch import nn, einsum +from einops.layers.torch import Rearrange, Reduce + +from cosense3d.modules import BaseModule +from cosense3d.modules.plugin.cobevt import NaiveDecoder + + +class PreNormResidual(nn.Module): + def __init__(self, dim, fn): + super().__init__() + self.norm = nn.LayerNorm(dim) + self.fn = fn + + def forward(self, x, **kwargs): + return self.fn(self.norm(x), **kwargs) + x + + +class FeedForward(nn.Module): + def __init__(self, dim, hidden_dim, dropout=0.): + super().__init__() + self.net = nn.Sequential( + nn.Linear(dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, dim), + nn.Dropout(dropout) + ) + + def forward(self, x): + return self.net(x) + + +# swap attention -> max_vit +class Attention(nn.Module): + """ + Unit Attention class. Todo: mask is not added yet. + + Parameters + ---------- + dim: int + Input feature dimension. + dim_head: int + The head dimension. + dropout: float + Dropout rate + agent_size: int + The agent can be different views, timestamps or vehicles. 
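+    window_size: int
+        Spatial window size; attention is computed over windows of shape
+        (agent_size, window_size, window_size).
+
+    Example
+    -------
+    A shape sketch with hypothetical sizes (not taken from a real config):
+
+    >>> attn = Attention(dim=64, dim_head=32, agent_size=2, window_size=4)
+    >>> x = torch.rand(1, 2, 3, 3, 4, 4, 64)  # b, l, x, y, w1, w2, d
+    >>> out = attn(x)                         # same shape as the input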
+ """ + + def __init__( + self, + dim, + dim_head=32, + dropout=0., + agent_size=6, + window_size=7 + ): + super().__init__() + assert (dim % dim_head) == 0, \ + 'dimension should be divisible by dimension per head' + + self.heads = dim // dim_head + self.scale = dim_head ** -0.5 + self.window_size = [agent_size, window_size, window_size] + + self.to_qkv = nn.Linear(dim, dim * 3, bias=False) + self.attend = nn.Sequential( + nn.Softmax(dim=-1) + ) + + self.to_out = nn.Sequential( + nn.Linear(dim, dim, bias=False), + nn.Dropout(dropout) + ) + + self.relative_position_bias_table = nn.Embedding( + (2 * self.window_size[0] - 1) * + (2 * self.window_size[1] - 1) * + (2 * self.window_size[2] - 1), + self.heads) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for + # each token inside the window + coords_d = torch.arange(self.window_size[0]) + coords_h = torch.arange(self.window_size[1]) + coords_w = torch.arange(self.window_size[2]) + # 3, Wd, Wh, Ww + coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w, indexing='ij')) + coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww + + # 3, Wd*Wh*Ww, Wd*Wh*Ww + relative_coords = \ + coords_flatten[:, :, None] - coords_flatten[:, None, :] + # Wd*Wh*Ww, Wd*Wh*Ww, 3 + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + # shift to start from 0 + relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 2] += self.window_size[2] - 1 + + relative_coords[:, :, 0] *= \ + (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1) + relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1) + relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww + self.register_buffer("relative_position_index", + relative_position_index) + + def forward(self, x, mask=None): + # x shape: b, l, h, w, w_h, w_w, c + batch, agent_size, height, width, window_height, window_width, _, device, h \ + = *x.shape, x.device, self.heads + + # flatten + x = rearrange(x, 'b l x y w1 w2 d -> (b x y) (l w1 w2) d') + # project for queries, keys, values + q, k, v = self.to_qkv(x).chunk(3, dim=-1) + # split heads + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), + (q, k, v)) + # scale + q = q * self.scale + + # sim + sim = einsum('b h i d, b h j d -> b h i j', q, k) + + # add positional bias + L = agent_size * window_height * window_width + bias = self.relative_position_bias_table(self.relative_position_index[:L, :L]) + sim = sim + rearrange(bias, 'i j h -> h i j') + + # mask shape if exist: b x y w1 w2 e l + if mask is not None: + # b x y w1 w2 e l -> (b x y) 1 (l w1 w2) + mask = rearrange(mask, 'b x y w1 w2 e l -> (b x y) e (l w1 w2)') + # (b x y) 1 1 (l w1 w2) = b h 1 n + mask = mask.unsqueeze(1) + sim = sim.masked_fill(mask == 0, -float('inf')) + + # attention + attn = self.attend(sim) + # aggregate + out = einsum('b h i j, b h j d -> b h i d', attn, v) + # merge heads + out = rearrange(out, 'b h (l w1 w2) d -> b l w1 w2 (h d)', + l=agent_size, w1=window_height, w2=window_width) + + # combine heads out + out = self.to_out(out) + return rearrange(out, '(b x y) l w1 w2 d -> b l x y w1 w2 d', + b=batch, x=height, y=width) + + +class SwapFusionBlockMask(nn.Module): + """ + Swap Fusion Block contains window attention and grid attention with + mask enabled for multi-vehicle cooperation. 
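+
+    Expected shapes (see the forward pass): ``x`` is ``(b, l, c, h, w)`` and
+    ``mask`` is ``(b, h, w, 1, l)``, where ``l`` is the number of agents and both
+    ``h`` and ``w`` must be divisible by ``window_size``.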
+ """ + + def __init__(self, + input_dim, + mlp_dim, + dim_head, + window_size, + agent_size, + drop_out): + super(SwapFusionBlockMask, self).__init__() + + self.window_size = window_size + + self.window_attention = PreNormResidual(input_dim, + Attention(input_dim, dim_head, + drop_out, + agent_size, + window_size)) + self.window_ffd = PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, + drop_out)) + self.grid_attention = PreNormResidual(input_dim, + Attention(input_dim, dim_head, + drop_out, + agent_size, + window_size)) + self.grid_ffd = PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, + drop_out)) + + def forward(self, x, mask): + # x: b l c h w + # mask: b h w 1 l + # window attention -> grid attention + mask_swap = mask + + # mask b h w 1 l -> b x y w1 w2 1 L + mask_swap = rearrange(mask_swap, + 'b (x w1) (y w2) e l -> b x y w1 w2 e l', + w1=self.window_size, w2=self.window_size) + x = rearrange(x, 'b m d (x w1) (y w2) -> b m x y w1 w2 d', + w1=self.window_size, w2=self.window_size) + x = self.window_attention(x, mask=mask_swap) + x = self.window_ffd(x) + x = rearrange(x, 'b m x y w1 w2 d -> b m d (x w1) (y w2)') + + # grid attention + mask_swap = mask + mask_swap = rearrange(mask_swap, + 'b (w1 x) (w2 y) e l -> b x y w1 w2 e l', + w1=self.window_size, w2=self.window_size) + x = rearrange(x, 'b m d (w1 x) (w2 y) -> b m x y w1 w2 d', + w1=self.window_size, w2=self.window_size) + x = self.grid_attention(x, mask=mask_swap) + x = self.grid_ffd(x) + x = rearrange(x, 'b m x y w1 w2 d -> b m d (w1 x) (w2 y)') + + return x + + +class SwapFusionBlock(nn.Module): + """ + Swap Fusion Block contains window attention and grid attention. + """ + + def __init__(self, + input_dim, + mlp_dim, + dim_head, + window_size, + agent_size, + drop_out): + super(SwapFusionBlock, self).__init__() + # b = batch * max_cav + self.block = nn.Sequential( + Rearrange('b m d (x w1) (y w2) -> b m x y w1 w2 d', + w1=window_size, w2=window_size), + PreNormResidual(input_dim, Attention(input_dim, dim_head, drop_out, + agent_size, window_size)), + PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, drop_out)), + Rearrange('b m x y w1 w2 d -> b m d (x w1) (y w2)'), + + Rearrange('b m d (w1 x) (w2 y) -> b m x y w1 w2 d', + w1=window_size, w2=window_size), + PreNormResidual(input_dim, Attention(input_dim, dim_head, drop_out, + agent_size, window_size)), + PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, drop_out)), + Rearrange('b m x y w1 w2 d -> b m d (w1 x) (w2 y)'), + ) + + def forward(self, x, mask=None): + # todo: add mask operation later for mulit-agents + x = self.block(x) + return x + + +class SwapFusionEncoder(BaseModule): + """ + Data rearrange -> swap block -> mlp_head + """ + + def __init__(self, + input_dim=128, + mlp_dim=256, + agent_size=5, + window_size=8, + dim_head=32, + drop_out=0.1, + depth=3, + mask=False, + decoder=None, + **kwargs): + super(SwapFusionEncoder, self).__init__(**kwargs) + self.layers = nn.ModuleList([]) + self.depth = depth + self.mask = mask + swap_fusion_block = SwapFusionBlockMask if self.mask else SwapFusionBlock + + for i in range(self.depth): + block = swap_fusion_block(input_dim, + mlp_dim, + dim_head, + window_size, + agent_size, + drop_out) + self.layers.append(block) + + # mlp head + self.mlp_head = nn.Sequential( + Reduce('b m d h w -> b d h w', 'mean'), + Rearrange('b d h w -> b h w d'), + nn.LayerNorm(input_dim), + nn.Linear(input_dim, input_dim), + Rearrange('b h w d -> b d h w') + ) + + if decoder is not None: + self.decoder = 
NaiveDecoder(decoder) + + def forward(self, ego_feat, coop_cpm, **kwargs): + B = len(ego_feat) + C, H, W = ego_feat[0].shape + x = [] + mask = [] + num_cavs = [] + for xe, xc in zip(ego_feat, coop_cpm): + values = xc.values() + ego_mask = torch.ones_like(xe[:1]) + x.append([xe,] + [v['bev_feat'] for v in values]) + mask.append([ego_mask,] + [v['bev_mask'] for v in values]) + num_cavs.append(len(values) + 1) + l = max(num_cavs) + x_pad = ego_feat[0].new_zeros(B, l, C, H, W) + mask_pad = ego_feat[0].new_zeros(B, H, W, 1, l) + for i in range(B): + x_pad[i, :len(x[i])] = torch.stack(x[i], dim=0) + mask_pad[i, :, :, :, :len(x[i])] = torch.stack(mask[i], dim=-1).permute(1, 2, 0, 3) + for stage in self.layers: + x_pad = stage(x_pad, mask=mask_pad) + out = self.mlp_head(x_pad) + + if hasattr(self, 'decoder'): + out = self.decoder(out.unsqueeze(1)) + out = rearrange(out, 'b l c h w -> (b l) c h w') + return {self.scatter_keys[0]: out} + + +if __name__ == "__main__": + import os + + os.environ['CUDA_VISIBLE_DEVICES'] = '1' + args = {'input_dim': 512, + 'mlp_dim': 512, + 'agent_size': 4, + 'window_size': 8, + 'dim_head': 4, + 'drop_out': 0.1, + 'depth': 2, + 'mask': True + } + block = SwapFusionEncoder(args) + block.cuda() + test_data = torch.rand(1, 4, 512, 32, 32) + test_data = test_data.cuda() + mask = torch.ones(1, 32, 32, 1, 4) + mask = mask.cuda() + + output = block(test_data, mask) + print(output) diff --git a/cosense3d/modules/fusion/keypoints.py b/cosense3d/modules/fusion/keypoints.py new file mode 100644 index 00000000..453f008d --- /dev/null +++ b/cosense3d/modules/fusion/keypoints.py @@ -0,0 +1,112 @@ +import torch +import torch.nn as nn + +from cosense3d.modules import BaseModule +from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu +pi = 3.141592653 + + +def limit_period(val, offset=0.5, period=2 * pi): + return val - torch.floor(val / period + offset) * period + + +class KeypointsFusion(BaseModule): + def __init__(self, lidar_range, train_from_epoch=0, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.train_from_epoch = train_from_epoch + + def forward(self, ego_feats, coop_feats, **kwargs): + epoch = kwargs.get('epoch', self.train_from_epoch + 1) + if epoch < self.train_from_epoch: + return {self.scatter_keys[0]: [None for _ in ego_feats]} + out_dict = {'boxes': [], 'scores': [], 'feat': [], 'coor': []} + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + feat = [ego_feat['point_features']] + coor = [ego_feat['point_coords']] + boxes = [ego_feat['boxes']] + scores = [ego_feat['scores']] + for cpfeat in coop_feat.values(): + if 'keypoint_feat' not in cpfeat: + continue + feat.append(cpfeat['keypoint_feat']['point_features']) + coor.append(cpfeat['keypoint_feat']['point_coords']) + boxes.append(cpfeat['keypoint_feat']['boxes']) + scores.append(cpfeat['keypoint_feat']['scores']) + clusters_boxes, clusters_scores = self.clustering(boxes, scores) + boxes_fused, scores_fused = self.cluster_fusion(clusters_boxes, clusters_scores) + out_dict['boxes'].append(boxes_fused) + out_dict['scores'].append(scores_fused) + out_dict['feat'].append(torch.cat(feat, dim=0)) + out_dict['coor'].append(torch.cat(coor, dim=0)) + + return {self.scatter_keys[0]: self.compose_result_list(out_dict, len(ego_feats))} + + def clustering(self, boxes, scores): + pred_boxes_cat = torch.cat(boxes, dim=0) + pred_boxes_cat[:, -1] = limit_period(pred_boxes_cat[:, -1]) + pred_scores_cat = torch.cat(scores, dim=0) + + if len(pred_scores_cat) == 0: + clusters = [torch.Tensor([0.0, 0.0, 
0.0, 1.0, 1.0, 1.0, 1.57]). + to(boxes[0].device).view(1, 7)] + scores= [torch.Tensor([0.01]).to(boxes[0].device).view(-1)] + return clusters, scores + + ious = boxes_iou3d_gpu(pred_boxes_cat, pred_boxes_cat) + cluster_indices = torch.zeros(len(ious)).int() # gt assignments of preds + cur_cluster_id = 1 + while torch.any(cluster_indices == 0): + cur_idx = torch.where(cluster_indices == 0)[0][ + 0] # find the idx of the first pred which is not assigned yet + cluster_indices[torch.where(ious[cur_idx] > 0.1)[0]] = cur_cluster_id + cur_cluster_id += 1 + clusters = [] + scores = [] + for j in range(1, cur_cluster_id): + clusters.append(pred_boxes_cat[cluster_indices == j]) + scores.append(pred_scores_cat[cluster_indices == j]) + + return clusters, scores + + @torch.no_grad() + def cluster_fusion(self, clusters, scores): + """ + Merge boxes in each cluster with scores as weights for merging + """ + for i, (c, s) in enumerate(zip(clusters, scores)): + assert len(c) == len(s) + if len(c) == 1: + continue + # reverse direction for non-dominant direction of boxes + dirs = c[:, -1] + max_score_idx = torch.argmax(s) + dirs_diff = torch.abs(dirs - dirs[max_score_idx].item()) + lt_pi = (dirs_diff > pi).int() + dirs_diff = dirs_diff * (1 - lt_pi) + ( + 2 * pi - dirs_diff) * lt_pi + score_lt_half_pi = s[dirs_diff > pi / 2].sum() # larger than + score_set_half_pi = s[ + dirs_diff <= pi / 2].sum() # small equal than + # select larger scored direction as final direction + if score_lt_half_pi <= score_set_half_pi: + dirs[dirs_diff > pi / 2] += pi + else: + dirs[dirs_diff <= pi / 2] += pi + dirs = limit_period(dirs) + s_normalized = s / s.sum() + sint = torch.sin(dirs) * s_normalized + cost = torch.cos(dirs) * s_normalized + theta = torch.atan2(sint.sum(), cost.sum()).view(1, ) + center_dim = c[:, :-1] * s_normalized[:, None] + clusters[i] = torch.cat([center_dim.sum(dim=0), theta]).unsqueeze(0) + s_sorted = torch.sort(s, descending=True).values + s_fused = 0 + for j, ss in enumerate(s_sorted): + s_fused += ss ** (j + 1) + s_fused = torch.tensor([min(s_fused, 1.0)], device=s.device) + scores[i] = s_fused + + return torch.cat(clusters, dim=0), torch.cat(scores) + + diff --git a/cosense3d/modules/fusion/maxout_fusion.py b/cosense3d/modules/fusion/maxout_fusion.py new file mode 100644 index 00000000..e236dfe4 --- /dev/null +++ b/cosense3d/modules/fusion/maxout_fusion.py @@ -0,0 +1,71 @@ +import torch +import torch.nn as nn + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.common import cat_coor_with_idx + + +class BEVMaxoutFusion(BaseModule): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def forward(self, ego_feats, coop_feats, **kwargs): + out_feat = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + feat = [ego_feat] + for cpfeat in coop_feat.values(): + if 'bev_feat' not in cpfeat: + continue + feat.append(cpfeat['bev_feat']) + feat = torch.stack(feat, dim=0).max(dim=0).values + out_feat.append(feat) + + return {self.scatter_keys[0]: out_feat} + + +class SparseBEVMaxoutFusion(BaseModule): + def __init__(self, + pc_range, + resolution, + **kwargs): + super().__init__(**kwargs) + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.resolution = resolution + + def forward(self, ego_feats, coop_feats, **kwargs): + fused_feat = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + coor = [ego_feat['ref_pts']] + feat = [ego_feat['outs_dec'][-1]] + if len(coop_feat) == 0: + fused_feat.append({ + 'ref_pts': coor[0], + 'outs_dec': 
feat[0].unsqueeze(1) + }) + continue + + # fuse coop to ego + for cpfeat in coop_feat.values(): + coor.append(cpfeat[self.gather_keys[0]]['ref_pts']) + feat.append(cpfeat[self.gather_keys[0]]['outs_dec'][-1]) + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + # coor_int = coor_cat[:, 1:] * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + # coor_int = (coor_int * (1 / self.resolution)).int() + uniq_coor, reverse_inds = torch.unique(coor_cat[:, 1:], dim=0, + return_inverse=True) + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_full((len(uniq_coor), feat_cat.shape[-1]), -torch.inf) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + out = torch.stack(feats_pad, dim=0).max(dim=0).values + fused_feat.append({ + 'ref_pts': uniq_coor, + 'outs_dec': out.unsqueeze(1) + }) + return self.format_output(fused_feat) + + def format_output(self, output): + return {self.scatter_keys[0]: output} \ No newline at end of file diff --git a/cosense3d/modules/fusion/naive_fusion.py b/cosense3d/modules/fusion/naive_fusion.py new file mode 100644 index 00000000..00a1a3c2 --- /dev/null +++ b/cosense3d/modules/fusion/naive_fusion.py @@ -0,0 +1,60 @@ +import warnings +from typing import Dict + +import torch + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.me_utils import update_me_essentials + + +class NaiveFusion(BaseModule): + def __init__(self, stride, **kwargs): + super(NaiveFusion, self).__init__(**kwargs) + if isinstance(stride, int): + self.stride = [stride] + else: + self.stride = stride + + def forward(self, ego_feats, coop_feats=None, **kwargs): + fused_feat = [] + fuse_key = self.gather_keys[0] + + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + batch_feat = {} + for stride in self.stride: + coor, feat, ctr = self.fuse_feature_at_stride( + ego_feat, coop_feat, stride, fuse_key + ) + batch_feat[f'p{stride}'] = { + 'coor': coor, + 'feat': feat, + 'ctr': ctr, + } + fused_feat.append(batch_feat) + return self.format_output(fused_feat) + + def fuse_feature_at_stride(self, ego_feat, coop_feat, stride, fuse_key): + coor = [ego_feat[f'p{stride}']['coor']] + feat = [ego_feat[f'p{stride}']['feat']] + ctr = [ego_feat[f'p{stride}']['ctr']] + # fuse coop to ego + for cpfeat in coop_feat.values(): + if fuse_key not in cpfeat: + continue + cpm = cpfeat[fuse_key][f'p{stride}'] + coor.append(cpm['coor']) + feat.append(cpm['feat']) + ctr.append(cpm['ctr']) + coor = torch.cat(coor, dim=0) + feat = torch.cat(feat, dim=0) + ctr = torch.cat(ctr, dim=0) + return coor, feat, ctr + + + def format_output(self, output): + return {self.scatter_keys[0]: output} + + + + + diff --git a/cosense3d/modules/fusion/spatial_query_fusion.py b/cosense3d/modules/fusion/spatial_query_fusion.py new file mode 100644 index 00000000..bf23e36e --- /dev/null +++ b/cosense3d/modules/fusion/spatial_query_fusion.py @@ -0,0 +1,215 @@ +from typing import Mapping, Any + +import torch +import torch.nn as nn + +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.utils.common import cat_coor_with_idx +from cosense3d.modules.plugin.attn import ScaledDotProductAttention +from cosense3d.modules.utils.localization_utils import register_points +from cosense3d.modules.utils.common import pad_r +from cosense3d.modules.utils.misc import MLN +import cosense3d.modules.utils.positional_encoding as PE + + +class SpatialQueryFusion(BaseModule): + def __init__(self, + in_channels, + pc_range, + resolution, + **kwargs): + 
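+        # SpatialQueryFusion merges the per-point query features of the ego
+        # vehicle and its cooperative CAVs with scaled dot-product attention:
+        # all reference points are merged into one unique set, every agent's
+        # features are scattered (zero-padded) onto that set, and the ego
+        # features attend over the stacked per-agent features (see forward()
+        # below). A minimal sketch of the core step, with illustrative tensor
+        # names that are not part of the module API:
+        #   coor_cat = cat_coor_with_idx([ego_ref_pts] + coop_ref_pts)
+        #   uniq, inv = torch.unique(coor_cat[:, 1:], dim=0, return_inverse=True)
+        #   feats_pad[i][inv[coor_cat[:, 0] == i]] = feat[i]   # scatter agent i
+        #   q  = feats_pad[0][:, None]             # ego as query
+        #   kv = torch.stack(feats_pad, dim=1)     # all agents as key/value
+        #   out = self.attn(q, kv, kv).squeeze(1)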
super().__init__(**kwargs) + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.resolution = resolution + self.attn = ScaledDotProductAttention(in_channels) + + def forward(self, ego_feats, coop_feats, **kwargs): + fused_feat = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + coor = [ego_feat['ref_pts']] + feat = [ego_feat['outs_dec'][-1]] + if len(coop_feat) == 0: + fused_feat.append({ + 'ref_pts': coor[0], + 'outs_dec': feat[0].unsqueeze(1) + }) + continue + + # fuse coop to ego + for cpfeat in coop_feat.values(): + coor.append(cpfeat[self.gather_keys[0]]['ref_pts']) + feat.append(cpfeat[self.gather_keys[0]]['outs_dec'][-1]) + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + # coor_int = coor_cat[:, 1:] * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + # coor_int = (coor_int * (1 / self.resolution)).int() + uniq_coor, reverse_inds = torch.unique(coor_cat[:, 1:], dim=0, + return_inverse=True) + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_zeros(len(uniq_coor), feat_cat.shape[-1]) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + q = feats_pad[0].unsqueeze(1) # num_pts, 1, d + kv = torch.stack(feats_pad, dim=1) # num_pts, num_coop_cav, d + out = self.attn(q, kv, kv).squeeze(1) + fused_feat.append({ + 'ref_pts': uniq_coor, + 'outs_dec': out.unsqueeze(1) + }) + return self.format_output(fused_feat) + + def format_output(self, output): + return {self.scatter_keys[0]: output} + + +class SpatialQueryAlignFusionRL(BaseModule): + def __init__(self, + in_channels, + pc_range, + resolution, + num_pose_feat=64, + **kwargs): + super().__init__(**kwargs) + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.resolution = resolution + self.emb_dim = in_channels + self.attn = ScaledDotProductAttention(in_channels) + self.pose_pe = MLN(4 * 12, f_dim=self.emb_dim) + self.num_pose_feat = num_pose_feat + self.position_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + + def forward(self, det_local, roadline, roadline_preds, ego_queries, + ego_pose_corrected, ego_poses, ego_poses_aug, + cpms, **kwargs): + fused_feat = [] + for i, cpm in enumerate(cpms): + det = det_local[i] + ego_rl, ego_rl_pred, ego_query = roadline[i], roadline_preds[i], ego_queries[i] + ego_pose_corr, ego_pose, pose_aug2g = ego_pose_corrected[i], ego_poses[i], ego_poses_aug[i] + # augment-frame to ego-aligned-world frame + Taug2eaw = ego_pose_corr @ ego_pose.inverse() @ pose_aug2g + ego_bctr = det['preds']['box'][:, :2] + ego_coor = ego_query['ref_pts'] + ego_coor_emb = self.query_embedding(PE.pos2posemb2d(ego_coor[:, :2], self.num_pose_feat)) + ego_feat = ego_query['outs_dec'][-1] + ego_coor_emb + ego_coor = ego_coor * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + coor = [ego_coor] # in augment-frame + feat = [ego_feat] + if len(cpm) == 0: + fused_feat.append({ + 'ref_pts': coor[0], + 'outs_dec': feat[0].unsqueeze(1) + }) + continue + + # fuse coop to ego + for cpfeat in cpm.values(): + if len(cpfeat['box_ctrs']) == 0: + continue + # transformation matrix coop-aligned-world frame to ego-aligned-world frame + if self.training: + # during training, ground-truth poses are used, caw-frame==eaw-frame + Tcaw2aug = 
Taug2eaw.inverse() + else: + Tcaw2eaw = self.align_coordinates(ego_bctr, ego_rl, ego_rl_pred, Taug2eaw, cpfeat) + Tcaw2aug = Taug2eaw.inverse() @ Tcaw2eaw + T = Tcaw2aug @ cpfeat['Taug2caw'] + + # encode the transformation matrix that transforms feature points + # from erroneous ego-frame to the corrected ego-frame + ref_pts = (T @ pad_r(cpfeat['ref_pts'], 1.0).T)[:3].T + ref_pts_norm = (ref_pts - self.pc_range[:3]) / (self.pc_range[3:] - self.pc_range[:3]) + rot_emb = PE.nerf_positional_encoding(T[:2, :2].flatten(-2)).repeat(len(ref_pts), 1) + pos_emb = self.position_embedding(PE.pos2posemb2d(ref_pts_norm[:, :2], self.num_pose_feat)) + transform_emb = self.pose_pe(pos_emb, rot_emb) + coor.append(ref_pts) + feat.append(cpfeat['feat'][-1] + transform_emb) + + # inplace transformation for coop point cloud: only for visualization in GLViewer + cpfeat['points'][:, :3] = (T @ pad_r(cpfeat['points'][:, :3], 1.0).T)[:3].T + + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + # coor_int = coor_cat[:, 1:] * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + coor_int = (coor_cat[:, 1:] * (1 / self.resolution)).int() + uniq_coor, reverse_inds = torch.unique(coor_int, dim=0, return_inverse=True) + uniq_coor = (uniq_coor * self.resolution - self.pc_range[:3]) / (self.pc_range[3:] - self.pc_range[:3]) + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_zeros(len(uniq_coor), feat_cat.shape[-1]) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + q = feats_pad[0].unsqueeze(1) # num_pts, 1, d + kv = torch.stack(feats_pad, dim=1) # num_pts, num_coop_cav, d + out = self.attn(q, kv, kv).squeeze(1) + fused_feat.append({ + 'ref_pts': uniq_coor, + 'outs_dec': out.unsqueeze(1) + }) + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # ax = draw_points_boxes_plt(pc_range=self.pc_range.tolist(), return_ax=True) + # for pts in coor: + # pts = pts.detach().cpu().numpy() + # ax.plot(pts[:, 0], pts[:, 1], '.', markersize=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + return self.format_output(fused_feat) + + def format_output(self, output, **kwargs): + return {self.scatter_keys[0]: output} + + def align_coordinates(self, ego_bctr, ego_rl, ego_rl_pred, ego_pose, cpfeat): + coop_bctr = cpfeat['box_ctrs'] + coop_rl = cpfeat['roadline'] + + # transform ego points from aug-frame to ego-aligned world-frame + ego_bctr = (ego_pose @ pad_r(pad_r(ego_bctr, 0.0), 1.0).T).T + ego_rl_pred = (ego_pose @ pad_r(pad_r(ego_rl_pred, 0.0), 1.0).T).T + coop_pts = pad_r(torch.cat([coop_rl, coop_bctr], dim=0)) + ego_pts = torch.cat([pad_r(ego_rl[:, :3]), ego_bctr[:, :3]], dim=0) + + transform, coop_pts_tf = register_points(coop_pts, ego_pts, thr=0.8) + + # import matplotlib.pyplot as plt + # ego_bctr_vis = ego_bctr.detach().cpu().numpy() + # ego_rl_pred_vis = ego_rl_pred.detach().cpu().numpy() + # ego_rl_vis = ego_rl.detach().cpu().numpy() + # coop_bctr_vis = coop_bctr.detach().cpu().numpy() + # coop_rl_vis = coop_rl.detach().cpu().numpy() + # + # plt.plot(ego_rl_vis[:, 0], ego_rl_vis[:, 1], 'g.', markersize=1) + # plt.plot(ego_rl_pred_vis[:, 0], ego_rl_pred_vis[:, 1], 'y.', markersize=1) + # plt.plot(ego_bctr_vis[:, 0], ego_bctr_vis[:, 1], 'yo', markersize=5, markerfacecolor='none') + # plt.plot(coop_rl_vis[:, 0], coop_rl_vis[:, 1], 'r.', markersize=1) + # plt.plot(coop_bctr_vis[:, 0], coop_bctr_vis[:, 1], 'ro', markersize=5, markerfacecolor='none', alpha=0.5) + # # plt.plot(coop_pts_tf[:, 0], coop_pts_tf[:, 
1], 'b.', markersize=1) + # plt.savefig("/home/yys/Downloads/tmp.png") + # plt.close() + + return torch.from_numpy(transform).float().to(ego_pose.device) + + + + + + + + + + + + + diff --git a/cosense3d/modules/fusion/temporal_fusion.py b/cosense3d/modules/fusion/temporal_fusion.py new file mode 100644 index 00000000..b7e9b5cb --- /dev/null +++ b/cosense3d/modules/fusion/temporal_fusion.py @@ -0,0 +1,939 @@ +from typing import Mapping, Any + +import torch +import torch.nn as nn + +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.utils.misc import SELayer_Linear, MLN, MLN2 +import cosense3d.modules.utils.positional_encoding as PE + + +class TemporalLidarFusion(BaseModule): + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + pos_dim=3, + num_pose_feat=64, + topk=2048, + num_propagated=256, + memory_len=1024, + num_query=644, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk = topk + self.num_query = num_query + self.num_propagated = num_propagated + self.memory_len = memory_len + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.reference_points = nn.Embedding(self.num_query, self.pos_dim) + self.pseudo_reference_points = nn.Embedding(self.num_propagated, self.pos_dim) + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + + def init_weights(self): + nn.init.uniform_(self.reference_points.weight.data, 0, 1) + nn.init.uniform_(self.pseudo_reference_points.weight.data, 0, 1) + self.pseudo_reference_points.weight.requires_grad = False + self.transformer.init_weights() + + def forward(self, rois, bev_feat, mem_dict, **kwargs): + feat, ctr = self.gather_topk(rois, bev_feat) + + pos = ((ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + pos_emb = self.position_embeding(self.embed_pos(pos)) + memory = self.memory_embed(feat) + pos_emb = self.featurized_pe(pos_emb, memory) + + reference_points = self.reference_points.weight.unsqueeze(0).repeat(memory.shape[0], 1, 1) + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos = \ + self.temporal_alignment(query_pos, tgt, reference_points, mem_dict) + mask_dict = [None, None] + outs_dec, _ = self.transformer(memory, tgt, query_pos, pos_emb, + 
mask_dict, temp_memory, temp_pos) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(rois)) + ] + + return {self.scatter_keys[0]: outs} + + def gather_topk(self, rois, bev_feats): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{self.feature_stride}']['ctr'] + feat = bev_feat[f'p{self.feature_stride}']['feat'] + scores = roi['scr'] + if scores.shape[0] < self.topk: + raise NotImplementedError + else: + topk_inds = torch.topk(scores, k=self.topk).indices + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr + + def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim) + + def temporal_alignment(self, query_pos, tgt, ref_pts, mem_dict): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + if not x.all(): + # pad the recent memory ref pts with pseudo points + pseudo_ref_pts = self.pseudo_reference_points.weight.unsqueeze(0).repeat(B, 1, 1) + x = x.view(*((-1,) + (1,) * (pseudo_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + pseudo_ref_pts * (1 - x) + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + temp_memory = mem_dict['embeddings'] + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + query_pos += self.time_embedding( + self.embed_pos(torch.zeros_like(ref_pts[..., :1]), self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos + + +class TemporalFusion(BaseModule): + def __init__(self, + in_channels, + transformer, + feature_stride, + 
lidar_range, + pos_dim=3, + num_pose_feat=128, + topk_ref_pts=1024, + topk_feat=512, + num_propagated=256, + memory_len=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.topk_feat = topk_feat + self.ref_pts_stride = ref_pts_stride + self.num_propagated = num_propagated + self.memory_len = memory_len + self.transformer_itrs = transformer_itrs + self.global_ref_time = global_ref_time + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.LayerNorm(self.embed_dims * 4), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + + def init_weights(self): + self.transformer.init_weights() + + def forward(self, rois, bev_feat, mem_dict, time_scale=None, **kwargs): + ref_feat, ref_ctr = self.gather_topk(rois, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + mem_feat, mem_ctr = self.gather_topk(rois, bev_feat, self.feature_stride, self.topk_feat) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos = ((mem_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos_emb = self.position_embeding(self.embed_pos(mem_pos)) + memory = self.memory_embed(mem_feat) + pos_emb = self.featurized_pe(mem_pos_emb, memory) + + if time_scale is not None: + ref_time = torch.rad2deg(torch.arctan2(ref_ctr[..., 1:2], ref_ctr[..., 0:1])) + 180 + ref_time = torch.stack([ts[inds.long()] for inds, ts in zip(ref_time, time_scale)], dim=0) + else: + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = [None, None] + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(memory, tgt, query_pos, pos_emb, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = 
local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(rois)) + ] + + return {self.scatter_keys[0]: outs} + + def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + scores = roi[f'p{stride}']['conf'][:, + roi[f'p{stride}']['reg'].shape[-1] - 1:].sum(dim=-1) + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr + + def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim) + + def temporal_alignment(self, query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + temp_memory = mem_dict['embeddings'] + + if not x.all(): + # pad the recent memory ref pts with pseudo points + ext_inds = torch.randperm(self.topk_ref_pts)[:self.num_propagated] + ext_ref_pts = ref_pts[:, ext_inds] + ext_feat = ref_feat[:, ext_inds] + # pseudo_ref_pts = pseudo_ref_pts + torch.rand_like(pseudo_ref_pts) + x = x.view(*((-1,) + (1,) * (ext_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + ext_ref_pts * (1 - x) + ext_feat = temp_memory[:, 0] * x + ext_feat * (1 - x) + else: + ext_feat = temp_memory[:, 0] + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + if ref_time is None: + ref_time = torch.zeros_like(ref_pts[..., :1]) + self.global_ref_time + query_pos += self.time_embedding(self.embed_pos(ref_time, self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = 
torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos, ext_feat + + +class LocalTemporalFusion(BaseModule): + """Modified from TemporalFusion to standardize input and output keys""" + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + pos_dim=3, + num_pose_feat=128, + topk_ref_pts=1024, + topk_feat=512, + num_propagated=256, + memory_len=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + norm_fusion=False, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.topk_feat = topk_feat + self.ref_pts_stride = ref_pts_stride + self.num_propagated = num_propagated + self.memory_len = memory_len + self.transformer_itrs = transformer_itrs + self.global_ref_time = global_ref_time + self.norm_fusion = norm_fusion + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + if self.norm_fusion: + self.local_global_fusion = nn.Sequential( + nn.Linear(self.embed_dims * 2, self.embed_dims), + nn.LayerNorm(self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims), + ) + + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.LayerNorm(self.embed_dims * 4), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + + def init_weights(self): + self.transformer.init_weights() + + def forward(self, local_roi, global_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + mem_feat, mem_ctr = self.gather_topk(global_roi, bev_feat, self.feature_stride, self.topk_feat) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos = ((mem_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos_emb = self.position_embeding(self.embed_pos(mem_pos)) + memory = self.memory_embed(mem_feat) + pos_emb = self.featurized_pe(mem_pos_emb, 
memory) + + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = [None, None] + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(memory, tgt, query_pos, pos_emb, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + if self.norm_fusion: + outs_dec = self.local_global_fusion(torch.cat([local_feat, global_feat], dim=-1)) + else: + # simple addition will lead to large values in long sequences + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs} + + def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + if 'scr' in roi: + scores = roi['scr'] + else: + scores = roi[f'p{stride}']['scr'] + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr + + def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim) + + def temporal_alignment(self, query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + temp_memory = mem_dict['embeddings'] + + if not x.all(): + # pad the recent memory ref pts with pseudo points + ext_inds = torch.randperm(self.topk_ref_pts)[:self.num_propagated] + ext_ref_pts = ref_pts[:, ext_inds] + ext_feat = ref_feat[:, ext_inds] + # pseudo_ref_pts = pseudo_ref_pts + torch.rand_like(pseudo_ref_pts) + x = x.view(*((-1,) + (1,) * (ext_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + ext_ref_pts * (1 - x) + ext_feat = temp_memory[:, 0] * x + ext_feat * (1 - x) + else: + ext_feat = temp_memory[:, 0] + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = 
self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + if ref_time is None: + ref_time = torch.zeros_like(ref_pts[..., :1]) + self.global_ref_time + query_pos += self.time_embedding(self.embed_pos(ref_time, self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos, ext_feat + + +class LocalTemporalFusionV1(LocalTemporalFusion): + def forward(self, rois, bev_feat, mem_dict, **kwargs): + return super().forward(rois, rois, bev_feat, mem_dict, **kwargs) + + +class LocalTemporalFusionV2(LocalTemporalFusion): + def forward(self, local_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = None + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(None, tgt, query_pos, None, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs} + + +class LocalTemporalFusionV3(BaseModule): + """TemporalFusion with feature flow""" + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + pos_dim=3, + num_pose_feat=128, + topk_ref_pts=1024, + topk_feat=512, + num_propagated=256, + memory_len=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + norm_fusion=False, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.topk_feat = topk_feat + self.ref_pts_stride = ref_pts_stride + self.num_propagated = num_propagated + self.memory_len = memory_len + self.transformer_itrs = transformer_itrs + 
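+        # A hedged configuration sketch for this module; the values and the
+        # `transformer_cfg` placeholder are illustrative assumptions, not the
+        # defaults of any shipped config file:
+        #   fusion = LocalTemporalFusionV3(
+        #       in_channels=256, transformer=transformer_cfg, feature_stride=8,
+        #       lidar_range=[-144.0, -41.6, -3.0, 144.0, 41.6, 1.0],
+        #       topk_ref_pts=1024, topk_feat=512, ref_pts_stride=2,
+        #       num_propagated=256, memory_len=1024, norm_fusion=True,
+        #       gather_keys=[...], scatter_keys=[...])  # BaseModule kwargs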
self.global_ref_time = global_ref_time + self.norm_fusion = norm_fusion + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + if self.norm_fusion: + self.local_global_fusion = nn.Sequential( + nn.Linear(self.embed_dims * 2, self.embed_dims), + nn.LayerNorm(self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims), + ) + + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.LayerNorm(self.embed_dims * 4), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + + def init_weights(self): + self.transformer.init_weights() + + def forward(self, local_roi, global_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + mem_feat, mem_ctr = self.gather_topk(global_roi, bev_feat, self.feature_stride, self.topk_feat) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos = ((mem_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos_emb = self.position_embeding(self.embed_pos(mem_pos)) + memory = self.memory_embed(mem_feat) + pos_emb = self.featurized_pe(mem_pos_emb, memory) + + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = [None, None] + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(memory, tgt, query_pos, pos_emb, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + if self.norm_fusion: + outs_dec = self.local_global_fusion(torch.cat([local_feat, global_feat], dim=-1)) + else: + # simple addition will lead to large values in long sequences + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs} + + def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + if 
'scr' in roi: + scores = roi['scr'] + else: + scores = roi[f'p{stride}']['scr'] + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr + + def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim) + + def temporal_alignment(self, query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + temp_memory = mem_dict['embeddings'] + + if not x.all(): + # pad the recent memory ref pts with pseudo points + ext_inds = torch.randperm(self.topk_ref_pts)[:self.num_propagated] + ext_ref_pts = ref_pts[:, ext_inds] + ext_feat = ref_feat[:, ext_inds] + # pseudo_ref_pts = pseudo_ref_pts + torch.rand_like(pseudo_ref_pts) + x = x.view(*((-1,) + (1,) * (ext_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + ext_ref_pts * (1 - x) + ext_feat = temp_memory[:, 0] * x + ext_feat * (1 - x) + else: + ext_feat = temp_memory[:, 0] + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + if ref_time is None: + ref_time = torch.zeros_like(ref_pts[..., :1]) + self.global_ref_time + query_pos += self.time_embedding(self.embed_pos(ref_time, self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos, ext_feat + + +class LocalNaiveFusion(BaseModule): + """This is a naive replacement of 
LocalTemporalFusion by only selecting the topk points for later spatial fusion""" + def __init__(self, + in_channels, + feature_stride, + lidar_range, + pos_dim=3, + topk_ref_pts=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + **kwargs): + super().__init__(**kwargs) + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.ref_pts_stride = ref_pts_stride + self.transformer_itrs = transformer_itrs + self.global_ref_time = global_ref_time + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + def forward(self, local_roi, global_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + outs_dec = ref_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': ref_pos[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs} + + def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + if 'scr' in roi: + scores = roi['scr'] + else: + scores = roi[f'p{stride}']['scr'] + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr + + + + + diff --git a/cosense3d/modules/heads/__init__.py b/cosense3d/modules/heads/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/modules/heads/bev.py b/cosense3d/modules/heads/bev.py new file mode 100644 index 00000000..45970c5b --- /dev/null +++ b/cosense3d/modules/heads/bev.py @@ -0,0 +1,278 @@ +import os + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.me_utils import * +from cosense3d.modules.utils.common import pad_r, linear_last, cat_coor_with_idx +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.losses import edl, build_loss +from cosense3d.modules.plugin import build_plugin_module +from cosense3d.modules.plugin.attn import NeighborhoodAttention + + +class BEV(BaseModule): + def __init__(self, + data_info, + in_dim, + stride, + target_assigner, + loss_cls, + num_cls=1, + class_names_each_head=None, + down_sample_tgt=False, + generate_roi_scr=True, + **kwargs): + super(BEV, self).__init__(**kwargs) + self.in_dim = in_dim + self.class_names_each_head = class_names_each_head + self.down_sample_tgt = down_sample_tgt + self.stride = stride + self.num_cls = num_cls + self.generate_roi_scr = generate_roi_scr + for k, v in data_info.items(): + setattr(self, k, v) + update_me_essentials(self, data_info, self.stride) + + self.reg_layer = linear_last(in_dim, 32, num_cls, bias=True) + + self.tgt_assigner = build_plugin_module(target_assigner) + self.loss_cls = build_loss(**loss_cls) + 
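+        # Whether the classification loss is an evidential (EDL) loss is
+        # inferred from its registered name on the next line. For EDL losses
+        # the raw head output is treated as evidence, and the target assigner
+        # converts it to confidence/uncertainty in forward(), roughly:
+        #   conf, unc = self.tgt_assigner.get_predictions(
+        #       reg, self.is_edl, self.loss_cls.activation)
+        # (sketch of the call made in forward(); the conversion itself lives
+        # in the target assigner and the edl utilities).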
self.is_edl = True if 'edl' in self.loss_cls.name.lower() else False + + def forward(self, stensor_list, **kwargs): + coor, feat, ctr = self.format_input(stensor_list) + + if self.training and self.down_sample_tgt: + coor, feat = self.down_sample(coor, feat) + + centers = indices2metric(coor, self.voxel_size) + reg = self.reg_layer(feat) + + conf, unc = self.tgt_assigner.get_predictions( + reg, self.is_edl, getattr(self.loss_cls, 'activation')) + + out = { + 'ctr': centers, + 'reg': reg, + 'conf': conf, + 'unc': unc, + } + if self.generate_roi_scr: + out['scr'] = conf.max(dim=-1).values + + return self.format_output(out, len(stensor_list)) + + def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride) + + def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['ctr'][:, 0] == i + output_new['ctr'].append(output['ctr'][mask, 1:]) + output_new['reg'].append(output['reg'][mask]) + output_new['conf'].append(output['conf'][mask]) + output_new['unc'].append(output['unc'][mask]) + if 'scr' in output_new: + output_new['scr'].append(output['scr'][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output + + def down_sample(self, coor, feat): + keep = torch.rand_like(feat[:, 0]) > 0.5 + coor = coor[keep] + feat = feat[keep] + + return coor, feat + + def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + tgt_pts = self.cat_data_from_list(batch_list, 'ctr', pad_idx=True) + boxes_vis = gt_boxes[0][:, :7].detach().cpu().numpy() + gt_boxes = self.cat_data_from_list(gt_boxes, pad_idx=True) + conf = self.cat_data_from_list(batch_list, 'conf') + tgt_pts, tgt_label, valid = self.tgt_assigner.assign( + tgt_pts, gt_boxes[:, :8], len(batch_list), conf, **kwargs) + epoch_num = kwargs.get('epoch', 0) + reg = self.cat_data_from_list(batch_list, 'reg') + + # if kwargs['itr'] % 100 == 0: + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # from matplotlib import colormaps + # jet = colormaps['jet'] + # points = batch_list[0]['ctr'].detach().cpu().numpy() + # scores = batch_list[0]['conf'][:, self.num_cls - 1:].detach().cpu().numpy() + # ax = draw_points_boxes_plt( + # pc_range=[-144, -41.6, -3.0, 144, 41.6, 1.0], + # # points=points, + # boxes_gt=boxes_vis, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], c=scores, cmap=jet, s=3, marker='s', vmin=0, vmax=1) + # plt.savefig(f"{os.environ['HOME']}/Downloads/tmp1.jpg") + # plt.close() + + if valid is None: + # targets are not down-sampled + avg_factor = max(tgt_label.sum(), 1) + loss_cls = self.loss_cls( + reg, + tgt_label, + temp=epoch_num, + avg_factor=avg_factor + ) + else: + # negative targets are not down-sampled to a ratio to the positive samples + loss_cls = self.loss_cls( + reg[valid], + tgt_label, + temp=epoch_num, + ) + loss_dict = {'bev_loss': loss_cls} + return loss_dict + + +class BEVMultiResolution(BaseModule): + def __init__(self, strides, strides_for_loss, **kwargs): + super().__init__(**kwargs) + self.strides = strides + self.strides_for_loss = strides_for_loss + for s in strides: + kwargs['stride'] = s + setattr(self, f'head_p{s}', BEV(**kwargs)) + + def forward(self, stensor_list, *args, **kwargs): + out_list = [{} for b in range(len(stensor_list))] + for s in self.strides: + out = getattr(self, f'head_p{s}')(stensor_list)[self.scatter_keys[0]] + for i, x in enumerate(out): + out_list[i][f'p{s}'] = x + + return {self.scatter_keys[0]: out_list} + + 
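+    # forward() above returns, per sample, one dict keyed by stride, e.g.
+    # out_list[i] == {'p2': {...}, 'p8': {...}} (the stride values shown are
+    # only illustrative; the real keys follow `strides`). Each per-stride
+    # entry carries the single-stride BEV head outputs: 'ctr', 'reg', 'conf',
+    # 'unc' and, when `generate_roi_scr` is set, 'scr'. loss() below only
+    # evaluates the strides listed in `strides_for_loss`.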
def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + loss_dict = {} + for s in self.strides_for_loss: + ldict = getattr(self, f'head_p{s}').loss( + [l[f'p{s}'] for l in batch_list], gt_boxes, gt_labels, **kwargs) + for k, v in ldict.items(): + loss_dict[f'{k}_s{s}'] = v + return loss_dict + + +class ContinuousBEV(BaseModule): + def __init__(self, + out_channels, + data_info, + in_dim, + stride, + context_decoder, + target_assigner, + loss_cls, + class_names_each_head=None, + **kwargs): + super().__init__(**kwargs) + self.in_dim = in_dim + self.class_names_each_head = class_names_each_head + self.stride = stride + for k, v in data_info.items(): + setattr(self, k, v) + update_me_essentials(self, data_info, self.stride) + + self.context_decoder = build_plugin_module(context_decoder) + + self.reg_layer = linear_last(in_dim, 32, out_channels, bias=True) + + self.tgt_assigner = build_plugin_module(target_assigner) + self.loss_cls = build_loss(**loss_cls) + + @torch.no_grad() + def sample_reference_points(self, centers, gt_boxes, gt_labels): + gt_boxes = self.cat_data_from_list(gt_boxes, pad_idx=True) + if self.training: + new_pts = centers.clone() + new_pts[:, 1:] += (torch.rand_like(centers[:, 1:]) - 0.5) * self.res[0] + ref_pts, ref_label, _ = self.tgt_assigner.assign( + new_pts, gt_boxes, len(gt_boxes)) + else: + ref_pts, ref_label, _ = self.tgt_assigner.assign( + centers, gt_boxes, len(gt_boxes), down_sample=False) + return ref_pts, ref_label + + def get_evidence(self, ref_pts, coor, feat): + raise NotImplementedError + + def forward(self, stensor_list, gt_boxes, gt_labels, **kwargs): + coor, feat, ctr = self.format_input(stensor_list) + centers = indices2metric(coor, self.voxel_size) + ref_pts, ref_label = self.sample_reference_points( + centers, gt_boxes, gt_labels) + evidence = self.get_evidence(ref_pts, coor, feat) + conf, unc = edl.evidence_to_conf_unc(evidence) + + out = { + 'ref_pts': ref_pts, + 'ref_lbls': ref_label, + 'evi': evidence, + 'conf': conf, + 'unc': unc + } + + return self.format_output(out, len(stensor_list)) + + def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride) + + def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['ref_pts'][:, 0] == i + output_new['ref_pts'].append(output['ref_pts'][mask, 1:]) + output_new['ref_lbls'].append(output['ref_lbls'][mask]) + output_new['evi'].append(output['evi'][mask]) + output_new['conf'].append(output['conf'][mask]) + output_new['unc'].append(output['unc'][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output + + def down_sample(self, coor, feat): + keep = torch.rand_like(feat[:, 0]) > 0.5 + coor = coor[keep] + feat = feat[keep] + + return coor, feat + + def loss(self, batch_list, **kwargs): + tgt_lbl = self.cat_data_from_list(batch_list, 'ref_lbls') + epoch_num = kwargs.get('epoch', 0) + evidence = self.cat_data_from_list(batch_list, 'evi') + # avg_factor = max(tgt_label.sum(), 1) + loss_cls = self.loss_cls( + evidence, + tgt_lbl, + temp=epoch_num, + # avg_factor=avg_factor + ) + loss_dict = {'bev_loss': loss_cls} + return loss_dict + + +class ContiGevBEV(ContinuousBEV): + + def get_evidence(self, ref_pts, coor, feat): + reg = self.reg_layer(feat) + reg = self.context_decoder(ref_pts, coor, reg) + return reg + + +class ContiAttnBEV(ContinuousBEV): + + def get_evidence(self, ref_pts, coor, feat): + ref_context = self.context_decoder(ref_pts, coor, 
feat) + reg = self.reg_layer(ref_context) + return reg.relu() + + + + + diff --git a/cosense3d/modules/heads/bev_dense.py b/cosense3d/modules/heads/bev_dense.py new file mode 100644 index 00000000..eeeeea2e --- /dev/null +++ b/cosense3d/modules/heads/bev_dense.py @@ -0,0 +1,78 @@ +""" +Seg head for bev understanding +""" + +import torch +import torch.nn as nn +from einops import rearrange + +from cosense3d.modules import BaseModule +from cosense3d.modules.losses import build_loss + + +class BevSegHead(BaseModule): + def __init__(self, target, input_dim, output_class, loss_cls, **kwargs): + super(BevSegHead, self).__init__(**kwargs) + self.target = target + if 'dynamic' in self.target: + self.dynamic_head = nn.Conv2d(input_dim, + output_class, + kernel_size=3, + padding=1) + if 'static' in self.target: + self.static_head = nn.Conv2d(input_dim, + output_class, + kernel_size=3, + padding=1) + self.loss_cls = build_loss(**loss_cls) + + def forward(self, x, **kwargs): + x = self.stack_data_from_list(x) + out_dict = {} + if 'dynamic' in self.target: + out_dict['dynamic_bev_pred'] = self.dynamic_head(x) + if not self.training: + out_dict['dynamic_bev_pred'] = out_dict['dynamic_bev_pred'].permute(0, 2, 3, 1).softmax(dim=-1) + if 'static' in self.target: + out_dict['dynamic_bev_pred'] = self.static_head(x) + if not self.training: + out_dict['static_bev_pred'] = out_dict['dynamic_bev_pred'].permute(0, 2, 3, 1).softmax(dim=1) + + # output_list = self.compose_result_list(out_dict, len(x)) + return out_dict + + def loss(self, dynamic_bev_preds, dynamic_bev, **kwargs): + dynamic_bev_preds = self.stack_data_from_list(dynamic_bev_preds) + dynamic_bev_gt = torch.stack(dynamic_bev, dim=0) + loss_dict = self.loss_cls( + dynamic_pred=dynamic_bev_preds, + dynamic_gt=dynamic_bev_gt + ) + return loss_dict + + +class BevRoIDenseHead(BaseModule): + def __init__(self, in_dim, stride, num_cls=1, loss_cls=None, **kwargs): + super(BevRoIDenseHead, self).__init__(**kwargs) + self.head = nn.Conv2d(in_dim, num_cls, kernel_size=1) + self.stride = stride + if loss_cls is not None: + self.loss_cls = build_loss(**loss_cls) + + def forward(self, input, **kwargs): + x = self.stack_data_from_list([x[f'p{self.stride}'] for x in input]) + x = self.head(x) + + # output_list = self.compose_result_list(out_dict, len(x)) + return {self.scatter_keys[0]: x} + + def loss(self, bev_preds, bev_tgt, **kwargs): + bev_preds = self.stack_data_from_list(bev_preds) + dynamic_bev_gt = torch.stack(bev_tgt, dim=0) + loss_dict = self.loss_cls( + dynamic_pred=bev_preds, + dynamic_gt=dynamic_bev_gt + ) + return loss_dict + + diff --git a/cosense3d/modules/heads/bev_roadline.py b/cosense3d/modules/heads/bev_roadline.py new file mode 100644 index 00000000..ad9e1d51 --- /dev/null +++ b/cosense3d/modules/heads/bev_roadline.py @@ -0,0 +1,121 @@ +import os + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.me_utils import * +from cosense3d.modules.utils.common import pad_r, linear_last, cat_coor_with_idx +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.losses import edl, build_loss +from cosense3d.modules.plugin import build_plugin_module +from cosense3d.modules.plugin.attn import NeighborhoodAttention + + +class BEVRoadLine(BaseModule): + def __init__(self, + data_info, + in_dim, + stride, + target_assigner, + loss_cls, + num_cls=1, + **kwargs): + super(BEVRoadLine, self).__init__(**kwargs) + self.in_dim = in_dim + self.stride = stride + self.num_cls = num_cls + + setattr(self, f'p{stride}_cls', 
linear_last(in_dim, 32, num_cls, bias=True)) + + self.tgt_assigner = build_plugin_module(target_assigner) + self.loss_cls = build_loss(**loss_cls) + self.is_edl = True if 'edl' in self.loss_cls.name.lower() else False + + def forward(self, stensor_list, **kwargs): + coor, feat, ctr = self.format_input(stensor_list) + cls = getattr(self, f'p{self.stride}_cls')(feat) + + out = { + 'coor': coor, + 'ctr': ctr, + 'cls': cls, + } + + # import matplotlib.pyplot as plt + # bmsk = coor[:, 0] == 0 + # pts_vis = coor[bmsk] + # pts_vis = pts_vis[:, 1:].detach().cpu().numpy() + # scr_vis = cls[bmsk].detach().cpu().numpy().squeeze() + # + # fig = plt.figure(figsize=(10, 5)) + # ax = fig.add_subplot() + # ax.scatter(pts_vis[:, 0], pts_vis[:, 1], c=scr_vis, marker='.', vmin=0, vmax=1, s=3) + # plt.show() + # plt.close() + + return self.format_output(out, len(stensor_list)) + + def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride) + + def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['coor'][:, 0] == i + output_new['coor'].append(output['coor'][mask, 1:]) + output_new['ctr'].append(output['ctr'][mask]) + output_new['cls'].append(output['cls'][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output + + def down_sample(self, coor, feat): + keep = torch.rand_like(feat[:, 0]) > 0.5 + coor = coor[keep] + feat = feat[keep] + + return coor, feat + + def loss(self, batch_list, tgt_pts, **kwargs): + coor = self.cat_data_from_list(batch_list, 'coor', pad_idx=True) + tgt_pts = self.cat_data_from_list(tgt_pts, pad_idx=True) + tgt_label, valid = self.tgt_assigner.assign( + coor, tgt_pts, len(batch_list), **kwargs) + epoch_num = kwargs.get('epoch', 0) + cls = self.cat_data_from_list(batch_list, 'cls') + + # import matplotlib.pyplot as plt + # bmsk = coor[:, 0] == 0 + # pts_vis = coor[bmsk][valid[bmsk]] + # pts_vis = pts_vis[:, 1:].detach().cpu().numpy() + # lbl_vis = tgt_label[bmsk[valid]].detach().cpu().numpy() + # scr_vis = cls[bmsk][valid[bmsk]].detach().cpu().numpy().squeeze() + # + # fig = plt.figure(figsize=(10, 5)) + # axs = fig.subplots(1, 2) + # axs[0].scatter(pts_vis[:, 0], pts_vis[:, 1], c=lbl_vis, marker='.', vmin=0, vmax=1, s=1) + # axs[1].scatter(pts_vis[:, 0], pts_vis[:, 1], c=scr_vis, marker='.', vmin=0, vmax=1, s=1) + # plt.show() + # plt.close() + + # targets are not down-sampled + cared = tgt_label >= 0 + n_cared = cared.sum() + if n_cared == len(tgt_label): + avg_factor = max(tgt_label.bool().sum(), 1) + else: + avg_factor = n_cared + loss_cls = self.loss_cls( + cls[valid][cared], + tgt_label[cared], + temp=epoch_num, + avg_factor=avg_factor + ) + loss_dict = {'rl_loss': loss_cls} + return loss_dict + + + + + + + + diff --git a/cosense3d/modules/heads/bev_semseg.py b/cosense3d/modules/heads/bev_semseg.py new file mode 100644 index 00000000..dd5cd6a1 --- /dev/null +++ b/cosense3d/modules/heads/bev_semseg.py @@ -0,0 +1,144 @@ +import os + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.me_utils import * +from cosense3d.modules.utils.common import pad_r, linear_last, cat_coor_with_idx +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.losses import edl, build_loss +from cosense3d.modules.plugin import build_plugin_module +from cosense3d.modules.plugin.attn import NeighborhoodAttention + + +class SemsegHead(BaseModule): + def __init__(self, + data_info, + in_dim, + stride, + 
target_assigner, + loss_cls, + num_cls=2, + static_head=True, + dynamic_head=True, + **kwargs): + super(SemsegHead, self).__init__(**kwargs) + self.in_dim = in_dim + self.stride = stride + self.num_cls = num_cls + for k, v in data_info.items(): + setattr(self, k, v) + update_me_essentials(self, data_info, self.stride) + assert (static_head or dynamic_head), "At least one of static_head or dynamic_head should be True." + + self.init_layers(in_dim, num_cls, static_head, dynamic_head) + self.tgt_assigner = build_plugin_module(target_assigner) + self.loss_cls = build_loss(**loss_cls) + self.is_edl = True if 'edl' in self.loss_cls.name.lower() else False + + def init_layers(self, in_dim, num_cls, static_head, dynamic_head): + raise NotImplementedError + + def forward(self, stensor_list, **kwargs): + B = len(stensor_list) + coor, feat, ctr = self.format_input(stensor_list) + + out = {'ctr': ctr, 'coor': coor} + if hasattr(self, 'static_head'): + out['reg_static'] = self.static_head(feat) + if not self.training: + out.update(self.tgt_assigner.get_predictions(out, B, 'static')) + if hasattr(self, 'dynamic_head'): + out['reg_dynamic'] = self.dynamic_head(feat) + if not self.training: + out.update(self.tgt_assigner.get_predictions(out, B, 'dynamic')) + + # a1 = (out['reg_static'] > 0).sum(0) + # a2 = (out['reg_dynamic'] > 0).sum(0) + # import matplotlib.pyplot as plt + # from cosense3d.modules.utils.edl_utils import logit_to_edl + # fig = plt.figure(figsize=(14, 5)) + # mask = coor[:, 0] == 0 + # xy = ctr[mask].detach().cpu().numpy() + # conf, unc = logit_to_edl(out['reg_static'][mask, :2]) + # colors = conf[:, 1].detach().cpu().numpy() + # # neg = colors <= 0.5 + # plt.scatter(xy[:, 0], xy[:, 1], cmap='jet', c=colors, edgecolors=None, marker='.', s=1, vmin=0, vmax=1) + # plt.show() + # plt.close() + + return self.format_output(out, B) + + def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride) + + def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + batch_inds = output['coor'][:, 0] + output['coor'] = output['coor'][:, 1:] + + for i in range(B): + mask = batch_inds == i + for k in output.keys(): + if 'map' in k or 'mask' in k: + output_new[k].append(output[k][i]) + else: + output_new[k].append(output[k][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output + + def loss(self, batch_list, tgt_pts, gt_boxes, **kwargs): + coor = self.cat_data_from_list(batch_list, 'coor', pad_idx=True) + coor[:, 1:] = coor[:, 1:] / self.stride + keys = list(batch_list[0].keys()) + keys.remove('coor') + ctr_pts = {'coor': coor} + for k in keys: + ctr_pts[k] = self.cat_data_from_list(batch_list, k) + B = len(tgt_pts) + tgt_pts = cat_coor_with_idx(tgt_pts) + gt_boxes = cat_coor_with_idx(gt_boxes) + + tgt = self.tgt_assigner.assign( + ctr_pts, tgt_pts, B, gt_boxes, **kwargs) + epoch_num = kwargs.get('epoch', 0) + + loss = 0 + loss_dict = {} + if 'reg_static' in keys: + loss, loss_dict = self.cal_loss(loss_dict, loss, tgt, 'static', epoch_num) + if 'reg_dynamic' in keys: + loss, loss_dict = self.cal_loss(loss_dict, loss, tgt, 'dynamic', epoch_num) + + loss_dict['bev_loss'] = loss + return loss_dict + + def cal_loss(self, loss_dict, loss, tgt, tag, epoch_num, **kwargs): + loss_cls = self.loss_cls( + tgt[f'evi_{tag}'], + tgt[f'lbl_{tag}'], + temp=epoch_num, + ) + loss = loss + loss_cls + loss_dict[f'bev_{tag}_loss'] = loss_cls + return loss, loss_dict + + def draw_bev_map(self, data_dict, 
B, **kwargs): + return self.tgt_assigner.get_predictions(data_dict, B, **kwargs) + + +class GevSemsegHead(SemsegHead): + def init_layers(self, in_dim, num_cls, static_head, dynamic_head): + if static_head: + self.static_head = linear_last(in_dim, 32, num_cls * 3, bias=True) + if dynamic_head: + self.dynamic_head = linear_last(in_dim, 32, num_cls * 3, bias=True) + + +class EviSemsegHead(SemsegHead): + def init_layers(self, in_dim, num_cls, static_head, dynamic_head): + if static_head: + self.static_head = linear_last(in_dim, 32, num_cls, bias=True) + if dynamic_head: + self.dynamic_head = linear_last(in_dim, 32, num_cls, bias=True) + + diff --git a/cosense3d/modules/heads/det_anchor_dense.py b/cosense3d/modules/heads/det_anchor_dense.py new file mode 100644 index 00000000..21bd91a6 --- /dev/null +++ b/cosense3d/modules/heads/det_anchor_dense.py @@ -0,0 +1,149 @@ +from typing import List + +import torch +from torch import nn +from cosense3d.modules import BaseModule +from cosense3d.modules import plugin +from cosense3d.modules.losses import build_loss +from cosense3d.utils.misc import multi_apply + + +class DetAnchorDense(BaseModule): + def __init__(self, + in_channels, + loss_cls, + loss_box, + num_classes=1, + stride=None, + target_assigner=None, + get_boxes_when_training=False, + box_stamper=None, + **kwargs): + super(DetAnchorDense, self).__init__(**kwargs) + assert num_classes == 1, 'currently only support binary classification.' + self.num_classes = num_classes + self.get_boxes_when_training = get_boxes_when_training + self.target_assigner = plugin.build_plugin_module(target_assigner) + self.stride = stride + if self.stride is None: + assert target_assigner is not None + self.stride = self.target_assigner.stride + self.num_anchors = self.target_assigner.num_anchors + self.code_size = self.target_assigner.box_coder.code_size + self.cls_head = nn.Conv2d(in_channels, self.num_anchors, kernel_size=1) + self.reg_head = nn.Conv2d(in_channels, self.code_size * self.num_anchors, kernel_size=1) + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + if box_stamper is not None: + self.box_stamper = plugin.build_plugin_module(box_stamper) + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.utils.init.xavier_uniform_(m) + self._is_init = True + + def forward(self, bev_feat_list, points=None, **kwargs): + if isinstance(bev_feat_list[0], torch.Tensor): + bev_feat = torch.stack(bev_feat_list, dim=0) + elif isinstance(bev_feat_list[0], dict): + bev_feat = torch.stack([x[f'p{self.stride}'] for x in bev_feat_list], dim=0) + else: + raise NotImplementedError + + cls = self.cls_head(bev_feat) + reg = self.reg_head(bev_feat) + + out = {'cls': cls, 'reg': reg} + + if self.get_boxes_when_training or not self.training: + preds = self.predictions(out) + if hasattr(self, 'box_stamper'): + assert points is not None + preds = self.box_stamper(preds, points) + out['preds'] = preds + + return self.format_output(out, len(bev_feat)) + + def format_output(self, output, B): + # decompose batch + if 'preds' in output: + preds_list = [] + for i in range(B): + preds = {} + mask = output['preds']['idx'] == i + for k, v in output['preds'].items(): + preds[k] = v[mask] + preds_list.append(preds) + output['preds'] = preds_list + output = {self.scatter_keys[0]: self.compose_result_list(output, B)} + return output + + def loss(self, preds, gt_boxes, gt_labels, **kwargs): + """The dense bev maps 
show have the shape ((b, c, h, w))""" + pred_cls = self.stack_data_from_list(preds, 'cls') + pred_reg = self.stack_data_from_list(preds, 'reg') + # convert to shape(b, c, h, w) -> (nwh, c) to match the anchors + b, c, h, w = pred_cls.shape + pred_cls = pred_cls.permute(0, 3, 2, 1).reshape(-1) + pred_reg = pred_reg.permute(0, 3, 2, 1).reshape(-1, 7) + cls_tgt, reg_tgt, _ = multi_apply( + self.target_assigner.assign, gt_boxes) + cls_tgt = torch.cat(cls_tgt, dim=0) + reg_tgt = torch.cat(reg_tgt, dim=0) + + # vis_cls_pred = pred_cls.view(b, w, h, c).softmax(dim=-1).max(dim=-1).values[0] + # vis_cls_tgt = cls_tgt.view(b, w, h, c).max(dim=-1).values[0] + # img = torch.cat([vis_cls_pred, vis_cls_tgt], dim=1).detach().cpu().numpy().T + # import matplotlib.pyplot as plt + # + # plt.imshow(img) + # plt.show() + # plt.close() + + pos_mask = cls_tgt > 0 + cared = cls_tgt >= 0 + avg_factor = max(pos_mask.sum(), 1) + # downsample negative + # neg_inds = torch.where(cls_tgt == 0)[0] + # neg_inds = neg_inds[torch.randperm(len(neg_inds))[:avg_factor * 5]] + # cared[neg_inds] = True + + # focal loss encode the last dim of tgt as background + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = 0 + + loss_cls = self.loss_cls(pred_cls[cared].view(-1, 1), labels[cared], + avg_factor=avg_factor) + + reg_preds_sin, reg_tgts_sin = self.add_sin_difference(pred_reg[pos_mask], reg_tgt) + loss_box = self.loss_box(reg_preds_sin, reg_tgts_sin, + avg_factor=avg_factor / reg_preds_sin.shape[-1]) + + return { + 'cls_loss': loss_cls, + 'box_loss': loss_box + } + + @staticmethod + def add_sin_difference(boxes1, boxes2, dim=6): + assert dim != -1 + rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * \ + torch.cos(boxes2[..., dim:dim + 1]) + rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * \ + torch.sin(boxes2[..., dim:dim + 1]) + + boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, + boxes1[..., dim + 1:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, + boxes2[..., dim + 1:]], dim=-1) + return boxes1, boxes2 + + def predictions(self, preds): + return self.target_assigner.get_predictions(preds) + + + + + diff --git a/cosense3d/modules/heads/det_anchor_sparse.py b/cosense3d/modules/heads/det_anchor_sparse.py new file mode 100644 index 00000000..ba65e886 --- /dev/null +++ b/cosense3d/modules/heads/det_anchor_sparse.py @@ -0,0 +1,139 @@ +from typing import List + +import torch +from torch import nn +from cosense3d.modules import BaseModule +from cosense3d.modules import plugin +from cosense3d.modules.losses import build_loss +from cosense3d.utils.misc import multi_apply +from cosense3d.modules.utils.common import linear_last + + +class DetAnchorSparse(BaseModule): + def __init__(self, + in_channels, + loss_cls, + loss_box, + num_classes=1, + target_assigner=None, + get_boxes_when_training=False, + get_roi_scores=False, + **kwargs): + super(DetAnchorSparse, self).__init__(**kwargs) + assert num_classes == 1, 'currently only support binary classification.' 
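+        # Sparse counterpart of DetAnchorDense: the cls/reg branches below are
+        # linear layers applied per occupied BEV location of a sparse tensor
+        # rather than Conv2d heads over a dense (B, C, H, W) map; the anchor
+        # layout and box encoding still come from the target assigner. With
+        # num_classes == 1, each anchor is scored as foreground vs. background only.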
+ self.num_classes = num_classes + self.get_boxes_when_training = get_boxes_when_training + self.get_roi_scores = get_roi_scores + self.target_assigner = plugin.build_plugin_module(target_assigner) + self.num_anchors = self.target_assigner.num_anchors + self.code_size = self.target_assigner.box_coder.code_size + self.cls_head = linear_last(in_channels, in_channels * 3, self.num_anchors) + self.reg_head = linear_last(in_channels, in_channels * 3, self.code_size * self.num_anchors) + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + + def init_weights(self): + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.utils.init.xavier_uniform_(m) + self._is_init = True + + def forward(self, stensor_list, **kwargs): + coor, feat, ctr = self.compose_stensor(stensor_list, self.target_assigner.stride) + cls = self.cls_head(feat) + reg = self.reg_head(feat) + + out = {'cls': cls, 'reg': reg, 'ctr': ctr} + + if self.get_roi_scores: + out['scr'] = cls.sigmoid().max(dim=-1).values + + if self.get_boxes_when_training or not self.training: + out['preds'] = self.predictions(coor, out) + + return self.format(out, coor, len(stensor_list)) + + def format(self, output, coor, B): + res_list = [] + for i in range(B): + mask = coor[:, 0] == i + res_dict = {k: v[mask] for k, v in output.items() if k!='preds'} + if 'preds' in output: + preds = {} + mask = output['preds']['idx'] == i + for k, v in output['preds'].items(): + preds[k] = v[mask] + res_dict['preds'] = preds + res_list.append(res_dict) + output = {self.scatter_keys[0]: res_list} + return output + + def loss(self, preds, stensor_list, gt_boxes, gt_labels, **kwargs): + coor = [x[f'p{self.target_assigner.stride}']['coor'] for x in stensor_list] + pred_cls = self.cat_data_from_list(preds, 'cls') + pred_reg = self.cat_data_from_list(preds, 'reg') + + pred_cls = pred_cls.reshape(-1, self.num_classes) + pred_reg = pred_reg.reshape(-1, self.code_size) + cls_tgt, reg_tgt, _ = multi_apply( + self.target_assigner.assign, coor, gt_boxes) + cls_tgt = torch.cat(cls_tgt, dim=0) + reg_tgt = torch.cat(reg_tgt, dim=0) + + # vis_cls_pred = pred_cls.view(b, w, h, c).softmax(dim=-1).max(dim=-1).values[0] + # vis_cls_tgt = cls_tgt.view(b, w, h, c).max(dim=-1).values[0] + # img = torch.cat([vis_cls_pred, vis_cls_tgt], dim=1).detach().cpu().numpy().T + # import matplotlib.pyplot as plt + # + # plt.imshow(img) + # plt.show() + # plt.close() + + pos_mask = cls_tgt > 0 + cared = cls_tgt >= 0 + avg_factor = max(pos_mask.sum(), 1) + # downsample negative + # neg_inds = torch.where(cls_tgt == 0)[0] + # neg_inds = neg_inds[torch.randperm(len(neg_inds))[:avg_factor * 5]] + # cared[neg_inds] = True + + # focal loss encode the last dim of tgt as background + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = 0 + + if len(cared) != len(pred_cls): + print([x['cls'].shape for x in preds]) + print(cared.shape) + loss_cls = self.loss_cls(pred_cls[cared], labels[cared], + avg_factor=avg_factor) + + reg_preds_sin, reg_tgts_sin = self.add_sin_difference(pred_reg[pos_mask], reg_tgt) + loss_box = self.loss_box(reg_preds_sin, reg_tgts_sin, + avg_factor=avg_factor / reg_preds_sin.shape[-1]) + + return { + 'cls_loss': loss_cls, + 'box_loss': loss_box + } + + @staticmethod + def add_sin_difference(boxes1, boxes2, dim=6): + assert dim != -1 + rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * \ + torch.cos(boxes2[..., dim:dim + 1]) + rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) 
* \ + torch.sin(boxes2[..., dim:dim + 1]) + + boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, + boxes1[..., dim + 1:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, + boxes2[..., dim + 1:]], dim=-1) + return boxes1, boxes2 + + def predictions(self, coors, preds): + return self.target_assigner.get_predictions(coors, preds) + + + + + diff --git a/cosense3d/modules/heads/det_center_sparse.py b/cosense3d/modules/heads/det_center_sparse.py new file mode 100644 index 00000000..6f005887 --- /dev/null +++ b/cosense3d/modules/heads/det_center_sparse.py @@ -0,0 +1,454 @@ +from einops import rearrange + +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.utils.common import linear_last +from cosense3d.utils.misc import multi_apply +from cosense3d.modules.losses import build_loss, pred_to_conf_unc +from cosense3d.modules.utils.me_utils import * +from cosense3d.modules.utils.positional_encoding import ratio2coord + + +class UnitedClsHead(nn.Module): + def __init__(self, + class_names_each_head, + in_channel, + one_hot_encoding=True, + use_bias=False, + norm='BN', + **kwargs): + super().__init__() + n_cls = sum([len(c) for c in class_names_each_head]) + out_channel = n_cls + 1 if one_hot_encoding else n_cls + self.head = linear_last(in_channel, in_channel, out_channel, use_bias, norm) + + def forward(self, x): + return [self.head(x)] + + +class SeparatedClsHead(nn.Module): + def __init__(self, + class_names_each_head, + in_channel, + one_hot_encoding=True, + use_bias=False, + norm='BN', + **kwargs): + super().__init__() + self.n_head = len(class_names_each_head) + for i, cls_names in enumerate(class_names_each_head): + out_channel = len(cls_names) + if one_hot_encoding: + out_channel += 1 + setattr(self, f'head_{i}', + linear_last(in_channel, in_channel, out_channel, use_bias, norm)) + + def forward(self, x): + out = [] + for i in range(self.n_head): + out.append(getattr(self, f'head_{i}')(x)) + return out + + +class UnitedRegHead(nn.Module): + def __init__(self, + reg_channels, + in_channel, + combine_channels=True, + sigmoid_keys=None, + use_bias=False, + norm='BN', + **kwargs): + super().__init__() + self.combine_channels = combine_channels + self.sigmoid_keys = [] if sigmoid_keys is None else sigmoid_keys + self.reg_channels = {} + for c in reg_channels: + name, channel = c.split(':') + self.reg_channels[name] = int(channel) + + if combine_channels: + out_channel = sum(list(self.reg_channels.values())) + self.head = linear_last(in_channel, in_channel, out_channel, use_bias, norm) + else: + for name, channel in self.reg_channels.items(): + setattr(self, f'head_{name}', + linear_last(in_channel, in_channel, int(channel), use_bias, norm)) + + def forward(self, x): + out_dict = {} + if self.combine_channels: + out_tensor = self.head(x) + ptr = 0 + for k, v in self.reg_channels.items(): + out = out_tensor[:, ptr:ptr+v] + if k in self.sigmoid_keys: + out = out.sigmoid() + out_dict[k] = [out] # list compatible with separated head + ptr += v + else: + for k in self.reg_channels.keys(): + out_dict[k] = [getattr(self, f'head_{k}')(x)] + return out_dict + + +class DetCenterSparse(BaseModule): + def __init__(self, + data_info, + stride, + class_names_each_head, + shared_conv_channel, + cls_head_cfg, + reg_head_cfg, + reg_channels, + cls_assigner, + box_assigner, + loss_cls, + loss_box, + center_threshold=0.5, + generate_roi_scr=False, + norm='BN', + **kwargs): + super(DetCenterSparse, self).__init__(**kwargs) + update_me_essentials(self, data_info, stride) + 
self.center_threshold = center_threshold + self.n_heads = len(class_names_each_head) + self.class_names_each_head = class_names_each_head + self.generate_roi_scr = generate_roi_scr + self.reg_heads = [] + + self.cls_head = globals()[cls_head_cfg['name']]( + class_names_each_head, + shared_conv_channel, + one_hot_encoding=cls_head_cfg.get('one_hot_encoding', True), + norm=norm + ) + self.reg_head = globals()[reg_head_cfg['name']]( + reg_channels, + shared_conv_channel, + combine_channels=reg_head_cfg['combine_channels'], + sigmoid_keys=reg_head_cfg['sigmoid_keys'], + norm=norm + ) + + self.cls_assigner = plugin.build_plugin_module(cls_assigner) + self.box_assigner = plugin.build_plugin_module(box_assigner) + + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + + self.out_dict = {'cls': []} + for name in self.reg_heads: + self.out_dict[f'reg_{name}'] = [] + + self.temp = 1 + + def forward(self, stensor_list, **kwargs): + self.temp += 1 + B = len(stensor_list) + coor, feat, centers = self.format_input(stensor_list) + if centers is not None: + centers = indices2metric(coor, self.voxel_size) + cls = self.cls_head(feat) + reg = self.reg_head(feat) + + out_dict = { + 'ctr': centers, + 'cls': cls, + 'reg': reg, + } + + if self.generate_roi_scr: + is_edl = 'edl' in self.loss_cls.name.lower() + conf = [pred_to_conf_unc(x, self.loss_cls.activation, edl=is_edl)[0] for x in cls] + conf = torch.stack(conf, dim=0).max(dim=0).values + if len(conf) == 0: + print('det_coor', coor.shape) + print('det_feat', feat.shape) + if is_edl: + out_dict['scr'] = conf[:, 1:].max(dim=-1).values + else: + out_dict['scr'] = conf.max(dim=-1).values + if not self.training: + out_dict['preds'], out_dict['conf'] = self.predictions(out_dict) + + return self.format_output(out_dict, B) + + def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride) + + def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['ctr'][:, 0] == i + output_new['ctr'].append(output['ctr'][mask, 1:]) + output_new['cls'].append([h_cls[mask] for h_cls in output['cls']]) + output_new['reg'].append({k:[vi[mask] for vi in v] for k, v in output['reg'].items()}) + if 'conf' in output: + output_new['conf'].append(output['conf'][mask]) + if 'scr' in output: + output_new['scr'].append(output['scr'][mask]) + if 'preds' in output: + mask = output['preds']['idx'][:, 0] == i + preds = {} + for k, v in output['preds'].items(): + if k in ['idx', 'box']: + preds[k] = v[mask][:, 1:] + else: + preds[k] = v[mask] + output_new['preds'].append(preds) + + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output + + def loss(self, batch_list, gt_boxes, gt_labels, gt_mask=None, **kwargs): + epoch = kwargs.get('epoch', 0) + centers = [batch['ctr'] for batch in batch_list] + pred_cls_list = [torch.stack(batch['cls'], dim=0) for batch in batch_list] + if 'scr' in batch_list[0]: + pred_scores = [batch['scr'] for batch in batch_list] + else: + pred_scores = [pred_to_conf_unc(x)[0][..., 1:].sum(dim=-1) for x in pred_cls_list] + if gt_mask is not None: + for i, m in enumerate(gt_mask): + gt_boxes[i] = gt_boxes[i][m] + gt_labels[i] = gt_labels[i][m] + cls_tgt = multi_apply(self.cls_assigner.assign, + centers, gt_boxes, gt_labels, pred_scores, **kwargs) + + # import matplotlib.pyplot as plt + # ctrs_vis = centers[0].detach().cpu().numpy().T + # scrs_vis = 
pred_cls_list[0][0].softmax(dim=-1).detach().cpu().numpy().T + # gt_vis = (cls_tgt[0] == 1).squeeze().detach().cpu().numpy() + # fig = plt.figure() + # ax = fig.add_subplot() + # ax.scatter(ctrs_vis[0], ctrs_vis[1], c=scrs_vis[1], edgecolors='none', marker='.', vmin=0, vmax=1, cmap='jet') + # ax.scatter(ctrs_vis[0][gt_vis], ctrs_vis[1][gt_vis], c='g', edgecolors='none', marker='.', alpha=0.5) + # plt.show() + # plt.close() + + cls_tgt = torch.cat(cls_tgt, dim=0) + + n_classes = [len(n) for n in self.class_names_each_head] + + # get reg target + box_tgt = self.box_assigner.assign( + self.cat_data_from_list(centers, pad_idx=True), + self.cat_data_from_list(gt_boxes, pad_idx=True), + self.cat_data_from_list(gt_labels) + ) + + ptr = 0 + loss_cls = 0 + loss_box = 0 + for h in range(self.n_heads): + # center loss + cur_cls_src = torch.cat([x[h] for x in pred_cls_list], dim=0).contiguous() + cur_cls_tgt = cls_tgt[..., ptr:ptr+n_classes[h]].contiguous() # one hot foreground labels + + cared = (cur_cls_tgt >= 0).any(dim=-1) + cur_cls_src = cur_cls_src[cared] + cur_cls_tgt = cur_cls_tgt[cared] + ptr += n_classes[h] + + # convert one-hot to labels + cur_labels = torch.zeros_like(cur_cls_tgt[..., 0]).long() + lbl_inds, cls_inds = torch.where(cur_cls_tgt) + if 'edl' in self.loss_cls.name.lower(): + cur_labels[lbl_inds] = cls_inds + 1 + cur_num_cls = n_classes[h] + 1 + avg_factor = None if self.cls_assigner.pos_neg_ratio else max((cur_labels > 0).sum(), 1) + elif 'focal' in self.loss_cls.name.lower(): + cur_num_cls = n_classes[h] + cur_labels += n_classes[h] + cur_labels[lbl_inds] = cls_inds + avg_factor = max(len(cls_inds), 1) + else: + raise NotImplementedError + + # focal loss encode the last dim of tgt as background + # labels = pos_mask.new_full((len(pos_mask),), self.num_classes, dtype=torch.long) + # labels[pos_mask] = 0 + + lcenter = self.loss_cls( + cur_cls_src, + cur_labels, + temp=epoch, + n_cls_override=cur_num_cls, + avg_factor=avg_factor + ) + loss_cls = loss_cls + lcenter + + # reg loss + ind = box_tgt['idx'][h] + if ind.shape[1] > 0: + for reg_name in self.reg_head.reg_channels.keys(): + pred_reg = torch.cat([x['reg'][reg_name][h] for x in batch_list], dim=0) + cur_reg_src = rearrange(pred_reg, 'n d ... -> n ... d').contiguous() + cur_reg_src = cur_reg_src[box_tgt['valid_mask'][h]] + cur_reg_tgt = box_tgt[reg_name][h] # N, C + cur_loss = self.loss_box(cur_reg_src, cur_reg_tgt) + + loss_box = loss_box + cur_loss + + loss_dict = {'ctr_loss': loss_cls, 'box_loss': loss_box} + return loss_dict + + def predictions(self, preds): + return self.box_assigner.get_predictions(preds) + + +class MultiLvlDetCenterSparse(DetCenterSparse): + def __init__(self, nlvls, sparse, *args, **kwargs): + super(MultiLvlDetCenterSparse, self).__init__(*args, **kwargs) + self.nlvls = nlvls + self.sparse = sparse + self.lidar_range_cuda = nn.Parameter(torch.tensor(self.lidar_range), requires_grad=False) + + def forward(self, feat_in, **kwargs): + outs_dec, reference_points, reference_inds = self.format_input(feat_in) + + assert outs_dec.isnan().sum() == 0, "found nan in outs_dec." 
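+        # Expected layouts (inferred from format_input): in the sparse case
+        # outs_dec is (nlvl, n_query_total, C) with the queries of all samples
+        # concatenated and reference_inds marking the sample index; otherwise it
+        # is (nlvl, B, n_query, C). reference_points are presumably normalized
+        # to [0, 1] and mapped to metric coordinates via ratio2coord and lidar_range.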
+ pos_dim = reference_points.shape[-1] + shape = outs_dec.shape + centers = ratio2coord(reference_points, self.lidar_range_cuda) + + cls = self.cls_head(outs_dec.view(-1, shape[-1])) + reg = self.reg_head(outs_dec.view(-1, shape[-1])) + + cls = torch.stack(cls, dim=0).view(self.n_heads, *shape[:-1], -1) # (nhead, nlvl, nbatch, nsample, ncls) + reg = {k: torch.stack(v, dim=0).view(self.n_heads, *shape[:-1], -1) for k, v in reg.items()} + pred_boxes = self.box_assigner.box_coder.decode( + centers.unsqueeze(0).unsqueeze(0).repeat((self.n_heads, self.nlvls,) + (1,) * len(shape[1:])), reg) + + out_dict = { + 'ctr': centers, + 'cls': cls, + 'reg': reg, + 'pred_boxes': pred_boxes + } + + out_dict['conf'] = pred_to_conf_unc(cls, self.loss_cls.activation)[0] + if 'edl' in self.loss_cls.name.lower(): + out_dict['scr'] = out_dict['conf'][..., 1:].max(dim=-1).values + else: + out_dict['scr'] = out_dict['conf'].max(dim=-1).values + + if not self.training: + out_dict['preds'], _ = self.predictions(out_dict) + + return self.format_output(out_dict, len(feat_in), reference_inds) + + def format_input(self, feat_in): + if self.sparse: + outs_dec = self.cat_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2) + reference_points = self.cat_data_from_list(feat_in, 'ref_pts', pad_idx=True) + reference_inds = reference_points[..., 0] + reference_points = reference_points[..., 1:] + else: + outs_dec = self.stack_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2, 3) + reference_points = self.stack_data_from_list(feat_in, 'ref_pts') + reference_inds = None + return outs_dec, reference_points, reference_inds + + def format_output(self, output, B=None, reference_inds=None): + outs = [] + for i in range(B): + if self.sparse: + m = reference_inds == i + else: + m = i + out = { + 'cls': output['cls'][:, :, m], + 'reg': {k: v[:, :, m] for k, v in output['reg'].items()}, + 'ctr': output['ctr'][m], + 'pred_boxes': output['pred_boxes'][:, :, m], + } + if 'scr' in output: + out['scr'] = output['scr'][:, :, m] + if 'preds' in output: + mask = output['preds']['idx'][:, 0] == i + preds = {} + for k, v in output['preds'].items(): + if k in ['idx', 'box']: + preds[k] = v[mask][:, 1:] + else: + preds[k] = v[mask] + out['preds'] = preds + outs.append(out) + + return {self.scatter_keys[0]: outs} + + def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + epoch = kwargs.get('epoch', 0) + centers = [batch['ctr'] for batch in batch_list for _ in range(self.nlvls)] + pred_cls_list = [x for batch in batch_list for x in batch['cls'].transpose(1, 0)] + pred_scores = [x for batch in batch_list for x in batch['scr'].transpose(1, 0)] + + cls_tgt = multi_apply(self.cls_assigner.assign, + centers, gt_boxes, gt_labels, pred_scores, **kwargs) + cls_tgt = torch.cat(cls_tgt, dim=0) + + n_classes = [len(n) for n in self.class_names_each_head] + + # get reg target + box_tgt = self.box_assigner.assign( + self.cat_data_from_list([batch['ctr'] for batch in batch_list], pad_idx=True), + self.cat_data_from_list(gt_boxes, pad_idx=True), + self.cat_data_from_list(gt_labels) + ) + + ptr = 0 + loss_cls = 0 + loss_box = 0 + for h in range(self.n_heads): + # center loss + cur_cls_src = torch.cat([x[h] for x in pred_cls_list], dim=0).contiguous() + cur_cls_tgt = cls_tgt[..., ptr:ptr+n_classes[h]].contiguous() # one hot foreground labels + + cared = (cur_cls_tgt >= 0).any(dim=-1) + cur_cls_src = cur_cls_src[cared] + cur_cls_tgt = cur_cls_tgt[cared] + ptr += n_classes[h] + + # convert one-hot to labels + cur_labels = torch.zeros_like(cur_cls_tgt[..., 
0]).long() + lbl_inds, cls_inds = torch.where(cur_cls_tgt) + cur_labels[lbl_inds] = cls_inds + 1 + + if self.cls_assigner.pos_neg_ratio: + avg_factor = None + else: + avg_factor = max((cur_labels > 0).sum(), 1) + lcenter = self.loss_cls( + cur_cls_src, + cur_labels, + temp=epoch, + n_cls_override=n_classes[h] + 1, + avg_factor=avg_factor + ) + loss_cls = loss_cls + lcenter + + # reg loss + ind = box_tgt['idx'][h] + if ind.shape[1] > 0: + for reg_name, reg_dim in self.reg_head.reg_channels.items(): + pred_reg = torch.cat([x['reg'][reg_name][h].view(-1, reg_dim) for x in batch_list], dim=0) + cur_reg_src = rearrange(pred_reg, 'n d ... -> n ... d').contiguous() + cur_reg_src = cur_reg_src[torch.cat([box_tgt['valid_mask'][h]] * self.nlvls, dim=0)] + cur_reg_tgt = torch.cat([box_tgt[reg_name][h]] * self.nlvls, dim=0) # N, C + cur_loss = self.loss_box(cur_reg_src, cur_reg_tgt) + + loss_box = loss_box + cur_loss + + loss_dict = {'ctr_loss': loss_cls, 'box_loss': loss_box} + return loss_dict + + def predictions(self, preds): + return self.box_assigner.get_predictions({ + 'ctr': preds['ctr'], + 'cls': preds['cls'][:, -1], + 'reg': {k: v[:, -1] for k, v in preds['reg'].items()} + }) + + diff --git a/cosense3d/modules/heads/det_roi_refine.py b/cosense3d/modules/heads/det_roi_refine.py new file mode 100644 index 00000000..2a8ac4bd --- /dev/null +++ b/cosense3d/modules/heads/det_roi_refine.py @@ -0,0 +1,265 @@ +import copy + +import torch.nn as nn +import torch +import numpy as np +from cosense3d.ops import pointnet2_utils +from cosense3d.utils.pclib import rotate_points_along_z_torch +from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu +from cosense3d.utils import box_utils +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.losses.common import (weighted_smooth_l1_loss, + weighted_sigmoid_binary_cross_entropy) + + +class KeypointRoIHead(BaseModule): + def __init__(self, + num_cls, + in_channels, + n_fc_channels, + roi_grid_pool, + target_assigner, + dp_ratio=0.3, + train_from_epoch=0, + **kwargs): + super().__init__(**kwargs) + self.code_size = 7 + self.dp_ratio = dp_ratio + self.train_from_epoch = train_from_epoch + self.target_assigner = plugin.build_plugin_module(target_assigner) + mlps = copy.copy(roi_grid_pool['mlps']) + for k in range(len(mlps)): + mlps[k] = [in_channels] + mlps[k] + + self.roi_grid_pool_layer = pointnet2_utils.StackSAModuleMSG( + radii=roi_grid_pool['pool_radius'], + nsamples=roi_grid_pool['n_sample'], + mlps=mlps, + use_xyz=True, + pool_method=roi_grid_pool['pool_method'], + ) + + grid_size = roi_grid_pool['grid_size'] + self.grid_size = grid_size + c_out = sum([x[-1] for x in mlps]) + pre_channel = grid_size * grid_size * grid_size * c_out + fc_layers = [n_fc_channels] * 2 + self.shared_fc_layers, pre_channel = self._make_fc_layers(pre_channel, + fc_layers) + + self.cls_layers, pre_channel = self._make_fc_layers(pre_channel, + fc_layers, + output_channels= + num_cls) + self.iou_layers, _ = self._make_fc_layers(pre_channel, fc_layers, + output_channels= + num_cls) + self.reg_layers, _ = self._make_fc_layers(pre_channel, fc_layers, + output_channels=num_cls * 7) + + self._init_weights(weight_init='xavier') + + def _init_weights(self, weight_init='xavier'): + if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ + elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ + elif weight_init == 'normal': + init_func = nn.init.normal_ + else: + raise NotImplementedError + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or 
isinstance(m, nn.Conv1d): + if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) + else: + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001) + + def _make_fc_layers(self, input_channels, fc_list, output_channels=None): + fc_layers = [] + pre_channel = input_channels + for k in range(len(fc_list)): + fc_layers.extend([ + nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False), + # nn.BatchNorm1d(fc_list[k]), + nn.ReLU() + ]) + pre_channel = fc_list[k] + if self.dp_ratio > 0: + fc_layers.append(nn.Dropout(self.dp_ratio)) + if output_channels is not None: + fc_layers.append( + nn.Conv1d(pre_channel, output_channels, kernel_size=1, + bias=True)) + fc_layers = nn.Sequential(*fc_layers) + return fc_layers, pre_channel + + def get_global_grid_points_of_roi(self, rois): + rois = rois.view(-1, rois.shape[-1]) + batch_size_rcnn = rois.shape[0] + + # (B, 6x6x6, 3) + local_roi_grid_points = self.get_dense_grid_points(rois, + batch_size_rcnn, + self.grid_size) + global_roi_grid_points = rotate_points_along_z_torch( + local_roi_grid_points.clone(), rois[:, 6] + ).squeeze(dim=1) + global_center = rois[:, 0:3].clone() + global_roi_grid_points += global_center.unsqueeze(dim=1) + return global_roi_grid_points, local_roi_grid_points + + @staticmethod + def get_dense_grid_points(rois, batch_size_rcnn, grid_size): + """ + Get the local coordinates of each grid point of a roi in the coordinate + system of the roi(origin lies in the center of this roi. + """ + faked_features = rois.new_ones((grid_size, grid_size, grid_size)) + dense_idx = torch.stack(torch.where(faked_features), + dim=1) # (N, 3) [x_idx, y_idx, z_idx] + dense_idx = dense_idx.repeat(batch_size_rcnn, 1, + 1).float() # (B, 6x6x6, 3) + + local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] + roi_grid_points = ( + dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze( + dim=1) \ + - (local_roi_size.unsqueeze( + dim=1) / 2) # (B, 6x6x6, 3) + return roi_grid_points + + def roi_grid_pool(self, preds): + B = len(preds) + rois = torch.cat([p['boxes'] for p in preds], dim=0) + point_features = torch.cat([p['feat'] for p in preds], dim=0) + # (BxN, 6x6x6, 3) + global_roi_grid_points, local_roi_grid_points = \ + self.get_global_grid_points_of_roi(rois) + + xyz = torch.cat([p['coor'] for p in preds], dim=0) + xyz_batch_cnt = xyz.new_zeros(B).int() + for bs_idx in range(B): + xyz_batch_cnt[bs_idx] = len(preds[bs_idx]['coor']) + new_xyz = global_roi_grid_points.view(-1, 3) + new_xyz_batch_cnt = xyz.new_zeros(B).int() + for bs_idx in range(B): + new_xyz_batch_cnt[bs_idx] = len(preds[bs_idx]['boxes']) * self.grid_size ** 3 + + pooled_points, pooled_features = self.roi_grid_pool_layer( + xyz=xyz[:, :3].contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz[:, :3].contiguous(), + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=point_features.contiguous(), # weighted point features + ) # (M1 + M2 ..., C) + # (BxN, 6x6x6, C) + pooled_features = pooled_features.view(-1, self.grid_size ** 3, + pooled_features.shape[-1]) + + return pooled_features + + def forward(self, preds, **kwargs): + epoch = kwargs.get('epoch', self.train_from_epoch + 1) + if epoch < self.train_from_epoch: + return {self.scatter_keys[0]: [None for _ in preds]} + # RoI aware pooling + pooled_features = self.roi_grid_pool(preds) + + batch_size_rcnn = pooled_features.shape[0] + pooled_features = pooled_features.permute(0, 2, 1). 
\ + contiguous().view(batch_size_rcnn, -1, self.grid_size, + self.grid_size, + self.grid_size) # (BxN, C, 6, 6, 6) + shared_features = self.shared_fc_layers( + pooled_features.view(batch_size_rcnn, -1, 1)) + rcnn_cls = self.cls_layers(shared_features).transpose( + 1, 2).contiguous().squeeze( dim=1) # (B, 1 or 2) + rcnn_iou = self.iou_layers(shared_features).transpose( + 1, 2).contiguous().squeeze( dim=1) # (B, 1) + rcnn_reg = self.reg_layers(shared_features).transpose( + 1, 2).contiguous().squeeze( dim=1) # (B, C) + + roi_preds = None + if not self.training: + rois = torch.cat([p['boxes'] for p in preds], dim=0) + roi_preds = self.target_assigner.get_predictions( + rcnn_cls, rcnn_iou, rcnn_reg, rois + ) + + idx = 0 + out_list = [] + for p in preds: + num = len(p['boxes']) + out_dict = { + 'rois': p['boxes'], + 'rcnn_cls': rcnn_cls[idx:idx+num], + 'rcnn_iou': rcnn_iou[idx:idx+num], + 'rcnn_reg': rcnn_reg[idx:idx+num], + } + if roi_preds is not None: + out_dict['preds'] = {k: v[idx:idx+num] for k, v in roi_preds.items()} + out_list.append(out_dict) + idx += num + + return {self.scatter_keys[0]: out_list} + + def loss(self, out, gt_boxes, epoch, **kwargs): + """ + Parameters + ---------- + output_dict : dict + target_dict : dict + """ + if epoch < self.train_from_epoch: + return {} + rois = [x['rois'] for x in out] + label_dict = self.target_assigner.assign(rois, gt_boxes) + + # rcnn out + rcnn_cls = self.cat_data_from_list(out, 'rcnn_cls').view(1, -1, 1) + rcnn_iou = self.cat_data_from_list(out, 'rcnn_iou').view(1, -1, 1) + rcnn_reg = self.cat_data_from_list(out, 'rcnn_reg').view(1, -1, 7) + + tgt_cls = label_dict['cls_tgt'].view(1, -1, 1) + tgt_iou = label_dict['iou_tgt'].view(1, -1, 1) + tgt_reg = label_dict['reg_tgt'].view(1, -1, 7) + + pos_norm = tgt_cls.sum() + # cls loss + loss_cls = weighted_sigmoid_binary_cross_entropy(rcnn_cls, tgt_cls) + + # iou loss + # TODO: also count the negative samples + loss_iou = weighted_smooth_l1_loss(rcnn_iou, tgt_iou, + weights=tgt_cls).mean() + + # regression loss + # Target resampling : Generate a weights mask to force the regressor concentrate on low iou predictions + # sample 50% with iou>0.7 and 50% < 0.7 + weights = torch.ones(tgt_iou.shape, device=tgt_iou.device) + weights[tgt_cls == 0] = 0 + neg = torch.logical_and(tgt_iou < 0.7, tgt_cls != 0) + pos = torch.logical_and(tgt_iou >= 0.7, tgt_cls != 0) + num_neg = int(neg.sum(dim=1)) + num_pos = int(pos.sum(dim=1)) + num_pos_smps = max(num_neg, 2) + pos_indices = torch.where(pos)[1] + not_selsected = torch.randperm(num_pos)[:num_pos - num_pos_smps] + # not_selsected_indices = pos_indices[not_selsected] + weights[:, pos_indices[not_selsected]] = 0 + loss_reg = weighted_smooth_l1_loss(rcnn_reg, tgt_reg, + weights=weights / max(weights.sum(), + 1)).sum() + + loss_dict = { + 'rcnn_cls_loss': loss_cls, + 'rcnn_iou_loss': loss_iou, + 'rcnn_reg_loss': loss_reg, + } + + return loss_dict diff --git a/cosense3d/modules/heads/img_focal.py b/cosense3d/modules/heads/img_focal.py new file mode 100644 index 00000000..142c71d1 --- /dev/null +++ b/cosense3d/modules/heads/img_focal.py @@ -0,0 +1,214 @@ +import torch +from torch import nn +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.utils.init import bias_init_with_prob +from cosense3d.modules.utils.common import inverse_sigmoid, clip_sigmoid +from cosense3d.utils.box_utils import bbox_xyxy_to_cxcywh +from cosense3d.utils.iou2d_calculator import bbox_overlaps +from cosense3d.utils.misc import multi_apply +from cosense3d.modules.losses 
import build_loss + + +class ImgFocal(BaseModule): + def __init__(self, in_channels, embed_dims, num_classes, center_assigner, box_assigner, + loss_cls2d, loss_centerness, loss_bbox2d, loss_iou2d, loss_centers2d, + with_depth=False, **kwargs): + super().__init__(**kwargs) + self.in_channels = in_channels + self.embed_dims = embed_dims + self.num_classes = num_classes + self.with_depth = with_depth + + self.center_assigner = plugin.build_plugin_module(center_assigner) + self.box_assigner = plugin.build_plugin_module(box_assigner) + + self.loss_cls2d = build_loss(**loss_cls2d) + self.loss_centerness = build_loss(**loss_centerness) + self.loss_bbox2d = build_loss(**loss_bbox2d) + self.loss_iou2d = build_loss(**loss_iou2d) + self.loss_centers2d = build_loss(**loss_centers2d) + + self._init_layers() + + def _init_layers(self): + self.cls = nn.Conv2d(self.embed_dims, self.num_classes, kernel_size=1) + self.shared_cls = nn.Sequential( + nn.Conv2d(self.in_channels, self.embed_dims, kernel_size=(3, 3), padding=1), + nn.GroupNorm(32, num_channels=self.embed_dims), + nn.ReLU(),) + self.centerness = nn.Conv2d(self.embed_dims, 1, kernel_size=1) + bias_init = bias_init_with_prob(0.01) + nn.init.constant_(self.cls.bias, bias_init) + nn.init.constant_(self.centerness.bias, bias_init) + + self.shared_reg = nn.Sequential( + nn.Conv2d(self.in_channels, self.embed_dims, kernel_size=(3, 3), padding=1), + nn.GroupNorm(32, num_channels=self.embed_dims), + nn.ReLU(), ) + self.ltrb = nn.Conv2d(self.embed_dims, 4, kernel_size=1) + self.center2d = nn.Conv2d(self.embed_dims, 2, kernel_size=1) + if self.with_depth: + self.depth = nn.Conv2d(self.embed_dims, 1, kernel_size=1) + + def forward(self, img_feat, img_coor, **kwargs): + out_dict = {} + x = self.cat_data_from_list(img_feat) + N, c, h, w = x.shape + n_pixels = h * w + + cls_feat = self.shared_cls(x) + cls = self.cls(cls_feat) + centerness = self.centerness(cls_feat) + cls_logits = cls.permute(0,2,3,1).reshape(-1, n_pixels, self.num_classes) + centerness = centerness.permute(0,2,3,1).reshape(-1, n_pixels, 1) + cls_score = cls_logits.topk(1, dim=2).values[..., 0].view(-1, n_pixels, 1) + sample_weight = cls_score.detach().sigmoid() * centerness.detach().view(-1, n_pixels, 1).sigmoid() + + out_dict.update({ + 'feat_size': [h, w], + 'centerness': centerness, + 'cls_score': cls_score, + 'sample_weight': sample_weight + }) + + + img_coor = self.cat_data_from_list(img_coor) + reg_feat = self.shared_reg(x) + ltrb = self.ltrb(reg_feat).permute(0, 2, 3, 1).contiguous() + ltrb = ltrb.sigmoid() + centers2d_offset = self.center2d(reg_feat).permute(0, 2, 3, 1).contiguous() + centers2d = self.apply_center_offset(img_coor, centers2d_offset) + bboxes = self.apply_ltrb(img_coor, ltrb) + + pred_bboxes = bboxes.view(-1, n_pixels, 4) + pred_centers2d = centers2d.view(-1, n_pixels, 2) + out_dict.update({ + 'pred_boxes': pred_bboxes, + 'pred_centers2d': pred_centers2d + }) + + if self.with_depth: + # TODO + raise NotImplementedError + + return self.format_output(out_dict, img_feat) + + def format_output(self, out_dict, img_feat): + ptr = 0 + output_list = [] + for imgs in img_feat: + n = imgs.shape[0] + output_list.append({k: v[ptr:ptr+n] for k, v in out_dict.items()}) + ptr += n + return {self.scatter_keys[0]: output_list} + + def loss(self, batch_list, labels2d, centers2d, bboxes2d, img_size, **kwargs): + feat_size = batch_list[0]['feat_size'] + centerness = self.cat_data_from_list(batch_list, 'centerness') + cls_score = self.cat_data_from_list(batch_list, 'cls_score') + pred_boxes = 
self.cat_data_from_list(batch_list, 'pred_boxes') + pred_centers2d = self.cat_data_from_list(batch_list, 'pred_centers2d') + labels2d = self.cat_list(labels2d) + centers2d = self.cat_list(centers2d) + bboxes2d = self.cat_list(bboxes2d) + img_size = self.cat_list(img_size) + B = len(img_size) + + num_gts, assigned_gt_inds, assigned_labels = multi_apply( + self.box_assigner.assign, + pred_boxes, cls_score, pred_centers2d, + bboxes2d, labels2d, centers2d, img_size) + + cared_pred_boxes = [] + cared_centers = [] + aligned_bboxes_gt = [] + aligned_centers_gt = [] + aligned_labels = [] + factors = [] + mask = [] + for i, s in enumerate(img_size): + pos_mask = assigned_gt_inds[i] > 0 + mask.append(pos_mask) + pos_inds = assigned_gt_inds[i][pos_mask] - 1 + boxes = pred_boxes[i][pos_mask] + cared_pred_boxes.append(boxes) + factors.append(pred_boxes.new_tensor( + [s[1], s[0], s[1], s[0]]).unsqueeze(0).repeat(boxes.shape[0], 1)) + aligned_bboxes_gt.append(bboxes2d[i][pos_inds]) + cared_centers.append(pred_centers2d[i][pos_mask]) + aligned_centers_gt.append(centers2d[i][pos_inds]) + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = labels2d[i][pos_inds] + aligned_labels.append(labels) + + factors = torch.cat(factors, dim=0) + cared_pred_boxes = torch.cat(cared_pred_boxes, dim=0) + cared_pred_boxes_pix = cared_pred_boxes * factors + cared_centers = torch.cat(cared_centers, dim=0) + factors_inv = 1 / factors + aligned_bboxes_gt = torch.cat(aligned_bboxes_gt, dim=0) + aligned_centers_gt = torch.cat(aligned_centers_gt, dim=0) + aligned_labels = torch.cat(aligned_labels, dim=0) + mask = torch.cat(mask, dim=0) + + loss_iou = self.loss_iou2d(cared_pred_boxes_pix, aligned_bboxes_gt) + + cls_score = cls_score.reshape(-1, cls_score.shape[-1]) + iou_score = torch.zeros_like(cls_score[..., 0]) + iou_score[mask] = bbox_overlaps(aligned_bboxes_gt, cared_pred_boxes_pix, + is_aligned=True).reshape(-1) + cls_avg_factor = max(sum(num_gts), 1) + loss_cls = self.loss_cls2d( + cls_score, (aligned_labels, iou_score.detach()), avg_factor=cls_avg_factor) + + loss_box = self.loss_bbox2d(cared_pred_boxes, aligned_bboxes_gt * factors_inv) + loss_center = self.loss_centers2d(cared_centers, aligned_centers_gt * factors_inv[:, :2]) + + heatmaps = multi_apply(self.center_assigner.assign, centers2d, bboxes2d, + img_size, [img_size[0][0] // feat_size[0]] * B) + heatmaps = torch.stack(heatmaps, dim=0).view(B, -1, 1) + centerness = clip_sigmoid(centerness).view(B, -1, 1) + loss_centerness = self.loss_centerness(centerness, heatmaps, avg_factor=cls_avg_factor) + return { + 'img_cls_loss': loss_cls, + 'img_iou_loss': loss_iou, + 'img_box_loss': loss_box, + 'img_ctr_loss': loss_center, + 'img_ctrness_loss': loss_centerness, + } + + + @staticmethod + def apply_center_offset(locations, center_offset): + """ + :param locations: (1, H, W, 2) + :param pred_ltrb: (N, H, W, 4) + """ + centers_2d = torch.zeros_like(center_offset) + locations = inverse_sigmoid(locations) + centers_2d[..., 0] = locations[..., 0] + center_offset[..., 0] # x1 + centers_2d[..., 1] = locations[..., 1] + center_offset[..., 1] # y1 + centers_2d = centers_2d.sigmoid() + + return centers_2d + + @staticmethod + def apply_ltrb(locations, pred_ltrb): + """ + :param locations: (1, H, W, 2) + :param pred_ltrb: (N, H, W, 4) + """ + pred_boxes = torch.zeros_like(pred_ltrb) + pred_boxes[..., 0] = (locations[..., 0] - pred_ltrb[..., 0]) # x1 + pred_boxes[..., 1] = (locations[..., 1] - pred_ltrb[..., 1]) # y1 + pred_boxes[..., 2] 
= (locations[..., 0] + pred_ltrb[..., 2]) # x2 + pred_boxes[..., 3] = (locations[..., 1] + pred_ltrb[..., 3]) # y2 + min_xy = pred_boxes[..., 0].new_tensor(0) + max_xy = pred_boxes[..., 0].new_tensor(1) + pred_boxes = torch.where(pred_boxes < min_xy, min_xy, pred_boxes) + pred_boxes = torch.where(pred_boxes > max_xy, max_xy, pred_boxes) + pred_boxes = bbox_xyxy_to_cxcywh(pred_boxes) + + return pred_boxes + + diff --git a/cosense3d/modules/heads/lidar_petr_head.py b/cosense3d/modules/heads/lidar_petr_head.py new file mode 100644 index 00000000..fb52d9fe --- /dev/null +++ b/cosense3d/modules/heads/lidar_petr_head.py @@ -0,0 +1,113 @@ +import torch +import torch.nn as nn + +from cosense3d.modules import BaseModule, plugin +from cosense3d.modules.utils.misc import SELayer_Linear, MLN +from cosense3d.modules.utils.positional_encoding import pos2posemb2d + + +class LidarPETRHead(BaseModule): + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + topk=2048, + memory_len=256, + num_query=644, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = 64 + self.pos_dim = 2 + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk = topk + self.num_query = num_query + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + self.reference_points = nn.Embedding(self.num_query, self.pos_dim) + + self._init_layers() + + def _init_layers(self): + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.utils.init.xavier_uniform_(m) + self._is_init = True + + def forward(self, rois, bev_feat, memory, **kwargs): + feat, ctr = self.gather_topk(rois, bev_feat) + + pos = ((ctr - self.lidar_range[:2]) / + (self.lidar_range[3:5] - self.lidar_range[:2])) + pos_emb = self.position_embeding(pos2posemb2d(pos, self.num_pose_feat)) + memory = self.memory_embed(feat) + pos_emb = self.featurized_pe(pos_emb, memory) + + reference_points = (self.reference_points.weight).unsqueeze(0).repeat(memory.shape[0], 1, 1) + query_pos = self.query_embedding(pos2posemb2d(reference_points, self.num_pose_feat)) + tgt = torch.zeros_like(query_pos) + outs_dec, _ = self.transformer(memory, tgt, query_pos, pos_emb) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(rois)) + ] + + return {self.scatter_keys[0]: outs} + + def format_input(self, input): + memory = [] + for x in input: + x = x.permute(0, 2, 3, 1).flatten(0, 2) + memory.append(x) + max_l = max([m.shape[0] for m in memory]) + out = x.new_zeros(len(memory), max_l, x.shape[-1]) + mask = x.new_ones(len(memory), max_l) + for i, m in enumerate(memory): + out[i, :len(m)] = m + mask[i, :len(m)] = False + return out, mask + + def gather_topk(self, rois, bev_feats): + 
topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{self.feature_stride}']['ctr'] + feat = bev_feat[f'p{self.feature_stride}']['feat'] + scores = roi['scr'] + if scores.shape[0] < self.topk: + raise NotImplementedError + else: + topk_inds = torch.topk(scores, k=self.topk).indices + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + return topk_feat, topk_ctr + diff --git a/cosense3d/modules/heads/multitask_head.py b/cosense3d/modules/heads/multitask_head.py new file mode 100644 index 00000000..cb7ef844 --- /dev/null +++ b/cosense3d/modules/heads/multitask_head.py @@ -0,0 +1,62 @@ +from torch import nn + +from cosense3d.modules import BaseModule +from cosense3d.modules import build_module +from cosense3d.modules.plugin import build_plugin_module + + +class MultiTaskHead(BaseModule): + def __init__(self, + heads, + strides, + losses, + formatting=None, + **kwargs): + super().__init__(**kwargs) + self.losses = losses + modules = [] + gt_keys = set(self.gt_keys) + for i, h in enumerate(heads): + if len(h.get('gt_keys', [])) == 0: + cur_gt_keys = self.gt_keys + else: + cur_gt_keys = h['gt_keys'] + gt_keys.update(set(cur_gt_keys)) + h.update(dict( + stride=strides[i], + gather_keys=self.gather_keys, + scatter_keys=[self.scatter_keys[i]], + gt_keys=cur_gt_keys, + )) + modules.append(build_module(h)) + self.heads = nn.ModuleList(modules) + self.gt_keys = list(gt_keys) + if formatting is None: + self.formatting = [None] * len(self.heads) + else: + assert len(formatting) == len(self.heads) + self.formatting = [] + for fmt in formatting: + self.formatting.append(build_plugin_module(fmt)) + + def forward(self, tensor_list, *args, **kwargs): + out = {} + for i, h in enumerate(self.heads): + x = h(tensor_list, *args, **kwargs) + if self.formatting[i] is not None: + for k, v in x.items(): + x[k] = self.formatting[i](x[k]) + out.update(x) + + return out + + def loss(self, *args, **kwargs): + kl = len(self.scatter_keys) + heads_out = args[:kl] + gt_dict = {k:args[kl+i] for i, k in enumerate(self.gt_keys)} + loss_dict = {} + for i, h in enumerate(self.heads): + if self.losses[i]: + gt_list = [gt_dict[k] for k in h.gt_keys] + loss_dict.update(h.loss(heads_out[i], *gt_list, **kwargs)) + return loss_dict \ No newline at end of file diff --git a/cosense3d/modules/heads/nbr_attn_bev.py b/cosense3d/modules/heads/nbr_attn_bev.py new file mode 100644 index 00000000..2944b74f --- /dev/null +++ b/cosense3d/modules/heads/nbr_attn_bev.py @@ -0,0 +1,188 @@ +import torch + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.me_utils import * +from cosense3d.modules.utils.common import pad_r, linear_last, cat_coor_with_idx +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.losses.edl import edl_mse_loss, evidence_to_conf_unc +from cosense3d.modules.utils.nbr_attn import NeighborhoodAttention + + +class NbrAttentionBEV(BaseModule): + def __init__(self, + data_info, + in_dim, + stride, + annealing_step, + sampling, + target_assigner=None, + class_names_each_head=None, + **kwargs): + super(NbrAttentionBEV, self).__init__(**kwargs) + self.in_dim = in_dim + self.class_names_each_head = class_names_each_head + self.stride = stride + self.annealing_step = annealing_step + self.sampling = sampling + for k, v in data_info.items(): + setattr(self, k, v) + update_me_essentials(self, data_info, self.stride) + + self.nbr_attn = 
NeighborhoodAttention(emb_dim=in_dim) + self.reg_layer = linear_last(in_dim, 32, 2, bias=True) + + if class_names_each_head is not None: + from cosense3d.model.utils.target_assigner import TargetAssigner + self.tgt_assigner = TargetAssigner(target_assigner, + class_names_each_head) + + def forward(self, stensor_list, **kwargs): + coor, feat, ctr = self.format_input(stensor_list) + centers = indices2metric(coor, self.voxel_size) + reference_points = self.generate_reference_points(centers) + out = self.nbr_attn(feat, coor, reference_points, len(stensor_list)) + reg = self.reg_layer(out) + conf, unc = evidence_to_conf_unc(reg.relu()) + + out_dict = { + 'center': centers, + 'reg': reg, + 'conf': conf, + 'unc': unc + } + + return self.format_output(out_dict, len(stensor_list)) + + def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride) + + def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['center'][:, 0] == i + output_new['center'].append(output['center'][mask, 1:]) + output_new['reg'].append(output['reg'][mask]) + output_new['conf'].append(output['conf'][mask]) + output_new['unc'].append(output['unc'][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output + + def generate_reference_points(self, centers): + if self.training: + reference_points = centers[torch.rand_like(centers[:, 0]) > 0.5] + else: + reference_points = centers + noise = torch.rand_like(reference_points[:, 1:]) * self.voxel_size[0] * self.stride + reference_points[:, 1:] = reference_points[:, 1:] + noise + return reference_points + + def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + tgt_pts, tgt_label, valid = self.get_tgt(batch_list, gt_boxes, gt_labels, **kwargs) + epoch_num = kwargs.get('epoch', 0) + reg = self.cat_data_from_list(batch_list, 'reg') + loss_dict = edl_mse_loss(preds=reg[valid], + tgt=tgt_label, + n_cls=2, + temp=epoch_num, + annealing_step=self.annealing_step, + model_label='bev') + return loss_dict + + @torch.no_grad() + def get_tgt(self, batch_list, gt_boxes, gt_labels, **kwargs): + epoch_num = kwargs.get('epoch', 0) + B = len(batch_list) + tgt_pts = self.cat_data_from_list(batch_list, 'center', pad_idx=True) + boxes = self.cat_data_from_list(gt_boxes, pad_idx=True).clone() + boxes[:, 3] = 0 + pts = pad_r(tgt_pts) + try: + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + boxes[:, 4:6] *= 2 + _, box_idx_of_pts2 = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + except: + print(boxes.shape) + print(pts.shape) + # set area B: dense neg as -1 for down-sampling, differentiate from area C: sparse neg. 
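+        # Label convention used below: 1 for points inside a GT box (area A),
+        # -1 for dense negatives near boxes (area B, down-sampled and later reset
+        # to 0), and 0 for sparse negatives elsewhere (area C). box_idx_of_pts2
+        # is computed from the boxes enlarged 2x in x/y and is presumably what
+        # distinguishes area B from area C.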
+ tgt_label = - (box_idx_of_pts >= 0).int() + tgt_label[box_idx_of_pts >= 0] = 1 + + n_sam = len(boxes) * 50 + if self.sampling['annealing']: + annealing_ratio = epoch_num / self.annealing_step + n_sam = n_sam + annealing_ratio * len(tgt_label) / 50 + # down-sample + mask = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + tgt_label[tgt_label == -1] = 0 # set area B to 0 + + # positive sample annealing + conf = self.cat_data_from_list(batch_list, 'conf') + labeled_pos = tgt_label == 1 + potential_pos = (conf[..., 1] > (1 - annealing_ratio * 0.5)) + unlabeled_potential_pos = torch.logical_and(potential_pos, + torch.logical_not(labeled_pos)) + if self.sampling['topk']: + k = int(labeled_pos.sum().item() * (1 + 30 * annealing_ratio)) + topk = torch.topk(conf[..., 1], k) + is_topk = torch.zeros_like(labeled_pos) + is_topk[topk.indices] = 1 + topk_potential_pos = torch.logical_and(is_topk, unlabeled_potential_pos) + unlabeled_potential_pos = topk_potential_pos + + # set potential positive samples label to ignore + tgt_label[unlabeled_potential_pos] = -1 + else: + mask = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + # mask = torch.ones_like(tgt_label).bool() + tgt_label[tgt_label == -1] = 0 # set area B to 0 + + # get final tgt + tgt_pts = tgt_pts[mask] + tgt_label = tgt_label[mask] + + # from cosense3d.utils.vislib import draw_points_boxes_plt + # boxes_src = batch_dict['objects'][:, [0, 3, 4, 5, 6, 7, 8, 11]] + # ax = draw_points_boxes_plt( + # pc_range=self.lidar_range, + # points=tgt_pts[tgt_pts[:, 0] == 0, 1:].cpu().numpy(), + # boxes_gt=boxes_src[boxes_src[:, 0] == 0, 1:], + # return_ax=True + # ) + # pts_ = tgt_pts[tgt_label==1] + # ax = draw_points_boxes_plt( + # points=pts_[pts_[:, 0] == 0, 1:].cpu().numpy(), + # points_c='r', + # ax=ax, + # return_ax=True, + # ) + # pts_ = tgt_pts[tgt_label==-1] + # draw_points_boxes_plt( + # points=pts_[pts_[:, 0] == 0, 1:].cpu().numpy(), + # points_c='orange', + # filename='/home/yuan/Downloads/tmp1.png', + # ax=ax + # ) + + return tgt_pts, tgt_label, mask + + @torch.no_grad() + def downsample_tgt_pts(self, tgt_label, max_sam): + selected = torch.ones_like(tgt_label.bool()) + pos = tgt_label == 1 + if pos.sum() > max_sam: + mask = torch.rand_like(tgt_label[pos].float()) < max_sam / pos.sum() + selected[pos] = mask + + neg = tgt_label == 0 + if neg.sum() > max_sam: + mask = torch.rand_like(tgt_label[neg].float()) < max_sam / neg.sum() + selected[neg] = mask + return selected + + + diff --git a/cosense3d/modules/heads/petr_head.py b/cosense3d/modules/heads/petr_head.py new file mode 100644 index 00000000..15cc8fda --- /dev/null +++ b/cosense3d/modules/heads/petr_head.py @@ -0,0 +1,209 @@ +from typing import List + +import torch +from torch import nn + +from cosense3d.modules import BaseModule +from cosense3d.modules.plugin import build_plugin_module +from cosense3d.modules.utils.common import inverse_sigmoid +from cosense3d.utils.misc import multi_apply +from cosense3d.utils.box_utils import normalize_bbox, denormalize_bbox +from cosense3d.modules.losses import build_loss + + +class PETRHead(BaseModule): + def __init__(self, + embed_dims, + pc_range, + code_weights, + num_classes, + box_assigner, + loss_cls, + loss_bbox, + loss_iou=None, + num_reg_fcs=2, + num_pred=3, + use_logits=True, + **kwargs): + super().__init__(**kwargs) + self.embed_dims = embed_dims + self.code_size = 10 + self.num_classes = num_classes + self.num_reg_fcs = num_reg_fcs + self.num_pred = num_pred + self.use_logits = use_logits + + self.pc_range = 
nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.code_weights = nn.Parameter(torch.tensor(code_weights), requires_grad=False) + + self.box_assigner = build_plugin_module(box_assigner) + + self.loss_cls = build_loss(**loss_cls) + self.loss_bbox = build_loss(**loss_bbox) + if loss_iou is not None: + self.loss_iou = build_loss(**loss_iou) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(nn.Linear(self.embed_dims, self.num_classes)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(nn.Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + self.cls_branches = nn.ModuleList( + [fc_cls for _ in range(self.num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(self.num_pred)]) + + def init_weights(self): + for m in self.cls_branches: + nn.init.constant_(m[-1].bias, 2.0) + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.init.xavier_uniform_(m.weight) + self._is_init = True + + def forward(self, feat_in, **kwargs): + outs_dec = self.stack_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2, 3) + reference_points = self.stack_data_from_list(feat_in, 'ref_pts') + pos_dim = reference_points.shape[-1] + outputs_classes = [] + outputs_coords = [] + for lvl in range(len(outs_dec)): + out_dec = outs_dec[lvl] + out_dec = torch.nan_to_num(out_dec) + + pred_cls = self.cls_branches[lvl](out_dec) + pred_reg = self.reg_branches[lvl](out_dec) + + if self.use_logits: + reference = inverse_sigmoid(reference_points.clone()) + pred_reg[..., :pos_dim] += reference + pred_reg[..., :3] = pred_reg[..., :3].sigmoid() + else: + reference = reference_points.clone() + reference[..., :pos_dim] = (reference[..., :pos_dim] * ( + self.pc_range[3:3+pos_dim] - self.pc_range[0:pos_dim]) + + self.pc_range[0:pos_dim]) + pred_reg[..., :pos_dim] = pred_reg[..., :pos_dim] + reference + + outputs_classes.append(pred_cls) + outputs_coords.append(pred_reg) + + all_cls_scores = torch.stack(outputs_classes) + all_bbox_preds = torch.stack(outputs_coords) + if self.use_logits: + all_bbox_preds[..., :3] = (all_bbox_preds[..., :3] * ( + self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3]) + + outs = [ + { + 'all_cls_scores': all_cls_scores[:, i], + 'all_bbox_preds': all_bbox_preds[:, i], + 'ref_pts': reference_points[i] + } for i in range(len(feat_in)) + ] + + return {self.scatter_keys[0]: outs} + + def loss(self, petr_out, gt_boxes, gt_labels, det, **kwargs): + cls_scores = self.stack_data_from_list(petr_out, 'all_cls_scores').flatten(0, 1) + bbox_preds = self.stack_data_from_list(petr_out, 'all_bbox_preds').flatten(0, 1) + gt_boxes = [boxes for boxes in gt_boxes for _ in range(self.num_pred)] + gt_labels = [labels for labels in gt_labels for _ in range(self.num_pred)] + code_weights = [self.code_weights] * len(gt_labels) + + num_gts, assigned_gt_inds, assigned_labels = multi_apply( + self.box_assigner.assign, + bbox_preds, + cls_scores, + gt_boxes, + gt_labels, + code_weights + ) + + cared_pred_boxes = [] + aligned_bboxes_gt = [] + aligned_labels = [] + mask = [] + for i in 
range(len(cls_scores)): + pos_mask = assigned_gt_inds[i] > 0 + mask.append(pos_mask) + pos_inds = assigned_gt_inds[i][pos_mask] - 1 + boxes = bbox_preds[i][pos_mask] + cared_pred_boxes.append(boxes) + aligned_bboxes_gt.append(gt_boxes[i][pos_inds]) + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = gt_labels[i][pos_inds] + # ignore part of negative samples, set labels of them to -1 + inds = torch.where(labels == self.num_classes)[0] + inds = inds[torch.randperm(len(inds))][pos_mask.sum() * 5] + labels[inds] = -1 + aligned_labels.append(labels) + + # # plot + # if i > 0: + # continue + # ref_pts = petr_out[0]['ref_pts'] + # ref_pts = (ref_pts * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3]) + # ref_pts_pos = ref_pts[pos_mask].detach().cpu().numpy() + # ref_pts = ref_pts.detach().cpu().numpy() + # scores = cls_scores[i].sigmoid().squeeze().detach().cpu().numpy() + # gt_boxes_vis = gt_boxes[i][pos_inds].detach().cpu().numpy() + # pred_boxes_vis = denormalize_bbox(boxes).detach().cpu().numpy() + # det_ctr = det[0]['ctr'].detach().cpu().numpy() + # det_scr = det[0]['scr'].detach().cpu().numpy() + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # fig = plt.figure(figsize=(12, 5)) + # ax = fig.add_subplot() + # # ax.scatter(det_ctr[:, 0], det_ctr[:, 1], c=det_scr, vmin=0, vmax=0.5, s=1) + # ax.scatter(ref_pts_pos[:, 0], ref_pts_pos[:, 1], c='r') + # ax.scatter(ref_pts[:, 0], ref_pts[:, 1], c=scores, s=2) + # ax = draw_points_boxes_plt( + # pc_range=self.pc_range.tolist(), + # boxes_pred=pred_boxes_vis[:, :7], + # boxes_gt=gt_boxes_vis[:, :7], + # ax=ax, + # return_ax=True + # ) + # plt.savefig("/mars/projects20/CoSense3D/cosense3d/logs/stream_lidar/tmp.png") + # plt.close() + + cared_pred_boxes = torch.cat(cared_pred_boxes, dim=0) + aligned_bboxes_gt = torch.cat(aligned_bboxes_gt, dim=0) + aligned_labels = torch.cat(aligned_labels, dim=0) + mask = torch.cat(mask, dim=0) + + cls_avg_factor = max(sum(num_gts), 1) + cared = aligned_labels >= 0 + loss_cls = self.loss_cls(cls_scores.reshape(-1, cls_scores.shape[-1])[cared], + aligned_labels[cared], avg_factor=cls_avg_factor) + + bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1))[mask] + normalized_bbox_targets = normalize_bbox(aligned_bboxes_gt) + isnotnan = torch.isfinite(bbox_preds).all(dim=-1) + bbox_weights = torch.ones_like(cared_pred_boxes) * self.code_weights + loss_box = self.loss_bbox(cared_pred_boxes[isnotnan], + normalized_bbox_targets[isnotnan], + bbox_weights[isnotnan]) + + return { + 'petr_cls_loss': loss_cls, + 'petr_box_loss': loss_box + } + + + diff --git a/cosense3d/modules/heads/query_guided_petr_head.py b/cosense3d/modules/heads/query_guided_petr_head.py new file mode 100644 index 00000000..a9775b02 --- /dev/null +++ b/cosense3d/modules/heads/query_guided_petr_head.py @@ -0,0 +1,344 @@ +from typing import List +import os +import torch +from torch import nn + +from cosense3d.modules import BaseModule +from cosense3d.modules.plugin import build_plugin_module +from cosense3d.modules.utils.common import inverse_sigmoid +from cosense3d.utils.misc import multi_apply +from cosense3d.utils.box_utils import normalize_bbox, denormalize_bbox +from cosense3d.modules.losses import build_loss +from cosense3d.modules.losses.edl import pred_to_conf_unc + + +class QueryGuidedPETRHead(BaseModule): + def __init__(self, + embed_dims, + pc_range, + code_weights, + num_classes, + cls_assigner, + box_assigner, + loss_cls, + loss_box, + num_reg_fcs=3, + num_pred=3, 
+ use_logits=False, + reg_channels=None, + sparse=False, + pred_while_training=False, + **kwargs): + super().__init__(**kwargs) + self.embed_dims = embed_dims + self.reg_channels = {} + if reg_channels is None: + self.code_size = 10 + else: + for c in reg_channels: + name, channel = c.split(':') + self.reg_channels[name] = int(channel) + self.code_size = sum(self.reg_channels.values()) + self.num_classes = num_classes + self.num_reg_fcs = num_reg_fcs + self.num_pred = num_pred + self.use_logits = use_logits + self.sparse = sparse + self.pred_while_training = pred_while_training + + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.code_weights = nn.Parameter(torch.tensor(code_weights), requires_grad=False) + + self.box_assigner = build_plugin_module(box_assigner) + self.cls_assigner = build_plugin_module(cls_assigner) + + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + self.is_edl = True if 'edl' in self.loss_cls.name.lower() else False + + self._init_layers() + self.init_weights() + + def _init_layers(self): + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(nn.Linear(self.embed_dims, self.num_classes)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(nn.Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + self.cls_branches = nn.ModuleList( + [fc_cls for _ in range(self.num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(self.num_pred)]) + + def init_weights(self): + for m in self.cls_branches: + nn.init.xavier_uniform_(m[-1].weight) + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.init.xavier_uniform_(m.weight) + self._is_init = True + + def forward(self, feat_in, **kwargs): + if self.sparse: + outs_dec = self.cat_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2) + reference_points = self.cat_data_from_list(feat_in, 'ref_pts', pad_idx=True) + reference_inds = reference_points[..., 0] + reference_points = reference_points[..., 1:] + else: + outs_dec = self.stack_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2, 3) + reference_points = self.stack_data_from_list(feat_in, 'ref_pts') + reference_inds = None + pos_dim = reference_points.shape[-1] + assert outs_dec.isnan().sum() == 0, "found nan in outs_dec." 
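+         # The decoder outputs are processed level by level: each prediction level has its
+         # own cls/reg branch, and when `use_logits` is set the regressed offsets are added
+         # to the inverse-sigmoid reference points and re-squashed with sigmoid, following
+         # the DETR/PETR style of iterative reference refinement.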
+ # if outs_dec.isnan().any(): + # print('d') + + outputs_classes = [] + outputs_coords = [] + for lvl in range(len(outs_dec)): + out_dec = outs_dec[lvl] + # out_dec = torch.nan_to_num(out_dec) + + pred_cls = self.cls_branches[lvl](out_dec) + pred_reg = self.reg_branches[lvl](out_dec) + + if self.use_logits: + reference = inverse_sigmoid(reference_points.clone()) + pred_reg[..., :pos_dim] += reference + pred_reg[..., :3] = pred_reg[..., :3].sigmoid() + + outputs_classes.append(pred_cls) + outputs_coords.append(pred_reg) + + all_cls_logits = torch.stack(outputs_classes) + all_bbox_reg = torch.stack(outputs_coords) + if self.use_logits: + all_bbox_reg[..., :3] = (all_bbox_reg[..., :3] * ( + self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3]) + + reference_points = reference_points * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + det_boxes, pred_boxes = self.get_pred_boxes(all_bbox_reg, reference_points) + cls_scores = pred_to_conf_unc(all_cls_logits, self.loss_cls.activation, self.is_edl)[0] + + if self.sparse: + outs = [] + for i in range(len(feat_in)): + mask = reference_inds == i + outs.append( + { + 'all_cls_logits': all_cls_logits[:, mask], + 'all_bbox_reg': all_bbox_reg[:, mask], + 'ref_pts': reference_points[mask], + 'all_cls_scores': cls_scores[:, mask], + 'all_bbox_preds': det_boxes[:, mask], + 'all_bbox_preds_t': pred_boxes[:, mask] if pred_boxes is not None else None, + } + ) + else: + outs = [ + { + 'all_cls_logits': all_cls_logits[:, i], + 'all_bbox_reg': all_bbox_reg[:, i], + 'ref_pts': reference_points[i], + 'all_cls_scores': cls_scores[:, i], + 'all_bbox_preds': det_boxes[:, i], + 'all_bbox_preds_t': pred_boxes[:, i] if pred_boxes is not None else None, + } for i in range(len(feat_in)) + ] + + if self.pred_while_training or not self.training: + dets = self.get_predictions(cls_scores, det_boxes, pred_boxes, batch_inds=reference_inds) + for i, out in enumerate(outs): + out['preds'] = dets[i] + + return {self.scatter_keys[0]: outs} + + def loss(self, petr_out, gt_boxes_global, gt_labels_global, *args, **kwargs): + aux_dict = {self.gt_keys[2:][i]: x for i, x in enumerate(args)} + epoch = kwargs.get('epoch', 0) + if self.sparse: + cls_scores = torch.cat([x for out in petr_out for x in out['all_cls_logits']], dim=0) + bbox_reg = torch.cat([x for out in petr_out for x in out['all_bbox_reg']], dim=0) + ref_pts = [x['ref_pts'] for x in petr_out for _ in range(self.num_pred)] + else: + cls_scores = self.stack_data_from_list(petr_out, 'all_cls_logits').flatten(0, 1) + bbox_reg = self.stack_data_from_list(petr_out, 'all_bbox_reg').flatten(0, 1) + ref_pts = self.stack_data_from_list(petr_out, 'ref_pts').unsqueeze(1).repeat( + 1, self.num_pred, 1, 1).flatten(0, 1) + gt_boxes_global = [x for x in gt_boxes_global for _ in range(self.num_pred)] + # gt_velos = [x[:, 7:] for x in gt_boxes for _ in range(self.num_pred)] + gt_labels_global = [x for x in gt_labels_global for _ in range(self.num_pred)] + if 'gt_preds' in aux_dict: + gt_preds = [x.transpose(1, 0) for x in aux_dict['gt_preds'] for _ in range(self.num_pred)] + else: + gt_preds = None + + # cls loss + cls_tgt = multi_apply(self.cls_assigner.assign, + ref_pts, gt_boxes_global, gt_labels_global, **kwargs) + cls_src = cls_scores.view(-1, self.num_classes) + + from cosense3d.utils.vislib import draw_points_boxes_plt, plt + points = ref_pts[0].detach().cpu().numpy() + boxes = gt_boxes_global[0][:, :7].detach().cpu().numpy() + scores = petr_out[0]['all_cls_scores'][0] + scores = scores[:, self.num_classes - 
1:].squeeze().detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.pc_range.tolist(), + boxes_gt=boxes, + return_ax=True + ) + ax.scatter(points[:, 0], points[:, 1], c=scores, cmap='jet', s=3, marker='s', vmin=0.0, vmax=1) + plt.savefig(f"{os.environ['HOME']}/Downloads/tmp.jpg") + plt.close() + + # if kwargs['itr'] % 1 == 0: + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # points = ref_pts[0].detach().cpu().numpy() + # boxes = gt_boxes[0][:, :7].detach().cpu().numpy() + # scores = pred_to_conf_unc( + # cls_scores[0], getattr(self.loss_cls, 'activation'), edl=self.is_edl)[0] + # scores = scores[:, self.num_classes - 1:].squeeze().detach().cpu().numpy() + # ax = draw_points_boxes_plt( + # pc_range=self.pc_range.tolist(), + # boxes_gt=boxes, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], c=scores, cmap='jet', s=3, marker='s', vmin=0.0, vmax=1.0) + # # ax = draw_points_boxes_plt( + # # pc_range=self.pc_range.tolist(), + # # points=points[cls_tgt[0].squeeze().detach().cpu().numpy() > 0], + # # points_c="green", + # # ax=ax, + # # return_ax=True + # # ) + # # ax = draw_points_boxes_plt( + # # pc_range=self.pc_range.tolist(), + # # points=points[scores > 0.5], + # # points_c="magenta", + # # ax=ax, + # # return_ax=True + # # ) + # plt.savefig(f"{os.environ['HOME']}/Downloads/tmp.jpg") + # plt.close() + + cls_tgt = torch.cat(cls_tgt, dim=0) + cared = (cls_tgt >= 0).any(dim=-1) + cls_src = cls_src[cared] + cls_tgt = cls_tgt[cared] + + # convert one-hot to labels( + cur_labels = torch.zeros_like(cls_tgt[..., 0]).long() + lbl_inds, cls_inds = torch.where(cls_tgt) + cur_labels[lbl_inds] = cls_inds + 1 + + avg_factor = max((cur_labels > 0).sum(), 1) + loss_cls = self.loss_cls( + cls_src, + cur_labels, + temp=epoch, + avg_factor=avg_factor + ) + + # box loss + # pad ref pts with batch index + if 'gt_preds' in aux_dict: + gt_preds = self.cat_data_from_list(gt_preds) + box_tgt = self.box_assigner.assign( + self.cat_data_from_list(ref_pts, pad_idx=True), + self.cat_data_from_list(gt_boxes_global, pad_idx=True), + self.cat_data_from_list(gt_labels_global), + gt_preds + ) + ind = box_tgt['idx'][0] # only one head + loss_box = 0 + bbox_reg = bbox_reg.view(-1, self.code_size) + if ind.shape[1] > 0: + ptr = 0 + for reg_name, reg_dim in self.reg_channels.items(): + pred_reg = bbox_reg[:, ptr:ptr+reg_dim].contiguous() + if reg_name == 'scr': + pred_reg = pred_reg.sigmoid() + cur_reg_src = pred_reg[box_tgt['valid_mask'][0]] + if reg_name == 'vel': + cur_reg_tgt = box_tgt['vel'][0] * 0.1 + elif reg_name == 'pred': + cur_reg_tgt = box_tgt[reg_name][0] + mask = cur_reg_tgt[..., 0].bool() + cur_reg_src = cur_reg_src[mask] + cur_reg_tgt = cur_reg_tgt[mask, 1:] + else: + cur_reg_tgt = box_tgt[reg_name][0] # N, C + cur_loss = self.loss_box(cur_reg_src, cur_reg_tgt) + + loss_box = loss_box + cur_loss + ptr += reg_dim + + return { + 'cls_loss': loss_cls, + 'box_loss': loss_box, + 'cls_max': pred_to_conf_unc( + cls_src, self.loss_cls.activation, self.is_edl)[0][..., self.num_classes - 1:].max() + } + + def get_pred_boxes(self, bbox_preds, ref_pts): + reg = {} + + ptr = 0 + for reg_name, reg_dim in self.reg_channels.items(): + reg[reg_name] = bbox_preds[..., ptr:ptr + reg_dim].contiguous() + ptr += reg_dim + + out = self.box_assigner.box_coder.decode(ref_pts[None], reg) + if isinstance(out, tuple): + det, pred = out + else: + det = out + pred = None + return det, pred + + def get_predictions(self, cls_scores, det_boxes, pred_boxes, batch_inds=None): + if self.is_edl: 
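+             # With EDL, index 0 of the confidence vector is the background class, so the
+             # detection score is the summed confidence of the remaining foreground
+             # channels; in the non-EDL case all channels are summed directly.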
+ scores = cls_scores[-1][..., 1:].sum(dim=-1) + else: + scores = cls_scores[-1].sum(dim=-1) + labels = cls_scores[-1].argmax(dim=-1) + pos = scores > self.box_assigner.center_threshold + + dets = [] + if batch_inds is None: + inds = range(cls_scores.shape[1]) + for i in inds: + dets.append({ + 'box': det_boxes[-1][i][pos[i]], + 'scr': scores[i][pos[i]], + 'lbl': labels[i][pos[i]], + 'idx': torch.ones_like(labels[i][pos[i]]) * i, + }) + else: + inds = batch_inds.unique() + for i in inds: + mask = batch_inds == i + pos_mask = pos[mask] + dets.append({ + 'box': det_boxes[-1][mask][pos_mask], + 'scr': scores[mask][pos_mask], + 'lbl': labels[mask][pos_mask], + 'pred': pred_boxes[-1][mask][pos_mask] if pred_boxes is not None else None, + 'idx': batch_inds[mask][pos_mask].long() + }) + + return dets + + diff --git a/cosense3d/modules/losses/__init__.py b/cosense3d/modules/losses/__init__.py new file mode 100644 index 00000000..93bcb94d --- /dev/null +++ b/cosense3d/modules/losses/__init__.py @@ -0,0 +1,12 @@ +from .focal_loss import * +from .l1_loss import * +from .iou_loss import * +from .edl import * +from .vanilla_seg_loss import VanillaSegLoss + + +def build_loss(type, **kwargs): + return globals()[type](**kwargs) + + + diff --git a/cosense3d/modules/losses/base_loss.py b/cosense3d/modules/losses/base_loss.py new file mode 100644 index 00000000..51273bd1 --- /dev/null +++ b/cosense3d/modules/losses/base_loss.py @@ -0,0 +1,72 @@ +import torch +from torch import nn + + +class BaseLoss(nn.Module): + def __init__(self, + reduction: str = 'mean', + activation: str = 'none', + loss_weight: float = 1.0): + """ + :param reduction: (optional) the method to reduce the loss. + :param activation: options are "none", "mean" and "sum". + :param loss_weight: (optional) the weight of loss. + """ + super().__init__() + self.reduction = reduction + self.loss_weight = loss_weight + self.activation = activation + + @property + def name(self): + return self.__class__.__name__ + + def loss(self, *args, **kwargs): + raise NotImplementedError + + def forward(self, + preds: torch.Tensor, + targets: torch.Tensor, + weight: torch.Tensor=None, + avg_factor: int=None, + reduction_override: str=None, + *args, **kwargs) -> torch.Tensor: + """ + + :param preds: prediction tensor. + :param targets: target tensor. + :param weight: The weight of loss for each + prediction. Defaults to None. + :param avg_factor: Average factor that is used to average + the loss. Defaults to None. + :param reduction_override: The reduction method used to + override the original reduction method of the loss. + Defaults to None. + :param args: additional arguments. + :param kwargs: + :return: weighted loss. + """ + loss = self.loss(preds, targets, *args, **kwargs) + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + if reduction == 'mean': + loss = loss.mean() + elif reduction == 'sum': + loss = loss.sum() + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + # Avoid causing ZeroDivisionError when avg_factor is 0.0, + # i.e., all labels of an image belong to ignore index. 
+ eps = torch.finfo(torch.float32).eps + loss = loss.sum() / (avg_factor + eps) + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return self.loss_weight * loss \ No newline at end of file diff --git a/cosense3d/modules/losses/common.py b/cosense3d/modules/losses/common.py new file mode 100644 index 00000000..9c0f20a5 --- /dev/null +++ b/cosense3d/modules/losses/common.py @@ -0,0 +1,139 @@ +import torch +import torch.nn.functional as F + + +def weighted_smooth_l1_loss(preds, targets, sigma=3.0, weights=None): + diff = preds - targets + abs_diff = torch.abs(diff) + abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff) + loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + \ + (abs_diff - 0.5 / (sigma ** 2)) * (1.0 - abs_diff_lt_1) + if weights is not None: + if len(loss.shape) > len(weights.shape): + weights = weights.unsqueeze(dim=-1) + loss *= weights + return loss + + +def weighted_l1_loss(preds, targets, sigma=3.0, weights=None): + diff = preds - targets + loss = torch.abs(diff) + if weights is not None: + if len(loss.shape) > len(weights.shape): + weights = weights.unsqueeze(dim=-1) + loss *= weights + return loss + + +def sigmoid_binary_cross_entropy(preds, tgts, weights=None, reduction='none'): + """ + Parameters + ---------- + preds: Tensor(d1, ..., dn) + tgts: Tensor(d1, ..., dn) + weights. Tensor(d1, ..., dn) + reduction: str('none' | 'mean' | 'sum') + ------- + """ + assert preds.shape == tgts.shape + if weights is not None: + assert weights.shape == preds.shape + per_entry_cross_ent = F.binary_cross_entropy_with_logits( + preds, tgts, + weights, reduction=reduction + ) + return per_entry_cross_ent + + +def weighted_sigmoid_binary_cross_entropy(preds, tgts, weights=None, + class_indices=None): + if weights is not None: + weights = weights.unsqueeze(-1) + if class_indices is not None: + weights *= ( + indices_to_dense_vector(class_indices, preds.shape[2]) + .view(1, 1, -1) + .type_as(preds) + ) + per_entry_cross_ent = F.binary_cross_entropy_with_logits(preds, tgts, weights) + return per_entry_cross_ent + + +def indices_to_dense_vector( + indices: torch.Tensor, + size: int, + indices_value: float = 1.0, + default_value: float = 0.0 +) -> torch.Tensor: + """ + Creates dense vector with indices set to specific value and rest to zeros. + + This function exists because it is unclear if it is safe to use + tf.sparse_to_dense(indices, [size], 1, validate_indices=False) + with indices which are not ordered. + This function accepts a dynamic size (e.g. `tf.shape(tensor)[0]`) + + :param indices: 1d Tensor with integer indices which are to be set to indices_values. + :param size: size of output Tensor. + :param indices_value: values of elements specified by indices in the output vector. + :param default_value: values of other elements in the output vector. + :return: dense 1D Tensor of shape [size] with indices set to indices_values and the + rest set to default_value. 
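+ 
+     Example::
+ 
+         >>> indices_to_dense_vector(torch.tensor([1, 3]), 5)
+         tensor([0., 1., 0., 1., 0.])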
+ """ + dense = torch.zeros(size).fill_(default_value) + dense[indices] = indices_value + + return dense + + +def cross_entroy_with_logits(preds, tgts, n_cls, weights=None, reduction='none'): + cared = tgts >= 0 + preds = preds[cared] + tgts = tgts[cared] + tgt_onehot = torch.zeros((len(tgts), n_cls), device=preds.device) + tgt_onehot[torch.arange(len(tgts), device=tgts.device), tgts.long()] = 1 + + loss = F.cross_entropy(preds, tgt_onehot, weight=weights, reduction=reduction) + return loss + + +def focal_loss(preds, tgts, weights=None, reduction='none', + gamma=2.0, alpha=0.25, use_sigmoid=True): + """ + + Parameters + ---------- + preds: FloatTensor(..., n_cls) + tgts: FloatTensor(..., n_cls) or LongTensor(...,) or LongTensor(...,1), largest label is background + weights: same as preds or tgts + ------- + """ + assert len(preds.shape) == len(tgts.shape) or len(preds.shape) - 1 == len(tgts.shape) + if use_sigmoid: + pred_sigmoid = torch.sigmoid(preds) + else: + pred_sigmoid = preds + + if preds.shape[-1] != tgts.shape[-1]: + num_classes = preds.size(1) + one_hot_tgts = F.one_hot(tgts, num_classes=num_classes + 1) + one_hot_tgts = one_hot_tgts[:, :num_classes] + else: + one_hot_tgts = tgts + + alpha_weight = one_hot_tgts * alpha + (1 - one_hot_tgts) * (1 - alpha) + pt = one_hot_tgts * (1.0 - pred_sigmoid) + (1.0 - one_hot_tgts) * pred_sigmoid + focal_weight = alpha_weight * torch.pow(pt, gamma) + + bce_loss = torch.clamp(preds, min=0) - preds * one_hot_tgts + \ + torch.log1p(torch.exp(-torch.abs(preds))) + + loss = focal_weight * bce_loss + if weights is None: + return loss + elif weights.shape.__len__() < preds.shape.__len__(): + weights = weights.unsqueeze(-1) + + assert weights.shape.__len__() == loss.shape.__len__() + + return loss * weights \ No newline at end of file diff --git a/cosense3d/modules/losses/edl.py b/cosense3d/modules/losses/edl.py new file mode 100644 index 00000000..8a18d629 --- /dev/null +++ b/cosense3d/modules/losses/edl.py @@ -0,0 +1,196 @@ +import torch +import torch.nn.functional as F + +from cosense3d.modules.losses import BaseLoss + + +def relu_evidence(y): + return F.relu(y) + + +def exp_evidence(y): + return torch.exp(torch.clamp(y, -6, 6)) + + +def softplus_evidence(y): + return F.softplus(y) + + +def kl_divergence(alpha, num_classes): + device = alpha.device + ones = torch.ones([1, num_classes], dtype=torch.float32, device=device) + sum_alpha = torch.sum(alpha, dim=1, keepdim=True) + first_term = ( + torch.lgamma(sum_alpha) + - torch.lgamma(alpha).sum(dim=1, keepdim=True) + # + torch.lgamma(ones).sum(dim=1, keepdim=True) + - torch.lgamma(ones.sum(dim=1, keepdim=True)) + ) + second_term = ( + (alpha - ones) + .mul(torch.digamma(alpha) - torch.digamma(sum_alpha)) + .sum(dim=1, keepdim=True) + ) + kl = first_term + second_term + return kl + + +def loglikelihood_loss(y, alpha): + S = torch.sum(alpha, dim=1, keepdim=True) + loglikelihood_err = torch.sum((y - (alpha / S)) ** 2, dim=1, keepdim=True) + loglikelihood_var = torch.sum( + alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True + ) + loglikelihood = loglikelihood_err + loglikelihood_var + return loglikelihood + + +def mse_loss(y, alpha, epoch_num, num_classes, annealing_step): + loglikelihood = loglikelihood_loss(y, alpha) + + annealing_coef = torch.min( + torch.tensor(1.0, dtype=torch.float32), + torch.tensor(epoch_num / annealing_step, dtype=torch.float32), + ) + + kl_alpha = (alpha - 1) * (1 - y) + 1 + kl_div = annealing_coef * kl_divergence(kl_alpha, num_classes) + return loglikelihood + kl_div 
+ + +def edl_mse_loss(preds, tgt, n_cls, temp, annealing_step, model_label='edl'): + """ + Calculate evidential loss + :param model_label: (str) a name to distinguish edl loss of different modules + :param preds: (N, n_cls) the logits of each class + :param tgt: (N,) labels with values from 0...(n_cls - 1) or (N, n_cls) + :param n_cls: (int) number of classes, including background + :param temp: current temperature for annealing of KL Divergence term of the loss + :param annealing_step: maximum annealing step + :return: + """ + evidence = relu_evidence(preds) + if len(tgt.shape) == 1: + cared = tgt >= 0 + evidence = evidence[cared] + tgt = tgt[cared] + tgt_onehot = F.one_hot(tgt.long(), n_cls).float() + elif len(tgt.shape) == 2 and tgt.shape[1] > 1: + cared = (tgt >= 0).all(dim=-1) + evidence = evidence[cared] + tgt_onehot = tgt[cared] + else: + raise NotImplementedError + alpha = evidence + 1 + loss = mse_loss(tgt_onehot, alpha, temp, n_cls, annealing_step).mean() + + ss = evidence.detach() + tt = tgt_onehot.detach() + acc = (torch.argmax(ss, dim=1) == torch.argmax(tt, dim=1)).sum() / len(tt) * 100 + loss_dict = { + f'{model_label}_loss': loss, + f'{model_label}_ac': acc, + } + + # Uncomment to log recall of all classes + # for cls in [1, 2]: + # loss_dict[f'acc{cls}'] = torch.logical_and( + # torch.argmax(ss, dim=1) == cls, tt == cls).sum() \ + # / max((tt == cls).sum(), 1) * 100 + + return loss_dict + + +def evidence_to_conf_unc(evidence, edl=True): + if edl: + # used edl loss + alpha = evidence + 1 + S = torch.sum(alpha, dim=-1, keepdim=True) + conf = torch.div(alpha, S) + K = evidence.shape[-1] + unc = torch.div(K, S) + # conf = torch.sqrt(conf * (1 - unc)) + unc = unc.squeeze(dim=-1) + else: + # use entropy as uncertainty + entropy = -evidence * torch.log2(evidence) + unc = entropy.sum(dim=-1) + # conf = torch.sqrt(evidence * (1 - unc.unsqueeze(-1))) + conf = evidence + return conf, unc + + +def pred_to_conf_unc(preds, activation='relu', edl=True): + if callable(activation): + evidence = activation(preds) + elif activation == 'relu': + evidence = relu_evidence(preds) + elif activation == 'exp': + evidence = exp_evidence(preds) + elif activation == 'sigmoid': + evidence = preds.sigmoid() + elif activation == 'softmax': + evidence = preds.softmax(dim=-1) + else: + evidence = preds + + if edl: + alpha = evidence + 1 + S = torch.sum(alpha, dim=-1, keepdim=True) + conf = torch.div(alpha, S) + K = evidence.shape[-1] + unc = torch.div(K, S) + # conf = torch.sqrt(conf * (1 - unc)) + unc = unc.squeeze(dim=-1) + else: + # use entropy as uncertainty + entropy = -evidence * torch.log2(evidence) + unc = entropy.sum(dim=-1) + # conf = torch.sqrt(evidence * (1 - unc.unsqueeze(-1))) + conf = evidence + return conf, unc + + +class EDLLoss(BaseLoss): + def __init__(self, + n_cls: int, + annealing_step: int, + **kwargs): + """ + Evidential loss. + + :param n_cls: number of classes, including background. + :param annealing_step: maximum temperature annealing step for KL regularization of EDL loss . 
+ :param kwargs: + """ + super().__init__(**kwargs) + self.n_cls = n_cls + self.annealing_step = annealing_step + if self.activation == 'relu': + self.activation = relu_evidence + elif self.activation == 'exp': + self.activation = exp_evidence + else: + self.activation = None + + def loss(self, preds, tgt, temp, n_cls_override=None): + if self.activation is None: + evidence = preds + else: + evidence = self.activation(preds) + if len(tgt.shape) == 1: + cared = tgt >= 0 + evidence = evidence[cared] + tgt = tgt[cared] + tgt_onehot = F.one_hot(tgt.long(), self.n_cls).float() + elif len(tgt.shape) == 2 and tgt.shape[1] > 1: + cared = (tgt >= 0).all(dim=-1) + evidence = evidence[cared] + tgt_onehot = tgt[cared] + else: + raise NotImplementedError + alpha = evidence + 1 + n_cls = self.n_cls if n_cls_override is None else n_cls_override + loss = mse_loss(tgt_onehot, alpha, temp, n_cls, self.annealing_step) + + return loss \ No newline at end of file diff --git a/cosense3d/modules/losses/focal_loss.py b/cosense3d/modules/losses/focal_loss.py new file mode 100644 index 00000000..9ca12a50 --- /dev/null +++ b/cosense3d/modules/losses/focal_loss.py @@ -0,0 +1,312 @@ +import torch +import torch.nn.functional as F + +from .base_loss import BaseLoss + + +def quality_focal_loss(pred: torch.Tensor, + target: tuple([torch.Tensor]), + beta: float = 2.0) -> torch.Tensor: + r""" + Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning + Qualified and Distributed Bounding Boxes for Dense Object Detection + `_. + + :param pred: Predicted joint representation of classification + and quality (IoU) estimation with shape (N, C), C is the number of + classes. + :param target: Target category label with shape (N,) + and target quality label with shape (N,). + :param beta: The beta parameter for calculating the modulating factor. + Defaults to 2.0. + :return: Loss tensor with shape (N,). + """ + assert len(target) == 2, """target for QFL must be a tuple of two elements, + including category label and quality label, respectively""" + # label denotes the category id, score denotes the quality score + label, score = target + + # negatives are supervised by 0 quality score + pred_sigmoid = pred.sigmoid() + scale_factor = pred_sigmoid + zerolabel = scale_factor.new_zeros(pred.shape) + loss = F.binary_cross_entropy_with_logits( + pred, zerolabel, reduction='none') * scale_factor.pow(beta) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = pred.size(1) + pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) + pos_label = label[pos].long() + # positives are supervised by bbox quality (IoU) score + scale_factor = score[pos] - pred_sigmoid[pos, pos_label] + loss[pos, pos_label] = F.binary_cross_entropy_with_logits( + pred[pos, pos_label], score[pos], + reduction='none') * scale_factor.abs().pow(beta) + + loss = loss.sum(dim=1, keepdim=False) + return loss + + +def quality_focal_loss_with_prob(pred: torch.Tensor, + target: tuple([torch.Tensor]), + beta: float = 2.0) -> torch.Tensor: + r""" + Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning + Qualified and Distributed Bounding Boxes for Dense Object Detection + `_. + + :param pred: Predicted joint representation of classification + and quality (IoU) estimation with shape (N, C), C is the number of + classes. + :param target: Target category label with shape (N,) + and target quality label with shape (N,). + :param beta: The beta parameter for calculating the modulating factor. + Defaults to 2.0. 
+ :return: Loss tensor with shape (N,). + """ + assert len(target) == 2, """target for QFL must be a tuple of two elements, + including category label and quality label, respectively""" + # label denotes the category id, score denotes the quality score + label, score = target + + # negatives are supervised by 0 quality score + pred_sigmoid = pred + scale_factor = pred_sigmoid + zerolabel = scale_factor.new_zeros(pred.shape) + loss = F.binary_cross_entropy( + pred, zerolabel, reduction='none') * scale_factor.pow(beta) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = pred.size(1) + pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) + pos_label = label[pos].long() + # positives are supervised by bbox quality (IoU) score + scale_factor = score[pos] - pred_sigmoid[pos, pos_label] + loss[pos, pos_label] = F.binary_cross_entropy( + pred[pos, pos_label], score[pos], + reduction='none') * scale_factor.abs().pow(beta) + + loss = loss.sum(dim=1, keepdim=False) + return loss + + +class QualityFocalLoss(BaseLoss): + def __init__(self, + use_sigmoid: bool=True, + beta: float=2.0, + activated: bool=False, + **kwargs): + r""" + Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss: + Learning Qualified and Distributed Bounding Boxes for Dense Object + Detection `_. + + :param use_sigmoid: Whether sigmoid operation is conducted in QFL. + Defaults to True. + :param beta: The beta parameter for calculating the modulating factor. + Defaults to 2.0. + :param activated: (optional) Whether the input is activated. + If True, it means the input has been activated and can be + treated as probabilities. Else, it should be treated as logits. + Defaults to False. + :param kwargs: + """ + super(QualityFocalLoss, self).__init__(**kwargs) + assert use_sigmoid is True, 'Only sigmoid in QFL supported now.' + self.use_sigmoid = use_sigmoid + self.beta = beta + self.activated = activated + + def loss(self, pred: torch.Tensor, target: torch.Tensor): + """Forward function. + + :param pred: Predicted joint representation of + classification and quality (IoU) estimation with shape (N, C), + C is the number of classes. + :param target: Target category label with shape + (N,) and target quality label with shape (N,). + :return: loss result. + """ + if self.use_sigmoid: + if self.activated: + loss_cls = quality_focal_loss_with_prob(pred, target, self.beta) + else: + loss_cls = quality_focal_loss(pred, target, self.beta) + else: + raise NotImplementedError + return loss_cls + + +class GaussianFocalLoss(BaseLoss): + """GaussianFocalLoss is a variant of focal loss. + + More details can be found in the `paper + `_ + Code is modified from `kp_utils.py + `_ # noqa: E501 + Please notice that the target in GaussianFocalLoss is a gaussian heatmap, + not 0/1 binary target. + """ + + def __init__(self, + alpha: float=2.0, + gamma: float=4.0, + reduction: str='mean', + loss_weight: float=1.0): + """ + + :param alpha: Power of prediction. + :param gamma: Power of target for negative samples. + :param reduction: Options are "none", "mean" and "sum". + :param loss_weight: Loss weight of current loss. + """ + super(GaussianFocalLoss, self).__init__() + self.alpha = alpha + self.gamma = gamma + self.reduction = reduction + self.loss_weight = loss_weight + + def loss(self, pred: torch.Tensor, target: torch.Tensor): + """`Focal Loss `_ for targets in gaussian + distribution. + + :param pred: The prediction. + :param target: The learning target of the prediction + in gaussian distribution. 
+ :return: loss result. + """ + eps = 1e-12 + pos_weights = target.eq(1) + neg_weights = (1 - target).pow(self.gamma) + pos_loss = -(pred + eps).log() * (1 - pred).pow(self.alpha) * pos_weights + neg_loss = -(1 - pred + eps).log() * pred.pow(self.alpha) * neg_weights + return pos_loss + neg_loss + + +def py_focal_loss_with_prob(pred: torch.Tensor, + target: torch.Tensor, + gamma: float=2.0, + alpha: float=0.25): + """PyTorch version of `Focal Loss `_. + Different from `py_sigmoid_focal_loss`, this function accepts probability + as input. + + :param pred: The prediction probability with shape (N, C), + C is the number of classes. + :param target: The learning label of the prediction. + :param gamma: The gamma for calculating the modulating + factor. Defaults to 2.0. + :param alpha: A balanced form for Focal Loss. + Defaults to 0.25. + :return: loss result. + """ + num_classes = pred.size(1) + target = F.one_hot(target, num_classes=num_classes + 1) + target = target[:, :num_classes] + + target = target.type_as(pred) + pt = (1 - pred) * target + pred * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy( + pred, target, reduction='none') * focal_weight + return loss + + +def py_sigmoid_focal_loss(pred: torch.Tensor, + target: torch.Tensor, + gamma: float=2.0, + alpha: float=0.25): + """PyTorch version of `Focal Loss `_. + Different from `py_sigmoid_focal_loss`, this function accepts probability + as input. + + :param pred: The prediction probability with shape (N, C), + C is the number of classes. + :param target: The learning label of the prediction. + :param gamma: The gamma for calculating the modulating + factor. Defaults to 2.0. + :param alpha: A balanced form for Focal Loss. + Defaults to 0.25. + :return: loss result. + """ + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + return loss + + +class FocalLoss(BaseLoss): + + def __init__(self, + use_sigmoid: bool=True, + gamma: float=2.0, + alpha: float=0.25, + activated: bool=False, + bg_idx: int=None, + **kwargs): + """`Focal Loss `_ + + :param use_sigmoid: Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + :param gamma: The gamma for calculating the modulating + factor. Defaults to 2.0. + :param alpha: A balanced form for Focal Loss. + Defaults to 0.25. + :param activated: Whether the input is activated. + If True, it means the input has been activated and can be + treated as probabilities. Else, it should be treated as logits. + Defaults to False. + :param bg_idx: background class index. + :param kwargs: + """ + super(FocalLoss, self).__init__(**kwargs) + assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.activated = activated + self.bg_idx = bg_idx + if use_sigmoid: + self.activation = 'sigmoid' + elif activated is False: + self.activation = 'softmax' + + def loss(self, pred: torch.Tensor, target: torch.Tensor, *args, **kwargs): + """ + :param pred: prediction. + :param target: ground truth targets. 
+ :param args: + :param kwargs: + :return: + """ + if self.use_sigmoid: + if self.activated: + calculate_loss_func = py_focal_loss_with_prob + else: + num_classes = pred.size(1) + if isinstance(target, torch.cuda.FloatTensor) and target.ndim == 1: + target = torch.stack([1 - target, target], dim=1) + else: + target = F.one_hot(target, num_classes=num_classes + 1) + if self.bg_idx is None: + target = target[:, :num_classes] + else: + target = target[:, [c for c in range(num_classes + 1) if c != self.bg_idx]] + calculate_loss_func = py_sigmoid_focal_loss + + loss_cls = calculate_loss_func( + pred, + target, + gamma=self.gamma, + alpha=self.alpha) + + else: + raise NotImplementedError + return loss_cls + + diff --git a/cosense3d/modules/losses/iou_loss.py b/cosense3d/modules/losses/iou_loss.py new file mode 100644 index 00000000..4afd69ee --- /dev/null +++ b/cosense3d/modules/losses/iou_loss.py @@ -0,0 +1,46 @@ +from .base_loss import BaseLoss +from cosense3d.utils.iou2d_calculator import bbox_overlaps + + +class IoULoss(BaseLoss): + def __init__(self, mode: str='log', eps:float=1e-6, **kwargs): + """ + + :param mode: Loss scaling mode, including "linear", "square", and "log". + Default: 'log' + :param eps: Eps to avoid log(0). + :param kwargs: + """ + super(IoULoss, self).__init__(**kwargs) + assert mode in ['linear', 'square', 'log'] + self.mode = mode + self.eps = eps + + def loss(self, pred, target): + ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=self.eps) + if self.mode == 'linear': + loss = 1 - ious + elif self.mode == 'square': + loss = 1 - ious ** 2 + elif self.mode == 'log': + loss = -ious.log() + else: + raise NotImplementedError + return loss + + +class GIoULoss(BaseLoss): + def __init__(self, eps: float=1e-7, **kwargs): + """ + + :param eps: Eps to avoid log(0). + :param kwargs: + """ + super(GIoULoss, self).__init__(**kwargs) + self.eps = eps + + def loss(self, pred, target): + gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=self.eps) + loss = 1 - gious + return loss + diff --git a/cosense3d/modules/losses/l1_loss.py b/cosense3d/modules/losses/l1_loss.py new file mode 100644 index 00000000..58565691 --- /dev/null +++ b/cosense3d/modules/losses/l1_loss.py @@ -0,0 +1,36 @@ +import torch +from .base_loss import BaseLoss + + +class L1Loss(BaseLoss): + + def loss(self, pred, target): + if target.numel() == 0: + return pred.sum() * 0 + + assert pred.size() == target.size() + loss = torch.abs(pred - target) + return loss + + +class SmoothL1Loss(BaseLoss): + def __init__(self, beta: float=1.0, **kwargs): + """ + :param beta: The threshold in the piecewise function. + Defaults to 1.0. 
+ :param kwargs: + """ + super(SmoothL1Loss, self).__init__(**kwargs) + assert beta > 0 + self.beta = beta + + def loss(self, pred, target): + if target.numel() == 0: + return pred.sum() * 0 + + assert pred.size() == target.size() + diff = torch.abs(pred - target) + loss = torch.where(diff < self.beta, + 0.5 * diff * diff / self.beta, + diff - 0.5 * self.beta) + return loss \ No newline at end of file diff --git a/cosense3d/modules/losses/vanilla_seg_loss.py b/cosense3d/modules/losses/vanilla_seg_loss.py new file mode 100644 index 00000000..443a9341 --- /dev/null +++ b/cosense3d/modules/losses/vanilla_seg_loss.py @@ -0,0 +1,63 @@ +import torch +import torch.nn as nn + +from einops import rearrange + + +class VanillaSegLoss(nn.Module): + def __init__(self, d_weights, s_weights, d_coe, s_coe, l_weights=50, **kwargs): + super(VanillaSegLoss, self).__init__() + + self.d_weights = d_weights + self.s_weights = s_weights + self.l_weights = l_weights + + self.d_coe = d_coe + self.s_coe = s_coe + + self.loss_func_static = \ + nn.CrossEntropyLoss( + weight=torch.Tensor([1., self.s_weights, self.l_weights]).cuda()) + self.loss_func_dynamic = \ + nn.CrossEntropyLoss( + weight=torch.Tensor([1., self.d_weights]).cuda()) + + def forward(self, static_pred=None, dynamic_pred=None, + static_gt=None, dynamic_gt=None): + """ + Perform loss function on the prediction. + + Parameters + ---------- + output_dict : dict + The dictionary contains the prediction. + + gt_dict : dict + The dictionary contains the groundtruth. + + Returns + ------- + Loss dictionary. + """ + loss_dict = {} + + if static_pred is not None: + # during training, only need to compute the ego vehicle's gt loss + # static_gt = rearrange(static_gt, 'b l h w -> (b l) h w') + # static_pred = rearrange(static_pred, 'b l c h w -> (b l) c h w') + static_loss = self.loss_func_static(static_pred, static_gt.long()) + loss_dict['static_loss'] = self.s_coe * static_loss + + if dynamic_pred is not None: + # dynamic_gt = rearrange(dynamic_gt, 'b l h w -> (b l) h w') + # dynamic_pred = rearrange(dynamic_pred, 'b l c h w -> (b l) c h w') + dynamic_loss = self.loss_func_dynamic(dynamic_pred, dynamic_gt.long()) + loss_dict['dynamic_loss'] = self.d_coe * dynamic_loss + + return loss_dict + + + + + + diff --git a/cosense3d/modules/necks/__init__.py b/cosense3d/modules/necks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/modules/necks/cpm_composer.py b/cosense3d/modules/necks/cpm_composer.py new file mode 100644 index 00000000..39a75dc1 --- /dev/null +++ b/cosense3d/modules/necks/cpm_composer.py @@ -0,0 +1,21 @@ +import torch +from torch import nn + +from cosense3d.modules import BaseModule, plugin + + +class KeypointComposer(BaseModule): + def __init__(self, vsa, train_from_epoch=0, **kwargs): + super().__init__(**kwargs) + self.train_from_epoch = train_from_epoch + self.vsa = plugin.build_plugin_module(vsa) + + def forward(self, preds, bev_feat, voxel_feat, points, **kwargs): + epoch = kwargs.get('epoch', self.train_from_epoch + 1) + if epoch < self.train_from_epoch: + return {self.scatter_keys[0]: [None for _ in preds]} + + res = self.vsa(preds, bev_feat, voxel_feat, points) + res = self.compose_result_list(res, len(preds)) + return {self.scatter_keys[0]: res} + diff --git a/cosense3d/modules/necks/dilation_spconv.py b/cosense3d/modules/necks/dilation_spconv.py new file mode 100644 index 00000000..ad2a288e --- /dev/null +++ b/cosense3d/modules/necks/dilation_spconv.py @@ -0,0 +1,161 @@ +import functools +import torch + +from 
cosense3d.modules import BaseModule, nn +from cosense3d.modules.utils.me_utils import mink_coor_limit, minkconv_conv_block, ME, indices2metric + + +class DilationSpconv(BaseModule): + def __init__(self, data_info, convs, d=2, n_layers=None, **kwargs): + super(DilationSpconv, self).__init__(**kwargs) + self.det_r = data_info.get('det_r', False) + self.lidar_range = data_info.get('lidar_range', False) + self.voxel_size = data_info['voxel_size'] + self.d = d + self.n_layers = n_layers + self.conv_args = convs + self.convs = [] + for k, conv_args in convs.items(): + self.convs.append(k) + setattr(self, f'convs_{k}', self.get_conv_layer(conv_args)) + stride = int(k[1]) + + if self.det_r: + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif self.lidar_range: + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, f'mink_xylim_{k}', mink_coor_limit(lr, self.voxel_size, stride)) # relevant to ME + + def to_gpu(self, gpu_id): + self.to(gpu_id) + return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm + + def forward(self, stensor_list, **kwargs): + out_dict = {} + for k in self.convs: + stride = int(k[1]) + coor, feat, ctr = self.compose_stensor(stensor_list, stride) + stensor2d = ME.SparseTensor( + coordinates=coor[:, :3].contiguous(), + features=feat, + tensor_stride=[stride] * 2 + ) + + stensor2d = getattr(self, f'convs_{k}')(stensor2d) + # after coordinate expansion, some coordinates will exceed the maximum detection + # range, therefore they are removed here. + xylim = getattr(self, f'mink_xylim_{k}') + mask = (stensor2d.C[:, 1] > xylim[0]) & (stensor2d.C[:, 1] <= xylim[1]) & \ + (stensor2d.C[:, 2] > xylim[2]) & (stensor2d.C[:, 2] <= xylim[3]) + + coor = stensor2d.C[mask] + feat = stensor2d.F[mask] + ctr = indices2metric(coor, self.voxel_size)[:, 1:] + + out_dict[k] = { + 'coor': coor, + 'feat': feat, + 'ctr': ctr + } + return self.format_output(out_dict, len(stensor_list)) + + def format_output(self, out_dict, B): + out_list = self.decompose_stensor(out_dict, B) + return {self.scatter_keys[0]: out_list} + + def get_conv_layer(self, args): + minkconv_layer = functools.partial( + minkconv_conv_block, d=self.d, bn_momentum=0.1, + ) + in_dim = args['in_dim'] + out_dim = args['out_dim'] + layers = [minkconv_layer(in_dim, out_dim, args['kernels'][0], 1, + expand_coordinates=True)] + for ks in args['kernels'][1:]: + layers.append(minkconv_layer(out_dim, out_dim, ks, 1, + expand_coordinates=True)) + if self.n_layers is not None and self.n_layers > len(args['kernels']): + for _ in range(self.n_layers - len(args['kernels'])): + layers.append(minkconv_layer(out_dim, out_dim, 3, 1, + expand_coordinates=False)) + return nn.Sequential(*layers) + + +class DilationSpconvAblation(BaseModule): + def __init__(self, data_info, convs, d=2, n_layers=None, **kwargs): + super(DilationSpconvAblation, self).__init__(**kwargs) + self.det_r = data_info.get('det_r', False) + self.lidar_range = data_info.get('lidar_range', False) + self.voxel_size = data_info['voxel_size'] + self.d = d + self.n_layers = n_layers + self.conv_args = convs + self.convs = [] + for k, conv_args in convs.items(): + self.convs.append(k) + setattr(self, f'convs_{k}', self.get_conv_layer(conv_args)) + stride = int(k[1]) + + if self.det_r: + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif self.lidar_range: + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, f'mink_xylim_{k}', mink_coor_limit(lr, self.voxel_size, stride)) # relevant to ME + + def to_gpu(self, gpu_id): 
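+         # As in DilationSpconv.to_gpu: move the module to the target device and return
+         # MinkowskiEngine's sync-batchnorm converter, presumably so the caller can convert
+         # batch-norm layers for distributed training.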
+ self.to(gpu_id) + return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm + + def forward(self, stensor_list, **kwargs): + out_dict = {} + for k in self.convs: + stride = int(k[1]) + coor, feat, ctr = self.compose_stensor(stensor_list, stride) + stensor2d = ME.SparseTensor( + coordinates=coor[:, :3].contiguous(), + features=feat, + tensor_stride=[stride] * 2 + ) + + stensor2d = getattr(self, f'convs_{k}')(stensor2d) + # after coordinate expansion, some coordinates will exceed the maximum detection + # range, therefore they are removed here. + xylim = getattr(self, f'mink_xylim_{k}') + mask = (stensor2d.C[:, 1] > xylim[0]) & (stensor2d.C[:, 1] <= xylim[1]) & \ + (stensor2d.C[:, 2] > xylim[2]) & (stensor2d.C[:, 2] <= xylim[3]) + + coor = stensor2d.C[mask] + feat = stensor2d.F[mask] + ctr = indices2metric(coor, self.voxel_size)[:, 1:] + + out_dict[k] = { + 'coor': coor, + 'feat': feat, + 'ctr': ctr + } + return self.format_output(out_dict, len(stensor_list)) + + def format_output(self, out_dict, B): + out_list = self.decompose_stensor(out_dict, B) + return {self.scatter_keys[0]: out_list} + + def get_conv_layer(self, args): + minkconv_layer = functools.partial( + minkconv_conv_block, d=self.d, bn_momentum=0.1, + ) + in_dim = args['in_dim'] + out_dim = args['out_dim'] + layers = [minkconv_layer(in_dim, out_dim, args['kernels'][0], 1, + expand_coordinates=False)] + for ks in args['kernels'][1:]: + layers.append(minkconv_layer(out_dim, out_dim, ks, 1, + expand_coordinates=False)) + if self.n_layers is not None and self.n_layers > len(args['kernels']): + for _ in range(self.n_layers - len(args['kernels'])): + layers.append(minkconv_layer(out_dim, out_dim, 3, 1, + expand_coordinates=False)) + return nn.Sequential(*layers) \ No newline at end of file diff --git a/cosense3d/modules/necks/formatting.py b/cosense3d/modules/necks/formatting.py new file mode 100644 index 00000000..f772be60 --- /dev/null +++ b/cosense3d/modules/necks/formatting.py @@ -0,0 +1,157 @@ +import torch +from torch import nn + +from cosense3d.modules import BaseModule + + +class DenseToSparse(BaseModule): + def __init__(self, + data_info, + strides=None, + **kwargs): + super(DenseToSparse, self).__init__(**kwargs) + self.lidar_range = data_info['lidar_range'] + self.voxel_size = data_info['voxel_size'] + self.strides = strides + + def forward(self, *args, **kwargs): + input_dict = {self.gather_keys[i]: x for i, x in enumerate(args)} + out_dict = {} + multi_scale_bev_feat = [] + for x in input_dict['multi_scale_bev_feat']: + tmp = {} + for s in self.strides: + tmp[f'p{s}'] = { + 'ctr': self.get_centers(s, device=x[f'p{s}'].device).flatten(0, 1), + 'feat': x[f'p{s}'].permute(1, 2, 0).flatten(0, 1) + } + multi_scale_bev_feat.append(tmp) + out_dict['multi_scale_bev_feat'] = multi_scale_bev_feat + + det_local_sparse = [] + for x in input_dict['det_local_dense']: + det_local_sparse.append({'scr': x['cls'].max(dim=0).values.flatten()}) + out_dict['det_local_sparse'] = det_local_sparse + + bev_local_sparse = [] + for x in input_dict['bev_local_dense']: + bev_local_sparse.append({'scr': x.max(dim=0).values.flatten()}) + out_dict['bev_local_sparse'] = bev_local_sparse + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # draw_points_boxes_plt( + # pc_range=self.lidar_range, + # points=input_dict['points'][0][:, :3].detach().cpu().numpy(), + # filename="/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/points.png" + # ) + # fig = plt.figure(figsize=(10, 5)) + # ax = fig.add_subplot() + # pts = 
multi_scale_bev_feat[0]['p2']['ctr'].detach().cpu().numpy() + # # colors = det_local_sparse[0]['scr'].sigmoid().detach().cpu().numpy() + # colors = multi_scale_bev_feat[0]['p2']['feat'].mean(dim=1).detach().cpu().numpy() + # ax.scatter(pts[:, 0], pts[:, 1], c=colors) + # plt.savefig("/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/scores.png") + return out_dict + + def get_centers(self, stride, device): + pix_x = self.voxel_size[0] * stride + pix_y = self.voxel_size[1] * stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x, device=device) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y, device=device) + pix_y * 0.5 + centers = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + return centers.permute(1, 0, 2) + + +class DetDenseToSparse(nn.Module): + def __init__(self, + data_info, + stride, + **kwargs): + super(DetDenseToSparse, self).__init__(**kwargs) + self.lidar_range = data_info['lidar_range'] + self.voxel_size = data_info['voxel_size'] + self.stride = stride + + def forward(self, input): + out_list = [] + for x in input: + # select the max of two anchors at each position + h, w = x['cls'].shape[1:] + cls, max_inds = x['cls'].permute(0, 2, 1).max(dim=0) + scr = cls.sigmoid() + reg = x['reg'].view(x['cls'].shape[0], -1, h, w).permute(3, 2, 0, 1) + ctr = self.get_centers() + out_list.append({ + 'ctr': ctr.flatten(0, 1), + 'cls': cls.flatten(0, 1), + 'reg': reg.flatten(0, 1), + 'scr': scr.flatten(0, 1) + }) + + return out_list + + def get_centers(self): + pix_x = self.voxel_size[0] * self.stride + pix_y = self.voxel_size[1] * self.stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y) + pix_y * 0.5 + centers = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + return centers + + +class FPVRCNNToLTS(BaseModule): + def __init__(self, + data_info, + strides=None, + **kwargs): + super(FPVRCNNToLTS, self).__init__(**kwargs) + self.lidar_range = data_info['lidar_range'] + self.voxel_size = data_info['voxel_size'] + + def forward(self, *args, **kwargs): + input_dict = {self.gather_keys[i]: x for i, x in enumerate(args)} + out_dict = {} + multi_scale_feat = [] + roi_local = [] + for x, y in zip(input_dict['multi_scale_bev_feat'], input_dict['keypoint_feat']): + multi_scale_feat.append({ + 'p2': { + 'ctr': y['point_coords'][:, 1:4], + 'feat': y['point_features'] + }, + 'p8': { + 'ctr': self.get_centers(32, device=x[f'p32'].device).flatten(0, 1), + 'feat': x['p32'].permute(1, 2, 0).flatten(0, 1) + } + }) + roi_local.append({'scr': y['point_scores']}) + out_dict['multi_scale_feat'] = multi_scale_feat + out_dict['roi_local'] = roi_local + + bev_local_sparse = [] + for x in input_dict['bev_local_dense']: + bev_local_sparse.append({'scr': x.max(dim=0).values.flatten()}) + out_dict['roi_global'] = bev_local_sparse + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # draw_points_boxes_plt( + # pc_range=self.lidar_range, + # points=input_dict['points'][0][:, :3].detach().cpu().numpy(), + # filename="/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/points.png" + # ) + # fig = plt.figure(figsize=(10, 5)) + # ax = fig.add_subplot() + # pts = multi_scale_bev_feat[0]['p2']['ctr'].detach().cpu().numpy() + # # colors = det_local_sparse[0]['scr'].sigmoid().detach().cpu().numpy() + # colors = multi_scale_bev_feat[0]['p2']['feat'].mean(dim=1).detach().cpu().numpy() + # ax.scatter(pts[:, 0], pts[:, 1], c=colors) + # 
plt.savefig("/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/scores.png") + return out_dict + + def get_centers(self, stride, device): + pix_x = self.voxel_size[0] * stride + pix_y = self.voxel_size[1] * stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x, device=device) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y, device=device) + pix_y * 0.5 + centers = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + return centers.permute(1, 0, 2) diff --git a/cosense3d/modules/necks/spatial_alignment.py b/cosense3d/modules/necks/spatial_alignment.py new file mode 100644 index 00000000..1e5a0437 --- /dev/null +++ b/cosense3d/modules/necks/spatial_alignment.py @@ -0,0 +1,89 @@ +import copy +import torch + +from cosense3d.modules import BaseModule +from cosense3d.modules.utils.common import pad_r +from cosense3d.modules.utils.localization_utils import register_points +from cosense3d.utils.pclib import tf2pose, project_points_by_matrix_torch + + +class SpatialAlignment(BaseModule): + def __init__(self, **kwargs): + super(SpatialAlignment, self).__init__(**kwargs) + + def forward(self, dets_local, roadline_pred, feats, **kwargs): + for det, rl, rl_ref, feat in zip(dets_local, roadline_pred, feats): + det_ctr = det['preds']['box'][:, :3] + rl_pts = self.roadline_map_to_points(rl) + + import matplotlib.pyplot as plt + pts0 = det_ctr.detach().cpu().numpy() + pts1 = rl_pts.detach().cpu().numpy() + plt.plot(pts0[:, 0], pts0[:, 1], 'g.') + plt.plot(pts1[:, 0], pts1[:, 1], 'r.') + plt.show() + plt.close() + + def roadline_map_to_points(self, roadline_map): + scr = roadline_map['cls'].sigmoid().squeeze() + pos = scr > 0.5 + return roadline_map['ctr'][pos] + + +class MapRegistration(BaseModule): + """ + Register local detected roadline points into global roadline map. + """ + def __init__(self, **kwargs): + super(MapRegistration, self).__init__(**kwargs) + self.seq_len = 4 + + def forward(self, roadline, road_line_ref, poses_err, poses_gt=None, **kwargs): + """ + Register local detected roadline points into global roadline map. + + :param roadline: dict, coor (Nx2, voxel indices), ctr (Nx2, voxel center coordinates in meter), + cls (NxC, classification logits) + :param road_line_ref: Nx2, ground-truth BEV roadline points in global world coordinates. + :param poses_err: 4x4, LiDARs' erroneous poses in global world coordinates. + :param poses_gt: 4x4, LiDARs' ground-truth poses in global world coordinates. + :param kwargs: + :return: + - poses_corrected: 4x4, corrected LiDAR poses. + - roadline_preds: Nx2, BEV roadline points in local LiDAR coordinates. 
+ """ + poses_corrected = [] + roadline_preds = [] + for i, rl in enumerate(roadline): + rl_ref = road_line_ref[i] # in world-frame + rl_pts = self.roadline_map_to_points(rl) + roadline_preds.append(rl_pts) + + if self.training: + poses_corrected.append(poses_gt[i]) + else: + pose = poses_err[i] + rl_pts = pad_r(copy.deepcopy(rl_pts), 0.0) + rl_ref = pad_r(rl_ref) + pose_corr, rl_pts_tf = register_points(rl_pts, rl_ref, pose) + pose_corr = torch.from_numpy(pose_corr).float().to(pose.device) + poses_corrected.append(pose_corr) + + # import matplotlib.pyplot as plt + # pts0 = rl_ref.detach().cpu().numpy() + # pts1 = project_points_by_matrix_torch(rl_pts, pose).detach().cpu().numpy() + # plt.plot(pts0[:, 0], pts0[:, 1], 'g.', markersize=1) + # plt.plot(pts1[:, 0], pts1[:, 1], 'r.', markersize=1) + # plt.plot(rl_pts_tf[:, 0], rl_pts_tf[:, 1], 'b.', markersize=1) + # plt.show() + # plt.close() + return {self.scatter_keys[0]: poses_corrected, + self.scatter_keys[1]: roadline_preds} + + def roadline_map_to_points(self, roadline_map): + """Parse roadline detection results to 2d BEV points.""" + scr = roadline_map['cls'].sigmoid().squeeze() + pos = scr > 0.5 + return roadline_map['ctr'][pos] + + diff --git a/cosense3d/modules/plugin/__init__.py b/cosense3d/modules/plugin/__init__.py new file mode 100644 index 00000000..d4d1e969 --- /dev/null +++ b/cosense3d/modules/plugin/__init__.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. Modified by Yunshuang Yuan. +import inspect +from typing import Dict, Tuple, Union +from importlib import import_module + +import torch.nn as nn +import re # type: ignore + + +def infer_abbr(class_type: type) -> str: + """Infer abbreviation from the class name. + + This method will infer the abbreviation to map class types to + abbreviations. + + Rule 1: If the class has the property "abbr", return the property. + Rule 2: Otherwise, the abbreviation falls back to snake case of class + name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``. + + :param class_type: The norm layer type. + :return: The inferred abbreviation. + """ + + def camel2snack(word): + """Convert camel case word into snack case. + + Modified from `inflection lib + `_. + + Example:: + + >>> camel2snack("FancyBlock") + 'fancy_block' + """ + + word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word) + word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word) + word = word.replace('-', '_') + return word.lower() + + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ # type: ignore + else: + return camel2snack(class_type.__name__) + + +def build_plugin_layer(cfg: Dict, + postfix: Union[int, str] = '', + **kwargs) -> Tuple[str, nn.Module]: + """Build plugin layer. + + :param cfg: cfg should contain: + + - type (str): identify plugin layer type. + - layer args: args needed to instantiate a plugin layer. + :param postfix: appended into norm abbreviation to + create named layer. Default: ''. + :param kwargs: + :return: The first one is the concatenation of + abbreviation and postfix. The second is the created plugin layer. 
+ """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + try: + pkg, cls = layer_type.rsplit('.', 1) + plugin_layer = import_module(pkg).get(cls) + except: + raise KeyError(f'Unrecognized plugin type {layer_type}') + + abbr = infer_abbr(plugin_layer) + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + layer = plugin_layer(**kwargs, **cfg_) + + return name, layer + + +def build_plugin_module(cfg: Dict): + cfg_ = cfg.copy() + type_ = cfg_.pop('type') + module_name, cls_name = type_.split('.') + module = import_module(f'{__package__}.{module_name}') + cls_inst = getattr(module, cls_name)(**cfg_) + return cls_inst \ No newline at end of file diff --git a/cosense3d/modules/plugin/attn.py b/cosense3d/modules/plugin/attn.py new file mode 100644 index 00000000..9005baf9 --- /dev/null +++ b/cosense3d/modules/plugin/attn.py @@ -0,0 +1,145 @@ +import torch +import torch.nn.functional as F +import numpy as np +from torch import nn + +from cosense3d.modules.utils.misc import SELayer_Linear +from cosense3d.modules.utils.positional_encoding import pos2posemb2d +from cosense3d.modules.utils.me_utils import indices2metric, metric2indices, update_me_essentials + + +class ScaledDotProductAttention(nn.Module): + """ + Scaled Dot-Product Attention proposed in "Attention Is All You Need" + Compute the dot products of the query with all keys, divide each by sqrt(dim), + and apply a softmax function to obtain the weights on the values + """ + def __init__(self, dim: int): + """ + :param dim: imention of attention + """ + super(ScaledDotProductAttention, self).__init__() + self.sqrt_dim = np.sqrt(dim) + + def forward(self, query, key, value): + """ + :param query: (batch, q_len, d_model) tensor containing projection vector for decoder. + :param key: (batch, k_len, d_model) tensor containing projection vector for encoder. + :param value: (batch, v_len, d_model) tensor containing features of the encoded input sequence. + :return: context, attn + - **context**: tensor containing the context vector from attention mechanism. + - **attn**: tensor containing the attention (alignment) from the encoder outputs. 
+ """ + score = torch.bmm(query, key.transpose(1, 2)) / self.sqrt_dim + attn = F.softmax(score, -1) + context = torch.bmm(attn, value) + return context + + +class NeighborhoodAttention(nn.Module): + def __init__(self, data_info, stride, emb_dim=128): + super().__init__() + self.stride = stride + update_me_essentials(self, data_info, self.stride) + self.lr = nn.Parameter(torch.tensor(self.lidar_range), requires_grad=False) + self.vs = nn.Parameter(torch.tensor(self.voxel_size), requires_grad=False) + # self.grid_size = ( + # round((lr[3] - lr[0]) / vs[0] / stride), + # round((lr[4] - lr[1]) / vs[1] / stride), + # ) + self.emb_dim = emb_dim + self.num_pos_feat = emb_dim // 2 + self.sqrt_dim = np.sqrt(emb_dim) + x = torch.arange(-1, 2) + self.nbrs = torch.stack(torch.meshgrid(x, x, indexing='ij'), + dim=-1).reshape(-1, 2) + self.nbrs = nn.Parameter(self.nbrs, requires_grad=False) + self.n_nbrs = len(self.nbrs) + + self.query_pos_encoder = nn.Sequential( + nn.Linear(emb_dim, emb_dim * 2), + nn.ReLU(), + nn.Linear(emb_dim * 2, emb_dim), + ) + self.value_pos_encoder = nn.Sequential( + nn.Linear(emb_dim, emb_dim * 2), + nn.ReLU(), + nn.Linear(emb_dim * 2, emb_dim), + ) + self.featurized_pe = SELayer_Linear(emb_dim) + + def coor_to_indices(self, coor): + inds = coor.clone() + inds[:, 1] = inds[:, 1] / self.stride - self.offset_sz_x + inds[:, 2] = inds[:, 2] / self.stride - self.offset_sz_y + return inds.long() + + def forward(self, ref_pts, ctr_coor, ctr_feat): + """ + + Parameters + ---------- + ref_pts LongTensor(Q, 3): 2d coordinates in metrics(batch_idx, x, y) + ctr_coor LongTensor(V, 3): 2d coordinates in indices (batch_idx, x, y) + ctr_feat FloatTensor(V, d): bev grid center point features + + Returns + ------- + out_features FloatTensor(Q, d): attended features + """ + Q = ref_pts.shape[0] + V, Vd = ctr_feat.shape + + ctr_pts = indices2metric(ctr_coor, self.vs) + ctr_inds = self.coor_to_indices(ctr_coor) + ref_coor = metric2indices(ref_pts, self.vs) + ref_inds = self.coor_to_indices(ref_coor) + query_pos = (ref_pts[:, 1:] - self.lr[0:2]) / (self.lr[3:5] - self.lr[0:2]) + value_pos = (ctr_pts[:, 1:] - self.lr[0:2]) / (self.lr[3:5] - self.lr[0:2]) + + qpos_emb = self.query_pos_encoder( + pos2posemb2d(query_pos, num_pos_feats=self.num_pos_feat)) + vpos_emb = self.value_pos_encoder( + pos2posemb2d(value_pos, num_pos_feats=self.num_pos_feat)) + vpos_emb = self.featurized_pe(vpos_emb, ctr_feat) + + q_inds, v_inds = self.get_nbr_mapping(ref_inds, ctr_inds) + # pad pos_embs with zeros at the 1st entry + # points outside the grid will retrieve the embedding in the 1st padded row + qpos_emb = torch.cat([torch.zeros_like(qpos_emb[:1]), qpos_emb], dim=0) + vpos_emb = torch.cat([torch.zeros_like(vpos_emb[:1]), vpos_emb], dim=0) + ctr_feat = torch.cat([torch.zeros_like(ctr_feat[:1]), ctr_feat], dim=0) + + score = (qpos_emb[q_inds] * vpos_emb[v_inds]).sum(dim=-1) / self.sqrt_dim + attn = F.softmax(score.view(-1, self.n_nbrs), dim=-1) + context = attn.unsqueeze(-1) * ctr_feat[v_inds].view(-1, self.n_nbrs, Vd) + return context.sum(1) + + def get_nbr_mapping(self, query_pos, value_pos): + B = query_pos[:, 0].max() + 1 + pad_width = 2 + query_pos[:, 1:] += pad_width + value_pos[:, 1:] += pad_width + query_inds = torch.arange(len(query_pos), dtype=torch.long) + value_inds = torch.arange(len(value_pos), dtype=torch.long) + + # index -1 indicates that this nbr is outside the grid range + value_map = - torch.ones((B, self.size_x + pad_width * 2, + self.size_y + pad_width * 2), dtype=torch.long) + 
value_map[value_pos[:, 0], + value_pos[:, 1], + value_pos[:, 2]] = value_inds + + query_inds_nbrs = query_pos.unsqueeze(dim=1).repeat(1, self.n_nbrs, 1) + query_inds_nbrs[..., 1:] += self.nbrs.view(1, -1, 2) + query_inds_nbrs = query_inds_nbrs.view(-1, 3) + mask = ((query_inds_nbrs >= 0).all(dim=-1) & + (query_inds_nbrs[:, 1] < self.size_x + pad_width * 2) & + (query_inds_nbrs[:, 2] < self.size_y + pad_width * 2)) + assert torch.logical_not(mask).sum() == 0 + query_inds_mapped = query_inds.unsqueeze(1).repeat(1, self.n_nbrs).view(-1) + value_inds_mapped = value_map[query_inds_nbrs[:, 0], + query_inds_nbrs[:, 1], + query_inds_nbrs[:, 2]] + # shift the overall indices by 1 step, index -1 will then become 0 + return query_inds_mapped + 1, value_inds_mapped + 1 diff --git a/cosense3d/modules/plugin/bev_rpn.py b/cosense3d/modules/plugin/bev_rpn.py new file mode 100644 index 00000000..e4d5ff9c --- /dev/null +++ b/cosense3d/modules/plugin/bev_rpn.py @@ -0,0 +1,103 @@ +import torch +from torch import nn +import torch.nn.functional as F + + +class Conv2d(nn.Module): + + def __init__(self, in_channels, out_channels, k, s, p, activation=True, + batch_norm=True): + super(Conv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=k, + stride=s, padding=p) + if batch_norm: + self.bn = nn.BatchNorm2d(out_channels) + else: + self.bn = None + self.activation = activation + + def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + if self.activation: + return F.relu(x, inplace=True) + else: + return x + + +class RPN(nn.Module): + def __init__(self, anchor_num=2): + super(RPN, self).__init__() + self.anchor_num = anchor_num + + self.block_1 = [Conv2d(128, 128, 3, 2, 1)] + self.block_1 += [Conv2d(128, 128, 3, 1, 1) for _ in range(3)] + self.block_1 = nn.Sequential(*self.block_1) + + self.block_2 = [Conv2d(128, 128, 3, 2, 1)] + self.block_2 += [Conv2d(128, 128, 3, 1, 1) for _ in range(5)] + self.block_2 = nn.Sequential(*self.block_2) + + self.block_3 = [Conv2d(128, 256, 3, 2, 1)] + self.block_3 += [nn.Conv2d(256, 256, 3, 1, 1) for _ in range(5)] + self.block_3 = nn.Sequential(*self.block_3) + + self.deconv_1 = nn.Sequential(nn.ConvTranspose2d(256, 256, 4, 4, 0), + nn.BatchNorm2d(256)) + self.deconv_2 = nn.Sequential(nn.ConvTranspose2d(128, 256, 2, 2, 0), + nn.BatchNorm2d(256)) + self.deconv_3 = nn.Sequential(nn.ConvTranspose2d(128, 256, 1, 1, 0), + nn.BatchNorm2d(256)) + + def forward(self, x): + x = self.block_1(x) + x_skip_1 = x + x = self.block_2(x) + x_skip_2 = x + x = self.block_3(x) + x_0 = self.deconv_1(x) + x_1 = self.deconv_2(x_skip_2) + x_2 = self.deconv_3(x_skip_1) + x = torch.cat((x_0, x_1, x_2), 1) + return x + + +class CustomRPN(nn.Module): + def __init__(self, strides=[2, 2, 2], down_sample=2, num_layers=3, in_channels=128, out_channels=256): + super(CustomRPN, self).__init__() + self.strides = strides + mid_channels = in_channels * 2 + self.n_blocks = len(strides) + up_stride = 1 + + for i, s in enumerate(self.strides): + channels = mid_channels if i == self.n_blocks - 1 else in_channels + block = [Conv2d(in_channels, channels, 3, s, 1)] + block += [Conv2d(channels, channels, 3, 1, 1) for _ in range(num_layers)] + setattr(self, f'block_{i + 1}', nn.Sequential(*block)) + up_stride *= s + stride = up_stride // down_sample + setattr(self, f'deconv_{self.n_blocks - i}', + nn.Sequential(nn.ConvTranspose2d(channels, mid_channels, stride, stride, 0), + nn.BatchNorm2d(mid_channels)) + ) + self.out_conv = 
nn.Sequential(nn.ConvTranspose2d(mid_channels * 3, out_channels, 1, 1, 0), + nn.BatchNorm2d(out_channels)) + + def forward(self, x): + ret_dict = {} + down_stride = 1 + for i, s in enumerate(self.strides): + x = getattr(self, f'block_{i + 1}')(x) + down_stride *= s + ret_dict[f'p{down_stride}'] = x + + out = [] + for i, s in enumerate(self.strides): + x = getattr(self, f'deconv_{i + 1}')(ret_dict[f'p{down_stride}']) + down_stride = down_stride // s + out.append(x) + out = self.out_conv(torch.cat(out, 1)) + + return out, ret_dict \ No newline at end of file diff --git a/cosense3d/modules/plugin/box_stamper.py b/cosense3d/modules/plugin/box_stamper.py new file mode 100644 index 00000000..b2621d33 --- /dev/null +++ b/cosense3d/modules/plugin/box_stamper.py @@ -0,0 +1,30 @@ +import torch +from torch import nn +from torch_scatter import scatter_mean + +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.utils.common import cat_coor_with_idx + + +class BoxTimeStamper(nn.Module): + def __init__(self, filter_empty_boxes=True, **kwargs): + super().__init__() + self.filter_empty_boxes = filter_empty_boxes + + def forward(self, preds, points_list): + boxes = torch.cat([preds['idx'].view(-1, 1), preds['box']], dim=-1) + points = cat_coor_with_idx(points_list) + + box_idx_of_pts = points_in_boxes_gpu(points[:, :4], boxes, + batch_size=len(points_list))[1] + mask = box_idx_of_pts >= 0 + inds = box_idx_of_pts[mask] + times = points[mask, -1] + mean_time = times.new_zeros(boxes.shape[:1]) + scatter_mean(times, inds, dim=0, out=mean_time) + preds['time'] = mean_time + + valid = inds.unique() + for k, v in preds.items(): + preds[k] = v[valid] + return preds \ No newline at end of file diff --git a/cosense3d/modules/plugin/cobevt.py b/cosense3d/modules/plugin/cobevt.py new file mode 100644 index 00000000..73ad9a68 --- /dev/null +++ b/cosense3d/modules/plugin/cobevt.py @@ -0,0 +1,514 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from collections import OrderedDict +from torch import einsum +from einops import rearrange, repeat, reduce + + +def generate_grid(height: int, width: int): + xs = torch.linspace(0, 1, width) + ys = torch.linspace(0, 1, height) + + indices = torch.stack(torch.meshgrid((xs, ys), indexing='xy'), 0) # 2 h w + indices = F.pad(indices, (0, 0, 0, 0, 0, 1), value=1) # 3 h w + indices = indices[None] # 1 3 h w + + return indices + + +def get_view_matrix(h=200, w=200, h_meters=100.0, w_meters=100.0, offset=0.0): + """ + copied from ..data.common but want to keep models standalone + """ + sh = h / h_meters + sw = w / w_meters + + return [ + [ 0., -sw, w/2.], + [-sh, 0., h*offset+h/2.], + [ 0., 0., 1.] + ] + + +class BEVEmbedding(nn.Module): + def __init__( + self, + dim: int, + sigma: int, + bev_height: int, + bev_width: int, + h_meters: int, + w_meters: int, + offset: int, + upsample_scales: list, + ): + """ + Only real arguments are: + + dim: embedding size + sigma: scale for initializing embedding + + The rest of the arguments are used for constructing the view matrix. + + In hindsight we should have just specified the view matrix in config + and passed in the view matrix... 
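+
+        Worked example (values follow ``get_view_matrix`` above): with
+        ``bev_height = bev_width = 200``, ``h_meters = w_meters = 100.0`` and
+        ``offset = 0.0``, the scale factors are ``sh = sw = 2`` pixels per meter and::
+
+            V = [[ 0, -2, 100],
+                 [-2,  0, 100],
+                 [ 0,  0,   1]]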
+ """ + super().__init__() + + # map from bev coordinates to ego frame + V = get_view_matrix(bev_height, bev_width, h_meters, w_meters, + offset) # 3 3 + V_inv = torch.FloatTensor(V).inverse() # 3 3 + + for i, scale in enumerate(upsample_scales): + # each decoder block upsamples the bev embedding by a factor of 2 + h = bev_height // scale + w = bev_width // scale + + # bev coordinates + grid = generate_grid(h, w).squeeze(0) + grid[0] = bev_width * grid[0] + grid[1] = bev_height * grid[1] + + grid = V_inv @ rearrange(grid, 'd h w -> d (h w)') # 3 (h w) + grid = rearrange(grid, 'd (h w) -> d h w', h=h, w=w) # 3 h w + # egocentric frame + self.register_buffer('grid%d'%i, grid, persistent=False) + + # 3 h w + self.learned_features = nn.Parameter( + sigma * torch.randn(dim, + bev_height//upsample_scales[0], + bev_width//upsample_scales[0])) # d h w + + def get_prior(self): + return self.learned_features + + +class Attention(nn.Module): + def __init__( + self, + dim, + dim_head = 32, + dropout = 0., + window_size = 25 + ): + super().__init__() + assert (dim % dim_head) == 0, 'dimension should be divisible by dimension per head' + + self.heads = dim // dim_head + self.scale = dim_head ** -0.5 + + self.to_qkv = nn.Linear(dim, dim * 3, bias = False) + + self.attend = nn.Sequential( + nn.Softmax(dim = -1), + nn.Dropout(dropout) + ) + + self.to_out = nn.Sequential( + nn.Linear(dim, dim, bias = False), + nn.Dropout(dropout) + ) + + # relative positional bias + + self.rel_pos_bias = nn.Embedding((2 * window_size - 1) ** 2, self.heads) + + pos = torch.arange(window_size) + grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij')) + grid = rearrange(grid, 'c i j -> (i j) c') + rel_pos = rearrange(grid, 'i ... -> i 1 ...') - rearrange(grid, 'j ... -> 1 j ...') + rel_pos += window_size - 1 + rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1) + + self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False) + + def forward(self, x): + batch, _, height, width, device, h = *x.shape, x.device, self.heads + + # flatten + + x = rearrange(x, 'b d h w -> b (h w) d') + + # project for queries, keys, values + + q, k, v = self.to_qkv(x).chunk(3, dim = -1) + + # split heads + + q, k, v = map(lambda t: rearrange(t, 'b n (h d ) -> b h n d', h = h), (q, k, v)) + + # scale + + q = q * self.scale + + # sim + + sim = einsum('b h i d, b h j d -> b h i j', q, k) + + # add positional bias + + bias = self.rel_pos_bias(self.rel_pos_indices) + sim = sim + rearrange(bias, 'i j h -> h i j') + + # attention + + attn = self.attend(sim) + + # aggregate + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + + # merge heads + + out = rearrange(out, 'b m (h w) d -> b h w (m d)', + h = height, w = width) + + # combine heads out + + out = self.to_out(out) + return rearrange(out, 'b h w d -> b d h w') + + +class CrossWinAttention(nn.Module): + def __init__(self, dim, heads, dim_head, qkv_bias, rel_pos_emb=False, norm=nn.LayerNorm): + super().__init__() + + self.scale = dim_head ** -0.5 + + self.heads = heads + self.dim_head = dim_head + self.rel_pos_emb = rel_pos_emb + + self.to_q = nn.Sequential(norm(dim), nn.Linear(dim, heads * dim_head, bias=qkv_bias)) + self.to_k = nn.Sequential(norm(dim), nn.Linear(dim, heads * dim_head, bias=qkv_bias)) + self.to_v = nn.Sequential(norm(dim), nn.Linear(dim, heads * dim_head, bias=qkv_bias)) + + self.proj = nn.Linear(heads * dim_head, dim) + + def add_rel_pos_emb(self, x): + return x + + def forward(self, q, k, v, skip=None): + """ + q: (b n X Y W1 
W2 d) + k: (b n x y w1 w2 d) + v: (b n x y w1 w2 d) + return: (b X Y W1 W2 d) + """ + assert k.shape == v.shape + _, view_size, q_height, q_width, q_win_height, q_win_width, _ = q.shape + _, _, kv_height, kv_width, _, _, _ = k.shape + assert q_height * q_width == kv_height * kv_width + + # flattening + q = rearrange(q, 'b n x y w1 w2 d -> b (x y) (n w1 w2) d') + k = rearrange(k, 'b n x y w1 w2 d -> b (x y) (n w1 w2) d') + v = rearrange(v, 'b n x y w1 w2 d -> b (x y) (n w1 w2) d') + + # Project with multiple heads + q = self.to_q(q) # b (X Y) (n W1 W2) (heads dim_head) + k = self.to_k(k) # b (X Y) (n w1 w2) (heads dim_head) + v = self.to_v(v) # b (X Y) (n w1 w2) (heads dim_head) + + # Group the head dim with batch dim + q = rearrange(q, 'b ... (m d) -> (b m) ... d', m=self.heads, d=self.dim_head) + k = rearrange(k, 'b ... (m d) -> (b m) ... d', m=self.heads, d=self.dim_head) + v = rearrange(v, 'b ... (m d) -> (b m) ... d', m=self.heads, d=self.dim_head) + + # Dot product attention along cameras + dot = self.scale * torch.einsum('b l Q d, b l K d -> b l Q K', q, k) # b (X Y) (n W1 W2) (n w1 w2) + # dot = rearrange(dot, 'b l n Q K -> b l Q (n K)') # b (X Y) (W1 W2) (n w1 w2) + + if self.rel_pos_emb: + dot = self.add_rel_pos_emb(dot) + att = dot.softmax(dim=-1) + + # Combine values (image level features). + a = torch.einsum('b n Q K, b n K d -> b n Q d', att, v) # b (X Y) (n W1 W2) d + a = rearrange(a, '(b m) ... d -> b ... (m d)', m=self.heads, d=self.dim_head) + a = rearrange(a, ' b (x y) (n w1 w2) d -> b n x y w1 w2 d', + x=q_height, y=q_width, w1=q_win_height, w2=q_win_width) + + # Combine multiple heads + z = self.proj(a) + + # reduce n: (b n X Y W1 W2 d) -> (b X Y W1 W2 d) + z = z.mean(1) # for sequential usage, we cannot reduce it! + + # Optional skip connection + if skip is not None: + z = z + skip + return z + + +class CrossViewSwapAttention(nn.Module): + def __init__( + self, + feat_height: int, + feat_width: int, + feat_dim: int, + dim: int, + index: int, + img_size: tuple, # (h, w) + qkv_bias: bool, + q_win_size: list, + feat_win_size: list, + heads: list, + dim_head: list, + bev_embedding_flag: list, + rel_pos_emb: bool = False, # to-do + no_image_features: bool = False, + skip: bool = True, + norm=nn.LayerNorm, + ): + super().__init__() + + # 1 1 3 h w + image_plane = generate_grid(feat_height, feat_width)[None] + image_plane[:, :, 0] *= img_size[1] + image_plane[:, :, 1] *= img_size[0] + + self.register_buffer('image_plane', image_plane, persistent=False) + + self.feature_linear = nn.Sequential( + nn.BatchNorm2d(feat_dim), + nn.ReLU(), + nn.Conv2d(feat_dim, dim, 1, bias=False)) + + if no_image_features: + self.feature_proj = None + else: + self.feature_proj = nn.Sequential( + nn.BatchNorm2d(feat_dim), + nn.ReLU(), + nn.Conv2d(feat_dim, dim, 1, bias=False)) + + self.bev_embed_flag = bev_embedding_flag[index] + if self.bev_embed_flag: + self.bev_embed = nn.Conv2d(2, dim, 1) + self.img_embed = nn.Conv2d(4, dim, 1, bias=False) + self.cam_embed = nn.Conv2d(4, dim, 1, bias=False) + + self.q_win_size = q_win_size[index] + self.feat_win_size = feat_win_size[index] + self.rel_pos_emb = rel_pos_emb + + self.cross_win_attend_1 = CrossWinAttention(dim, heads[index], dim_head[index], qkv_bias) + self.cross_win_attend_2 = CrossWinAttention(dim, heads[index], dim_head[index], qkv_bias) + self.skip = skip + # self.proj = nn.Linear(2 * dim, dim) + + self.prenorm_1 = norm(dim) + self.prenorm_2 = norm(dim) + self.mlp_1 = nn.Sequential(nn.Linear(dim, 2 * dim), nn.GELU(), nn.Linear(2 * dim, dim)) + 
self.mlp_2 = nn.Sequential(nn.Linear(dim, 2 * dim), nn.GELU(), nn.Linear(2 * dim, dim)) + self.postnorm = norm(dim) + + def pad_divisble(self, x, win_h, win_w): + """Pad the x to be divible by window size.""" + _, _, _, h, w = x.shape + h_pad, w_pad = ((h + win_h) // win_h) * win_h, ((w + win_w) // win_w) * win_w + padh = h_pad - h if h % win_h != 0 else 0 + padw = w_pad - w if w % win_w != 0 else 0 + return F.pad(x, (0, padw, 0, padh), value=0) + + def forward( + self, + index: int, + x: torch.FloatTensor, + bev: BEVEmbedding, + feature: torch.FloatTensor, + I_inv: torch.FloatTensor, + E_inv: torch.FloatTensor, + ): + """ + x: (b, c, H, W) + feature: (b, n, dim_in, h, w) + I_inv: (b, n, 3, 3) + E_inv: (b, n, 4, 4) + + Returns: (b, d, H, W) + """ + b, n, _, _, _ = feature.shape + _, _, H, W = x.shape + + pixel = self.image_plane # b n 3 h w + _, _, _, h, w = pixel.shape + + c = E_inv[..., -1:] # b n 4 1 + c_flat = rearrange(c, 'b n ... -> (b n) ...')[..., None] # (b n) 4 1 1 + c_embed = self.cam_embed(c_flat) # (b n) d 1 1 + + pixel_flat = rearrange(pixel, '... h w -> ... (h w)') # 1 1 3 (h w) + cam = I_inv @ pixel_flat # b n 3 (h w) + cam = F.pad(cam, (0, 0, 0, 1, 0, 0, 0, 0), value=1) # b n 4 (h w) + d = E_inv @ cam # b n 4 (h w) + d_flat = rearrange(d, 'b n d (h w) -> (b n) d h w', h=h, w=w) # (b n) 4 h w + d_embed = self.img_embed(d_flat) # (b n) d h w + + img_embed = d_embed - c_embed # (b n) d h w + img_embed = img_embed / (img_embed.norm(dim=1, keepdim=True) + 1e-7) # (b n) d h w + + world = getattr(bev, f'grid{index}')[:2] + + if self.bev_embed_flag: + # 2 H W + w_embed = self.bev_embed(world[None]) # 1 d H W + bev_embed = w_embed - c_embed # (b n) d H W + bev_embed = bev_embed / (bev_embed.norm(dim=1, keepdim=True) + 1e-7) # (b n) d H W + query_pos = rearrange(bev_embed, '(b n) ... -> b n ...', b=b, n=n) # b n d H W + + feature_flat = rearrange(feature, 'b n ... -> (b n) ...') # (b n) d h w + + if self.feature_proj is not None: + key_flat = img_embed + self.feature_proj(feature_flat) # (b n) d h w + else: + key_flat = img_embed # (b n) d h w + + val_flat = self.feature_linear(feature_flat) # (b n) d h w + + # Expand + refine the BEV embedding + if self.bev_embed_flag: + query = query_pos + x[:, None] + else: + query = x[:, None] # b n d H W + key = rearrange(key_flat, '(b n) ... -> b n ...', b=b, n=n) # b n d h w + val = rearrange(val_flat, '(b n) ... 
-> b n ...', b=b, n=n) # b n d h w + + # pad divisible + key = self.pad_divisble(key, self.feat_win_size[0], self.feat_win_size[1]) + val = self.pad_divisble(val, self.feat_win_size[0], self.feat_win_size[1]) + + # local-to-local cross-attention + query = rearrange(query, 'b n d (x w1) (y w2) -> b n x y w1 w2 d', + w1=self.q_win_size[0], w2=self.q_win_size[1]) # window partition + key = rearrange(key, 'b n d (x w1) (y w2) -> b n x y w1 w2 d', + w1=self.feat_win_size[0], w2=self.feat_win_size[1]) # window partition + val = rearrange(val, 'b n d (x w1) (y w2) -> b n x y w1 w2 d', + w1=self.feat_win_size[0], w2=self.feat_win_size[1]) # window partition + query = rearrange( + self.cross_win_attend_1( + query, key, val, + skip=rearrange(x, 'b d (x w1) (y w2) -> b x y w1 w2 d', + w1=self.q_win_size[0], w2=self.q_win_size[1]) if self.skip else None), + 'b x y w1 w2 d -> b (x w1) (y w2) d') # reverse window to feature + + query = query + self.mlp_1(self.prenorm_1(query)) + + x_skip = query + query = repeat(query, 'b x y d -> b n x y d', n=n) # b n x y d + + # local-to-global cross-attention + query = rearrange(query, 'b n (x w1) (y w2) d -> b n x y w1 w2 d', + w1=self.q_win_size[0], w2=self.q_win_size[1]) # window partition + key = rearrange(key, 'b n x y w1 w2 d -> b n (x w1) (y w2) d') # reverse window to feature + key = rearrange(key, 'b n (w1 x) (w2 y) d -> b n x y w1 w2 d', + w1=self.feat_win_size[0], w2=self.feat_win_size[1]) # grid partition + val = rearrange(val, 'b n x y w1 w2 d -> b n (x w1) (y w2) d') # reverse window to feature + val = rearrange(val, 'b n (w1 x) (w2 y) d -> b n x y w1 w2 d', + w1=self.feat_win_size[0], w2=self.feat_win_size[1]) # grid partition + query = rearrange( + self.cross_win_attend_2( + query, key, val, skip=rearrange(x_skip, 'b (x w1) (y w2) d -> b x y w1 w2 d', + w1=self.q_win_size[0], w2=self.q_win_size[1]) + if self.skip else None), + 'b x y w1 w2 d -> b (x w1) (y w2) d') # reverse grid to feature + + query = query + self.mlp_2(self.prenorm_2(query)) + + query = self.postnorm(query) + + query = rearrange(query, 'b H W d -> b d H W') + + return query + + + +class NaiveDecoder(nn.Module): + """ + A Naive decoder implementation + + Parameters + ---------- + params: dict + + Attributes + ---------- + num_ch_dec : list + The decoder layer channel numbers. + + num_layer : int + The number of decoder layers. 
+ + input_dim : int + The channel number of the input to + """ + def __init__(self, params): + super(NaiveDecoder, self).__init__() + + self.num_ch_dec = params['num_ch_dec'] + self.num_layer = params['num_layer'] + self.input_dim = params['input_dim'] + + assert len(self.num_ch_dec) == self.num_layer + + # decoder + self.convs = OrderedDict() + for i in range(self.num_layer-1, -1, -1): + # upconv_0 + num_ch_in = self.input_dim if i == self.num_layer-1\ + else self.num_ch_dec[i + 1] + num_ch_out = self.num_ch_dec[i] + + self.convs[("upconv", i, 0)] = nn.Conv2d( + num_ch_in, num_ch_out, 3, 1, 1) + self.convs[("norm", i, 0)] = nn.BatchNorm2d(num_ch_out) + self.convs[("relu", i, 0)] = nn.ReLU(True) + + # upconv_1 + self.convs[("upconv", i, 1)] = nn.Conv2d( + num_ch_out, num_ch_out, 3, 1, 1) + self.convs[("norm", i, 1)] = nn.BatchNorm2d(num_ch_out) + self.convs[("relu", i, 1)] = nn.ReLU(True) + self.decoder = nn.ModuleList(list(self.convs.values())) + + @staticmethod + def upsample(x): + """Upsample input tensor by a factor of 2 + """ + return F.interpolate(x, scale_factor=2, mode="nearest") + + def forward(self, x): + """ + Upsample to + + Parameters + ---------- + x : torch.tensor + The bev bottleneck feature, shape: (B, L, C1, H, W) + + Returns + ------- + Output features with (B, L, C2, H, W) + """ + b, l, c, h, w = x.shape + x = rearrange(x, 'b l c h w -> (b l) c h w') + + for i in range(self.num_layer-1, -1, -1): + x = self.convs[("upconv", i, 0)](x) + x = self.convs[("norm", i, 0)](x) + x = self.convs[("relu", i, 0)](x) + + x = self.upsample(x) + + x = self.convs[("upconv", i, 1)](x) + x = self.convs[("norm", i, 1)](x) + x = self.convs[("relu", i, 1)](x) + + x = rearrange(x, '(b l) c h w -> b l c h w', + b=b, l=l) + return x \ No newline at end of file diff --git a/cosense3d/modules/plugin/downsample_conv.py b/cosense3d/modules/plugin/downsample_conv.py new file mode 100644 index 00000000..45c85422 --- /dev/null +++ b/cosense3d/modules/plugin/downsample_conv.py @@ -0,0 +1,53 @@ +""" +Class used to downsample features by 3*3 conv +""" +import torch.nn as nn + + +class DoubleConv(nn.Module): + """ + Double convoltuion + """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int, + padding: bool): + super().__init__() + self.double_conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, + stride=stride, padding=padding), + nn.ReLU(inplace=True), + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + return self.double_conv(x) + + +class DownsampleConv(nn.Module): + def __init__(self, + in_channels, + kernel_sizes=[1], + dims=[256], + strides=[1], + paddings=[0]): + super(DownsampleConv, self).__init__() + self.layers = nn.ModuleList([]) + + for ksize, dim, stride, padding in zip( + kernel_sizes, dims, strides, paddings): + self.layers.append(DoubleConv(in_channels, + dim, + kernel_size=ksize, + stride=stride, + padding=padding)) + in_channels = dim + + def forward(self, x): + for i in range(len(self.layers)): + x = self.layers[i](x) + return x \ No newline at end of file diff --git a/cosense3d/modules/plugin/flash_attn.py b/cosense3d/modules/plugin/flash_attn.py new file mode 100644 index 00000000..6b58f41f --- /dev/null +++ b/cosense3d/modules/plugin/flash_attn.py @@ -0,0 +1,192 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2023 megvii-model. All Rights Reserved. 
+# ------------------------------------------------------------------------ +# Modified by Yunshuang Yuan +# ------------------------------------------------------------------------ +# flash-attention +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.init import ( + xavier_uniform_, + constant_, + xavier_normal_ +) +from torch.nn.functional import linear + +from einops import rearrange + + +from flash_attn.flash_attn_interface import flash_attn_unpadded_kvpacked_func, _get_block_size +from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis +from cosense3d.modules.utils.test_flash_attn import convert_flash_attn_S_to_softmax, \ + generate_random_padding_mask + + + +def flash_attn_unpadded_kvpacked_test(q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, dropout_p, softmax_scale, + causal, batch_size): + d = q.shape[-1] + device = q.device + output_unpad, sm_lse, S_dmask = flash_attn_unpadded_kvpacked_func( + q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + dropout_p, return_attn_probs=True, causal=causal, softmax_scale=softmax_scale + ) + query_padding_mask = generate_random_padding_mask(max_sq, batch_size, device, mode='full') + key_padding_mask = generate_random_padding_mask(max_sk, batch_size, device, mode='full') + S_dmask_converted = convert_flash_attn_S_to_softmax( + S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + return output_unpad, S_dmask_converted + + +def _in_projection_packed(q, k, v, w, b=None): + w_q, w_k, w_v = w.chunk(3) + if b is None: + b_q = b_k = b_v = None + else: + b_q, b_k, b_v = b.chunk(3) + return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v) + + +class FlashAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + """ + + def __init__(self, + softmax_scale: float=None, + attention_dropout: float=0.0, + return_attn_weights: float=False, + device: str=None, + dtype: type=None): + """ + + :param softmax_scale: The temperature to use for the softmax attention. + (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + :param attention_dropout: The dropout rate to apply to the attention + (default: 0.1) + :param return_attn_weights: + :param device: + :param dtype: + """ + super().__init__() + self.softmax_scale = softmax_scale + self.dropout_p = attention_dropout + self.fp16_enabled = True + self.return_attn_weights = return_attn_weights + + def forward(self, + q: torch.Tensor, + kv: torch.Tensor, + causal: bool=False, + key_padding_mask: torch.Tensor=None): + """Implements the multihead softmax attention. + + :param q: The tensor containing the query. (B, T, H, D) + :param kv: The tensor containing the key, and value. (B, S, 2, H, D) + :param causal: + :param key_padding_mask: a bool tensor of shape (B, S) + :return: + """ + # assert q.dtype in [torch.float16, torch.bfloat16] and kv.dtype in [torch.float16, torch.bfloat16] + assert q.is_cuda and kv.is_cuda + assert q.shape[0] == kv.shape[0] and q.shape[-2] == kv.shape[-2] and q.shape[-1] == kv.shape[-1] + + batch_size = q.shape[0] + seqlen_q, seqlen_k = q.shape[1], kv.shape[1] + if key_padding_mask is None: + q, kv = rearrange(q, 'b s ... -> (b s) ...'), rearrange(kv, 'b s ... 
-> (b s) ...') + max_sq, max_sk = seqlen_q, seqlen_k + cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, + device=q.device) + cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, + device=kv.device) + if self.training or not self.return_attn_weights: + output = flash_attn_unpadded_kvpacked_func( + q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) + attn_weights = None + else: + Q, K, V = q.permute(1, 0, 2), kv[:, 0].permute(1, 0, 2), kv[:, 1].permute(1, 0, 2) + attn_weights = torch.softmax((Q @ K.transpose(-2, -1) / math.sqrt(Q.size(-1))), dim=-1) + # attn_weights = torch.dropout(attn_weights, self.dropout_p, train=False) + output = attn_weights @ V + attn_weights = attn_weights.mean(dim=0) + output = output.permute(1, 0, 2) + + # output, attn_weights = flash_attn_unpadded_kvpacked_test( + # q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + # self.dropout_p if self.training else 0.0, + # softmax_scale=self.softmax_scale, causal=causal, batch_size=batch_size + # ) + output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) + attn_weights = rearrange(attn_weights, '(b s) ... -> b s ...', b=batch_size) + # attn_weights = attn_weights.mean(dim=1) + else: + nheads = kv.shape[-2] + q = rearrange(q, 'b s ... -> (b s) ...') + max_sq = seqlen_q + cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, + device=q.device) + x = rearrange(kv, 'b s two h d -> b s (two h d)') + x_unpad, indices, cu_seqlens_k, max_sk = unpad_input(x, key_padding_mask) + x_unpad = rearrange(x_unpad, 'nnz (two h d) -> nnz two h d', two=2, h=nheads) + output_unpad = flash_attn_unpadded_kvpacked_func( + q, x_unpad, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(output_unpad, '(b s) ... -> b s ...', b=batch_size) + attn_weights = None + + return output, attn_weights + + +class FlashMHA(nn.Module): + + def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0, + causal=False, device=None, dtype=None, **kwargs) -> None: + assert batch_first + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.embed_dim = embed_dim + self.causal = causal + self.bias = bias + + self.num_heads = num_heads + assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads" + self.head_dim = self.embed_dim // num_heads + assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8" + + self.in_proj_weight = nn.Parameter(torch.empty((3 * embed_dim, embed_dim))) + if bias: + self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.inner_attn = FlashAttention(attention_dropout=attention_dropout, **factory_kwargs) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self._reset_parameters() + + def _reset_parameters(self) -> None: + xavier_uniform_(self.in_proj_weight) + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) 
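+
+    # Usage sketch (shapes assumed from the forward docstring below, not part of the
+    # original file): q, k, v are (batch, seqlen, embed_dim) CUDA tensors, typically
+    # fp16/bf16. They are packed through one in-projection, reshaped to
+    # (batch, seqlen, num_heads, head_dim), k/v stacked along dim=2, attended by
+    # FlashAttention, and returned as (out_proj(context), attn_weights or None).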
+ + def forward(self, q, k, v, key_padding_mask=None): + """x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) + key_padding_mask: bool tensor of shape (batch, seqlen) + """ + # q, k, v = self.Wq(q), self.Wk(k), self.Wv(v) + q, k, v = _in_projection_packed(q, k, v, self.in_proj_weight, self.in_proj_bias) + q = rearrange(q, 'b s (h d) -> b s h d', h=self.num_heads) + k = rearrange(k, 'b s (h d) -> b s h d', h=self.num_heads) + v = rearrange(v, 'b s (h d) -> b s h d', h=self.num_heads) + kv = torch.stack([k, v], dim=2) + context, attn_weights = self.inner_attn(q, kv, key_padding_mask=key_padding_mask, causal=self.causal) + return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights diff --git a/cosense3d/modules/plugin/flash_attn_new.py b/cosense3d/modules/plugin/flash_attn_new.py new file mode 100644 index 00000000..d3320a3b --- /dev/null +++ b/cosense3d/modules/plugin/flash_attn_new.py @@ -0,0 +1,1050 @@ +# Copyright (c) 2023, Tri Dao. + +import math +from functools import partial + +import torch +import torch.nn as nn +from torch.nn.init import ( + xavier_uniform_, + constant_, + xavier_normal_ +) +from torch.nn.functional import linear +from einops import rearrange, repeat + +from flash_attn.utils.distributed import get_dim_for_local_rank + +try: + from flash_attn import ( + flash_attn_kvpacked_func, + flash_attn_qkvpacked_func, + flash_attn_varlen_kvpacked_func, + flash_attn_varlen_qkvpacked_func, + flash_attn_with_kvcache, + ) +except ImportError: + flash_attn_varlen_qkvpacked_func, flash_attn_varlen_kvpacked_func = None, None + flash_attn_qkvpacked_func, flash_attn_kvpacked_func = None, None + flash_attn_with_kvcache = None + +try: + from flash_attn.ops.fused_dense import ColumnParallelLinear, FusedDense, RowParallelLinear +except ImportError: + FusedDense, ColumnParallelLinear, RowParallelLinear = None, None, None + +try: + from flash_attn.layers.rotary import RotaryEmbedding +except ImportError: + RotaryEmbedding = None + + +# From https://github.com/ofirpress/attention_with_linear_biases/blob/4b92f28a005ead2567abe2359f633e73e08f3833/fairseq/models/transformer.py#L742 +def get_alibi_slopes(nheads): + def get_slopes_power_of_2(nheads): + start = 2 ** (-(2 ** -(math.log2(nheads) - 3))) + ratio = start + return [start * ratio**i for i in range(nheads)] + + if math.log2(nheads).is_integer(): + return get_slopes_power_of_2(nheads) + else: + closest_power_of_2 = 2 ** math.floor(math.log2(nheads)) + return ( + get_slopes_power_of_2(closest_power_of_2) + + get_alibi_slopes(2 * closest_power_of_2)[0::2][: nheads - closest_power_of_2] + ) + + +class FlashSelfAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + Arguments + --------- + softmax_scale: The temperature to use for the softmax attention. 
+ (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + attention_dropout: The dropout rate to apply to the attention + (default: 0.0) + """ + + def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, alibi_slopes=None, deterministic=False): + super().__init__() + assert flash_attn_varlen_qkvpacked_func is not None, "FlashAttention is not installed" + assert flash_attn_qkvpacked_func is not None, "FlashAttention is not installed" + self.causal = causal + self.softmax_scale = softmax_scale + self.drop = nn.Dropout(attention_dropout) + self.register_buffer("alibi_slopes", alibi_slopes, persistent=False) + self.deterministic = deterministic + + def forward(self, qkv, causal=None, cu_seqlens=None, max_seqlen=None): + """Implements the multihead softmax attention. + Arguments + --------- + qkv: The tensor containing the query, key, and value. + If cu_seqlens is None and max_seqlen is None, then qkv has shape (B, S, 3, H, D). + If cu_seqlens is not None and max_seqlen is not None, then qkv has shape + (total, 3, H, D), where total is the sum of the sequence lengths in the batch. + causal: if passed, will override self.causal + cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths + of the sequences in the batch, used to index into qkv. + max_seqlen: int. Maximum sequence length in the batch. + Returns: + -------- + out: (total, H, D) if cu_seqlens is not None and max_seqlen is not None, + else (B, S, H, D). + """ + assert qkv.dtype in [torch.float16, torch.bfloat16] + assert qkv.is_cuda + causal = self.causal if causal is None else causal + unpadded = cu_seqlens is not None + if unpadded: + assert cu_seqlens.dtype == torch.int32 + assert max_seqlen is not None + assert isinstance(max_seqlen, int) + return flash_attn_varlen_qkvpacked_func( + qkv, + cu_seqlens, + max_seqlen, + self.drop.p if self.training else 0.0, + softmax_scale=self.softmax_scale, + causal=causal, + alibi_slopes=self.alibi_slopes, + deterministic=self.deterministic, + ) + else: + return flash_attn_qkvpacked_func( + qkv, + self.drop.p if self.training else 0.0, + softmax_scale=self.softmax_scale, + causal=causal, + alibi_slopes=self.alibi_slopes, + deterministic=self.deterministic, + ) + + +class FlashCrossAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + Arguments + --------- + softmax_scale: The temperature to use for the softmax attention. + (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + attention_dropout: The dropout rate to apply to the attention + (default: 0.0) + """ + + def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, alibi_slopes=None, deterministic=False): + super().__init__() + assert flash_attn_varlen_kvpacked_func is not None, "FlashAttention is not installed" + assert flash_attn_kvpacked_func is not None, "FlashAttention is not installed" + self.causal = causal + self.softmax_scale = softmax_scale + self.drop = nn.Dropout(attention_dropout) + self.register_buffer("alibi_slopes", alibi_slopes, persistent=False) + self.deterministic = deterministic + + def forward( + self, + q, + kv, + causal=None, + cu_seqlens=None, + max_seqlen=None, + cu_seqlens_k=None, + max_seqlen_k=None, + ): + """Implements the multihead softmax attention. + Arguments + --------- + q: The tensor containing the query. (B, Sq, H, D) + kv: The tensor containing the key and value. (B, Sk, 2, H_k, D) + causal: if passed, will override self.causal + cu_seqlens: (batch_size + 1,), dtype torch.int32. 
The cumulative sequence lengths + of the sequences in the batch, used to index into q. + max_seqlen: int. Maximum sequence length in the batch of q. + cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths + of the sequences in the batch, used to index into kv. + max_seqlen_k: int. Maximum sequence length in the batch of k and v. + """ + assert q.dtype in [torch.float16, torch.bfloat16] + assert q.is_cuda and kv.is_cuda + causal = self.causal if causal is None else causal + unpadded = cu_seqlens is not None + if unpadded: + assert cu_seqlens.dtype == torch.int32 + assert max_seqlen is not None + assert isinstance(max_seqlen, int) + assert cu_seqlens_k is not None + assert cu_seqlens_k.dtype == torch.int32 + assert max_seqlen_k is not None + assert isinstance(max_seqlen, int) + return flash_attn_varlen_kvpacked_func( + q, + kv, + cu_seqlens, + cu_seqlens_k, + max_seqlen, + max_seqlen_k, + self.drop.p if self.training else 0.0, + softmax_scale=self.softmax_scale, + causal=causal, + alibi_slopes=self.alibi_slopes, + deterministic=self.deterministic, + ) + else: + batch_size, seqlen_q = q.shape[0], q.shape[1] + seqlen_k = kv.shape[1] + assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3] + return flash_attn_kvpacked_func( + q, + kv, + self.drop.p if self.training else 0.0, + causal=causal, + softmax_scale=self.softmax_scale, + alibi_slopes=self.alibi_slopes, + deterministic=self.deterministic, + ) + + +class SelfAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + Arguments + --------- + softmax_scale: The temperature to use for the softmax attention. + (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + attention_dropout: The dropout rate to apply to the attention + (default: 0.0) + """ + + def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0): + super().__init__() + self.causal = causal + self.softmax_scale = softmax_scale + self.drop = nn.Dropout(attention_dropout) + + def forward(self, qkv, causal=None, key_padding_mask=None): + """Implements the multihead softmax attention. + Arguments + --------- + qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) + causal: if passed, will override self.causal + key_padding_mask: boolean mask to apply to the attention weights. True means to keep, + False means to mask out. 
(B, S) + """ + batch_size, seqlen = qkv.shape[0], qkv.shape[1] + causal = self.causal if causal is None else causal + q, k, v = qkv.unbind(dim=2) + softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1]) + scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale) + if key_padding_mask is not None: + padding_mask = torch.full( + (batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device + ) + padding_mask.masked_fill_(key_padding_mask, 0.0) + # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess) + scores = scores + rearrange(padding_mask, "b s -> b 1 1 s") + if causal: + # "triu_tril_cuda_template" not implemented for 'BFloat16' + # So we have to construct the mask in float + causal_mask = torch.triu( + torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1 + ) + # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess) + scores = scores + causal_mask.to(dtype=scores.dtype) + attention = torch.softmax(scores, dim=-1, dtype=v.dtype) + attention_drop = self.drop(attention) + output = torch.einsum("bhts,bshd->bthd", attention_drop, v) + return output + + +class CrossAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + Arguments + --------- + softmax_scale: The temperature to use for the softmax attention. + (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + attention_dropout: The dropout rate to apply to the attention + (default: 0.0) + """ + + def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0): + super().__init__() + self.causal = causal + self.softmax_scale = softmax_scale + self.drop = nn.Dropout(attention_dropout) + + def forward(self, q, kv, causal=None, key_padding_mask=None): + """Implements the multihead softmax attention. + Arguments + --------- + q: The tensor containing the query. (B, Sq, H, D) + kv: The tensor containing the key and value. (B, Sk, 2, H_k, D) + causal: if passed, will override self.causal + key_padding_mask: boolean mask to apply to the attention weights. True means to keep, + False means to mask out. (B, Sk) + """ + batch_size, seqlen_q = q.shape[0], q.shape[1] + causal = self.causal if causal is None else causal + seqlen_k = kv.shape[1] + assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3] + if kv.shape[3] != q.shape[2]: # MQA/GQA + kv = repeat(kv, "... hkv d -> ... 
(hkv g) d", g=q.shape[2] // kv.shape[3]) + k, v = kv.unbind(dim=2) + softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1]) + scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale) + if key_padding_mask is not None: + padding_mask = torch.full( + (batch_size, seqlen_k), -10000.0, dtype=scores.dtype, device=scores.device + ) + padding_mask.masked_fill_(key_padding_mask, 0.0) + # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess) + scores = scores + rearrange(padding_mask, "b s -> b 1 1 s") + if causal: + # causal mask needs to take into account the difference between seqlen_q and seqlen_k + row_idx = rearrange( + torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1" + ) + col_idx = torch.arange(seqlen_k, device=kv.device, dtype=torch.long) + sk = ( + seqlen_k + if key_padding_mask is None + else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1") + ) + causal_mask = col_idx > row_idx + sk - seqlen_q + scores = scores.masked_fill(causal_mask, -10000.0) + attention = torch.softmax(scores, dim=-1, dtype=v.dtype) + attention_drop = self.drop(attention) + output = torch.einsum("bhts,bshd->bthd", attention_drop, v) + return output + + +class LinearResidual(nn.Linear): + """Wrap nn.Linear to return the residual as well. For compatibility with FusedDense.""" + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return super().forward(input), input + + +def _update_kv_cache(kv, inference_params, layer_idx): + """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)""" + # Pre-allocate memory for key-values for inference. + num_heads, head_dim = kv.shape[-2:] + if layer_idx not in inference_params.key_value_memory_dict: + kv_cache = torch.empty( + inference_params.max_batch_size, + inference_params.max_seqlen, + 2, + num_heads, + head_dim, + dtype=kv.dtype, + device=kv.device, + ) + inference_params.key_value_memory_dict[layer_idx] = kv_cache + else: + kv_cache = inference_params.key_value_memory_dict[layer_idx] + # Adjust key and value for inference + batch_start = inference_params.batch_size_offset + batch_end = batch_start + kv.shape[0] + sequence_start = inference_params.seqlen_offset + sequence_end = sequence_start + kv.shape[1] + assert batch_end <= kv_cache.shape[0] + assert sequence_end <= kv_cache.shape[1] + assert kv_cache is not None + kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv + return kv_cache[batch_start:batch_end, :sequence_end, ...] + + +class MHA(nn.Module): + """Multi-head self-attention and cross-attention""" + + def __init__( + self, + embed_dim, + num_heads, + num_heads_kv=None, + cross_attn=False, + qkv_proj_bias=True, + out_proj_bias=True, + dropout=0.0, + softmax_scale=None, + causal=False, + layer_idx=None, + dwconv=False, + rotary_emb_dim=0, + rotary_emb_base=10000.0, + rotary_emb_scale_base=None, + rotary_emb_interleaved=False, + use_alibi=False, + fused_bias_fc=False, + use_flash_attn=False, + return_residual=False, + checkpointing=False, + device=None, + dtype=None, + ) -> None: + """ + num_heads_kv: can be used to toggle MQA / GQA. If None, use num_heads. + return_residual: whether to return the input x along with the output. This is for + performance reason: for post-norm architecture, returning the input allows us + to fuse the backward of nn.Linear with the residual connection. 
+ """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.embed_dim = embed_dim + self.cross_attn = cross_attn + self.causal = causal + self.layer_idx = layer_idx + self.dwconv = dwconv + self.rotary_emb_dim = rotary_emb_dim + self.use_flash_attn = use_flash_attn + self.return_residual = return_residual + self.checkpointing = checkpointing + if use_alibi: + assert use_flash_attn, "ALiBi code path requires flash_attn" + alibi_slopes = torch.tensor(get_alibi_slopes(num_heads), device=device) + else: + alibi_slopes = None + + self.num_heads = num_heads + self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads + assert ( + self.num_heads % self.num_heads_kv == 0 + ), "num_heads must be divisible by num_heads_kv" + assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads" + self.head_dim = self.embed_dim // num_heads + qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv) + kv_dim = 2 * self.head_dim * self.num_heads_kv + + if self.rotary_emb_dim > 0: + assert not cross_attn, "MHA with rotary embedding does not support cross-attention yet" + assert RotaryEmbedding is not None, "rotary_emb is not installed" + self.rotary_emb = RotaryEmbedding( + self.rotary_emb_dim, + base=rotary_emb_base, + scale_base=rotary_emb_scale_base, + interleaved=rotary_emb_interleaved, + device=device, + ) + + if fused_bias_fc and FusedDense is None: + raise ImportError("fused_dense is not installed") + linear_cls = nn.Linear if not fused_bias_fc else FusedDense + linear_resid_cls = ( + LinearResidual if not fused_bias_fc else partial(FusedDense, return_residual=True) + ) + wqkv_cls = linear_cls if not self.return_residual else linear_resid_cls + inner_attn_cls = ( + partial(FlashSelfAttention, alibi_slopes=alibi_slopes) + if use_flash_attn + else SelfAttention + ) + inner_cross_attn_cls = ( + partial(FlashCrossAttention, alibi_slopes=alibi_slopes) + if use_flash_attn + else CrossAttention + ) + if not self.cross_attn: + self.Wqkv = wqkv_cls(embed_dim, qkv_dim, bias=qkv_proj_bias, **factory_kwargs) + else: + self.Wq = linear_cls(embed_dim, embed_dim, bias=qkv_proj_bias, **factory_kwargs) + self.Wkv = wqkv_cls(embed_dim, kv_dim, bias=qkv_proj_bias, **factory_kwargs) + if self.dwconv: + if self.num_heads_kv == self.num_heads: + self.dwconv_qkv = nn.Conv1d( + qkv_dim, qkv_dim, kernel_size=3, padding=2, groups=qkv_dim + ) + else: + self.dwconv_q = nn.Conv1d( + embed_dim, embed_dim, kernel_size=3, padding=2, groups=embed_dim + ) + self.dwconv_kv = nn.Conv1d(kv_dim, kv_dim, kernel_size=3, padding=2, groups=kv_dim) + self.inner_attn = inner_attn_cls( + causal=causal, + softmax_scale=softmax_scale, + attention_dropout=dropout, + ) + self.inner_cross_attn = inner_cross_attn_cls( + causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout + ) + self.out_proj = linear_cls(embed_dim, embed_dim, bias=out_proj_bias, **factory_kwargs) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None): + dtype = self.out_proj.weight.dtype if dtype is None else dtype + device = self.out_proj.weight.device + return torch.empty( + batch_size, + max_seqlen, + 2, + self.num_heads_kv, + self.head_dim, + dtype=dtype, + device=device, + ) + + def _update_kv_cache(self, kv, inference_params): + """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)""" + assert not self.dwconv, "Generation does not support dwconv yet" + assert self.layer_idx is not None, "Generation requires layer_idx in the constructor" 
+ return _update_kv_cache(kv, inference_params, self.layer_idx) + + def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params): + """ + Fast path that combine 3 steps: apply rotary to Q and K, update kv cache, and apply attention. + q: (batch_size, seqlen_q, nheads, head_dim) + kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim) + """ + assert inference_params is not None and inference_params.seqlen_offset > 0 + assert self.use_flash_attn + if self.rotary_emb_dim > 0: + assert self.rotary_emb.scale is None, "This code path does not support xPos" + self.rotary_emb._update_cos_sin_cache( + inference_params.max_seqlen, device=q.device, dtype=q.dtype + ) + rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached + else: + rotary_cos, rotary_sin = None, None + batch = q.shape[0] + kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None) + context = flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + rotary_cos=rotary_cos, + rotary_sin=rotary_sin, + cache_seqlens=cache_seqlens, + softmax_scale=self.inner_cross_attn.softmax_scale, + causal=self.inner_cross_attn.causal, + rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False, + alibi_slopes=alibi_slopes, + ) + return context + + def _update_kvcache_attention(self, q, kv, inference_params): + """Write kv to inference_params, then do attention""" + if ( + inference_params.seqlen_offset == 0 + or flash_attn_with_kvcache is None + or not self.use_flash_attn + ): + # TODO: this only uses seqlen_offset and not lengths_per_sample. + kv = self._update_kv_cache(kv, inference_params) + return self.inner_cross_attn(q, kv) + else: + batch = q.shape[0] + kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None) + return flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + cache_seqlens=cache_seqlens, + softmax_scale=self.inner_cross_attn.softmax_scale, + causal=self.inner_cross_attn.causal, + alibi_slopes=alibi_slopes, + ) + + def forward( + self, + x, + x_kv=None, + key_padding_mask=None, + cu_seqlens=None, + max_seqlen=None, + mixer_subset=None, + inference_params=None, + **kwargs, + ): + """ + Arguments: + x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if + cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total + is the is the sum of the sequence lengths in the batch. + x_kv: (batch, seqlen, hidden_dim), only applicable for cross-attention. If None, use x. + cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths + of the sequences in the batch, used to index into x. Only applicable when using + FlashAttention. + max_seqlen: int. Maximum sequence length in the batch. + key_padding_mask: boolean mask, True means to keep, False means to mask out. + (batch, seqlen). Only applicable when not using FlashAttention. + mixer_subset: for cross-attention only. If not None, will take a subset of x + before applying the query projection. 
Useful for e.g., ViT where we only care + about the CLS token in the last layer. + inference_params: for generation. Adapted from Megatron-LM (and Apex) + https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470 + """ + if cu_seqlens is not None: + assert max_seqlen is not None + assert key_padding_mask is None + assert self.use_flash_attn + assert not self.dwconv + assert self.rotary_emb_dim == 0 + if key_padding_mask is not None: + assert cu_seqlens is None + assert max_seqlen is None + assert not self.use_flash_attn + if inference_params is not None: + assert key_padding_mask is None + assert cu_seqlens is None and max_seqlen is None + assert not self.dwconv + + kwargs = ( + {"cu_seqlens": cu_seqlens, "max_seqlen": max_seqlen, **kwargs} + if self.use_flash_attn + else {"key_padding_mask": key_padding_mask, **kwargs} + ) + seqlen_offset = ( + 0 + if inference_params is None + else ( + inference_params.lengths_per_sample + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + ) + rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None + batch, seqlen = x.shape[:2] + if not self.cross_attn and self.num_heads_kv == self.num_heads: + assert x_kv is None and mixer_subset is None + if not self.return_residual: + qkv = self.Wqkv(x) + else: + qkv, x = self.Wqkv(x) + if self.dwconv: + qkv = rearrange( + self.dwconv_qkv(rearrange(qkv, "b s d -> b d s"))[..., :-2], "b d s -> b s d" + ).contiguous() + qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim) + if ( + inference_params is None + or inference_params.seqlen_offset == 0 + or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0) + or not self.use_flash_attn + ): + if self.rotary_emb_dim > 0: + qkv = self.rotary_emb( + qkv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen + ) + if inference_params is None: + if not self.checkpointing: + context = self.inner_attn(qkv, **kwargs) + else: + context = torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, **kwargs) + else: + context = self._update_kvcache_attention( + qkv[:, :, 0], qkv[:, :, 1:], inference_params + ) + else: + context = self._apply_rotary_update_kvcache_attention( + qkv[:, :, 0], qkv[:, :, 1:], inference_params + ) + else: + if self.cross_attn: + if not self.return_residual: + q = self.Wq(x if mixer_subset is None else x[:, mixer_subset]) + kv = self.Wkv(x_kv if x_kv is not None else x) + else: + if x_kv is not None: + kv, x_kv = self.Wkv(x_kv) + else: + kv, x = self.Wkv(x) + q = self.Wq(x if mixer_subset is None else x[:, mixer_subset]) + else: + assert self.num_heads_kv != self.num_heads + if not self.return_residual: + qkv = self.Wqkv(x) + else: + qkv, x = self.Wqkv(x) + q = qkv[..., : self.num_heads * self.head_dim] + kv = qkv[..., self.num_heads * self.head_dim :] + q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim) + kv = rearrange(kv, "... (two hkv d) -> ... 
two hkv d", two=2, d=self.head_dim) + if self.dwconv: + q = rearrange( + self.dwconv_q(rearrange(q, "b s d -> b d s"))[..., :-2], "b d s -> b s d" + ).contiguous() + kv = rearrange( + self.dwconv_kv(rearrange(kv, "b s d -> b d s"))[..., :-2], "b d s -> b s d" + ).contiguous() + if ( + inference_params is None + or inference_params.seqlen_offset == 0 + or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0) + or not self.use_flash_attn + ): + if self.rotary_emb_dim > 0: + q, kv = self.rotary_emb( + q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen + ) + if inference_params is None: + if not self.checkpointing: + context = self.inner_cross_attn(q, kv, **kwargs) + else: + context = torch.utils.checkpoint.checkpoint( + self.inner_cross_attn, q, kv, **kwargs + ) + else: + context = self._update_kvcache_attention(q, kv, inference_params) + else: + context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params) + out = self.out_proj(rearrange(context, "... h d -> ... (h d)")) + return out if not self.return_residual else (out, x) + + +class ParallelMHA(nn.Module): + """Multi-head self-attention and cross-attention""" + + def __init__( + self, + embed_dim, + num_heads, + process_group, + num_heads_kv=None, + qkv_proj_bias=True, + out_proj_bias=True, + dropout=0.0, + softmax_scale=None, + causal=False, + layer_idx=None, + rotary_emb_dim=0, + rotary_emb_base=10000.0, + rotary_emb_scale_base=None, + rotary_emb_interleaved=False, + use_alibi=False, + use_flash_attn=False, + checkpointing=False, + sequence_parallel=True, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.embed_dim = embed_dim + self.causal = causal + self.layer_idx = layer_idx + self.rotary_emb_dim = rotary_emb_dim + self.use_flash_attn = use_flash_attn + self.checkpointing = checkpointing + self.process_group = process_group + self.world_size = process_group.size() + self.local_rank = torch.distributed.get_rank(process_group) + + self.num_heads = num_heads + assert self.embed_dim % self.num_heads == 0, "embed_dim must be divisible by num_heads" + + self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads + assert ( + self.num_heads % self.num_heads_kv == 0 + ), "num_heads must be divisible by num_heads_kv" + + self.num_heads_per_rank = get_dim_for_local_rank( + self.num_heads, self.world_size, self.local_rank + ) + self.num_heads_kv_per_rank = get_dim_for_local_rank( + self.num_heads_kv, self.world_size, self.local_rank + ) + self.head_dim = self.embed_dim // num_heads + qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv) + + if use_alibi: + assert use_flash_attn, "ALiBi code path requires flash_attn" + num_heads_local = math.ceil(self.num_heads / self.world_size) + alibi_slopes = torch.tensor( + get_alibi_slopes(num_heads)[ + self.local_rank * num_heads_local : (self.local_rank + 1) * num_heads_local + ], + device=device, + ) + else: + alibi_slopes = None + + if self.rotary_emb_dim > 0: + assert RotaryEmbedding is not None, "rotary_emb is not installed" + self.rotary_emb = RotaryEmbedding( + self.rotary_emb_dim, + base=rotary_emb_base, + scale_base=rotary_emb_scale_base, + interleaved=rotary_emb_interleaved, + device=device, + ) + + if ColumnParallelLinear is None or RowParallelLinear is None: + raise ImportError("fused_dense is not installed") + self.Wqkv = ColumnParallelLinear( + embed_dim, + qkv_dim, + process_group, + bias=qkv_proj_bias, + sequence_parallel=sequence_parallel, + 
multiple_of=self.head_dim * (self.num_heads // self.num_heads_kv + 2), + **factory_kwargs, + ) + inner_attn_cls = ( + partial(FlashSelfAttention, alibi_slopes=alibi_slopes) + if use_flash_attn + else SelfAttention + ) + inner_cross_attn_cls = ( + partial(FlashCrossAttention, alibi_slopes=alibi_slopes) + if use_flash_attn + else CrossAttention + ) + self.inner_attn = inner_attn_cls( + causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout + ) + self.inner_cross_attn = inner_cross_attn_cls( + causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout + ) + self.out_proj = RowParallelLinear( + embed_dim, + embed_dim, + process_group, + bias=out_proj_bias, + sequence_parallel=sequence_parallel, + multiple_of=self.head_dim, + **factory_kwargs, + ) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None): + dtype = self.out_proj.weight.dtype if dtype is None else dtype + device = self.out_proj.weight.device + return torch.empty( + batch_size, + max_seqlen, + 2, + self.num_heads_kv_per_rank, + self.head_dim, + dtype=dtype, + device=device, + ) + + def _update_kv_cache(self, kv, inference_params): + """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)""" + assert self.layer_idx is not None, "Generation requires layer_idx in the constructor" + return _update_kv_cache(kv, inference_params, self.layer_idx) + + def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params): + """ + Fast path that combine 3 steps: apply rotary to Q and K, update kv cache, and apply attention. + q: (batch_size, seqlen_q, nheads, head_dim) + kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim) + """ + assert inference_params is not None and inference_params.seqlen_offset > 0 + assert self.use_flash_attn + if self.rotary_emb_dim > 0: + assert self.rotary_emb.scale is None, "This code path does not support xPos" + self.rotary_emb._update_cos_sin_cache( + inference_params.max_seqlen, device=q.device, dtype=q.dtype + ) + rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached + else: + rotary_cos, rotary_sin = None, None + batch = q.shape[0] + kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None) + context = flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + rotary_cos=rotary_cos, + rotary_sin=rotary_sin, + cache_seqlens=cache_seqlens, + softmax_scale=self.inner_cross_attn.softmax_scale, + causal=self.inner_cross_attn.causal, + rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False, + alibi_slopes=alibi_slopes, + ) + return context + + def _update_kvcache_attention(self, q, kv, inference_params): + """Write kv to inference_params, then do attention""" + if inference_params.seqlen_offset == 0 or not self.use_flash_attn: + # TODO: this only uses seqlen_offset and not lengths_per_sample. 
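+            # Editorial comment: this branch is the slow path, used for the prefill step
+            # (seqlen_offset == 0) or when FlashAttention is disabled; it first writes kv into
+            # the cache and then attends with self.inner_cross_attn over the returned
+            # cached keys/values.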
+ kv = self._update_kv_cache(kv, inference_params) + return self.inner_cross_attn(q, kv) + else: + batch = q.shape[0] + kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None) + context = flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + cache_seqlens=cache_seqlens, + softmax_scale=self.inner_cross_attn.softmax_scale, + causal=self.inner_cross_attn.causal, + alibi_slopes=alibi_slopes, + ) + return context + + def forward(self, x, seqlen=None, inference_params=None, **kwargs): + """ + Arguments: + x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if seqlen=None. + If seqlen is not None, x is (batch * seqlen, hidden_dim). This is so that when we + split x during sequence parallel, we split the batch * seqlen dimension + (in case batch is small). + """ + qkv = self.Wqkv(x) + if seqlen is not None: + qkv = rearrange(qkv, "(b s) ... -> b s ...", s=seqlen) + seqlen_offset = ( + 0 + if inference_params is None + else ( + inference_params.lengths_per_sample + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + ) + rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None + if self.num_heads_kv == self.num_heads: + qkv = rearrange(qkv, "b s (three h d) -> b s three h d", three=3, d=self.head_dim) + if ( + inference_params is None + or inference_params.seqlen_offset == 0 + or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0) + or not self.use_flash_attn + ): + if self.rotary_emb_dim > 0: + qkv = self.rotary_emb( + qkv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen + ) + if inference_params is None: + if not self.checkpointing: + context = self.inner_attn(qkv, **kwargs) + else: + context = torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, **kwargs) + else: + context = self._update_kvcache_attention( + qkv[:, :, 0], qkv[:, :, 1:], inference_params + ) + else: + context = self._apply_rotary_update_kvcache_attention( + qkv[:, :, 0], qkv[:, :, 1:], inference_params + ) + else: + q = rearrange( + qkv[..., : self.num_heads_per_rank * self.head_dim], + "... (h d) -> ... h d", + d=self.head_dim, + ) + kv = rearrange( + qkv[..., self.num_heads_per_rank * self.head_dim :], + "... (two hkv d) -> ... 
two hkv d", + two=2, + d=self.head_dim, + ) + if ( + inference_params is None + or inference_params.seqlen_offset == 0 + or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0) + or not self.use_flash_attn + ): + if self.rotary_emb_dim > 0: + q, kv = self.rotary_emb( + q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen + ) + if inference_params is None: + if not self.checkpointing: + context = self.inner_cross_attn(q, kv, **kwargs) + else: + context = torch.utils.checkpoint.checkpoint( + self.inner_cross_attn, q, kv, **kwargs + ) + else: + context = self._update_kvcache_attention(q, kv, inference_params) + else: + context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params) + context = rearrange(context, "b s h d -> b s (h d)") + if seqlen is not None: + context = rearrange(context, "b s d -> (b s) d") + out = self.out_proj(context) + return out + + +def _in_projection_packed(q, k, v, w, b=None): + w_q, w_k, w_v = w.chunk(3) + if b is None: + b_q = b_k = b_v = None + else: + b_q, b_k, b_v = b.chunk(3) + return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v) + + +class FlashMHA(nn.Module): + + def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0, + causal=False, device=None, dtype=None, **kwargs) -> None: + assert batch_first + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.embed_dim = embed_dim + self.causal = causal + self.bias = bias + + self.num_heads = num_heads + assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads" + self.head_dim = self.embed_dim // num_heads + assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8" + + self.in_proj_weight = nn.Parameter(torch.empty((3 * embed_dim, embed_dim))) + if bias: + self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.inner_attn = FlashCrossAttention( + causal=causal, + attention_dropout=attention_dropout) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self._reset_parameters() + + def _reset_parameters(self) -> None: + xavier_uniform_(self.in_proj_weight) + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) 
+ + def forward(self, q, k, v, key_padding_mask=None): + """x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) + key_padding_mask: bool tensor of shape (batch, seqlen) + """ + # q, k, v = self.Wq(q), self.Wk(k), self.Wv(v) + q, k, v = _in_projection_packed(q, k, v, self.in_proj_weight, self.in_proj_bias) + q = rearrange(q, 'b s (h d) -> b s h d', h=self.num_heads) + k = rearrange(k, 'b s (h d) -> b s h d', h=self.num_heads) + v = rearrange(v, 'b s (h d) -> b s h d', h=self.num_heads) + kv = torch.stack([k, v], dim=2) + context = self.inner_attn(q, kv, causal=self.causal) + return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), None \ No newline at end of file diff --git a/cosense3d/modules/plugin/fpn.py b/cosense3d/modules/plugin/fpn.py new file mode 100644 index 00000000..4527d2b6 --- /dev/null +++ b/cosense3d/modules/plugin/fpn.py @@ -0,0 +1,156 @@ +from torch import nn +import torch.nn.functional as F + +from cosense3d.modules.utils.conv import ConvModule +from cosense3d.modules.utils.init import xavier_init + + +class FPN(nn.Module): + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(FPN, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + self.lateral_convs.append(l_conv) + if i == 0 : + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + 
act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] += F.interpolate(laterals[i], + **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) if i==0 else laterals[i] for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/cosense3d/modules/plugin/gevbev_decoder.py b/cosense3d/modules/plugin/gevbev_decoder.py new file mode 100644 index 00000000..09a50da3 --- /dev/null +++ b/cosense3d/modules/plugin/gevbev_decoder.py @@ -0,0 +1,82 @@ +import torch +import torch_scatter +from torch import nn + +from cosense3d.modules.utils.misc import SELayer_Linear +from cosense3d.modules.utils.gaussian_utils import weighted_mahalanobis_dists +from cosense3d.modules.utils.me_utils import indices2metric, metric2indices, update_me_essentials + + +class GevBEVDecoder(nn.Module): + def __init__(self, data_info, stride, kernel=3, var0=0.1): + super().__init__() + update_me_essentials(self, data_info, stride) + self.lr = nn.Parameter(torch.tensor(self.lidar_range), requires_grad=False) + self.vs = nn.Parameter(torch.tensor(self.voxel_size), requires_grad=False) + self.var0 = [var0, var0] + x = torch.arange(kernel) - kernel // 2 + self.nbrs = torch.stack(torch.meshgrid(x, x, indexing='ij'), + dim=-1).reshape(-1, 2) + self.nbrs = nn.Parameter(self.nbrs, requires_grad=False) + self.n_nbrs = len(self.nbrs) + + def coor_to_indices(self, coor): + inds = coor.clone() + inds[:, 1] = inds[:, 1] / self.stride - self.offset_sz_x + inds[:, 2] = inds[:, 2] / self.stride - self.offset_sz_y + return inds.long() + + def forward(self, ref_pts, ctr_coor, ctr_reg): + """ + :param ref_pts: LongTensor(Q, 3) 2d coordinates in metrics(batch_idx, x, y) + :param ctr_coor: 
LongTensor(V, 3) 2d coordinates in indices (batch_idx, x, y) + :param ctr_reg: FloatTensor(V, d) bev grid center point regression result + + :return: out_evidence FloatTensor(Q, d): attended features + """ + reg = ctr_reg.relu() + reg_evi = reg[:, :2] + reg_var = reg[:, 2:].view(-1, 2, 2) + + ctr_pts = indices2metric(ctr_coor, self.vs) + ctr_inds = self.coor_to_indices(ctr_coor) + ref_coor = metric2indices(ref_pts, self.vs) + ref_inds = self.coor_to_indices(ref_coor) + + q_inds, v_inds, mask = self.get_nbr_mapping(ref_inds, ctr_inds) + + evidence = torch.zeros_like(ref_pts[:, :2]) + dists = ref_pts[q_inds[mask], 1:3] - ctr_pts[v_inds[mask], 1:3] + probs_weighted = weighted_mahalanobis_dists(reg_evi[v_inds[mask]], reg_var[v_inds[mask]], dists, self.var0) + torch_scatter.scatter(probs_weighted, q_inds[mask], + dim=0, out=evidence, reduce='sum') + return evidence.reshape(len(ref_pts), self.n_nbrs, 2) + + def get_nbr_mapping(self, query_pos, value_pos): + B = query_pos[:, 0].max() + 1 + pad_width = 2 + query_pos[:, 1:] += pad_width + value_pos[:, 1:] += pad_width + query_inds = torch.arange(len(query_pos), dtype=torch.long) + value_inds = torch.arange(len(value_pos), dtype=torch.long) + + # index -1 indicates that this nbr is outside the grid range + value_map = - torch.ones((B, self.size_x + pad_width * 2, + self.size_y + pad_width * 2), dtype=torch.long) + value_map[value_pos[:, 0], + value_pos[:, 1], + value_pos[:, 2]] = value_inds + + query_inds_nbrs = query_pos.unsqueeze(dim=1).repeat(1, self.n_nbrs, 1) + query_inds_nbrs[..., 1:] += self.nbrs.view(1, -1, 2) + query_inds_nbrs = query_inds_nbrs.view(-1, 3) + mask = ((query_inds_nbrs >= 0).all(dim=-1) & + (query_inds_nbrs[:, 1] < self.size_x + pad_width * 2) & + (query_inds_nbrs[:, 2] < self.size_y + pad_width * 2)) + assert torch.logical_not(mask).sum() == 0 + query_inds_mapped = query_inds.unsqueeze(1).repeat(1, self.n_nbrs).view(-1) + value_inds_mapped = value_map[query_inds_nbrs[:, 0], + query_inds_nbrs[:, 1], + query_inds_nbrs[:, 2]] + mask = torch.logical_and(query_inds_mapped >= 0, value_inds_mapped >= 0) + return query_inds_mapped, value_inds_mapped, mask diff --git a/cosense3d/modules/plugin/mink_spconv.py b/cosense3d/modules/plugin/mink_spconv.py new file mode 100644 index 00000000..e284c480 --- /dev/null +++ b/cosense3d/modules/plugin/mink_spconv.py @@ -0,0 +1,79 @@ +import functools +import torch + +from cosense3d.modules import BaseModule, nn +from cosense3d.modules.utils.me_utils import mink_coor_limit, minkconv_conv_block, ME + + +class Spconv(nn.Module): + def __init__(self, data_info, convs, d=2, dilation=False, **kwargs): + super(Spconv, self).__init__() + assert d == 2, 'only support dim=2' + self.det_r = data_info.get('det_r', False) + self.lidar_range = data_info.get('lidar_range', False) + self.voxel_size = data_info['voxel_size'] + self.d = d + self.dilation = dilation + self.convs = [] + for k, conv_args in convs.items(): + self.convs.append(k) + setattr(self, f'convs_{k}', self.get_conv_layer(conv_args)) + stride = int(k[1]) + + if self.det_r: + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif self.lidar_range: + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, f'mink_xylim_{k}', mink_coor_limit(lr, self.voxel_size, stride)) # relevant to ME + + def forward(self, stensor_dict, **kwargs): + out_dict = {} + for k in self.convs: + stride = int(k[1]) + stensor2d = self.get_2d_stensor(stensor_dict, stride) + + stensor2d = getattr(self, f'convs_{k}')(stensor2d) + # after 
coordinate expansion, some coordinates will exceed the maximum detection + # range, therefore they are removed here. + xylim = getattr(self, f'mink_xylim_{k}') + mask = (stensor2d.C[:, 1] > xylim[0]) & (stensor2d.C[:, 1] <= xylim[1]) & \ + (stensor2d.C[:, 2] > xylim[2]) & (stensor2d.C[:, 2] <= xylim[3]) + + coor = stensor2d.C[mask] + feat = stensor2d.F[mask] + + out_dict[k] = { + 'coor': coor, + 'feat': feat + } + return out_dict + + def get_2d_stensor(self, stensor_dict, stride): + stensor = stensor_dict[f'p{stride}'] + if isinstance(stensor, ME.SparseTensor) and stensor.C.shape[-1] == 3: + return stensor + else: + if isinstance(stensor, dict): + coor, feat = stensor['coor'][:, :3], stensor['feat'] + elif isinstance(stensor, ME.SparseTensor): + coor, feat = stensor.C[:, :3], stensor.F + return ME.SparseTensor( + coordinates=coor[:, :3].contiguous(), + features=feat, + tensor_stride=[stride] * 2 + ) + + def get_conv_layer(self, args): + minkconv_layer = functools.partial( + minkconv_conv_block, d=self.d, bn_momentum=0.1, + ) + in_dim = args['in_dim'] + out_dim = args['out_dim'] + layers = [minkconv_layer(in_dim, out_dim, args['kernels'][0], 1, + expand_coordinates=self.dilation)] + for ks in args['kernels'][1:]: + layers.append(minkconv_layer(out_dim, out_dim, ks, 1, + expand_coordinates=self.dilation)) + return nn.Sequential(*layers) \ No newline at end of file diff --git a/cosense3d/modules/plugin/naive_compressor.py b/cosense3d/modules/plugin/naive_compressor.py new file mode 100644 index 00000000..403fa0bf --- /dev/null +++ b/cosense3d/modules/plugin/naive_compressor.py @@ -0,0 +1,31 @@ +import torch.nn as nn + + +class NaiveCompressor(nn.Module): + """ + A very naive compression that only compress on the channel. + """ + def __init__(self, input_dim, compress_ratio): + super().__init__() + self.encoder = nn.Sequential( + nn.Conv2d(input_dim, input_dim//compress_ratio, kernel_size=3, + stride=1, padding=1), + nn.BatchNorm2d(input_dim//compress_ratio, eps=1e-3, momentum=0.01), + nn.ReLU() + ) + self.decoder = nn.Sequential( + nn.Conv2d(input_dim//compress_ratio, input_dim, kernel_size=3, + stride=1, padding=1), + nn.BatchNorm2d(input_dim, eps=1e-3, momentum=0.01), + nn.ReLU(), + nn.Conv2d(input_dim, input_dim, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(input_dim, eps=1e-3, + momentum=0.01), + nn.ReLU() + ) + + def forward(self, x): + x = self.encoder(x) + x = self.decoder(x) + + return x \ No newline at end of file diff --git a/cosense3d/modules/plugin/pillar_encoder.py b/cosense3d/modules/plugin/pillar_encoder.py new file mode 100644 index 00000000..296f2d34 --- /dev/null +++ b/cosense3d/modules/plugin/pillar_encoder.py @@ -0,0 +1,141 @@ +import torch +from torch import nn +import torch.nn.functional as F + +from cosense3d.modules.utils.conv import ConvModule +from cosense3d.modules.utils.init import xavier_init + + +class PFNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + use_norm=True, + last_layer=False): + super().__init__() + self.last_vfe = last_layer + self.use_norm = use_norm + if not self.last_vfe: + out_channels = out_channels // 2 + + if self.use_norm: + self.linear = nn.Linear(in_channels, out_channels, bias=False) + self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01) + else: + self.linear = nn.Linear(in_channels, out_channels, bias=True) + + self.part = 50000 + + def forward(self, inputs): + if inputs.shape[0] > self.part: + # nn.Linear performs randomly when batch size is too large + num_parts = inputs.shape[0] // self.part 
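+            # Editorial comment: process the pillars in chunks of self.part (50000). The list
+            # comprehension below iterates num_parts + 1 times so that the last slice picks up
+            # the remainder; it is simply empty when the count is an exact multiple of self.part.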
+ part_linear_out = [self.linear( + inputs[num_part * self.part:(num_part + 1) * self.part]) + for num_part in range(num_parts + 1)] + x = torch.cat(part_linear_out, dim=0) + else: + x = self.linear(inputs) + torch.backends.cudnn.enabled = False + x = self.norm(x.permute(0, 2, 1)).permute(0, 2, + 1) if self.use_norm else x + torch.backends.cudnn.enabled = True + x = F.relu(x) + x_max = torch.max(x, dim=1, keepdim=True)[0] + + if self.last_vfe: + return x_max + else: + x_repeat = x_max.repeat(1, inputs.shape[1], 1) + x_concatenated = torch.cat([x, x_repeat], dim=2) + return x_concatenated + + +class PillarEncoder(nn.Module): + def __init__(self, + features, + voxel_size, + lidar_range, + channels, + use_norm=True): + super(PillarEncoder, self).__init__() + self.voxel_size = nn.Parameter(torch.tensor(voxel_size), requires_grad=False) + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + self.offset = nn.Parameter(self.voxel_size / 2 + self.lidar_range[:3], + requires_grad=False) + self.num_point_features = sum( + [getattr(self, f"{f}_dim") for f in features]) + self.features = features + assert isinstance(channels, list) + self.channels = [self.num_point_features] + channels + self.out_channels = channels[-1] + self.use_norm = use_norm + self._init_layers(self.channels) + + def _init_layers(self, channels): + pfn_layers = [] + for i in range(len(channels) - 1): + in_filters = channels[i] + out_filters = channels[i + 1] + pfn_layers.append( + PFNLayer(in_filters, out_filters, self.use_norm, + last_layer=(i >= len(channels) - 2)) + ) + self.pfn_layers = nn.ModuleList(pfn_layers) + + def forward(self, voxel_features, coords, voxel_num_points): + points_mean = voxel_features[..., :3].sum(dim=1, keepdim=True) / \ + voxel_num_points.view(-1, 1, 1) + f_cluster = voxel_features[..., :3] - points_mean + + coords_metric = coords[:, [3, 2, 1]].unsqueeze(1) * self.voxel_size + self.offset + f_center = voxel_features[..., :3] - coords_metric + + features = self.compose_voxel_feature(voxel_features) + [f_cluster, f_center] + features = torch.cat(features, dim=-1) + + voxel_count = features.shape[1] + mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0) + features *= mask.unsqueeze(-1) + for pfn in self.pfn_layers: + features = pfn(features) + features = features.squeeze() + return features + + def compose_voxel_feature(self, voxel_features): + features = [] + if 'absolute_xyz' in self.features: + features.append(voxel_features[..., :3]) + if 'distance' in self.features: + features.append(torch.norm(voxel_features[..., :3], 2, -1, + keepdim=True)) + if 'intensity' in self.features: + assert voxel_features.shape[-1] >= 4 + features.append(voxel_features[..., 3:4]) + return features + + @staticmethod + def get_paddings_indicator(actual_num, max_num, axis=0): + actual_num = torch.unsqueeze(actual_num, axis + 1) + max_num_shape = [1] * len(actual_num.shape) + max_num_shape[axis + 1] = -1 + max_num = torch.arange(max_num, + dtype=torch.int, + device=actual_num.device).view(max_num_shape) + paddings_indicator = actual_num.int() > max_num + return paddings_indicator + + @property + def distance_dim(self): + return 1 + + @property + def absolute_xyz_dim(self): + return 6 + + @property + def xyz_dim(self): + return 3 + @property + def intensity_dim(self): + return 1 diff --git a/cosense3d/modules/plugin/ssfa.py b/cosense3d/modules/plugin/ssfa.py new file mode 100644 index 00000000..61cc562e --- /dev/null +++ b/cosense3d/modules/plugin/ssfa.py @@ -0,0 +1,104 @@ 
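+# Editorial note: this neck appears to follow the SSFA (spatial-semantic feature aggregation)
+# design from CIA-SSD: a spatial branch (bottom_up_block_0) and a downsampled semantic branch
+# (bottom_up_block_1) are fused with the attention weights produced by w_0/w_1 and a softmax.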
+import torch +from torch import nn + + +class SSFA(nn.Module): + def __init__(self, in_channels, out_channels=128, shrink_strides=None, shrink_channels=None): + super(SSFA, self).__init__() + self._num_input_features = in_channels # 128 + self.shrink_strides = shrink_strides + + seq = [nn.ZeroPad2d(1)] + get_conv_layers('Conv2d', 128, 128, + n_layers=3, kernel_size=[3, 3, 3], + stride=[1, 1, 1], padding=[0, 1, 1], + sequential=False) + self.bottom_up_block_0 = nn.Sequential(*seq) + self.bottom_up_block_1 = get_conv_layers('Conv2d', 128, 256, + n_layers=3, kernel_size=[3, 3, 3], + stride=[2, 1, 1], padding=[1, 1, 1]) + + self.trans_0 = get_conv_layers('Conv2d', 128, 128, + n_layers=1, kernel_size=[1], stride=[1], padding=[0]) + self.trans_1 = get_conv_layers('Conv2d', 256, 256, + n_layers=1, kernel_size=[1], stride=[1], padding=[0]) + + self.deconv_block_0 = get_conv_layers('ConvTranspose2d', 256, 128, + n_layers=1, kernel_size=[3], stride=[2], + padding=[1], output_padding=[1]) + self.deconv_block_1 = get_conv_layers('ConvTranspose2d', 256, 128, + n_layers=1, kernel_size=[3], stride=[2], + padding=[1], output_padding=[1]) + + self.conv_0 = get_conv_layers('Conv2d', out_channels, 128, + n_layers=1, kernel_size=[3], stride=[1], padding=[1]) + self.conv_1 = get_conv_layers('Conv2d', out_channels, 128, + n_layers=1, kernel_size=[3], stride=[1], padding=[1]) + + self.w_0 = get_conv_layers('Conv2d', out_channels, 1, + n_layers=1, kernel_size=[1], stride=[1], padding=[0], relu_last=False) + self.w_1 = get_conv_layers('Conv2d', out_channels, 1, + n_layers=1, kernel_size=[1], stride=[1], padding=[0], relu_last=False) + + if isinstance(shrink_strides, list): + assert len(shrink_channels) == len(shrink_strides) + shrink_convs = [] + in_channels = out_channels + for s, c in zip(shrink_strides, shrink_channels): + shrink_convs.append(nn.Conv2d(in_channels, c, 3, s, padding=1)) + in_channels = c + self.shrink_convs = nn.ModuleList(shrink_convs) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight, gain=1) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x_0 = self.bottom_up_block_0(x) + x_1 = self.bottom_up_block_1(x_0) + x_trans_0 = self.trans_0(x_0) + x_trans_1 = self.trans_1(x_1) + x_middle_0 = self.deconv_block_0(x_trans_1) + x_trans_0 + x_middle_1 = self.deconv_block_1(x_trans_1) + x_output_0 = self.conv_0(x_middle_0) + x_output_1 = self.conv_1(x_middle_1) + + x_weight_0 = self.w_0(x_output_0) + x_weight_1 = self.w_1(x_output_1) + x_weight = torch.softmax(torch.cat([x_weight_0, x_weight_1], dim=1), dim=1) + x_output = x_output_0 * x_weight[:, 0:1, :, :] + x_output_1 * x_weight[:, 1:, :, :] + + if self.shrink_strides is None: + return x_output.contiguous() + else: + assert isinstance(self.shrink_strides, list) + downx = 1 + ret_dict = {} + x = x_output + for i, s in enumerate(self.shrink_strides): + downx *= s + x = self.shrink_convs[i](x) + ret_dict[downx] = x + return x_output.contiguous(), ret_dict + + +def get_conv_layers(conv_name, in_channels, out_channels, n_layers, kernel_size, stride, + padding, relu_last=True, sequential=True, **kwargs): + """ + Build convolutional layers. 
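+    Each layer is the given conv type followed by BatchNorm2d, with ReLU after every layer except
+    (optionally) the last one, controlled by relu_last. For example (editorial sketch),
+    get_conv_layers('Conv2d', 64, 128, n_layers=2, kernel_size=[3, 3], stride=[1, 1],
+    padding=[1, 1]) returns an nn.Sequential of two Conv2d+BN+ReLU blocks (64->128, then 128->128).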
kernel_size, stride and padding should be a list with the lengths that match n_layers + """ + seq = [] + for i in range(n_layers): + seq.extend([getattr(nn, conv_name)(in_channels, out_channels, kernel_size[i], stride=stride[i], + padding=padding[i], bias=False, **{k: v[i] for k, v in kwargs.items()}), + nn.BatchNorm2d(out_channels, eps=1e-3, momentum=0.01)]) + if i < n_layers - 1 or relu_last: + seq.append(nn.ReLU()) + in_channels = out_channels + if sequential: + return nn.Sequential(*seq) + else: + return seq \ No newline at end of file diff --git a/cosense3d/modules/plugin/target_assigners.py b/cosense3d/modules/plugin/target_assigners.py new file mode 100644 index 00000000..1bd6ce94 --- /dev/null +++ b/cosense3d/modules/plugin/target_assigners.py @@ -0,0 +1,1630 @@ +import math +from abc import ABCMeta, abstractmethod +from functools import partial +from typing import List, Dict, Optional, Tuple + +import torch +from torch import nn +import torch_scatter +from scipy.optimize import linear_sum_assignment + +from cosense3d.utils.box_utils import (bbox_xyxy_to_cxcywh, + bbox_cxcywh_to_xyxy, + normalize_bbox, + boxes3d_to_standup_bboxes, + rotate_points_batch) +from cosense3d.utils.pclib import rotate_points_along_z_torch +from cosense3d.utils.iou2d_calculator import bbox_overlaps +from cosense3d.modules.utils.gaussian_utils import gaussian_2d +from cosense3d.modules.utils.gevbev_utils import draw_sample_evis, weighted_mahalanobis_dists +from cosense3d.modules.utils.me_utils import metric2indices, update_me_essentials +from cosense3d.modules.utils.box_coder import build_box_coder +from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu +from cosense3d.dataset.const import CoSenseBenchmarks as csb +from cosense3d.modules.utils.common import pad_r, pad_l, meshgrid +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.losses import pred_to_conf_unc +from cosense3d.utils.misc import PI + + +def sample_mining(scores: torch.Tensor, + labels: torch.Tensor, + dists=None, + sample_mining_thr=0.5, + max_sample_ratio=5, + max_num_sample=None): + """ + When only limited numbers of negative targets are sampled for training, + and the majority of the negative samples are ignored, then there is a + high probability that hard negative targets are also ignored. This will + weaken the model to learn from these hard negative targets and generate + a lot of false positives. + Therefore, this function mines the samples that have high predictive + scores as training targets. This function should be used after 'pos_neg_sampling'. + + :param scores: (N1, ...Nk) classification scores/confidences that the + sample belong to foreground. + :param labels: (N1..., Nk) class labels, -1 indicates ignore, 0 indicates negative, + positive numbers indicates classes. + :param dists: distances. + :param sample_mining_thr: score threshold for sampling + :param max_sample_ratio: `n_sample` / `n_pos_sample` + :param max_num_sample: maximum number of samples. 
+ :return: + """ + assert scores.ndim == labels.ndim + assert scores.shape == labels.shape + pred_pos = scores > sample_mining_thr + if dists is not None: + # only mine points that are not too close to the real positive samples + pred_pos[dists < 3] = False + not_cared = labels == -1 + sample_inds = torch.where(torch.logical_and(pred_pos, not_cared))[0] + n_pos = (labels > 0).sum() + max_num_sample = int(n_pos * max_sample_ratio) if max_num_sample is None else max_num_sample + if len(sample_inds) > max_num_sample: + sample_inds = sample_inds[torch.randperm(len(sample_inds))[:max_num_sample]] + labels[sample_inds] = 0 + return labels + + +def pos_neg_sampling(labels: torch.Tensor, pos_neg_ratio: float) -> torch.Tensor: + """ + Downsample negative targets. + + :param labels: class labels. + :param pos_neg_ratio: ratio = num_neg_samples / num_pos_samples. + :return: class labels with -1 labels to be ignored during training. + """ + pos = labels > 0 + neg = labels == 0 + n_neg_sample = pos.sum(dim=0) * pos_neg_ratio + if neg.sum() > n_neg_sample: + neg_inds = torch.where(neg)[0] + perm = torch.randperm(len(neg_inds))[n_neg_sample:] + labels[neg_inds[perm]] = -1 + return labels + + +class BaseAssigner(metaclass=ABCMeta): + """Base assigner.""" + + @abstractmethod + def assign(self, *args, **kwargs): + """Assign preds to targets.""" + + +class MatchCost: + """This class is modified from mmdet.""" + @staticmethod + def classification(cls_pred: torch.Tensor, + gt_labels: torch.Tensor, + weight: float=1.0) -> torch.Tensor: + """ + + :param cls_pred: Predicted classification logits, shape + (num_query, num_class). + :param gt_labels: Label of `gt_bboxes`, shape (num_gt,). + :param weight: loss_weight. + :return: cls_cost value with weight + """ + # Following the official DETR repo, contrary to the loss that + # NLL is used, we approximate it in 1 - cls_score[gt_label]. + # The 1 is a constant that doesn't change the matching, + # so it can be omitted. + cls_score = cls_pred.softmax(-1) + cls_cost = -cls_score[:, gt_labels] + return cls_cost * weight + + @staticmethod + def bboxl1(bbox_pred: torch.Tensor, + gt_bboxes: torch.Tensor, + weight: float=1., + box_format: str='xyxy') -> torch.Tensor: + """ + + :param bbox_pred: Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + (num_query, 4). + :param gt_bboxes: Ground truth boxes with normalized + coordinates (x1, y1, x2, y2). Shape (num_gt, 4). + :param weight: loss_weight. + :param box_format: 'xyxy' for DETR, 'xywh' for Sparse_RCNN. + :return: bbox_cost value with weight + """ + if box_format == 'xywh': + gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) + elif box_format == 'xyxy': + bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) + else: + raise NotImplementedError + bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) + return bbox_cost * weight + + @staticmethod + def giou(bboxes: torch.Tensor, + gt_bboxes: torch.Tensor, + weight: float=1.0): + """ + + :param bboxes: Predicted boxes with unnormalized coordinates + (x1, y1, x2, y2). Shape (num_query, 4). + :param gt_bboxes: Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape (num_gt, 4). + :param weight: loss weight. + :return: giou_cost value with weight + """ + # overlaps: [num_bboxes, num_gt] + overlaps = bbox_overlaps( + bboxes, gt_bboxes, mode="giou", is_aligned=False) + # The 1 is a constant that doesn't change the matching, so omitted. 
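+        # Editorial comment: GIoU lies in [-1, 1], so the negated value below is a cost in
+        # [-1, 1] where better-overlapping pairs receive a lower (more negative) cost.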
+ iou_cost = -overlaps + return iou_cost * weight + + @staticmethod + def iou(bboxes, gt_bboxes, weight=1.0): + """See giou""" + # overlaps: [num_bboxes, num_gt] + overlaps = bbox_overlaps( + bboxes, gt_bboxes, mode="iou", is_aligned=False) + # The 1 is a constant that doesn't change the matching, so omitted. + iou_cost = -overlaps + return iou_cost * weight + + @staticmethod + def l1(pred, gt, weight=1.0): + """L1 distance between pred and gt Tensors""" + cost = torch.cdist(pred, gt, p=1) + return cost * weight + + @staticmethod + def binary_focal_loss(cls_pred, gt_labels, weight=1., alpha=0.25, gamma=2, eps=1e-12,): + cls_pred = cls_pred.flatten(1) + gt_labels = gt_labels.flatten(1).float() + n = cls_pred.shape[1] + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + eps).log() * ( + 1 - alpha) * cls_pred.pow(gamma) + pos_cost = -(cls_pred + eps).log() * alpha * ( + 1 - cls_pred).pow(gamma) + + cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ + torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) + return cls_cost / n * weight + + @staticmethod + def focal_loss(cls_pred, gt_labels, weight=1., alpha=0.25, gamma=2, eps=1e-12,): + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + eps).log() * ( + 1 - alpha) * cls_pred.pow(gamma) + pos_cost = -(cls_pred + eps).log() * alpha * ( + 1 - cls_pred).pow(gamma) + + cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] + return cls_cost * weight + + def build(self, type, **kwargs): + return partial(getattr(self, type), **kwargs) + + +class HungarianAssigner2D(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost, regression iou cost and center2d l1 cost. + The assignment is done in the following steps, the order matters. + + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + """ + + def __init__(self, + cls_cost=dict(type='classification', weight=1.), + reg_cost=dict(type='bboxl1', weight=1.0), + iou_cost=dict(type='giou', weight=1.0), + centers2d_cost=dict(type='l1', weight=1.0)): + cost_builder = MatchCost() + self.cls_cost = cost_builder.build(**cls_cost) + self.reg_cost = cost_builder.build(**reg_cost) + self.iou_cost = cost_builder.build(**iou_cost) + self.centers2d_cost = cost_builder.build(**centers2d_cost) + + def assign(self, + bbox_pred, + cls_pred, + pred_centers2d, + gt_bboxes, + gt_labels, + centers2d, + img_size, + eps: float = 1e-7 + ): + """Computes one-to-one matching based on the weighted costs. + + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. 
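+
+        As a concrete example (editorial): with 300 query predictions and 5 ground-truth boxes,
+        the summed cost matrix has shape (300, 5); linear_sum_assignment returns 5 matched
+        (prediction, gt) pairs, those predictions receive assigned_gt_inds in 1..5, and the
+        remaining 295 stay 0 (background).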
+ + :param bbox_pred: Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + :param cls_pred: Predicted classification logits, shape + [num_query, num_class]. + :param pred_centers2d: prediction 2d center points. + :param gt_bboxes: ground truth bboxes. + :param gt_labels: Label of `gt_bboxes`, shape (num_gt,). + img_size: input image size. + :param centers2d: 2d center points. + :param img_size: input image size. + :param eps: A value added to the denominator for + numerical stability. Default 1e-7. + :return: + """ + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return num_gts, assigned_gt_inds, assigned_labels + img_h, img_w = img_size + factor = gt_bboxes.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + + # 2. compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalize_gt_bboxes = gt_bboxes / factor + reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) + # regression iou cost, defaultly giou is used in official DETR. + bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor + iou_cost = self.iou_cost(bboxes, gt_bboxes) + + # center2d L1 cost + normalize_centers2d = centers2d / factor[:, 0:2] + centers2d_cost = self.centers2d_cost(pred_centers2d, normalize_centers2d) + + # weighted sum of above four costs + cost = cls_cost + reg_cost + iou_cost + centers2d_cost + cost = torch.nan_to_num(cost, nan=100.0, posinf=100.0, neginf=-100.0) + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return num_gts, assigned_gt_inds, assigned_labels + + +class HungarianAssigner3D(BaseAssigner): + def __init__(self, + cls_cost=dict(type='focal_loss', weight=1.0), + reg_cost=dict(type='l1', weight=1.0), + iou_cost=dict(type='iou', weight=1.0)): + cost_builder = MatchCost() + self.cls_cost = cost_builder.build(**cls_cost) + self.reg_cost = cost_builder.build(**reg_cost) + self.iou_cost = cost_builder.build(**iou_cost) + + def assign(self, + bbox_pred, + cls_pred, + gt_bboxes, + gt_labels, + code_weights=None, + eps=1e-7): + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes,), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes,), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return num_gts, assigned_gt_inds, assigned_labels + # 2. 
compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalized_gt_bboxes = normalize_bbox(gt_bboxes) + if code_weights is not None: + bbox_pred = bbox_pred * code_weights + normalized_gt_bboxes = normalized_gt_bboxes * code_weights + + reg_cost = self.reg_cost(bbox_pred[:, :8], normalized_gt_bboxes[:, :8]) + + # weighted sum of above two costs + cost = cls_cost + reg_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + cost = torch.nan_to_num(cost, nan=100.0, posinf=100.0, neginf=-100.0) + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + + # # 5. align matched pred and gt + # aligned_tgt_boxes = torch.zeros_like(bbox_pred) + # assign_mask = assigned_gt_inds > 0 + # aligned_tgt_boxes[assign_mask] = normalized_gt_bboxes[assigned_gt_inds[assign_mask] - 1] + + # from projects.utils.vislib import draw_points_boxes_plt + # vis_boxes_pred = denormalize_bbox(bbox_pred[assign_mask], self.pc_range)[:, :-2] + # vis_boxes_pred[:, :2] /= code_weights[:2] + # vis_boxes_gt = denormalize_bbox(aligned_tgt_boxes[assign_mask], self.pc_range)[:, :-2] + # vis_boxes_gt[:, :2] /= code_weights[:2] + # draw_points_boxes_plt( + # pc_range=51.2, + # boxes_pred=vis_boxes_pred.detach().cpu().numpy(), + # bbox_pred_label=[str(i) for i in range(vis_boxes_pred.shape[0])], + # boxes_gt=vis_boxes_gt.detach().cpu().numpy(), + # bbox_gt_label=[str(i) for i in range(vis_boxes_gt.shape[0])], + # filename='/home/yuan/Downloads/tmp.png' + # ) + + return num_gts, assigned_gt_inds, assigned_labels + + +class HeatmapAssigner(BaseAssigner): + + @staticmethod + def draw_heatmap_gaussian(heatmap, center, radius, k=1): + """Get gaussian masked heatmap. + + Args: + heatmap (torch.Tensor): Heatmap to be masked. + center (torch.Tensor): Center coord of the heatmap. + radius (int): Radius of gaussian. + k (int, optional): Multiple of masked_gaussian. Defaults to 1. + + Returns: + torch.Tensor: Masked heatmap. 
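+
+        Note (editorial): with radius=2 a 5x5 (= 2 * radius + 1) Gaussian patch is generated and
+        blended into the heatmap around the center via an element-wise torch.max, clipped at the
+        map borders.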
+ """ + diameter = 2 * radius + 1 + gaussian = gaussian_2d((diameter, diameter), sigma=diameter / 6) + + x, y = int(center[0]), int(center[1]) + + height, width = heatmap.shape[0:2] + + left, right = min(x, radius), min(width - x, radius + 1) + top, bottom = min(y, radius), min(height - y, radius + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_gaussian = torch.from_numpy( + gaussian[radius - top:radius + bottom, + radius - left:radius + right]).to(heatmap.device, + torch.float32) + if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: + torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap) + return heatmap + + def assign(self, obj_centers2d, obj_bboxes, img_shape, stride): + img_h, img_w = img_shape[:2] + heatmap = torch.zeros(img_h // stride, img_w // stride, device=obj_centers2d.device) + if len(obj_centers2d) != 0: + l = obj_centers2d[..., 0:1] - obj_bboxes[..., 0:1] + t = obj_centers2d[..., 1:2] - obj_bboxes[..., 1:2] + r = obj_bboxes[..., 2:3] - obj_centers2d[..., 0:1] + b = obj_bboxes[..., 3:4] - obj_centers2d[..., 1:2] + bound = torch.cat([l, t, r, b], dim=-1) + radius = torch.ceil(torch.min(bound, dim=-1)[0] / 16) + radius = torch.clamp(radius, 1.0).cpu().numpy().tolist() + for center, r in zip(obj_centers2d, radius): + heatmap = self.draw_heatmap_gaussian(heatmap, center / 16, radius=int(r), k=1) + return heatmap + + +class BoxAnchorAssigner(BaseAssigner, torch.nn.Module): + def __init__(self, + box_size, + dirs, + voxel_size, + lidar_range, + stride, + box_coder, + pos_threshold=0.6, + neg_threshold=0.45, + score_thrshold=0.25, + ): + super().__init__() + self.voxel_size = voxel_size + self.lidar_range = lidar_range + self.num_anchors = len(dirs) + self.stride = stride + self.pos_threshold = pos_threshold + self.neg_threshold = neg_threshold + self.score_thrshold = score_thrshold + self.box_coder = build_box_coder(**box_coder) + anchors, standup_anchors = self.get_anchor_template(box_size, dirs) + self.anchors = nn.Parameter(anchors, requires_grad=False) + self.standup_anchors = nn.Parameter(standup_anchors, requires_grad=False) + + def get_anchor_template(self, box_size, dirs): + pix_x = self.voxel_size[0] * self.stride + pix_y = self.voxel_size[1] * self.stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y) + pix_y * 0.5 + xys = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + xys = xys.unsqueeze(2).repeat(1, 1, self.num_anchors, 1) + zs = - torch.ones_like(xys[..., :1]) + h, w = xys.shape[:2] + lwh = torch.tensor(box_size).reshape( + 1, 1, 1, -1).repeat(h, w, self.num_anchors, 1) + rs = torch.deg2rad(torch.tensor(dirs)).reshape( + 1, 1, -1, 1).repeat(h, w, 1, 1) + # (w, h, num_anchor, 7) --> (whn, 7) + anchors = torch.cat([xys, zs, lwh, rs], dim=-1) + self.anchor_shape = anchors.shape + anchors = anchors.view(-1, 7) + standup_anchors = boxes3d_to_standup_bboxes(anchors) + return anchors, standup_anchors + + def assign(self, gt_boxes): + """ + + Parameters + ---------- + gt_boxes Tensor(N, 7): [x, y, z, l, w, h, r, ...] 
+ + Returns + ------- + reg Tensor(H, W, num_anchors, code_size): box regression targets + """ + if len(gt_boxes) == 0: + labels = gt_boxes.new_full((self.standup_anchors.shape[0],), -1) + reg_tgt = gt_boxes.new_zeros((0, self.box_coder.code_size)) + dir_scores = gt_boxes.new_zeros((0, 4)) + # Todo dir_score, gt_boxes, correct shape + return labels, reg_tgt, dir_scores + + standup_boxes = boxes3d_to_standup_bboxes(gt_boxes[:, :7]) + ious = self.box_overlaps(self.standup_anchors, standup_boxes) + iou_max, max_inds = ious.max(dim=1) + top1_inds = torch.argmax(ious, dim=0) + + pos = iou_max > self.pos_threshold + pos_inds = torch.cat([top1_inds, torch.where(pos)[0]]).unique() + neg = iou_max < self.neg_threshold + neg[pos_inds] = False + + labels = gt_boxes.new_full((ious.shape[0],), -1) + labels[neg] = 0 + labels[pos_inds] = 1 + + aligned_gt_boxes = gt_boxes[max_inds[pos_inds], :7] + aligned_anchors = self.anchors[pos_inds] + reg_tgt, dir_score = self.box_coder.encode(aligned_anchors, aligned_gt_boxes) + + return labels, reg_tgt, dir_score + + def box_overlaps(self, boxes1, boxes2): + areas1 = (boxes1[:, 2] - boxes1[:, 0] + 1) * \ + (boxes1[:, 3] - boxes1[:, 1] + 1) + areas2 = (boxes2[:, 2] - boxes2[:, 0] + 1) * \ + (boxes2[:, 3] - boxes2[:, 1] + 1) + + boxes1_mat = boxes1.unsqueeze(1).repeat(1, boxes2.shape[0], 1) + boxes2_mat = boxes2.unsqueeze(0).repeat(boxes1.shape[0], 1, 1) + x_extend = torch.minimum(boxes1_mat[..., 2], boxes2_mat[..., 2]) - \ + torch.maximum(boxes1_mat[..., 0], boxes2_mat[..., 0]) + 1 + y_extend = torch.minimum(boxes1_mat[..., 3], boxes2_mat[..., 3]) - \ + torch.maximum(boxes1_mat[..., 1], boxes2_mat[..., 1]) + 1 + + overlaps = torch.zeros_like(boxes1_mat[..., 0]) + + pos = torch.logical_and(x_extend > 0, y_extend > 0) + intersection = x_extend[pos] * y_extend[pos] + union = (areas1.unsqueeze(1) + areas2.unsqueeze(0))[pos] - intersection + overlaps[pos] = intersection / union + + return overlaps + + def get_predictions(self, preds): + # roi = {'box': [], 'scr': [], 'lbl': [], 'idx': []} + roi = {} + B = len(preds['cls']) + pred_cls = preds['cls'].sigmoid().permute(0, 3, 2, 1).reshape(B, -1) + pred_reg = preds['reg'].permute(0, 3, 2, 1).reshape(B, -1, 7) + indices = torch.stack([torch.ones_like(pred_cls[0]) * i for i in range(B)], dim=0) + + anchors = self.anchors.unsqueeze(0).repeat(B, 1, 1) + pos = pred_cls > self.score_thrshold + + boxes_dec = self.box_coder.decode(anchors, pred_reg) + # remove abnormal boxes + mask = (boxes_dec[..., 3:6] > 0.1) & (boxes_dec[..., 3:6] < 10) + pos = torch.logical_and(pos, mask.all(dim=-1)) + + pred_cls = pred_cls[pos] + pred_box = boxes_dec[pos] + roi['scr'] = pred_cls + roi['box'] = pred_box + # TODO currently only support class car + roi['lbl'] = torch.zeros_like(pred_cls) + roi['idx'] = indices[pos] + + return roi + + +class BoxSparseAnchorAssigner(BaseAssigner, torch.nn.Module): + def __init__(self, + box_size, + dirs, + voxel_size, + lidar_range, + stride, + box_coder, + me_coor=True, + pos_threshold=0.6, + neg_threshold=0.45, + score_thrshold=0.25, + ): + super().__init__() + self.voxel_size = voxel_size + self.lidar_range = lidar_range + self.num_anchors = len(dirs) + self.stride = stride + self.pos_threshold = pos_threshold + self.neg_threshold = neg_threshold + self.score_thrshold = score_thrshold + self.box_coder = build_box_coder(**box_coder) + anchors, standup_anchors = self.get_anchor_template(box_size, dirs) + self.anchors = nn.Parameter(anchors, requires_grad=False) + self.standup_anchors = nn.Parameter(standup_anchors, 
requires_grad=False) + if me_coor: + lr = lidar_range + res_x, res_y = stride * voxel_size[0], stride * voxel_size[1] + self.size_x = round((lr[3] - lr[0]) / res_x) + self.size_y = round((lr[4] - lr[1]) / res_y) + self.offset_sz_x = round(lr[0] / res_x) + self.offset_sz_y = round(lr[1] / res_y) + self.coor_to_inds = self.me_coor_to_grid_indices + else: + raise NotImplementedError + + def me_coor_to_grid_indices(self, coor): + inds = coor / self.stride + inds[:, 0] -= self.offset_sz_x + inds[:, 1] -= self.offset_sz_y + in_range_mask = (inds >= 0).all(dim=-1) & (inds[:, 0] < self.size_x) & (inds[:, 1] < self.size_y) + return inds[in_range_mask].long(), in_range_mask + + def get_anchor_template(self, box_size, dirs): + pix_x = self.voxel_size[0] * self.stride + pix_y = self.voxel_size[1] * self.stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y) + pix_y * 0.5 + xys = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + xys = xys.unsqueeze(2).repeat(1, 1, self.num_anchors, 1) + zs = - torch.ones_like(xys[..., :1]) + h, w = xys.shape[:2] + lwh = torch.tensor(box_size).reshape( + 1, 1, 1, -1).repeat(h, w, self.num_anchors, 1) + rs = torch.deg2rad(torch.tensor(dirs)).reshape( + 1, 1, -1, 1).repeat(h, w, 1, 1) + # (w, h, num_anchor, 7) --> (whn, 7) + anchors = torch.cat([xys, zs, lwh, rs], dim=-1) + standup_anchors = boxes3d_to_standup_bboxes( + anchors.view(-1, 7)).reshape(h, w, self.num_anchors, 4) + return anchors, standup_anchors + + def assign(self, coors: torch.Tensor, gt_boxes: torch.Tensor): + """ + + :param coors: (N, 2) 2D mink coor [x, y] + :param gt_boxes: (M, 7) [x, y, z, l, w, h, r] + :return: + - labels Tensor(N, num_anchors): box regression targets + - reg_tgt Tensor(N, num_anchors, code_size): box regression targets + - ir_score Tensor(N, num_anchors, 4) or None: direction score target + """ + gt_boxes = gt_boxes[:, :7] + if len(gt_boxes) == 0: + labels = gt_boxes.new_full((coors.shape[0] * self.num_anchors,), -1) + reg_tgt = gt_boxes.new_zeros((0, self.box_coder.code_size)) + dir_scores = gt_boxes.new_zeros((0, 4)) + # Todo dir_score, gt_boxes, correct shape + return labels, reg_tgt, dir_scores + inds, in_range_mask = self.coor_to_inds(coors) + gt_standup_boxes = boxes3d_to_standup_bboxes(gt_boxes) + standup_anchors = self.standup_anchors[inds[:, 0], inds[:, 1]].view(-1, 4) + ious = self.box_overlaps(standup_anchors, gt_standup_boxes) + iou_max, max_inds = ious.max(dim=1) + top1_inds = torch.argmax(ious, dim=0) + + pos = iou_max > self.pos_threshold + pos_inds = torch.cat([top1_inds, torch.where(pos)[0]]).unique() + neg = iou_max < self.neg_threshold + neg[pos_inds] = False + + labels = gt_boxes.new_full((ious.shape[0],), -1) + labels[neg] = 0 + labels[pos_inds] = 1 + + aligned_gt_boxes = gt_boxes[max_inds[pos_inds]] + aligned_anchors = self.anchors[inds[:, 0], inds[:, 1]].view(-1, self.box_coder.code_size)[pos_inds] + reg_tgt, dir_score = self.box_coder.encode(aligned_anchors, aligned_gt_boxes) + + labels_final = gt_boxes.new_full((in_range_mask.shape[0], self.num_anchors), -1) + labels_final[in_range_mask] = labels.view(-1, self.num_anchors) + return labels_final.view(-1), reg_tgt, dir_score + + def box_overlaps(self, boxes1, boxes2): + areas1 = (boxes1[:, 2] - boxes1[:, 0] + 1) * \ + (boxes1[:, 3] - boxes1[:, 1] + 1) + areas2 = (boxes2[:, 2] - boxes2[:, 0] + 1) * \ + (boxes2[:, 3] - boxes2[:, 1] + 1) + + boxes1_mat = boxes1.unsqueeze(1).repeat(1, boxes2.shape[0], 1) + 
boxes2_mat = boxes2.unsqueeze(0).repeat(boxes1.shape[0], 1, 1) + x_extend = torch.minimum(boxes1_mat[..., 2], boxes2_mat[..., 2]) - \ + torch.maximum(boxes1_mat[..., 0], boxes2_mat[..., 0]) + 1 + y_extend = torch.minimum(boxes1_mat[..., 3], boxes2_mat[..., 3]) - \ + torch.maximum(boxes1_mat[..., 1], boxes2_mat[..., 1]) + 1 + + overlaps = torch.zeros_like(boxes1_mat[..., 0]) + + pos = torch.logical_and(x_extend > 0, y_extend > 0) + intersection = x_extend[pos] * y_extend[pos] + union = (areas1.unsqueeze(1) + areas2.unsqueeze(0))[pos] - intersection + overlaps[pos] = intersection / union + + return overlaps + + def get_predictions(self, coors, preds): + """ + + :param coors: Tensor(N, 3) mink coor [batch_idx, x, y] + :param preds: + :return: + """ + # roi = {'box': [], 'scr': [], 'lbl': [], 'idx': []} + roi = {} + inds, in_range_mask = self.coor_to_inds(coors[:, 1:]) + pred_cls = preds['cls'][in_range_mask].sigmoid().reshape(-1) + pred_reg = preds['reg'][in_range_mask].reshape(-1, 7) + indices = coors[:, 0:1][in_range_mask].repeat(1, self.num_anchors).reshape(-1) + + anchors = self.anchors[inds[:, 0], inds[:, 1]].view(-1, self.box_coder.code_size) + pos = pred_cls > self.score_thrshold + anchors = anchors[pos] + pred_cls = pred_cls[pos] + pred_reg = pred_reg[pos] + indices = indices[pos] + + boxes_dec = self.box_coder.decode(anchors, pred_reg) + + # remove abnormal boxes + mask = (boxes_dec[..., 3:6] > 0.1) & (boxes_dec[..., 3:6] < 10) + mask = mask.all(dim=-1) + pred_cls = pred_cls[mask] + pred_box = boxes_dec[mask] + indices = indices[mask] + + roi['scr'] = pred_cls + roi['box'] = pred_box + # TODO currently only support class car + roi['lbl'] = torch.zeros_like(pred_cls) + roi['idx'] = indices + + return roi + + +class BoxCenterAssigner(BaseAssigner, torch.nn.Module): + def __init__(self, + voxel_size, + lidar_range, + stride, + detection_benchmark, + class_names_each_head, + center_threshold, + box_coder, + activation='relu', + edl=True, + ): + super().__init__() + self.voxel_size = voxel_size + self.lidar_range = lidar_range + self.meter_per_pixel = (voxel_size[0] * stride, voxel_size[1] * stride) + self.csb = csb.get(detection_benchmark) + self.class_names_each_head = class_names_each_head + self.activation = activation + self.center_threshold = center_threshold + self.box_coder = build_box_coder(**box_coder) + self.edl = edl + + def pts_to_indices(self, bev_pts: torch.Tensor): + """ + :param bev_pts: (N, 3+), BEV points, 1st column should be batch index. 
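+            Columns 1 and 2 hold metric (x, y) coordinates; they are mapped to integer grid
+            indices using ``meter_per_pixel`` and the lower bound of ``lidar_range``.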
+ :return: + """ + x = (bev_pts[:, 1] - self.meter_per_pixel[0] * 0.5 - self.lidar_range[0]) \ + / self.meter_per_pixel[0] + y = (bev_pts[:, 2] - self.meter_per_pixel[1] * 0.5 - self.lidar_range[1]) \ + / self.meter_per_pixel[1] + indices = torch.stack([bev_pts[:, 0].long(), x.long(), y.long()], dim=1) + return indices + + @torch.no_grad() + def assign(self, centers, gt_boxes, gt_labels, gt_preds=None, **kwargs): + box_names = [self.csb[c.item()][0] for c in gt_labels] + + # cal regression targets + reg_tgt = {'box': [], 'dir': [], 'scr': [], 'idx': [], 'valid_mask': [], 'vel': [], 'pred': []} + for h, cur_cls_names in enumerate(self.class_names_each_head): + center_indices = self.pts_to_indices(centers).T + box_mask = [n in cur_cls_names for n in box_names] + cur_boxes = gt_boxes[box_mask] + res = self.box_coder.encode(centers, cur_boxes, self.meter_per_pixel, gt_preds) + reg_box, reg_dir, dir_score, valid = res[:4] + + reg_tgt['idx'].append(center_indices[:, valid]) + reg_tgt['valid_mask'].append(valid) + reg_tgt['box'].append(reg_box) + reg_tgt['dir'].append(reg_dir) + reg_tgt['scr'].append(dir_score) + if getattr(self.box_coder, 'with_velo', False): + reg_tgt['vel'].append(res[4]) + if getattr(self.box_coder, 'with_pred', False): + reg_tgt['pred'].append(res[5]) + return reg_tgt + + def get_predictions(self, preds): + """Decode the center and regression maps into BBoxes. + + :param preds: + - cls: list[Tensor], each tensor is the result from a cls head with shape (B or N, Ncls, ...). + - reg: + * box: list[Tensor], one tensor per reg head with shape (B or N, 6, ...). + * dir: list[Tensor], one tensor per reg head with shape (B or N, 8, ...). + * scr: list[Tensor], one tensor per reg head with shape (B or N, 4, ...). + :return: roi: + * box: list[Tensor], one tensor per head with shape (N, 8). + * scr: list[Tensor], one tensor per head with shape (N,). + * lbl: list[Tensor], one tensor per head with shape (N,). + * idx: list[Tensor], one tensor per head with shape (3, N), center map indices of the boxes. 
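+
+        A minimal usage sketch (head count, map shapes and variable names are hypothetical)::
+
+            >>> preds = {'cls': [cls_map], 'reg': {'box': [box_map], 'dir': [dir_map], 'scr': [scr_map]}}
+            >>> roi, confs = assigner.get_predictions(preds)
+            >>> boxes, scores = roi['box'], roi['scr']  # detections merged over all heads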
+ """ + roi = {'box': [], 'scr': [], 'lbl': [], 'idx': []} + lbl_cnt = torch.cumsum(torch.Tensor([0] + [m.shape[1] for m in preds['cls']]), dim=0) + confs = [] + for h, center_cls in enumerate(preds['cls']): + if center_cls.ndim == 4: + conf, _ = pred_to_conf_unc(center_cls.permute(0, 2, 3, 1), self.activation) + center_mask = conf[..., 1:].max(dim=-1).values > self.center_threshold # b, h, w + center_indices = torch.stack(torch.where(center_mask), dim=0) + centers = self.indices_to_pts(center_indices[1:]).T + cur_centers = torch.cat([center_indices[0].unsqueeze(-1), centers], dim=-1) + cur_reg = {k: preds['reg'][k][h].permute(0, 2, 3, 1)[center_mask] + for k in ['box', 'dir', 'scr']} + else: + conf, _ = pred_to_conf_unc(center_cls, self.activation, self.edl) + centers = preds['ctr'] + if self.edl: + center_mask = conf[..., 1:].max(dim=-1).values > self.center_threshold # b, h, w + else: + center_mask = conf.max(dim=-1).values > self.center_threshold # b, h, w + + if center_cls.ndim == 3: + indices = torch.stack([torch.zeros_like(centers[i, :, :1]) + i for i in range(centers.shape[0])], dim=0) + centers = torch.cat([indices, centers], dim=-1) + + cur_centers = centers[center_mask] + center_indices = self.pts_to_indices(cur_centers) + cur_reg = {k: preds['reg'][k][h][center_mask] + for k in preds['reg'].keys()} + + # from cosense3d.utils import vislib + # mask = cur_centers[:, 0].int() == 0 + # confs = conf[center_mask][mask, 1].detach().cpu().numpy() + # points = cur_centers[mask, 1:].detach().cpu().numpy() + # fig = vislib.plt.figure(figsize=(6, 6)) + # vislib.plt.scatter(points[:, 0], points[:, 1], c=confs, s=1) + # vislib.plt.show() + # vislib.plt.close() + + cur_box = self.box_coder.decode(cur_centers, cur_reg) + cur_scr, cur_lbl = conf[center_mask].max(dim=-1) + cur_lbl = cur_lbl + lbl_cnt[h] + roi['box'].append(cur_box) + roi['scr'].append(cur_scr) + roi['lbl'].append(cur_lbl) + roi['idx'].append(center_indices) + confs.append(conf) + + # from cosense3d.utils.vislib import draw_points_boxes_plt + # points = centers[:, 1:].detach().cpu().numpy() + # boxes = cur_box[:, 1:].detach().cpu().numpy() + # draw_points_boxes_plt( + # pc_range=self.lidar_range, + # boxes_pred=boxes, + # points=points, + # filename="/home/yuan/Pictures/tmp.png" + # ) + + # merge detections from all heads + roi['box'] = torch.cat(roi['box'], dim=0) + roi['scr'] = torch.cat(roi['scr'], dim=0) + roi['lbl'] = torch.cat(roi['lbl'], dim=0) + roi['idx'] = torch.cat(roi['idx'], dim=0) + confs = torch.stack(confs, dim=1) + return roi, confs + + +class BEVCenternessAssigner(BaseAssigner): + """ + Assign center points in the BEV maps to positve if the point is in the range 'min_radius' of any gt box center. 
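+    With ``use_gaussian=True`` the hard 0/1 labels are replaced by a Gaussian-decayed score of the distance
+    to the nearest box center. ``pos_neg_ratio`` optionally re-balances positive and negative samples, and
+    hard negatives can be mined once ``mining_thr`` > 0 and the current epoch exceeds ``mining_start_epoch``.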
+ """ + def __init__(self, + n_cls, + min_radius=1.0, + pos_neg_ratio=5, + mining_thr=0, + max_mining_ratio=3, + mining_start_epoch=5, + merge_all_classes=False, + use_gaussian=False, + sigma=1.0 + ): + super().__init__() + self.n_cls = n_cls + self.min_radius = min_radius + self.pos_neg_ratio = pos_neg_ratio + self.sample_mining_thr = mining_thr + self.max_mining_ratio = max_mining_ratio + self.mining_start_epoch = mining_start_epoch + self.merge_all_classes = merge_all_classes + self.use_gaussian = use_gaussian + self.sigma = sigma + + def get_labels_single_head(self, centers, gt_boxes, pred_scores=None, **kwargs): + diff = centers[:, :2].unsqueeze(1) - gt_boxes[:, :2].unsqueeze(0) + dists = torch.norm(diff, dim=-1) + dists_min, dists_min_arg = dists.min(dim=1) + if self.use_gaussian: + labels = torch.exp(-0.5 * torch.sqrt(dists_min) / self.sigma ** 2) + # sigmas = gt_boxes[:, 3:5][dists_min_arg] / 4 * self.sigma + # labels = weighted_mahalanobis_dists( + # sigmas ** 2, diff[torch.arange(len(diff)), dists_min_arg].abs().unsqueeze(1)) + labels[labels < 1e-4] = 0 + else: + labels = (dists_min < self.min_radius).float() + + if self.pos_neg_ratio: + labels = pos_neg_sampling(labels, self.pos_neg_ratio) + if self.sample_mining_thr > 0 and kwargs.get('epoch', 0) > self.mining_start_epoch: + assert pred_scores is not None + labels = sample_mining(pred_scores, labels, + dists_min, + self.sample_mining_thr, + self.max_mining_ratio) + + return labels + + @torch.no_grad() + def assign(self, centers, gt_boxes, gt_labels, pred_scores=None, **kwargs): + if len(gt_boxes) == 0: + labels = torch.zeros_like(centers[:, :1]) + return labels + if self.merge_all_classes: + labels = self.get_labels_single_head(centers, gt_boxes).unsqueeze(-1) + else: + labels = [] + for n in range(self.n_cls): + cur_boxes = gt_boxes[gt_labels == n] + cur_scores = None if pred_scores is None else pred_scores[n] + labels.append(self.get_labels_single_head(centers, cur_boxes, cur_scores, **kwargs)) + labels = torch.stack(labels, dim=-1) + + # import matplotlib.pyplot as plt + # + # from cosense3d.utils import vislib + # pc_range = [-100, -41.6, -3.0, 100, 41.6, 3.0] + # label = labels.detach().cpu().numpy() + # label = label[:, 0] + # points = centers.detach().cpu().numpy() + # boxes = gt_boxes.cpu().numpy() + # ax = vislib.draw_points_boxes_plt( + # pc_range=pc_range, + # boxes_gt=boxes, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], cmap='jet', c=label, s=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + + return labels + + +class BEVBoxAssigner(BaseAssigner): + """ + Assign center points in the BEV maps to positve if the point is in the range 'min_radius' of any gt box center. 
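+    Note: in contrast to :class:`BEVCenternessAssigner`, this assigner does not use a radius; a BEV point is
+    labeled positive if it falls inside the (z-flattened) footprint of any ground-truth box, as determined by
+    ``points_in_boxes_gpu``.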
+ """ + def __init__(self, + n_cls, + pos_neg_ratio=5, + mining_thr=0, + max_mining_ratio=3, + mining_start_epoch=5, + merge_all_classes=False, + ): + super().__init__() + self.n_cls = n_cls + self.pos_neg_ratio = pos_neg_ratio + self.sample_mining_thr = mining_thr + self.max_mining_ratio = max_mining_ratio + self.mining_start_epoch = mining_start_epoch + self.merge_all_classes = merge_all_classes + + def get_labels_single_head(self, centers, gt_boxes, pred_scores=None, **kwargs): + boxes = pad_l(gt_boxes[:, :7]).clone() + boxes[:, 3] = 0 + pts = pad_r(pad_l(centers[:, :2])) + + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=1 + ) + labels = (box_idx_of_pts >= 0).float() + if self.pos_neg_ratio: + labels = pos_neg_sampling(labels, self.pos_neg_ratio) + + return labels + + @torch.no_grad() + def assign(self, centers, gt_boxes, gt_labels, pred_scores=None, **kwargs): + if len(gt_boxes) == 0: + labels = torch.zeros_like(centers[:, :1]) + return labels + if self.merge_all_classes: + labels = self.get_labels_single_head(centers, gt_boxes).unsqueeze(-1) + else: + labels = [] + for n in range(self.n_cls): + cur_boxes = gt_boxes[gt_labels == n] + cur_scores = None if pred_scores is None else pred_scores[n] + labels.append(self.get_labels_single_head(centers, cur_boxes, cur_scores, **kwargs)) + labels = torch.stack(labels, dim=-1) + + # import matplotlib.pyplot as plt + # + # from cosense3d.utils import vislib + # pc_range = [-100, -41.6, -3.0, 100, 41.6, 3.0] + # label = labels.detach().cpu().numpy() + # label = label[:, 0] + # points = centers.detach().cpu().numpy() + # boxes = gt_boxes.cpu().numpy() + # ax = vislib.draw_points_boxes_plt( + # pc_range=pc_range, + # boxes_gt=boxes, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], cmap='jet', c=label, s=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + + return labels + + +class BEVPointAssigner(BaseAssigner): + """ + Assign target points to BEV boxes and down-sample the target points with buffered-based method. 
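+    Points inside a ground-truth box are labeled 1, points that only fall inside an enlarged copy of the box
+    (length and width doubled) form a buffer zone labeled 0, and the remaining points are labeled -1; each
+    group is then randomly down-sampled to at most ``50 * num_gt_boxes`` samples.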
+ """ + def __init__(self, + down_sample=True, + sample_mining_thr=0., + max_mining_ratio=3, + annealing_step=None, + topk_sampling=False, + annealing_sampling=False, + ): + super().__init__() + self.down_sample = down_sample + self.sample_mining_thr = sample_mining_thr + self.max_mining_ratio = max_mining_ratio + self.annealing_step = annealing_step + self.topk_sampling = topk_sampling + self.annealing_sampling = annealing_sampling + + def downsample_tgt_pts(self, tgt_label, max_sam): + selected = torch.ones_like(tgt_label.bool()) + pos = tgt_label == 1 + if pos.sum() > max_sam: + mask = torch.rand_like(tgt_label[pos].float()) < max_sam / pos.sum() + selected[pos] = mask + + buffer = tgt_label == 0 + if buffer.sum() > max_sam: + mask = torch.rand_like(tgt_label[buffer].float()) < max_sam / buffer.sum() + selected[buffer] = mask + + neg = tgt_label == -1 + if neg.sum() > max_sam: + mask = torch.rand_like(tgt_label[neg].float()) < max_sam / neg.sum() + selected[neg] = mask + labels = - torch.ones_like(mask).long() + labels[mask] = 0 + tgt_label[neg] = labels + return selected, tgt_label + + def assign(self, tgt_pts, gt_boxes, B, conf=None, down_sample=True, **kwargs): + boxes = gt_boxes.clone() + boxes[:, 3] = 0 + pts = pad_r(tgt_pts) + + if not down_sample or not self.down_sample: + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + tgt_label = torch.zeros_like(box_idx_of_pts) + tgt_label[box_idx_of_pts >= 0] = 1 + return tgt_pts, tgt_label, None + + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + boxes[:, 4:6] *= 2 + _, enlarged_box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + + pos_mask = box_idx_of_pts >= 0 + buffer_mask = (box_idx_of_pts < 0) & (enlarged_box_idx_of_pts >= 0) + tgt_label = - torch.ones_like(box_idx_of_pts) + tgt_label[pos_mask] = 1 + tgt_label[buffer_mask] = 0 + n_sam = len(boxes) * 50 + + # add points that have high pred scores + if self.sample_mining_thr > 0: + scores = conf[..., 1:].sum(dim=-1) + tgt_label = sample_mining(scores, tgt_label, self.sample_mining_thr, + max_num_sample=n_sam) + + mask, tgt_label = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + + # get final tgt + tgt_pts = tgt_pts[mask] + tgt_label = tgt_label[mask] + + return tgt_pts, tgt_label, mask + + def get_predictions(self, x, edl=True, activation='none'): + conf, unc = pred_to_conf_unc(x, activation, edl) + return conf, unc + + +class BEVSemsegAssigner(BaseAssigner): + def __init__(self, + data_info, + stride, + tgt_range=None, + down_sample=False, + annealing_step=None, + ): + super().__init__() + update_me_essentials(self, data_info, stride) + self.tgt_range = tgt_range + self.downsample = down_sample + self.annealing_step = annealing_step + + def pts_to_inds(self, pts): + """Calculate indices of samples in the bev map""" + ixy = metric2indices(pts[:, :3], self.res).long() + ixy[:, 1] -= self.offset_sz_x + ixy[:, 2] -= self.offset_sz_y + maskx = torch.logical_and(ixy[:, 1] >= 0, ixy[:, 1] < self.size_x) + masky = torch.logical_and(ixy[:, 2] >= 0, ixy[:, 2] < self.size_y) + mask = torch.logical_and(maskx, masky) + indices = ixy[mask] + return indices.T, mask + + def get_obs_mask(self, inds, B): + obs_mask = torch.zeros((B, self.size_x, self.size_y), device=inds.device) + inds = inds.clone().long().T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + obs_mask[inds[0], inds[1], inds[2]] = 1 + return obs_mask.bool() + + @staticmethod + def down_sample_pred_pts(ctr_pts): + keep = torch.rand_like(ctr_pts['ctr'][:, 
0]) > 0.5 + for k in ctr_pts.keys(): + ctr_pts[k] = ctr_pts[k][keep] + + return ctr_pts + + @torch.no_grad() + def downsample_tgt_pts(self, tgt_label, max_sam): + selected = torch.ones_like(tgt_label.bool()) + pos = tgt_label == 1 + if pos.sum() > max_sam: + mask = torch.rand_like(tgt_label[pos].float()) < max_sam / pos.sum() + selected[pos] = mask + + neg = tgt_label == 0 + if neg.sum() > max_sam: + mask = torch.rand_like(tgt_label[neg].float()) < max_sam / neg.sum() + selected[neg] = mask + return selected + + def filter_range(self, ctr_pts, samples): + mask = (ctr_pts['ctr'].abs() < self.tgt_range).all(1) + for k in ctr_pts.keys(): + ctr_pts[k] = ctr_pts[k][mask] + + mask = (samples[:, 1:3].abs() < self.tgt_range).all(1) + samples = samples[mask] + return ctr_pts, samples + + def assign(self, ctr_pts, samples, B, gt_boxes=None, **kwargs): + raise NotImplementedError + + def get_predictions(self, data_dict, B, edl=True, activation='none', **kwargs): + raise NotImplementedError + + +class ContiBEVAssigner(BEVSemsegAssigner): + def __init__(self, + distr_r=2.0, + var0=0.1, + **kwargs): + super().__init__(**kwargs) + self.distr_r = distr_r + self.var0 = var0 + steps = int(self.distr_r / self.res[0]) * 2 + 1 + offset = meshgrid(-self.distr_r, self.distr_r, 2, + n_steps=steps).cuda().view(-1, 2) + self.nbrs = offset[torch.norm(offset, dim=1) < 2].view(1, -1, 2) + + def sample_dynamic_tgt_pts(self, ctr_pts: dict, gt_boxes: torch.Tensor, B: int) \ + -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Given the input coordinates of the center points and the ground truth BBoxes, + sample the BEV target points for BEV semantic segmentation following the buffer-based sampling as illustrated + in the following image: + + .. image:: _static/imgs/buffer_based_sampling.png + :width: 400 + :alt: Buffer-based sampling of the BEV target + + :param ctr_pts: center points of bev maps, including indices, metric centers and regression results. + :param gt_boxes: ground truth BBoxes. + :param B: batch size. + :return: + - tgt_pts: sampled target points. + - tgt_lbl: labels of the sampled target points. + - inds: map indices of the sampled target points. + """ + tgt_pts = ctr_pts['ctr'].clone() + tgt_pts[:, :2] = tgt_pts[:, :2] + torch.randn_like(tgt_pts[:, :2]) * 3 + tgt_pts = torch.cat([ctr_pts['coor'][:, :1], tgt_pts], dim=-1) + obs_mask = self.get_obs_mask(ctr_pts['coor'], B) + inds, mask = self.pts_to_inds(tgt_pts) + tgt_pts = tgt_pts[mask] + mask = obs_mask[inds[0], inds[1], inds[2]] + tgt_pts = tgt_pts[mask] + inds = inds.T[mask] + + if len(gt_boxes) == 0 or len(tgt_pts) == 0: + tgt_label = torch.zeros_like(tgt_pts[:, 0]).int() + else: + boxes = gt_boxes.clone() + boxes[:, 3] = 0 + pts = pad_r(tgt_pts) + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + boxes[:, 4:6] *= 4 + _, box_idx_of_pts2 = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + tgt_label = - (box_idx_of_pts2 >= 0).int() + tgt_label[box_idx_of_pts >= 0] = 1 + + n_sam = len(gt_boxes) * 50 + mask = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + tgt_label = tgt_label > 0 + return tgt_pts[mask], tgt_label[mask], inds[mask].T + + def assign(self, ctr_pts, samples, B, gt_boxes=None, **kwargs) -> dict: + """ + Assign target. + + :param ctr_pts: center points of bev maps, including indices, metric centers and regression results. + :param samples: BEV target point samples. + :param B: batch size. + :param gt_boxes: ground truth BBoxes. + :param kwargs: keyword arguments. 
+ :return: target_dict that contains the static or/and dynamic target points and their corresponding labels. + """ + lr = self.lidar_range + if self.tgt_range is not None: + ctr_pts, samples = self.filter_range(ctr_pts, samples) + lr = [-self.tgt_range, -self.tgt_range, -3, self.tgt_range, self.tgt_range, 1] + if self.downsample: + ctr_pts = self.down_sample_pred_pts(ctr_pts) + + tgt = {} + if 'reg_static' in ctr_pts: + tgt['evi_static'] = draw_sample_evis( + ctr_pts, samples, 'static', self.res[0], self.distr_r, lr, B, self.var0) + tgt['lbl_static'] = samples[:, -1] + if 'reg_dynamic' in ctr_pts: + assert gt_boxes is not None + tgt_pts, tgt_label, inds = self.sample_dynamic_tgt_pts(ctr_pts, gt_boxes, B) + tgt['evi_dynamic'] = draw_sample_evis( + ctr_pts, tgt_pts, 'dynamic', self.res[0], self.distr_r, lr, B, self.var0) + tgt['lbl_dynamic'] = tgt_label + + # + # import matplotlib.pyplot as plt + # from cosense3d.modules.utils.edl_utils import logit_to_edl + # fig = plt.figure(figsize=(10, 10)) + # coor = ctr_pts['coor'] + # ctr = ctr_pts['ctr'] + # sams = samples[samples[:, 0]==0][:, 1:].cpu().numpy() + # mask = coor[:, 0] == 0 + # xy = ctr[mask].cpu().numpy() + # conf, unc = logit_to_edl(ctr_pts['reg_static'][mask, :2]) + # colors = conf[:, 1].detach().cpu().numpy() + # plt.scatter(xy[:, 0], xy[:, 1], cmap='jet', c=colors, edgecolors=None, marker='.', s=2, vmin=0, vmax=1) + # plt.show() + # plt.close() + # + # fig = plt.figure(figsize=(10, 10)) + # pos = sams[:, -1] == 1 + # plt.scatter(sams[:, 0], sams[:, 1], c='k', facecolors=None, marker='o', s=5) + # plt.scatter(sams[pos, 0], sams[pos, 1], c='r', facecolors=None, marker='o', s=5) + # plt.show() + # plt.close() + # + # fig = plt.figure(figsize=(10, 10)) + # mask = tgt_pts[:, 0] == 0 + # sams = tgt_pts[mask][:, 1:].cpu().numpy() + # pos = tgt_label[mask].cpu().numpy() == 1 + # mask = coor[:, 0] == 0 + # xy = ctr[mask].cpu().numpy() + # conf, unc = logit_to_edl(ctr_pts['reg_dynamic'][mask, :2]) + # colors = conf[:, 1].detach().cpu().numpy() + # plt.scatter(xy[:, 0], xy[:, 1], cmap='jet', c=colors, edgecolors=None, marker='.', s=2, vmin=0, vmax=1) + # plt.show() + # plt.close() + # + # fig = plt.figure(figsize=(10, 10)) + # plt.scatter(sams[:, 0], sams[:, 1], c='k', facecolors=None, marker='o', s=5) + # plt.scatter(sams[pos, 0], sams[pos, 1], c='r', facecolors=None, marker='o', s=5) + # plt.show() + # plt.close() + + return tgt + + def get_predictions(self, ctr_pts, B, tag, **kwargs): + """ + Given center points and its corresponding regressions, generate the dense bev semseg maps + and its uncertainty and observation mask. + + :param ctr_pts: center points of bev maps, including indices, metric centers and regression results. + :param B: batch size. + :param tag: tag for regression key "static | dynamic". + :param kwargs: keyword arguments + :return: + - conf: confidence bev map. + - unc: uncertainty bev map. + - obs_mask: observation mask of the bev map. 
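+
+        Internally, each sparse center spreads its Gaussian-weighted evidence to the neighboring grid cells
+        within a small radius (``distr_r``), the contributions are scatter-summed into a dense
+        ``(B, size_x, size_y, 2)`` evidence map, and ``pred_to_conf_unc`` turns the accumulated evidence into
+        the returned confidence and uncertainty maps; cells that received no evidence are flagged as
+        unobserved in the observation mask.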
+ """ + reg = ctr_pts[f'reg_{tag}'].relu() + reg_evi = reg[:, :2] + reg_var = reg[:, 2:].view(-1, 2, 2) + ctr = ctr_pts['ctr'] + coor = ctr_pts['coor'] + + nbrs = self.nbrs.to(reg_evi.device) + dists = torch.zeros_like(ctr.view(-1, 1, 2)) + nbrs + vars0 = [self.var0, self.var0] + probs_weighted = weighted_mahalanobis_dists(reg_evi, reg_var, dists, vars0) + voxel_new = ctr.view(-1, 1, 2) + nbrs + # convert metric voxel points to map indices + x = (torch.floor(voxel_new[..., 0] / self.res[0]) - self.offset_sz_x).long() + y = (torch.floor(voxel_new[..., 1] / self.res[1]) - self.offset_sz_y).long() + batch_indices = (torch.ones_like(probs_weighted[:, :, 0]) * coor[:, :1]).long() + mask = (x >= 0) & (x < self.size_x) & (y >= 0) & (y < self.size_y) + x, y = x[mask], y[mask] + batch_indices = batch_indices[mask] + + # copy sparse probs to the dense evidence map + indices = batch_indices * self.size_x * self.size_y + x * self.size_y + y + batch_size = coor[:, 0].max().int().item() + 1 + probs_weighted = probs_weighted[mask].view(-1, 2) + evidence = torch.zeros((batch_size, self.size_x, self.size_y, 2), + device=probs_weighted.device).view(-1, 2) + torch_scatter.scatter(probs_weighted, indices, + dim=0, out=evidence, reduce='sum') + evidence = evidence.view(batch_size, self.size_x, self.size_y, 2) + + # create observation mask + obs_mask = torch.zeros_like(evidence[..., 0]).view(-1) + obs = indices.unique().long() + obs_mask[obs] = 1 + obs_mask = obs_mask.view(batch_size, self.size_x, self.size_y).bool() + conf, unc = pred_to_conf_unc(evidence) + + # import matplotlib.pyplot as plt + # plt.imshow(conf[0, :, :, 1].T.detach().cpu().numpy()) + # plt.show() + # plt.close() + return {f'conf_map_{tag}': conf, f'unc_map_{tag}': unc, f'obs_mask_{tag}': obs_mask} + + +class DiscreteBEVAssigner(BaseAssigner): + def __init__(self, + data_info, + stride, + down_sample=False, + annealing_step=None, + ): + super().__init__() + update_me_essentials(self, data_info, stride) + self.down_sample = down_sample + self.annealing_step = annealing_step + + def pts_to_inds(self, samples): + """Calculate indices of samples in the bev map""" + ixy = metric2indices(samples[:, :3], self.res).long() + ixy[:, 1] -= self.offset_sz_x + ixy[:, 2] -= self.offset_sz_y + maskx = torch.logical_and(ixy[:, 1] >= 0, ixy[:, 1] < self.size_x) + masky = torch.logical_and(ixy[:, 2] >= 0, ixy[:, 2] < self.size_y) + mask = torch.logical_and(maskx, masky) + indices = ixy[mask] + return indices.T, mask + + def get_obs_mask(self, inds, B): + obs_mask = torch.zeros((B, self.size_x, self.size_y), device=inds.device) + inds = inds.T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + obs_mask[inds[0], inds[1], inds[2]] = 1 + return obs_mask.bool() + + + def assign(self, ctr_pts, samples, B, gt_boxes=None, **kwargs): + bevmap = self.get_predictions(ctr_pts, B) + inds, mask = self.pts_to_inds(samples) + labels = samples[mask][:, -1] + preds = bevmap[inds[0], inds[1], inds[2]] + + # import matplotlib.pyplot as plt + # img = pred_to_conf_unc(bevmap)[0][..., 1].detach().cpu().numpy() + # plt.imshow(img[0].T) + # plt.show() + # plt.close() + return preds, labels + + def get_predictions(self, data_dict, B, edl=True, activation='none', **kwargs): + reg = data_dict['reg'] + inds = data_dict['coor'] + reg_evi = reg.relu() + + bevmap = torch.zeros((B, self.size_x, self.size_y, reg_evi.shape[-1]), + device=reg_evi.device) + inds = inds.T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + # obs_mask = evidence[..., 0].bool() + # 
obs_mask[inds[0], inds[1], inds[2]] = True + bevmap[inds[0], inds[1], inds[2]] = reg_evi + return bevmap + + +class RoIBox3DAssigner(BaseAssigner): + def __init__(self, + box_coder, + ): + self.box_coder = build_box_coder(**box_coder) + self.code_size = self.box_coder.code_size + + def assign(self, pred_boxes, gt_boxes, **kwargs): + tgt_dict = { + 'rois': [], + 'gt_of_rois': [], + 'gt_of_rois_src': [], + 'cls_tgt': [], + 'reg_tgt': [], + 'iou_tgt': [], + 'rois_anchor': [], + 'record_len': [] + } + + for rois, gts in zip(pred_boxes, gt_boxes): + gts[:, -1] *= 1 + ious = boxes_iou3d_gpu(rois, gts) + max_ious, gt_inds = ious.max(dim=1) + gt_of_rois = gts[gt_inds] + rcnn_labels = (max_ious > 0.3).float() + mask = torch.logical_not(rcnn_labels.bool()) + + # set negative samples back to rois, no correction in stage2 for them + gt_of_rois[mask] = rois[mask] + gt_of_rois_src = gt_of_rois.clone().detach() + + # canoical transformation + roi_center = rois[:, 0:3] + # TODO: roi_ry > 0 in pcdet + roi_ry = rois[:, 6] % (2 * PI) + gt_of_rois[:, 0:3] = gt_of_rois[:, 0:3] - roi_center + gt_of_rois[:, 6] = gt_of_rois[:, 6] - roi_ry + + # transfer LiDAR coords to local coords + gt_of_rois = rotate_points_along_z_torch( + points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), + angle=-roi_ry.view(-1) + ).view(-1, gt_of_rois.shape[-1]) + + # flip orientation if rois have opposite orientation + heading_label = (gt_of_rois[:, 6] + ( + torch.div(torch.abs(gt_of_rois[:, 6].min()), + (2 * PI), rounding_mode='trunc') + + 1) * 2 * PI) % (2 * PI) # 0 ~ 2pi + opposite_flag = (heading_label > PI * 0.5) & ( + heading_label < PI * 1.5) + + # (0 ~ pi/2, 3pi/2 ~ 2pi) + heading_label[opposite_flag] = (heading_label[ + opposite_flag] + PI) % ( + 2 * PI) + flag = heading_label > PI + heading_label[flag] = heading_label[ + flag] - PI * 2 # (-pi/2, pi/2) + heading_label = torch.clamp(heading_label, min=-PI / 2, + max=PI / 2) + gt_of_rois[:, 6] = heading_label + + # generate regression target + rois_anchor = rois.clone().detach().view(-1, self.code_size) + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + + reg_targets, _ = self.box_coder.encode( + rois_anchor, gt_of_rois.view(-1, self.code_size) + ) + + tgt_dict['rois'].append(rois) + tgt_dict['gt_of_rois'].append(gt_of_rois) + tgt_dict['gt_of_rois_src'].append(gt_of_rois_src) + tgt_dict['cls_tgt'].append(rcnn_labels) + tgt_dict['reg_tgt'].append(reg_targets) + tgt_dict['iou_tgt'].append(max_ious) + tgt_dict['rois_anchor'].append(rois_anchor) + tgt_dict['record_len'].append(rois.shape[0]) + + # cat list to tensor + for k, v in tgt_dict.items(): + if k == 'record_len': + continue + tgt_dict[k] = torch.cat(v, dim=0) + return tgt_dict + + def get_predictions(self, rcnn_cls, rcnn_iou, rcnn_reg, rois): + rcnn_cls = rcnn_cls.sigmoid().view(-1) + rcnn_iou = rcnn_iou.view(-1) + rcnn_score = rcnn_cls * rcnn_iou**4 + rcnn_reg = rcnn_reg.view(-1, 7) + + rois_anchor = rois.clone().detach().view(-1, self.code_size) + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + + roi_center = rois[:, 0:3] + roi_ry = rois[:, 6] % (2 * PI) + + boxes_local = self.box_coder.decode(rois_anchor, rcnn_reg) + # boxes_local = rcnn_reg + rois_anchor + detections = rotate_points_along_z_torch( + points=boxes_local.view(-1, 1, boxes_local.shape[-1]), angle=roi_ry.view(-1) + ).view(-1, boxes_local.shape[-1]) + detections[:, :3] = detections[:, :3] + roi_center + detections[:, 6] = detections[:, 6] + roi_ry + mask = rcnn_score >= 0.01 + detections = detections[mask] + scores = rcnn_score[mask] + + return { + 'box': 
detections, + 'scr': scores, + # Todo currently only support cars + 'lbl': torch.zeros_like(scores), + # map indices to be aligned with sparse detection head format + 'idx': torch.zeros_like(scores), + } + + +class RoadLineAssigner(BaseAssigner): + def __init__(self, + res, + range, + pos_neg_ratio=2): + super().__init__() + self.res = res + self.range = range + self.size = int(round(range / res * 2)) + self.pos_neg_ratio = pos_neg_ratio + + def assign(self, coor, tgt_pts, B, **kwargs): + ctr_coor = coor.clone() + ctr_coor[:, 1:] = ctr_coor[:, 1:] + self.size / 2 + ctr_coor = ctr_coor.long() + roadline_maps = torch.zeros((B, self.size, self.size), device=tgt_pts.device) + mask = (tgt_pts[:, 1:3].abs() < self.range).all(dim=-1) + tgt_pts = tgt_pts[mask] + + tgt_coor = torch.floor((tgt_pts[:, 1:3] + self.range) / self.res).long() + mask = torch.logical_and((tgt_coor >= 0).all(dim=-1), (tgt_coor < self.size).all(dim=-1)) + roadline_maps[tgt_pts[mask, 0].long(), tgt_coor[mask, 0], tgt_coor[mask, 1]] = tgt_pts[mask, -1] + + valid = torch.logical_and((ctr_coor[:, 1:3] >= 0).all(dim=-1), (ctr_coor[:, 1:3] < self.size).all(dim=-1)) + labels = roadline_maps[ctr_coor[valid, 0], ctr_coor[valid, 1], ctr_coor[valid, 2]] + + if self.pos_neg_ratio: + labels = pos_neg_sampling(labels, self.pos_neg_ratio) + + # import matplotlib.pyplot as plt + # pts_vis = ctr_coor[ctr_coor[:, 0] == 0, 1:].detach().cpu().numpy() + # lbl_vis = labels.detach().cpu().numpy() + # fig = plt.figure(figsize=(8, 8)) + # ax = fig.add_subplot() + # ax.scatter(pts_vis[:, 0], pts_vis[:, 1], c=lbl_vis, marker='.') + # plt.show() + # plt.close() + return labels, valid + + + + + + diff --git a/cosense3d/modules/plugin/transformer.py b/cosense3d/modules/plugin/transformer.py new file mode 100644 index 00000000..efe49f35 --- /dev/null +++ b/cosense3d/modules/plugin/transformer.py @@ -0,0 +1,900 @@ +import warnings, copy +from typing import List, Optional + +import torch +from torch import nn +import torch.utils.checkpoint as cp + +from cosense3d.modules.utils import build_torch_module +from cosense3d.modules.utils.norm import build_norm_layer +from cosense3d.modules.utils.init import xavier_init +try: + from cosense3d.modules.plugin.flash_attn import FlashMHA +except: + from cosense3d.modules.plugin.flash_attn_new import FlashMHA +from cosense3d.modules.utils.amp import auto_fp16 + + +def build_module(cfg): + cfg_ = copy.deepcopy(cfg) + attn_typ = cfg_.pop('type') + return globals()[attn_typ](**cfg_) + + +class FFN(nn.Module): + """Implements feed-forward networks (FFNs) with residual connection. + """ + + def __init__(self, + embed_dims: int, + feedforward_channels: int, + num_fcs: int=2, + act_cfg: dict=dict(type='ReLU', inplace=True), + dropout: float=0.0, + add_residual: bool=True): + """ + + :param embed_dims: The feature dimension. Same as + `MultiheadAttention`. + :param feedforward_channels: The hidden dimension of FFNs. + num_fcs (int, optional): The number of fully-connected layers in + FFNs. Defaluts to 2. + :param num_fcs: number of fully connected layers. + :param act_cfg: activation config. + :param dropout: Probability of an element to be + zeroed. Default 0.0. + :param add_residual: Add resudual connection. + Defaults to True. + """ + super(FFN, self).__init__() + assert num_fcs >= 2, 'num_fcs should be no less ' \ + f'than 2. got {num_fcs}.' 
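+        # The FFN is built as (num_fcs - 1) blocks of Linear -> activation -> Dropout expanding the features
+        # to `feedforward_channels`, followed by a final Linear projection back to `embed_dims`; in `forward`
+        # the output is optionally added to the (residual) input.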
+ self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.num_fcs = num_fcs + self.act_cfg = act_cfg + self.dropout = dropout + self.activate = build_torch_module(act_cfg) + + layers = nn.ModuleList() + in_channels = embed_dims + for _ in range(num_fcs - 1): + layers.append( + nn.Sequential( + nn.Linear(in_channels, feedforward_channels), + self.activate, + nn.Dropout(dropout))) + in_channels = feedforward_channels + layers.append(nn.Linear(feedforward_channels, embed_dims)) + self.layers = nn.Sequential(*layers) + self.dropout = nn.Dropout(dropout) + self.add_residual = add_residual + + def forward(self, x, residual=None): + """Forward function for `FFN`.""" + out = self.layers(x) + if not self.add_residual: + return out + if residual is None: + residual = x + return residual + self.dropout(out) + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(embed_dims={self.embed_dims}, ' + repr_str += f'feedforward_channels={self.feedforward_channels}, ' + repr_str += f'num_fcs={self.num_fcs}, ' + repr_str += f'act_cfg={self.act_cfg}, ' + repr_str += f'dropout={self.dropout}, ' + repr_str += f'add_residual={self.add_residual})' + return repr_str + + +class MultiheadFlashAttention(nn.Module): + r"""A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + attn_drop: float=0., + proj_drop: float=0., + dropout: float=None, + batch_first: bool=True, + cache_attn_weights: bool=False, + **kwargs): + """ + :param embed_dims: The embedding dimension. + :param num_heads: Parallel attention heads. + :param attn_drop: A Dropout layer on attn_output_weights. Default: 0.0. + :param proj_drop: A Dropout layer after `nn.MultiheadAttention`. Default: 0.0. + :param dropout: united dropout for both attention and projection layer. + :param batch_first: When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + :param cache_attn_weights: whether to cache the intermediate attention weights. + :param kwargs: + """ + super(MultiheadFlashAttention, self).__init__() + if dropout is not None: + attn_drop = dropout + proj_drop = dropout + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = True + self.cache_attn_weights = cache_attn_weights + self.attn_weights = None + + self.attn = FlashMHA(embed_dims, num_heads, attn_drop, dtype=torch.float16, device='cuda', + **kwargs) + + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = nn.Dropout(attn_drop) + + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """ + Forward function for `MultiheadAttention`. + + :param query: The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else [bs, num_queries embed_dims]. + :param key: The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims]. If None, the ``query`` will be used. Defaults to None. + :param value: The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. + Defaults to None. If None, the `key` will be used. + :param identity: This tensor, with the same shape as x, will be used for the identity link. 
+ If None, `x` will be used. Defaults to None. + :param query_pos: The positional encoding for query, with the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + :param key_pos: The positional encoding for `key`, with the same shape as `key`. Defaults to None. + If not None, it will be added to `key` before forward function. If None, and `query_pos` has the same + shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. + :param attn_mask: ByteTensor mask with shape [num_queries, num_keys]. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + :param key_padding_mask: ByteTensor with shape [bs, num_keys]. Defaults to None. + :param kwargs: allow passing a more general data flow when combining with + other operations in `transformerlayer`. + :return: forwarded results with shape [num_queries, bs, embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + """ + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + with torch.autocast(device_type='cuda', dtype=torch.float16): + # flash attention only support f16 + out, attn_weights = self.attn( + q=query, + k=key, + v=value, + key_padding_mask=None) + + if self.cache_attn_weights: + self.attn_weights = attn_weights + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + +class MultiHeadAttentionWrapper(nn.MultiheadAttention): + def __init__(self, *args, **kwargs): + super(MultiHeadAttentionWrapper, self).__init__(*args, **kwargs) + self.fp16_enabled = True + + @auto_fp16(out_fp32=True) + def forward_fp16(self, *args, **kwargs): + return super(MultiHeadAttentionWrapper, self).forward(*args, **kwargs) + + def forward_fp32(self, *args, **kwargs): + return super(MultiHeadAttentionWrapper, self).forward(*args, **kwargs) + + def forward(self, *args, **kwargs): + if self.fp16_enabled and self.training: + return self.forward_fp16(*args, **kwargs) + else: + return self.forward_fp32(*args, **kwargs) + + +class MultiheadAttention(nn.Module): + r"""A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + dropout: float=0.1, + batch_first: bool=False, + cache_attn_weights: bool=False, + fp16: bool=False, + **kwargs): + """ + :param embed_dims: The embedding dimension. + :param num_heads: Parallel attention heads. + :param dropout: probability of Dropout layer, Default: 0.0. 
+ :param batch_first: When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + :param cache_attn_weights: whether to cache attention weights. + :param fp16: whether set precision to float16 + :param kwargs: + """ + super(MultiheadAttention, self).__init__() + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = batch_first + self.cache_attn_weights = cache_attn_weights + self.attn_weights = None + self.fp16_enabled = fp16 + if fp16: + self.attn = MultiHeadAttentionWrapper(embed_dims, num_heads, dropout, **kwargs) + else: + self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout, **kwargs) + + self.proj_drop = nn.Dropout(dropout) + self.dropout_layer = nn.Dropout(dropout) + + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """ + Forward function for `MultiheadAttention`. + + :param query: The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else [bs, num_queries embed_dims]. + :param key: The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, + else [bs, num_keys, embed_dims]. If None, the ``query`` will be used. Defaults to None. + :param value: The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. + Defaults to None. If None, the `key` will be used. + :param identity: This tensor, with the same shape as x, will be used for the identity link. + If None, `x` will be used. Defaults to None. + :param query_pos: The positional encoding for query, with the same shape as `x`. + If not None, it will be added to `x` before forward function. Defaults to None. + :param key_pos: The positional encoding for `key`, with the same shape as `key`. + Defaults to None. If not None, it will be added to `key` before `query_pos` has the same shape as `key`, + then `query_pos` will be used for `key_pos`. Defaults to None. + :param attn_mask: ByteTensor mask with shape [num_queries, num_keys]. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + :param key_padding_mask: ByteTensor with shape [bs, num_keys]. Defaults to None. + :param kwargs: allow passing a more general data flow when combining with other operations in `transformerlayer`. + :return: forwarded results with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else[bs, num_queries embed_dims]. + + """ + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. 
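+        # e.g. with batch_first=True, a query of shape (B, Nq, C) is transposed to (Nq, B, C) before calling
+        # nn.MultiheadAttention and the output is transposed back to (B, Nq, C) afterwards.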
+ if self.batch_first: + query = query.transpose(0, 1).contiguous() + key = key.transpose(0, 1).contiguous() + value = value.transpose(0, 1).contiguous() + + out, attn_weights = self.attn( + query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask) + if self.batch_first: + out = out.transpose(0, 1).contiguous() + + if self.cache_attn_weights: + self.attn_weights = attn_weights + + return identity + self.dropout_layer(self.proj_drop(out)) + + +class TransformerDecoderLayer(nn.Module): + def __init__(self, + attn_cfgs=None, + ffn_cfgs=None, + operation_order=None, + norm_cfg=dict(type='LN'), + batch_first=False, + with_cp=True, + **kwargs): + super().__init__() + assert set(operation_order) & { + 'self_attn', 'norm', 'ffn', 'cross_attn'} == \ + set(operation_order), f'The operation_order of' \ + f' {self.__class__.__name__} should ' \ + f'contains all four operation type ' \ + f"{['self_attn', 'norm', 'ffn', 'cross_attn']}" + num_attn = operation_order.count('self_attn') + operation_order.count('cross_attn') + if isinstance(attn_cfgs, dict): + attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] + else: + assert num_attn == len(attn_cfgs), f'The length ' \ + f'of attn_cfg {num_attn} is ' \ + f'not consistent with the number of attention' \ + f'in operation_order {operation_order}.' + + self.batch_first = batch_first + self.num_attn = num_attn + self.operation_order = operation_order + self.norm_cfg = norm_cfg + self.pre_norm = operation_order[0] == 'norm' + self.use_checkpoint = with_cp + + self._init_layers(operation_order, attn_cfgs, ffn_cfgs, norm_cfg) + + def _init_layers(self, operation_order, attn_cfgs, ffn_cfgs, norm_cfg): + self.attentions = nn.ModuleList() + index = 0 + for operation_name in operation_order: + if operation_name in ['self_attn', 'cross_attn']: + if 'batch_first' in attn_cfgs[index]: + assert self.batch_first == attn_cfgs[index]['batch_first'] + else: + attn_cfgs[index]['batch_first'] = self.batch_first + attention = build_module(attn_cfgs[index]) + # Some custom attentions used as `self_attn` + # or `cross_attn` can have different behavior. + attention.operation_name = operation_name + self.attentions.append(attention) + index += 1 + + self.embed_dims = self.attentions[0].embed_dims + + self.ffns = nn.ModuleList() + num_ffns = operation_order.count('ffn') + if isinstance(ffn_cfgs, dict): + ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] + assert len(ffn_cfgs) == num_ffns + for ffn_index in range(num_ffns): + if 'embed_dims' not in ffn_cfgs[ffn_index]: + ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims + else: + assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims + self.ffns.append(build_module(ffn_cfgs[ffn_index])) + + self.norms = nn.ModuleList() + num_norms = operation_order.count('norm') + for _ in range(num_norms): + self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) + + def _forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + temp_memory=None, + temp_pos=None, + attn_masks: List[torch.Tensor]=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """ + Forward function for `TransformerDecoderLayer`. + + :param query: The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else [bs, num_queries embed_dims]. + :param key: The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, + else [bs, num_keys, embed_dims]. 
+ :param value: The value tensor with same shape as `key`. + :param query_pos: The positional encoding for `query`. Default: None. + :param key_pos: The positional encoding for `key`. Default: None. + :param temp_memory: 2D Tensor used in calculation of corresponding attention. The length of it should equal + to the number of `attention` in `operation_order`. Default: None. + :param temp_pos: + :param attn_masks: 2D Tensor used in calculation of corresponding attention. The length of it should equal + to the number of `attention` in `operation_order`. Default: None. + :param query_key_padding_mask: ByteTensor for `query`, with shape [bs, num_queries]. Only used in `self_attn` + layer. Defaults to None. + :param key_padding_mask: ByteTensor for `query`, with shape [bs, num_keys]. Default: None. + :param kwargs: contains some specific arguments of attentions. + :return: forwarded results with shape [num_queries, bs, embed_dims]. + """ + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + if layer == 'self_attn': + if temp_memory is not None: + temp_key = temp_value = torch.cat([query, temp_memory], dim=0) + temp_pos = torch.cat([query_pos, temp_pos], dim=0) + else: + temp_key = temp_value = query + temp_pos = query_pos + query = self.attentions[attn_index]( + query, + temp_key, + temp_value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=temp_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + temp_memory=None, + temp_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs + ): + """Forward function for `TransformerCoder`. + :returns: Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if self.use_checkpoint and self.training: + x = cp.checkpoint( + self._forward, + query, + key, + value, + query_pos, + key_pos, + temp_memory, + temp_pos, + attn_masks, + query_key_padding_mask, + key_padding_mask, + ) + else: + x = self._forward( + query, + key, + value, + query_pos, + key_pos, + temp_memory, + temp_pos, + attn_masks, + query_key_padding_mask, + key_padding_mask, + ) + return x + + +class TransformerLayerSequence(nn.Module): + """ + Base class for TransformerEncoder and TransformerDecoder in vision + transformer. + + As base-class of Encoder and Decoder in vision transformer. 
+ Support customization such as specifying different kind + of `transformer_layer` in `transformer_coder`. + """ + + def __init__(self, transformerlayers=None, num_layers=None): + """ + :param transformerlayers: (list[obj:`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict`) + Config of transformerlayer in TransformerCoder. If it is obj:`mmcv.ConfigDict`, + it would be repeated `num_layer` times to a list[`mmcv.ConfigDict`]. Default: None. + :param num_layers: The number of `TransformerLayer`. Default: None. + """ + super().__init__() + if isinstance(transformerlayers, dict): + transformerlayers = [ + copy.deepcopy(transformerlayers) for _ in range(num_layers) + ] + else: + assert isinstance(transformerlayers, list) and \ + len(transformerlayers) == num_layers + self.num_layers = num_layers + self.layers = nn.ModuleList() + for i in range(num_layers): + self.layers.append(build_module(transformerlayers[i])) + self.embed_dims = self.layers[0].embed_dims + self.pre_norm = self.layers[0].pre_norm + + def forward(self, + query, + key, + value, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerCoder`. + + :param query: (Tensor) Input query with shape `(num_queries, bs, embed_dims)`. + :param key: (Tensor) The key tensor with shape `(num_keys, bs, embed_dims)`. + :param value: (Tensor) The value tensor with shape `(num_keys, bs, embed_dims)`. + :param query_pos: (Tensor) The positional encoding for `query`. Default: None. + :param key_pos: (Tensor) The positional encoding for `key`. Default: None. + :param attn_masks: (List[Tensor], optional) Each element is 2D Tensor which is + used in calculation of corresponding attention in operation_order. Default: None. + :param query_key_padding_mask: (Tensor) ByteTensor for `query`, with shape [bs, num_queries]. + Only used in self-attention Default: None. + :param key_padding_mask: (Tensor) ByteTensor for `query`, with shape [bs, num_keys]. Default: None. + + :returns: results with shape [num_queries, bs, embed_dims]. + """ + for layer in self.layers: + query = layer( + query, + key, + value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + **kwargs) + return query + + +class TransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer.""" + + def __init__(self, + *args, + post_norm_cfg=dict(type='LN'), + return_intermediate=False, + **kwargs): + """ + :param args: + :param post_norm_cfg: Config of last normalization layer. Default: `LN`. + :param return_intermediate: Whether to return intermediate outputs. + :param kwargs: + """ + + super(TransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + if post_norm_cfg is not None: + self.post_norm = build_norm_layer(post_norm_cfg, self.embed_dims)[1] + else: + self.post_norm = None + + def forward(self, query, *args, **kwargs): + """Forward function for `TransformerDecoder`. + + :param query: (Tensor) Input query with shape `(num_query, bs, embed_dims)`. + :return:Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape [num_layers, num_query, bs, embed_dims]. 
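+
+        Example (illustrative sketch only; the ``type`` names passed to
+        ``build_module`` are placeholders and must match the attention/FFN
+        modules actually registered in this repository):
+
+        >>> decoder = TransformerDecoder(
+        ...     transformerlayers=dict(
+        ...         type='TransformerDecoderLayer',
+        ...         attn_cfgs=dict(type='MultiheadAttention',
+        ...                        embed_dims=256, num_heads=8),
+        ...         ffn_cfgs=dict(type='FFN', embed_dims=256),
+        ...         operation_order=('self_attn', 'norm', 'cross_attn',
+        ...                          'norm', 'ffn', 'norm')),
+        ...     num_layers=6,
+        ...     return_intermediate=True)
+        >>> query = torch.zeros(100, 2, 256)   # (num_query, bs, embed_dims)
+        >>> memory = torch.rand(1200, 2, 256)  # (num_keys, bs, embed_dims)
+        >>> out = decoder(query, key=memory, value=memory)
+        >>> # out: (num_layers, num_query, bs, embed_dims) = (6, 100, 2, 256)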
+ """ + if not self.return_intermediate: + x = super().forward(query, *args, **kwargs) + if self.post_norm: + x = self.post_norm(x)[None] + return x + + intermediate = [] + for layer in self.layers: + query = layer(query, *args, **kwargs) + if self.return_intermediate: + if self.post_norm is not None: + intermediate.append(self.post_norm(query)) + else: + intermediate.append(query) + # if torch.isnan(query).any(): + # print('TransfromerDecoder: Found nan in query.') + # if torch.isnan(intermediate[-1]).any(): + # print('TransfromerDecoder: Found nan in intermediate result.') + return torch.stack(intermediate) + + +class PETRTemporalTransformer(nn.Module): + """Implements the DETR transformer. + + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. + """ + + def __init__(self, encoder=None, decoder=None, cross=False): + """ + + :param encoder: (`mmcv.ConfigDict` | Dict) Config of + TransformerEncoder. Defaults to None. + :param decoder: ((`mmcv.ConfigDict` | Dict) Config of + TransformerDecoder. Defaults to None. + :param cross: whether to use cross-attention. + """ + super(PETRTemporalTransformer, self).__init__() + if encoder is not None: + self.encoder = build_module(encoder) + else: + self.encoder = None + self.decoder = build_module(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, memory, tgt, query_pos, pos_embed, attn_masks, temp_memory=None, temp_pos=None, + mask=None, query_mask=None, reg_branch=None): + """Forward function for `Transformer`. + """ + memory = memory.transpose(0, 1).contiguous() + query_pos = query_pos.transpose(0, 1).contiguous() + pos_embed = pos_embed.transpose(0, 1).contiguous() + + n, bs, c = memory.shape + + if tgt is None: + tgt = torch.zeros_like(query_pos) + else: + tgt = tgt.transpose(0, 1).contiguous() + + if temp_memory is not None: + temp_memory = temp_memory.transpose(0, 1).contiguous() + temp_pos = temp_pos.transpose(0, 1).contiguous() + + # out_dec: [num_layers, num_query, bs, dim] + if not isinstance(attn_masks, list): + attn_masks = [attn_masks, None] + out_dec = self.decoder( + query=tgt, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_pos, + temp_memory=temp_memory, + temp_pos=temp_pos, + query_key_padding_mask=query_mask, + key_padding_mask=mask, + attn_masks=attn_masks, + reg_branch=reg_branch, + ) + out_dec = out_dec.transpose(1, 2).contiguous() + memory = memory.reshape(-1, bs, c).transpose(0, 1).contiguous() + return out_dec, memory + + +class PETRTransformer(nn.Module): + """ + Implements the DETR transformer. + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. 
+ """ + + def __init__(self, encoder=None, decoder=None, cross=False): + super(PETRTransformer, self).__init__() + if encoder is not None: + self.encoder = build_module(encoder) + else: + self.encoder = None + self.decoder = build_module(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, memory, tgt, query_pos, pos_embed, attn_masks=None, + mask=None, query_mask=None): + """Forward function for `Transformer`. + """ + memory = memory.transpose(0, 1).contiguous() + query_pos = query_pos.transpose(0, 1).contiguous() + pos_embed = pos_embed.transpose(0, 1).contiguous() + + n, bs, c = memory.shape + + if tgt is None: + tgt = torch.zeros_like(query_pos) + else: + tgt = tgt.transpose(0, 1).contiguous() + + # out_dec: [num_layers, num_query, bs, dim] + if not isinstance(attn_masks, list): + attn_masks = [attn_masks] + assert len(attn_masks) == self.decoder.layers[0].num_attn + out_dec = self.decoder( + query=tgt, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_pos, + query_key_padding_mask=query_mask, + key_padding_mask=mask, + attn_masks=attn_masks, + ) + out_dec = out_dec.transpose(1, 2).contiguous() + memory = memory.reshape(-1, bs, c).transpose(0, 1).contiguous() + return out_dec, memory + + +class PETRTemporalTransformer(nn.Module): + r""" + Implements the DETR transformer. + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. + """ + + def __init__(self, encoder=None, decoder=None, cross=False): + super(PETRTemporalTransformer, self).__init__() + if encoder is not None: + self.encoder = build_module(encoder) + else: + self.encoder = None + self.decoder = build_module(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, memory, tgt, query_pos, pos_embed, attn_masks, temp_memory=None, temp_pos=None, + mask=None, query_mask=None, reg_branch=None): + """Forward function for `Transformer`. 
+ """ + query_pos = query_pos.transpose(0, 1).contiguous() + if memory is not None: + memory = memory.transpose(0, 1).contiguous() + n, bs, c = memory.shape + if pos_embed is not None: + pos_embed = pos_embed.transpose(0, 1).contiguous() + + if tgt is None: + tgt = torch.zeros_like(query_pos) + else: + tgt = tgt.transpose(0, 1).contiguous() + + if temp_memory is not None: + temp_memory = temp_memory.transpose(0, 1).contiguous() + temp_pos = temp_pos.transpose(0, 1).contiguous() + + # out_dec: [num_layers, num_query, bs, dim] + if not isinstance(attn_masks, list): + attn_masks = [attn_masks] + assert len(attn_masks) == self.decoder.layers[0].num_attn + out_dec = self.decoder( + query=tgt, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_pos, + temp_memory=temp_memory, + temp_pos=temp_pos, + query_key_padding_mask=query_mask, + key_padding_mask=mask, + attn_masks=attn_masks, + ) + out_dec = out_dec.transpose(1, 2).contiguous() + if memory is not None: + memory = memory.reshape(-1, bs, c).transpose(0, 1).contiguous() + return out_dec, memory \ No newline at end of file diff --git a/cosense3d/modules/plugin/voxel_encoder.py b/cosense3d/modules/plugin/voxel_encoder.py new file mode 100644 index 00000000..bcf59ead --- /dev/null +++ b/cosense3d/modules/plugin/voxel_encoder.py @@ -0,0 +1,28 @@ +import torch +from torch import nn + + +class MeanVFE(nn.Module): + def __init__(self, num_point_features, **kwargs): + super().__init__() + self.num_point_features = num_point_features + + def get_output_feature_dim(self): + return self.num_point_features + + def forward(self, voxel_features, voxel_num_points): + """ + Args: + voxels: (num_voxels, max_points_per_voxel, C) + voxel_num_points: optional (num_voxels) + + + Returns: + vfe_features: (num_voxels, C) + """ + points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False) + normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).\ + type_as(voxel_features) + points_mean = points_mean / normalizer + + return points_mean.contiguous() \ No newline at end of file diff --git a/cosense3d/modules/plugin/voxel_generator.py b/cosense3d/modules/plugin/voxel_generator.py new file mode 100644 index 00000000..c189b1bd --- /dev/null +++ b/cosense3d/modules/plugin/voxel_generator.py @@ -0,0 +1,41 @@ +import torch +from spconv.pytorch.utils import PointToVoxel + + +class VoxelGenerator: + def __init__(self, + voxel_size, + lidar_range, + max_points_per_voxel, + empty_mean=True, + mode='train', + device='cuda', + **kwargs): + self.voxel_size = torch.tensor(voxel_size) + self.lidar_range = torch.tensor(lidar_range) + self.max_points_per_voxel = max_points_per_voxel + self.max_voxels = kwargs.get(f"max_voxels_{mode}", 50000) + self.empty_mean = empty_mean + + self.grid_size = ((self.lidar_range[3:] - self.lidar_range[:3]) + / self.voxel_size).round().int() + self.voxel_generator = PointToVoxel( + vsize_xyz=self.voxel_size.tolist(), + coors_range_xyz=self.lidar_range.tolist(), + max_num_points_per_voxel=self.max_points_per_voxel, + num_point_features=4, + max_num_voxels=self.max_voxels, + device=torch.device(device) + ) + + def __call__(self, points_list): + voxels_list = [] + coordinates_list = [] + num_points_list = [] + for points in points_list: + voxels, coordinates, num_points = self.voxel_generator( + points, empty_mean=self.empty_mean) + voxels_list.append(voxels) + coordinates_list.append(coordinates) + num_points_list.append(num_points) + return voxels_list, coordinates_list, num_points_list \ No newline at end of file diff 
--git a/cosense3d/modules/plugin/voxnet_utils.py b/cosense3d/modules/plugin/voxnet_utils.py new file mode 100644 index 00000000..5e02a9b7 --- /dev/null +++ b/cosense3d/modules/plugin/voxnet_utils.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# Author: Runsheng Xu , OpenPCDet, modified by Yunshuang Yuan +# License: TDG-Attribution-NonCommercial-NoDistrib +# Modified by Yunshuang Yuan + +import torch +from torch import nn +import torch.nn.functional as F +import MinkowskiEngine as ME + + +class Conv3d(nn.Module): + + def __init__(self, in_channels, out_channels, k, s, p, batch_norm=True): + super(Conv3d, self).__init__() + self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=k, + stride=s, padding=p) + if batch_norm: + self.bn = nn.BatchNorm3d(out_channels) + else: + self.bn = None + + def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + + return F.relu(x, inplace=True) + + +class CML(nn.Module): + def __init__(self, in_channels): + super(CML, self).__init__() + self.dense = True + self.conv3d_1 = Conv3d(in_channels, in_channels, 3, s=(2, 1, 1), p=(1, 1, 1)) + self.conv3d_2 = Conv3d(in_channels, in_channels, 3, s=(1, 1, 1), p=(0, 1, 1)) + self.conv3d_3 = Conv3d(in_channels, in_channels, 3, s=(2, 1, 1), p=(1, 1, 1)) + self.out_strides = (4, 1, 1) + + def forward(self, x): + x = self.conv3d_1(x) + x = self.conv3d_2(x) + x = self.conv3d_3(x) + return x + + +class CMLSparse(nn.Module): + def __init__(self, in_channels): + super(CMLSparse, self).__init__() + self.dense = False + self.conv3d_1 = ME.MinkowskiConvolution( + in_channels, in_channels, 3, (2, 1, 1), dimension=3, expand_coordinates=False) + self.conv3d_2 = ME.MinkowskiConvolution( + in_channels, in_channels, 3, (2, 1, 1), dimension=3, expand_coordinates=False) + self.conv3d_3 = ME.MinkowskiConvolution( + in_channels, in_channels, 3, (2, 1, 1), dimension=3, expand_coordinates=False) + self.out_strides = nn.Parameter(torch.Tensor([8, 1, 1])) + + def forward(self, feats, coords): + x = ME.SparseTensor(features=feats, coordinates=coords) + x = self.conv3d_1(x) + x = self.conv3d_2(x) + x = self.conv3d_3(x) + + feats_out = x.F + coords_out = x.C + coords_out[:, 1:] = coords_out[:, 1:] / self.out_strides + return feats_out, coords_out + + diff --git a/cosense3d/modules/plugin/vsa.py b/cosense3d/modules/plugin/vsa.py new file mode 100644 index 00000000..33eeae33 --- /dev/null +++ b/cosense3d/modules/plugin/vsa.py @@ -0,0 +1,319 @@ +import copy +import random + +import torch +import torch.nn as nn + +from cosense3d.ops import pointnet2_utils +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.modules.utils.common import get_voxel_centers, cat_coor_with_idx + +sa_layer_default=dict( + raw_points=dict( + mlps=[[16, 16], [16, 16]], + pool_radius=[0.4, 0.8], + n_sample=[16, 16], + ), + x_conv1=dict( + downsample_factor=1, + mlps=[[16, 16], [16, 16]], + pool_radius=[0.4, 0.8], + n_sample=[16, 16], + ), + x_conv2=dict( + downsample_factor=2, + mlps=[[32, 32], [32, 32]], + pool_radius=[0.8, 1.2], + n_sample=[16, 32], + ), + x_conv3=dict( + downsample_factor=4, + mlps=[[64, 64], [64, 64]], + pool_radius=[1.2, 2.4], + n_sample=[16, 32], + ), + x_conv4=dict( + downsample_factor=8, + mlps=[[64, 64], [64, 64]], + pool_radius=[2.4, 4.8], + n_sample=[16, 32], + ) +) + +default_feature_source = ['bev', 'x_conv1', 'x_conv2', 'x_conv3', 'x_conv4', 'raw_points'] + +def bilinear_interpolate_torch(im, x, y): + """ + Args: + im: (H, W, C) [y, x] + x: (N) + y: (N) + + Returns: + + """ + x0 = 
torch.floor(x).long() + x1 = x0 + 1 + + y0 = torch.floor(y).long() + y1 = y0 + 1 + + x0 = torch.clamp(x0, 0, im.shape[1] - 1) + x1 = torch.clamp(x1, 0, im.shape[1] - 1) + y0 = torch.clamp(y0, 0, im.shape[0] - 1) + y1 = torch.clamp(y1, 0, im.shape[0] - 1) + + Ia = im[y0, x0] + Ib = im[y1, x0] + Ic = im[y0, x1] + Id = im[y1, x1] + + wa = (x1.type_as(x) - x) * (y1.type_as(y) - y) + wb = (x1.type_as(x) - x) * (y - y0.type_as(y)) + wc = (x - x0.type_as(x)) * (y1.type_as(y) - y) + wd = (x - x0.type_as(x)) * (y - y0.type_as(y)) + ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd) + return ans + + +class VoxelSetAbstraction(nn.Module): + def __init__(self, + voxel_size, + point_cloud_range, + num_keypoints=4096, + num_out_features=32, + point_source='raw_points', + features_source=None, + num_bev_features=128, + bev_stride=8, + num_rawpoint_features=3, + enlarge_selection_boxes=True, + sa_layer=None, + min_selected_kpts=128, + **kwargs): + super().__init__() + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.features_source = default_feature_source \ + if features_source is None \ + else features_source + self.num_keypoints = num_keypoints + self.num_out_features = num_out_features + self.point_source = point_source + self.num_bev_features = num_bev_features + self.bev_stride = bev_stride + self.num_rawpoint_features = num_rawpoint_features + self.enlarge_selection_boxes = enlarge_selection_boxes + self.min_selected_kpts = min_selected_kpts + + self.SA_layers = nn.ModuleList() + self.SA_layer_names = [] + self.downsample_times_map = {} + c_in = 0 + sa_layer = sa_layer_default if sa_layer is None else sa_layer + for src_name in self.features_source : + if src_name in ['bev', 'raw_points']: + continue + self.downsample_times_map[src_name] = sa_layer[src_name]['downsample_factor'] + mlps = copy.copy(sa_layer[src_name]['mlps']) + for k in range(len(mlps)): + mlps[k] = [mlps[k][0]] + mlps[k] + cur_layer = pointnet2_utils.StackSAModuleMSG( + radii=sa_layer[src_name]['pool_radius'], + nsamples=sa_layer[src_name]['n_sample'], + mlps=mlps, + use_xyz=True, + pool_method='max_pool', + ) + self.SA_layers.append(cur_layer) + self.SA_layer_names.append(src_name) + + c_in += sum([x[-1] for x in mlps]) + + if 'bev' in self.features_source: + c_bev = num_bev_features + c_in += c_bev + + if 'raw_points' in self.features_source: + mlps = copy.copy(sa_layer['raw_points']['mlps']) + for k in range(len(mlps)): + mlps[k] = [num_rawpoint_features - 3] + mlps[k] + + self.SA_rawpoints = pointnet2_utils.StackSAModuleMSG( + radii=sa_layer['raw_points']['pool_radius'], + nsamples=sa_layer['raw_points']['n_sample'], + mlps=mlps, + use_xyz=True, + pool_method='max_pool' + ) + c_in += sum([x[-1] for x in mlps]) + + self.vsa_point_feature_fusion = nn.Sequential( + nn.Linear(c_in, self.num_out_features, bias=False), + nn.BatchNorm1d(self.num_out_features), + nn.ReLU(), + ) + self.num_point_features = self.num_out_features + self.num_point_features_before_fusion = c_in + + def interpolate_from_bev_features(self, keypoints_list, bev_features): + B = len(bev_features) + point_bev_features_list = [] + for i in range(B): + keypoints = keypoints_list[i][:, :3] + x_idxs = (keypoints[..., 0] - self.point_cloud_range[0]) / self.voxel_size[0] + y_idxs = (keypoints[..., 1] - self.point_cloud_range[1]) / self.voxel_size[1] + x_idxs = x_idxs / self.bev_stride + y_idxs = y_idxs / self.bev_stride + cur_bev_features = bev_features[i].permute(1, 2, 
0) # (H, W, C) + point_bev_features = bilinear_interpolate_torch(cur_bev_features, x_idxs, y_idxs) + point_bev_features_list.append(point_bev_features) + + point_bev_features = torch.cat(point_bev_features_list, dim=0) # (B, N, C0) + return point_bev_features + + def get_sampled_points(self, points, voxel_coords): + B = len(points) + keypoints_list = [] + for i in range(B): + if self.point_source == 'raw_points': + src_points = points[i] + else: + raise NotImplementedError + # # generate random keypoints in the perception view field + # keypoints = torch.randn((self.num_keypoints, 4), device=src_points.device) + # keypoints[..., 0] = keypoints[..., 0] * 140 + # keypoints[..., 1] = keypoints[..., 1] * 40 + # # points with height flag 10 are padding/invalid, for later filtering + # keypoints[..., 2] = 10.0 + + sampled_points = src_points.unsqueeze(dim=0) # (1, N, 3) + # sample points with FPS + # some cropped pcd may have very few points, select various number + # of points to ensure similar sample density + # 50000 is approximately the number of points in one full pcd + num_kpts = int(self.num_keypoints * sampled_points.shape[1] / 50000) + 1 + num_kpts = min(num_kpts, self.num_keypoints) + cur_pt_idxs = pointnet2_utils.furthest_point_sample( + sampled_points[..., :3].contiguous(), num_kpts + ).long() + + if sampled_points.shape[1] < num_kpts: + empty_num = num_kpts - sampled_points.shape[1] + cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num] + + keypoints = sampled_points[0][cur_pt_idxs[0]] + + # keypoints[:len(kpts[0]), :] = kpts + keypoints_list.append(keypoints) + + # keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) + return keypoints_list + + def forward(self, det_out, bev_feat, voxel_feat, points): + B = len(points) + preds = [x['preds'] for x in det_out] + keypoints_list = self.get_sampled_points(points, voxel_feat) # BxNx4 + + # Only select the points that are in the predicted bounding boxes + boxes = cat_coor_with_idx([x['box'] for x in preds]) + scores = torch.cat([x['scr'] for x in preds]) + # At the early training stage, there might be too many boxes, + # we select limited number of boxes for the second stage. 
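+        # (the top-k is taken over the whole batch by confidence score,
+        #  keeping on average at most 100 boxes per sample)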
+ if boxes.shape[0] > B * 100: + topk = scores.topk(k=100 * B).indices + scores = scores[topk] + boxes = boxes[topk] + + boxes_tmp = boxes.clone() + if self.enlarge_selection_boxes: + boxes_tmp[:, 4:7] += 0.5 + keypoints = cat_coor_with_idx(keypoints_list) + if len(boxes_tmp) > 0: + pts_idx_of_box = points_in_boxes_gpu(keypoints[:, :4], boxes_tmp, batch_size=B)[1] + else: + pts_idx_of_box = torch.full((len(keypoints),), fill_value=-1, device=keypoints.device) + kpt_mask = pts_idx_of_box >= 0 + # Ensure enough points are selected to satisfy the + # condition of batch norm in the FC layers of feature fusion module + for i in range(B): + batch_mask = keypoints[:, 0] == i + if kpt_mask[batch_mask].sum().item() < self.min_selected_kpts: + tmp = kpt_mask[batch_mask].clone() + tmp[torch.randint(0, batch_mask.sum().item(), (self.min_selected_kpts,))] = True + kpt_mask[batch_mask] = tmp + + point_features_list = [] + if 'bev' in self.features_source: + point_bev_features = self.interpolate_from_bev_features( + keypoints_list, bev_feat + ) + point_features_list.append(point_bev_features[kpt_mask]) + + new_xyz = keypoints[kpt_mask] + new_xyz_scrs = torch.zeros((kpt_mask.sum().item(),), device=keypoints.device) + valid = pts_idx_of_box[kpt_mask] >= 0 + new_xyz_scrs[valid] = scores[pts_idx_of_box[kpt_mask][valid]] + new_xyz_batch_cnt = torch.tensor([(new_xyz[:, 0] == b).sum() for b in range(B)], + device=new_xyz.device).int() + + if 'raw_points' in self.features_source: + xyz_batch_cnt = torch.tensor([len(pts) for pts in points], + device=points[0].device).int() + raw_points = cat_coor_with_idx(points) + xyz = raw_points[:, 1:4] + point_features = None + + pooled_points, pooled_features = self.SA_rawpoints( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz[:, :3].contiguous(), + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=point_features, + ) + point_features_list.append(pooled_features) + + for k, src_name in enumerate(self.SA_layer_names): + cur_stride = 2 ** (int(src_name[-1]) - 1) + cur_coords = [feat[f"p{cur_stride}"]['coor'] for feat in voxel_feat] + cur_feats = [feat[f"p{cur_stride}"]['feat'] for feat in voxel_feat] + xyz = get_voxel_centers( + torch.cat(cur_coords), + downsample_times=self.downsample_times_map[src_name], + voxel_size=self.voxel_size, + point_cloud_range=self.point_cloud_range + ) + xyz_batch_cnt = torch.tensor([len(coor) for coor in cur_coords], + device=cur_coords[0].device).int() + pooled_points, pooled_features = self.SA_layers[k]( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz[:, :3].contiguous(), + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=torch.cat(cur_feats, dim=0), + ) + + point_features_list.append(pooled_features) + + point_features = torch.cat(point_features_list, dim=1) + + out_dict = {} + # out_dict['point_features_before_fusion'] = point_features + point_features = self.vsa_point_feature_fusion(point_features) + + cur_idx = 0 + out_dict['point_features'] = [] + out_dict['point_coords'] = [] + out_dict['point_scores'] = [] + out_dict['boxes'] = [] + out_dict['scores'] = [] + for i, num in enumerate(new_xyz_batch_cnt): + out_dict['point_features'].append(point_features[cur_idx:cur_idx + num]) + out_dict['point_coords'].append(new_xyz[cur_idx:cur_idx + num]) + out_dict['point_scores'].append(new_xyz_scrs[cur_idx:cur_idx + num]) + mask = boxes[:, 0] == i + out_dict['boxes'].append(boxes[mask, 1:]) + out_dict['scores'].append(scores[mask]) + cur_idx += num + + return out_dict diff --git 
a/cosense3d/modules/projection/__init__.py b/cosense3d/modules/projection/__init__.py new file mode 100644 index 00000000..b5603050 --- /dev/null +++ b/cosense3d/modules/projection/__init__.py @@ -0,0 +1,3 @@ +""" +Modules for projecting image features to BEV space +""" \ No newline at end of file diff --git a/cosense3d/modules/projection/fax.py b/cosense3d/modules/projection/fax.py new file mode 100644 index 00000000..4202dd85 --- /dev/null +++ b/cosense3d/modules/projection/fax.py @@ -0,0 +1,89 @@ +import torch +from torch import nn +from einops import rearrange, repeat, reduce +from torchvision.models.resnet import Bottleneck + +from cosense3d.modules.plugin.cobevt import CrossViewSwapAttention, Attention, BEVEmbedding +from cosense3d.modules import BaseModule +ResNetBottleNeck = lambda c: Bottleneck(c, c // 4) + + +class FAXModule(BaseModule): + def __init__( + self, + middle, + dim, + img_size, + strides, + feat_dims, + cross_view, + cross_view_swap, + bev_embedding, + self_attn, + **kwargs + ): + super().__init__(**kwargs) + self.img_size = img_size + + cross_views = list() + layers = list() + downsample_layers = list() + + for i, (stride, num_layers) in enumerate(zip(strides, middle)): + feat_dim = feat_dims[i] + feat_height, feat_width = img_size[0] // stride, img_size[1] // stride + + cva = CrossViewSwapAttention(feat_height, feat_width, feat_dim, + dim[i], i, + **cross_view, **cross_view_swap) + cross_views.append(cva) + + layer = nn.Sequential(*[ResNetBottleNeck(dim[i]) for _ in range(num_layers)]) + layers.append(layer) + + if i < len(middle) - 1: + downsample_layers.append(nn.Sequential( + nn.Sequential( + nn.Conv2d(dim[i], dim[i] // 4, + kernel_size=3, stride=1, + padding=1, bias=False), + nn.PixelUnshuffle(2), + nn.Conv2d(dim[i+1], dim[i+1], + 3, padding=1, bias=False), + nn.BatchNorm2d(dim[i+1]), + nn.ReLU(inplace=True), + nn.Conv2d(dim[i+1], + dim[i+1], 1, padding=0, bias=False), + nn.BatchNorm2d(dim[i+1]) + ))) + + self.bev_embedding = BEVEmbedding(dim[0], **bev_embedding) + self.cross_views = nn.ModuleList(cross_views) + self.layers = nn.ModuleList(layers) + self.downsample_layers = nn.ModuleList(downsample_layers) + self.self_attn = Attention(dim[-1], **self_attn) + + def forward(self, img_feat, intrinsic, extrinsic, **kwargs): + B = len(img_feat) + N = len(intrinsic[0]) + intrinsic = self.cat_list(intrinsic, recursive=True) + extrinsic = self.cat_list(extrinsic, recursive=True) + I_inv = torch.stack([I.inverse()[:3, :3] for I in intrinsic], dim=0 + ).reshape(B, N, 3, 3) + E_inv = torch.stack([E.inverse() for E in extrinsic], dim=0 + ).reshape(B, N, 4, 4) + + x = self.bev_embedding.get_prior() # d H W + x = repeat(x, '... 
-> b ...', b=B) # B d H W + + for i, (cross_view, layer) in enumerate(zip(self.cross_views, self.layers)): + feature = torch.stack([feat[i] for feat in img_feat], dim=0) + + x = cross_view(i, x, self.bev_embedding, feature, I_inv, E_inv) + x = layer(x) + if i < len(img_feat[0])-1: + x = self.downsample_layers[i](x) + + x = self.self_attn(x) + return {self.scatter_keys[0]: x} + diff --git a/cosense3d/modules/projection/petr.py b/cosense3d/modules/projection/petr.py new file mode 100644 index 00000000..6d7a545d --- /dev/null +++ b/cosense3d/modules/projection/petr.py @@ -0,0 +1,180 @@ +from typing import List + +import torch +from torch import nn + +from cosense3d.modules import BaseModule +from cosense3d.modules.plugin import build_plugin_module +from cosense3d.modules.utils.common import inverse_sigmoid +from cosense3d.modules.utils.misc import SELayer_Linear, MLN +from cosense3d.modules.utils.positional_encoding import pos2posemb3d + + +class PETR(BaseModule): + def __init__(self, + in_channels, + transformer, + position_range, + num_reg_fcs=2, + num_pred=3, + topk=2048, + num_query=644, + depth_num=64, + LID=True, + depth_start=1, + **kwargs): + super().__init__(**kwargs) + self.transformer = build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.img_position_dim = depth_num * 3 + self.num_pose_feat = 64 + self.in_channels = in_channels + self.topk = topk + self.num_query = num_query + self.LID = LID + self.num_reg_fcs = num_reg_fcs + self.num_pred = num_pred + + if self.LID: # linear-increasing discretization + index = torch.arange(start=0, end=depth_num, step=1).float() + index_1 = index + 1 + bin_size = (position_range[3] - depth_start) / (depth_num * (1 + depth_num)) + coords_d = depth_start + bin_size * index * index_1 + else: + index = torch.arange(start=0, end=depth_num, step=1).float() + bin_size = (position_range[3] - depth_start) / depth_num + coords_d = depth_start + bin_size * index + + self.coords_d = nn.Parameter(coords_d, requires_grad=False) + self.position_range = nn.Parameter(torch.tensor(position_range), requires_grad=False) + self.reference_points = nn.Embedding(self.num_query, 3) + + self._init_layers() + + def _init_layers(self): + self.img_position_encoder = nn.Sequential( + nn.Linear(self.img_position_dim, self.embed_dims * 4), + nn.ReLU(), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.img_memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat*3, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.spatial_alignment = MLN(8, f_dim=self.embed_dims) + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.utils.init.xavier_uniform_(m) + self._is_init = True + + def forward(self, img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img, **kwargs): + img_memory, img_pos, img2lidars, Is = self.gather_topk( + img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img) + + img_pos_emb, cone = self.img_position_embeding(img_memory, img_pos, Is, img2lidars) + img_memory = self.img_memory_embed(img_memory) + + # spatial_alignment in focal petr + img_memory = self.spatial_alignment(img_memory, cone) + img_pos_emb = 
self.featurized_pe(img_pos_emb, img_memory) + + reference_points = (self.reference_points.weight).unsqueeze(0).repeat(img_memory.shape[0], 1, 1) + query_pos = self.query_embedding(pos2posemb3d(reference_points, self.num_pose_feat)) + tgt = torch.zeros_like(query_pos) + outs_dec, _ = self.transformer(img_memory, tgt, query_pos, img_pos_emb) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(img_memory)) + ] + + return {self.scatter_keys[0]: outs} + + def format_input(self, input: List): + memory = [] + for x in input: + x = x.permute(0, 2, 3, 1).flatten(0, 2) + memory.append(x) + max_l = max([m.shape[0] for m in memory]) + out = x.new_zeros(len(memory), max_l, x.shape[-1]) + mask = x.new_ones(len(memory), max_l) + for i, m in enumerate(memory): + out[i, :len(m)] = m + mask[i, :len(m)] = False + return out, mask + + def gather_topk(self, img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img): + B = len(img_feat) + out_feat = [] + out_coor = [] + mem_ctrs = [] + img2lidars = [] + Is = [] + for b in range(B): + topk_inds = img_roi[b]['sample_weight'].view(-1).topk(k=self.topk).indices + out_feat.append(img_feat[b].permute(0, 2, 3, 1).flatten(0, 2)[topk_inds]) + # out_coor.append(img_coor[b].flatten(0, 2)[topk_inds]) + N, _, h, w = img_feat[b].shape + H, W = img_size[b][0] + + # [alpha_x, alpha_y] + intrinsic = torch.stack(intrinsics[b], dim=0)[..., [0, 1], [0, 1]] + intrinsic = torch.abs(intrinsic) / 1e3 + intrinsic = intrinsic.view(N, -1, 2).repeat(1, h * w, 1).flatten(0, 1)[topk_inds] + Is.append(intrinsic) + + # transform memery_centers from ratio to pixel + img_coor[b][..., 0] = img_coor[b][..., 0] * W + img_coor[b][..., 1] = img_coor[b][..., 1] * H + topk_ctrs = img_coor[b].flatten(0, 2)[topk_inds] + mem_ctrs.append(topk_ctrs) + + img2lidar = torch.stack(lidar2img[b], dim=0).inverse() + img2lidar = img2lidar.view(N, 1, 4, 4).repeat(1, h * w, 1, 1) + img2lidars.append(img2lidar.flatten(0, 1)[topk_inds]) + + out_feat = torch.stack(out_feat, dim=0) + # out_coor = torch.stack(out_coor, dim=0) + mem_ctrs = torch.stack(mem_ctrs, dim=0) + img2lidars = torch.stack(img2lidars, dim=0) + Is = torch.stack(Is, dim=0) + + return out_feat, mem_ctrs, img2lidars, Is + + def img_position_embeding(self, img_memory, img_pos, Is, img2lidars): + eps = 1e-5 + B = len(img_memory) + D = self.coords_d.shape[0] + coords_d = self.coords_d.view(1, 1, D, 1).repeat(B, self.topk, 1, 1) + img_pos = img_pos.unsqueeze(-2).repeat(1, 1, D, 1) + coords = torch.cat([img_pos, coords_d], dim=-1) + coords = torch.cat((coords, torch.ones_like(coords_d)), -1) + coords[..., :2] = coords[..., :2] * torch.maximum( + coords[..., 2:3], torch.ones_like(coords_d) * eps) + coords = coords.unsqueeze(-1) + + coords3d = torch.matmul(img2lidars.unsqueeze(-3), coords).squeeze(-1)[..., :3] + coords3d[..., :3] = (coords3d[..., :3] - self.position_range[:3]) / ( + self.position_range[3:] - self.position_range[:3]) + coords3d = coords3d.reshape(B, -1, D * 3) + pos_embed = inverse_sigmoid(coords3d) + coords_position_embeding = self.img_position_encoder(pos_embed) + cone = torch.cat([Is, coords3d[..., -3:], coords3d[..., -90:-87]], dim=-1) + return coords_position_embeding, cone + + diff --git a/cosense3d/modules/projection/spatial_transform.py b/cosense3d/modules/projection/spatial_transform.py new file mode 100644 index 00000000..ac63d9fd --- /dev/null +++ b/cosense3d/modules/projection/spatial_transform.py @@ -0,0 +1,50 @@ +import torch.nn as nn +import torch +from einops import rearrange 
+from cosense3d.modules import BaseModule +from cosense3d.modules.utils import cobevt_utils as utils + + +class STTF(BaseModule): + def __init__(self, + resolution, + downsample_rate, + use_roi_mask=True, + **kwargs): + super(STTF, self).__init__(**kwargs) + self.discrete_ratio = resolution + self.downsample_rate = downsample_rate + self.use_roi_mask = use_roi_mask + + def forward(self, bev_feat, requests, coop_poses, **kwargs): + """ + Transform the bev features to ego space. + """ + x = self.stack_data_from_list(bev_feat) + coop_poses = self.stack_data_from_list(coop_poses) + ego_poses = self.stack_data_from_list(requests, 'lidar_pose') + transform_coop2ego = ego_poses.inverse() @ coop_poses + dist_correction_matrix = utils.get_discretized_transformation_matrix( + transform_coop2ego, self.discrete_ratio, self.downsample_rate) + + # transpose and flip to make the transformation correct + x = rearrange(x, 'b c h w -> b c w h') + x = torch.flip(x, dims=(3,)) + # Only compensate non-ego vehicles + B, C, H, W = x.shape + + T = utils.get_transformation_matrix( + dist_correction_matrix.reshape(-1, 2, 3), (H, W)) + cav_features = utils.warp_affine(x.reshape(-1, C, H, W), T, + (H, W)) + cav_features = cav_features.reshape(B, C, H, W) + + # flip and transpose back + x = cav_features + x = torch.flip(x, dims=(3,)) + x = rearrange(x, 'b c w h -> b c h w') + + bev_mask = utils.get_rotated_roi((B, 1, 1, H, W), T).squeeze(1) + + return {'bev_feat': x, 'bev_mask': bev_mask} + diff --git a/cosense3d/modules/utils/__init__.py b/cosense3d/modules/utils/__init__.py new file mode 100644 index 00000000..cb6128eb --- /dev/null +++ b/cosense3d/modules/utils/__init__.py @@ -0,0 +1,9 @@ +import copy +from torch import nn + + +def build_torch_module(cfg): + cfg_ = copy.deepcopy(cfg) + module_name = cfg_.pop('type') + module = getattr(nn, module_name)(**cfg_) + return module \ No newline at end of file diff --git a/cosense3d/modules/utils/amp.py b/cosense3d/modules/utils/amp.py new file mode 100644 index 00000000..f9964033 --- /dev/null +++ b/cosense3d/modules/utils/amp.py @@ -0,0 +1,396 @@ +# Copyright (c) OpenMMLab. All rights reserved. Modified by Yunshuang Yuan +import functools +import warnings +from collections import abc +from inspect import getfullargspec +from typing import Callable, Iterable, List, Optional + +import numpy as np +import torch +import torch.nn as nn +from torch.nn.parameter import Parameter + + +from torch.cuda.amp import autocast + + +def cast_tensor_type(inputs, src_type: torch.dtype, dst_type: torch.dtype): + """Recursively convert Tensor in inputs from src_type to dst_type. + + Note: + In v1.4.4 and later, ``cast_tersor_type`` will only convert the + torch.Tensor which is consistent with ``src_type`` to the ``dst_type``. + Before v1.4.4, it ignores the ``src_type`` argument, leading to some + potential problems. For example, + ``cast_tensor_type(inputs, torch.float, torch.half)`` will convert all + tensors in inputs to ``torch.half`` including those originally in + ``torch.Int`` or other types, which is not expected. + + :param inputs: Inputs that to be casted. + :param src_type: Source type. + :param dst_type: Destination type. + :return: The same type with inputs, but all contained Tensors have been cast. + """ + if isinstance(inputs, nn.Module): + return inputs + elif isinstance(inputs, torch.Tensor): + # we need to ensure that the type of inputs to be casted are the same + # as the argument `src_type`. 
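+        # e.g. cast_tensor_type(x, torch.float, torch.half) converts float32
+        # tensors to half precision but leaves int64/bool tensors untouched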
+ return inputs.to(dst_type) if inputs.dtype == src_type else inputs + elif isinstance(inputs, str): + return inputs + elif isinstance(inputs, np.ndarray): + return inputs + elif isinstance(inputs, abc.Mapping): + return type(inputs)({ # type: ignore + k: cast_tensor_type(v, src_type, dst_type) + for k, v in inputs.items() + }) + elif isinstance(inputs, abc.Iterable): + return type(inputs)( # type: ignore + cast_tensor_type(item, src_type, dst_type) for item in inputs) + else: + return inputs + + +def auto_fp16( + apply_to: Optional[Iterable] = None, + out_fp32: bool = False, + supported_types: tuple = (nn.Module, ), +) -> Callable: + """Decorator to enable fp16 training automatically. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If inputs arguments are fp32 tensors, they will + be converted to fp16 automatically. Arguments other than fp32 tensors are + ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the + backend, otherwise, original mmcv implementation will be adopted. + + :param apply_to: The argument names to be converted. + `None` indicates all arguments. + :param out_fp32: Whether to convert the output back to fp32. + :param supported_types: Classes can be decorated by ``auto_fp16``. + `New in version 1.5.0.` + :return: + Example: + + >>> import torch.nn as nn + >>> class MyModule1(nn.Module): + >>> + >>> # Convert x and y to fp16 + >>> @auto_fp16() + >>> def forward(self, x, y): + >>> pass + + >>> import torch.nn as nn + >>> class MyModule2(nn.Module): + >>> + >>> # convert pred to fp16 + >>> @auto_fp16(apply_to=('pred', )) + >>> def do_something(self, pred, others): + >>> pass + """ + + def auto_fp16_wrapper(old_func: Callable) -> Callable: + + @functools.wraps(old_func) + def new_func(*args, **kwargs) -> Callable: + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. + if not isinstance(args[0], supported_types): + raise TypeError('@auto_fp16 can only be used to decorate the ' + f'method of those classes {supported_types}') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + # NOTE: default args are not taken into consideration + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.float, torch.half)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = {} + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.float, torch.half) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + with autocast(enabled=True): + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp32: + output = cast_tensor_type(output, torch.half, torch.float) + return output + + return new_func + + return auto_fp16_wrapper + + +def force_fp32(apply_to: Optional[Iterable] = None, + out_fp16: bool = False) -> Callable: + """Decorator to convert input arguments to fp32 in force. 
+ + This decorator is useful when you write custom modules and want to support + mixed precision training. If there are some inputs that must be processed + in fp32 mode, then this decorator can handle it. If inputs arguments are + fp16 tensors, they will be converted to fp32 automatically. Arguments other + than fp16 tensors are ignored. If you are using PyTorch >= 1.6, + torch.cuda.amp is used as the backend, otherwise, original mmcv + implementation will be adopted. + + :param apply_to: (Iterable, optional) The argument names to be converted. + `None` indicates all arguments. + :param out_fp16: (bool) Whether to convert the output back to fp16. + + Example: + + >>> import torch.nn as nn + >>> class MyModule1(nn.Module): + >>> + >>> # Convert x and y to fp32 + >>> @force_fp32() + >>> def loss(self, x, y): + >>> pass + + >>> import torch.nn as nn + >>> class MyModule2(nn.Module): + >>> + >>> # convert pred to fp32 + >>> @force_fp32(apply_to=('pred', )) + >>> def post_process(self, pred, others): + >>> pass + """ + + def force_fp32_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs) -> Callable: + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. + if not isinstance(args[0], torch.nn.Module): + raise TypeError('@force_fp32 can only be used to decorate the ' + 'method of nn.Module') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.half, torch.float)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = dict() + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.half, torch.float) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + with autocast(enabled=False): + output = old_func(*new_args, **new_kwargs) + + # cast the results back to fp32 if necessary + if out_fp16: + output = cast_tensor_type(output, torch.float, torch.half) + return output + + return new_func + + return force_fp32_wrapper + + +def wrap_fp16_model(model: nn.Module) -> None: + """Wrap the FP32 model to FP16. + + If you are using PyTorch >= 1.6, torch.cuda.amp is used as the + backend, otherwise, original mmcv implementation will be adopted. + + For PyTorch >= 1.6, this function will + 1. Set fp16 flag inside the model to True. + + Otherwise: + 1. Convert FP32 model to FP16. + 2. Remain some necessary layers to be FP32, e.g., normalization layers. + 3. Set `fp16_enabled` flag inside the model to True. + + :param model: (nn.Module) Model in FP32. + """ + # convert model to fp16 + model.half() + # patch the normalization layers to make it work in fp32 mode + patch_norm_fp32(model) + # set `fp16_enabled` flag + for m in model.modules(): + if hasattr(m, 'fp16_enabled'): + m.fp16_enabled = True + + +def patch_norm_fp32(module: nn.Module) -> nn.Module: + """Recursively convert normalization layers from FP16 to FP32. 
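+    Normalization statistics are kept in FP32 because their reductions
+    (mean/variance) lose precision or overflow easily in FP16; for
+    ``nn.GroupNorm`` (and older PyTorch versions) the forward method is
+    additionally patched to cast FP16 inputs to FP32 and cast the outputs
+    back to FP16.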
+ + :param module: (nn.Module) The modules to be converted in FP16. + + Returns: nn.Module: The converted module, the normalization layers have been + converted to FP32. + """ + if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): + module.float() + if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3': + module.forward = patch_forward_method(module.forward, torch.half, + torch.float) + for child in module.children(): + patch_norm_fp32(child) + return module + + +def patch_forward_method(func: Callable, + src_type: torch.dtype, + dst_type: torch.dtype, + convert_output: bool = True) -> Callable: + """Patch the forward method of a module. + + :param func: (callable) The original forward method. + :param src_type: (torch.dtype) Type of input arguments to be converted from. + :param dst_type: (torch.dtype) Type of input arguments to be converted to. + :param convert_output: (bool) Whether to convert the output back to src_type. + + :returns: callable: The patched forward method. + """ + + def new_forward(*args, **kwargs): + output = func(*cast_tensor_type(args, src_type, dst_type), + **cast_tensor_type(kwargs, src_type, dst_type)) + if convert_output: + output = cast_tensor_type(output, dst_type, src_type) + return output + + return new_forward + + +class LossScaler: + """Class that manages loss scaling in mixed precision training which + supports both dynamic or static mode. + + The implementation refers to + https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py. + Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling. + It's important to understand how :class:`LossScaler` operates. + Loss scaling is designed to combat the problem of underflowing + gradients encountered at long times when training fp16 networks. + Dynamic loss scaling begins by attempting a very high loss + scale. Ironically, this may result in OVERflowing gradients. + If overflowing gradients are encountered, :class:`FP16_Optimizer` then + skips the update step for this particular iteration/minibatch, + and :class:`LossScaler` adjusts the loss scale to a lower value. + If a certain number of iterations occur without overflowing gradients + detected,:class:`LossScaler` increases the loss scale once more. + In this way :class:`LossScaler` attempts to "ride the edge" of always + using the highest loss scale possible without incurring overflow. + + :param init_scale: (float) Initial loss scale value, default: 2**32. + :param scale_factor: (float) Factor used when adjusting the loss scale. + :param Default: 2. + :param mode: (str) Loss scaling mode. 'dynamic' or 'static' + :param scale_window: (int) Number of consecutive iterations without an + overflow to wait before increasing the loss scale. Default: 1000. 
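+
+    Example (illustrative sketch of one dynamic-scaling training step;
+    ``model``, ``optimizer`` and ``loss`` are placeholders):
+
+    >>> scaler = LossScaler(init_scale=2**16, mode='dynamic')
+    >>> (loss * scaler.loss_scale).backward()
+    >>> overflow = scaler.has_overflow(list(model.parameters()))
+    >>> if not overflow:
+    ...     for p in model.parameters():
+    ...         if p.grad is not None:
+    ...             p.grad.data.div_(scaler.loss_scale)  # unscale gradients
+    ...     optimizer.step()
+    >>> optimizer.zero_grad()
+    >>> scaler.update_scale(overflow)  # shrink on overflow, grow after
+    >>>                                # `scale_window` clean iterations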
+ """ + + def __init__(self, + init_scale: float = 2**32, + mode: str = 'dynamic', + scale_factor: float = 2., + scale_window: int = 1000): + self.cur_scale = init_scale + self.cur_iter = 0 + assert mode in ('dynamic', + 'static'), 'mode can only be dynamic or static' + self.mode = mode + self.last_overflow_iter = -1 + self.scale_factor = scale_factor + self.scale_window = scale_window + + def has_overflow(self, params: List[Parameter]) -> bool: + """Check if params contain overflow.""" + if self.mode != 'dynamic': + return False + for p in params: + if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data): + return True + return False + + def _has_inf_or_nan(x: torch.Tensor) -> bool: + """Check if params contain NaN.""" + try: + cpu_sum = float(x.float().sum()) + except RuntimeError as instance: + if 'value cannot be converted' not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') \ + or cpu_sum != cpu_sum: + return True + return False + + def update_scale(self, overflow: bool) -> None: + """update the current loss scale value when overflow happens.""" + if self.mode != 'dynamic': + return + if overflow: + self.cur_scale = max(self.cur_scale / self.scale_factor, 1) + self.last_overflow_iter = self.cur_iter + else: + if (self.cur_iter - self.last_overflow_iter) % \ + self.scale_window == 0: + self.cur_scale *= self.scale_factor + self.cur_iter += 1 + + def state_dict(self) -> dict: + """Returns the state of the scaler as a :class:`dict`.""" + return dict( + cur_scale=self.cur_scale, + cur_iter=self.cur_iter, + mode=self.mode, + last_overflow_iter=self.last_overflow_iter, + scale_factor=self.scale_factor, + scale_window=self.scale_window) + + def load_state_dict(self, state_dict: dict) -> None: + """Loads the loss_scaler state dict. + + :param state_dict: (dict) scaler state. 
+ """ + self.cur_scale = state_dict['cur_scale'] + self.cur_iter = state_dict['cur_iter'] + self.mode = state_dict['mode'] + self.last_overflow_iter = state_dict['last_overflow_iter'] + self.scale_factor = state_dict['scale_factor'] + self.scale_window = state_dict['scale_window'] + + @property + def loss_scale(self) -> float: + return self.cur_scale diff --git a/cosense3d/modules/utils/box_coder.py b/cosense3d/modules/utils/box_coder.py new file mode 100644 index 00000000..82c7772a --- /dev/null +++ b/cosense3d/modules/utils/box_coder.py @@ -0,0 +1,403 @@ +import copy +import math +import torch + +from cosense3d.ops.utils import points_in_boxes_gpu + + +def build_box_coder(type, **kwargs): + return globals()[type](**kwargs) + + +class ResidualBoxCoder(object): + def __init__(self, mode: str='simple_dist'): + """ + :param mode: str, simple_dist | sin_cos_dist | compass_rose + """ + self.mode = mode + if mode == 'simple_dist': + self.code_size = 7 + elif mode == 'sin_cos_dist': + self.code_size = 8 + elif mode == 'compass_rose': + self.code_size = 10 + self.cls_code_size = 2 + else: + raise NotImplementedError + + def encode_direction(self, ra, rg): + if self.mode == 'simple_dist': + reg = (rg - ra).view(-1, 1) + return reg, None + elif self.mode == 'sin_cos_dist': + rgx = torch.cos(rg) + rgy = torch.sin(rg) + rax = torch.cos(ra) + ray = torch.sin(ra) + rtx = rgx - rax + rty = rgy - ray + ret = [rtx, rty] + reg = torch.stack(ret, dim=-1) # N 2 + return reg, None + elif self.mode == 'compass_rose': + # encode box directions + rgx = torch.cos(rg).view(-1, 1) # N 1 + rgy = torch.sin(rg).view(-1, 1) # N 1 + ra_ext = torch.cat([ra, ra + math.pi], dim=-1) # N 2, invert + rax = torch.cos(ra_ext) # N 2 + ray = torch.sin(ra_ext) # N 2 + # cos(a - b) = cos(a)cos(b) + sin(a)sin(b) + # we use arccos instead of a-b to control the difference in 0-pi + diff_angle = torch.arccos(rax * rgx + ray * rgy) # N 2 + dir_score = 1 - diff_angle / math.pi # N 2 + rtx = rgx - rax # N 2 + rty = rgy - ray # N 2 + + dir_score = dir_score # N 2 + ret = [rtx, rty] + reg = torch.cat(ret, dim=-1) # N 4 + return reg, dir_score + else: + raise NotImplementedError + + def decode_direction(self, ra, vt, dir_scores=None): + if self.mode == 'simple_dist': + rg = vt + ra + return rg + elif self.mode == 'sin_cos_dist': + rax = torch.cos(ra) + ray = torch.sin(ra) + va = torch.cat([rax, ray], dim=-1) + vg = vt + va + rg = torch.atan2(vg[..., 1], vg[..., 0]) + return rg + elif self.mode == 'compass_rose': + ra_ext = torch.cat([ra, ra + math.pi], dim=-1) # N 2, invert + rax = torch.cos(ra_ext) # N 2 + ray = torch.sin(ra_ext) # N 2 + va = torch.cat([rax, ray], dim=-1) + vg = vt + va + rg = torch.atan2(vg[..., 2:], vg[..., :2]).view(-1, 2) + + dirs = torch.argmax(dir_scores, dim=-1).view(-1) + rg = rg[torch.arange(len(rg)), dirs].view(len(vt), -1, 1) + return rg + else: + raise NotImplementedError + + def encode(self, anchors, boxes): + xa, ya, za, la, wa, ha, ra = torch.split(anchors, 1, dim=-1) + xg, yg, zg, lg, wg, hg, rg = torch.split(boxes, 1, dim=-1) + + diagonal = torch.sqrt(la ** 2 + wa ** 2) + xt = (xg - xa) / diagonal + yt = (yg - ya) / diagonal + zt = (zg - za) / ha + + lt = torch.log(lg / la) + wt = torch.log(wg / wa) + ht = torch.log(hg / ha) + + reg_dir, dir_score = self.encode_direction(ra, rg) + ret = [xt, yt, zt, lt, wt, ht, reg_dir] + reg = torch.cat(ret, dim=1) # N 6+4 + + return reg, dir_score + + def decode(self, anchors, boxes_enc, dir_scores=None): + xa, ya, za, la, wa, ha, ra = torch.split(anchors, 1, dim=-1) + 
xt, yt, zt, lt, wt, ht = torch.split(boxes_enc[..., :6], 1, dim=-1) + vt = boxes_enc[..., 6:] + + diagonal = torch.sqrt(la ** 2 + wa ** 2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * ha + za + + lg = torch.exp(lt) * la + wg = torch.exp(wt) * wa + hg = torch.exp(ht) * ha + + rg = self.decode_direction(ra, vt, dir_scores) + + return torch.cat([xg, yg, zg, lg, wg, hg, rg], dim=-1) + + +class CenterBoxCoder(object): + def __init__(self, with_velo=False, with_pred=False, reg_radius=1.6, z_offset=1.0): + self.with_velo = with_velo + self.with_pred = with_pred + self.reg_radius = reg_radius + self.z_offset = z_offset + self.pred_max_offset = 2.0 + reg_radius + + def encode(self, centers, gt_boxes, meter_per_pixel, gt_preds=None): + """ + + :param centers: (N, 3) + :param gt_boxes: (N, 8) [batch_idx, x, y, z, l, w, h, r] + :param meter_per_pixel: tuple with 2 elements + :param gt_preds: + :return: + """ + if isinstance(meter_per_pixel, list): + assert meter_per_pixel[0] == meter_per_pixel[1], 'only support unified pixel size for x and y' + # TODO: adapt meter per pixel + meter_per_pixel = meter_per_pixel[0] + if len(gt_boxes) == 0: + valid = torch.zeros_like(centers[:, 0]).bool() + res = None, None, None, valid + if self.with_velo: + res = res + (None,) + return res + + # match centers and gt_boxes + dist_ctr_to_box = torch.norm(centers[:, 1:3].unsqueeze(1) + - gt_boxes[:, 1:3].unsqueeze(0), dim=-1) + cc, bb = torch.meshgrid(centers[:, 0], gt_boxes[:, 0], indexing='ij') + dist_ctr_to_box[cc != bb] = 1000 + min_dists, box_idx_of_pts = dist_ctr_to_box.min(dim=1) + diagnal = torch.norm(gt_boxes[:, 4:6].mean(dim=0) / 2) + valid = min_dists < max(diagnal, meter_per_pixel[0]) + # valid = min_dists < self.reg_radius + valid_center, valid_box = centers[valid], gt_boxes[box_idx_of_pts[valid]] + valid_pred = None + if self.with_pred and gt_preds is not None: + valid_pred = gt_preds[box_idx_of_pts[valid]] + + xc, yc = torch.split(valid_center[:, 1:3], 1, dim=-1) + xg, yg, zg, lg, wg, hg, rg = torch.split(valid_box[:, 1:8], 1, dim=-1) + + xt = xg - xc + yt = yg - yc + zt = zg # + self.z_offset + + lt = torch.log(lg) + wt = torch.log(wg) + ht = torch.log(hg) + + # encode box directions + rgx = torch.cos(rg).view(-1, 1) # N 1 + rgy = torch.sin(rg).view(-1, 1) # N 1 + ra = torch.arange(0, 2, 0.5).to(xc.device) * math.pi + ra_ext = torch.ones_like(valid_box[:, :4]) * ra.view(-1, 4) # N 4 + rax = torch.cos(ra_ext) # N 4 + ray = torch.sin(ra_ext) # N 4 + # cos(a - b) = cos(a)cos(b) + sin(a)sin(b) + # we use arccos instead of a-b to control the difference in 0-pi + diff_angle = torch.arccos(rax * rgx + ray * rgy) # N 4 + dir_score = 1 - diff_angle / math.pi # N 4 + rtx = rgx - rax # N 4 + rty = rgy - ray # N 4 + + reg_box = torch.cat([xt, yt, zt, lt, wt, ht], dim=1) # N 6 + reg_dir = torch.cat([rtx, rty], dim=1) # N 8 + # reg_box[..., :3] /= self.reg_radius + + res = (reg_box, reg_dir, dir_score, valid) + + if self.with_velo: + res = res + (valid_box[:, 8:10],) + elif valid_box.shape[-1] > 8: + res = res + (valid_box[:, 8:10],) + if self.with_pred and valid_pred is not None: + prev_angles = valid_box[:, 7:8] + preds_tgt = [] + mask = [] + for i, boxes in enumerate(valid_pred.transpose(1, 0)): + # some gt_boxes do not have gt successors, zero padded virtual successors are used to align the number + # of boxes between gt_boxes and gt_preds, when calculate preds loss, these boxes should be ignored. 
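+                # a padded virtual successor is all-zero, so `any` over the
+                # last dim gives 0 for padding and 1 for a real future box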
+ mask.append(boxes.any(dim=-1, keepdim=True).float()) + diff_xy = (boxes[:, :2] - valid_center[:, 1:3]) # / self.pred_max_offset + diff_z = boxes[:, 2:3] # + self.z_offset + diff_cos = torch.cos(boxes[:, 3:]) - torch.cos(prev_angles) + diff_sin = torch.sin(boxes[:, 3:]) - torch.sin(prev_angles) + preds_tgt.append(torch.cat([diff_xy, diff_z, diff_cos, diff_sin], dim=-1) / (i + 2)) + preds_tgt = torch.cat(preds_tgt, dim=-1) + mask = torch.cat(mask, dim=-1).all(dim=-1, keepdim=True) + res = res + (torch.cat([mask, preds_tgt], dim=-1),) + return res + + def decode(self, centers, reg): + """ + + :param centers: Tensor (N, 3) or (B, N, 2+). + :param reg: dict, + box - (N, 6) or (B, N, 6) + dir - (N, 8) or (B, N, 8) + scr - (N, 4) or (B, N, 4) + vel - (N, 2) or (B, N, 2), optional + pred - (N, 5) or (B, N, 5), optional + :return: decoded bboxes. + """ + if centers.ndim > 2: + xc, yc = torch.split(centers[..., 0:2], 1, dim=-1) + else: + xc, yc = torch.split(centers[..., 1:3], 1, dim=-1) + # reg['box'][..., :3] *= self.reg_radius + xt, yt, zt, lt, wt, ht = torch.split(reg['box'], 1, dim=-1) + + xo = xt + xc + yo = yt + yc + zo = zt #- self.z_offset + + lo = torch.exp(lt) + wo = torch.exp(wt) + ho = torch.exp(ht) + + # decode box directions + scr_max, max_idx = reg['scr'].max(dim=-1) + shape = max_idx.shape + max_idx = max_idx.view(-1) + ii = torch.arange(len(max_idx)) + ra = max_idx.float() * 0.5 * math.pi + ct = reg['dir'][..., :4].view(-1, 4)[ii, max_idx] + torch.cos(ra) + st = reg['dir'][..., 4:].view(-1, 4)[ii, max_idx] + torch.sin(ra) + ro = torch.atan2(st.view(*shape), ct.view(*shape)).unsqueeze(-1) + + if centers.ndim > 2: + # dense tensor + ret = torch.cat([xo, yo, zo, lo, wo, ho, ro], dim=-1) + else: + # sparse tensor with batch indices + ret = torch.cat([centers[..., :1], xo, yo, zo, lo, wo, ho, ro], dim=-1) + + if self.with_velo: + ret = torch.cat([ret, reg['vel']], dim=-1) + if self.with_pred: + pred = reg['pred'].clone() + b, n, c = pred.shape + pred_len = c // 5 + mul = torch.arange(1, pred_len + 1, device=pred.device, dtype=pred.dtype) + pred = pred.view(b, n, -1, 5) * mul.view(1, 1, -1, 1) + xy = pred[..., :2] + centers[..., :2].unsqueeze(-2) + z = pred[..., 2:3] + r = torch.atan2(pred[..., 4] + st.view(*shape, 1), pred[..., 3] + ct.view(*shape, 1)).unsqueeze(-1) + lwh = torch.cat([lo, wo, ho], dim=-1).unsqueeze(-2).repeat(1, 1, pred_len, 1) + pred = torch.cat([xy, z, lwh, r], dim=-1) + ret = (ret, pred) + + return ret + + +class BoxPredCoder(object): + def __init__(self, with_velo=False): + self.with_velo = with_velo + + def encode(self, centers, gt_boxes, meter_per_pixel, gt_preds): + """ + + :param centers: (N, 3) + :param gt_boxes: (N, 8) [batch_idx, x, y, z, l, w, h, r] + :param meter_per_pixel: tuple with 2 elements + :param gt_preds: (N, 8) [batch_idx, x, y, z, l, w, h, r], gt boxes to be predicted + :return: encoded bbox targets. 
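+
+        Note: centers are matched to ``gt_boxes`` with the same nearest-center
+        assignment as ``CenterBoxCoder.encode``, but the regression targets are
+        taken from ``gt_preds`` (the boxes to be predicted) instead of
+        ``gt_boxes``.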
+ """ + if isinstance(meter_per_pixel, list): + assert meter_per_pixel[0] == meter_per_pixel[1], 'only support unified pixel size for x and y' + # TODO: adapt meter per pixel + meter_per_pixel = meter_per_pixel[0] + if len(gt_boxes) == 0: + valid = torch.zeros_like(centers[:, 0]).bool() + res = None, None, None, valid + if self.with_velo: + res = res + (None,) + return res + + # match centers and gt_boxes + dist_ctr_to_box = torch.norm(centers[:, 1:3].unsqueeze(1) + - gt_boxes[:, 1:3].unsqueeze(0), dim=-1) + cc, bb = torch.meshgrid(centers[:, 0], gt_boxes[:, 0], indexing='ij') + dist_ctr_to_box[cc != bb] = 1000 + min_dists, box_idx_of_pts = dist_ctr_to_box.min(dim=1) + diagnal = torch.norm(gt_boxes[:, 4:6].mean(dim=0) / 2) + valid = min_dists < max(diagnal, meter_per_pixel[0]) + # valid = min_dists < self.reg_radius + valid_center = centers[valid] + valid_box = gt_preds[box_idx_of_pts[valid]] + + xc, yc = torch.split(valid_center[:, 1:3], 1, dim=-1) + xg, yg, zg, lg, wg, hg, rg = torch.split(valid_box[:, 1:8], 1, dim=-1) + + xt = xg - xc + yt = yg - yc + zt = zg # + self.z_offset + + lt = torch.log(lg) + wt = torch.log(wg) + ht = torch.log(hg) + + # encode box directions + rgx = torch.cos(rg).view(-1, 1) # N 1 + rgy = torch.sin(rg).view(-1, 1) # N 1 + ra = torch.arange(0, 2, 0.5).to(xc.device) * math.pi + ra_ext = torch.ones_like(valid_box[:, :4]) * ra.view(-1, 4) # N 4 + rax = torch.cos(ra_ext) # N 4 + ray = torch.sin(ra_ext) # N 4 + # cos(a - b) = cos(a)cos(b) + sin(a)sin(b) + # we use arccos instead of a-b to control the difference in 0-pi + diff_angle = torch.arccos(rax * rgx + ray * rgy) # N 4 + dir_score = 1 - diff_angle / math.pi # N 4 + rtx = rgx - rax # N 4 + rty = rgy - ray # N 4 + + reg_box = torch.cat([xt, yt, zt, lt, wt, ht], dim=1) # N 6 + reg_dir = torch.cat([rtx, rty], dim=1) # N 8 + # reg_box[..., :3] /= self.reg_radius + + res = (reg_box, reg_dir, dir_score, valid) + + if self.with_velo: + res = res + (valid_box[:, 8:10],) + elif valid_box.shape[-1] > 8: + res = res + (valid_box[:, 8:10],) + return res + + def decode(self, centers, reg): + """ + + :param centers: Tensor (N, 3) or (B, N, 2+). + :param reg: dict, + box - (N, 6) or (B, N, 6) + dir - (N, 8) or (B, N, 8) + scr - (N, 4) or (B, N, 4) + vel - (N, 2) or (B, N, 2), optional + pred - (N, 5) or (B, N, 5), optional + :return: decoded bboxes. 
+ """ + if centers.ndim > 2: + xc, yc = torch.split(centers[..., 0:2], 1, dim=-1) + else: + xc, yc = torch.split(centers[..., 1:3], 1, dim=-1) + # reg['box'][..., :3] *= self.reg_radius + xt, yt, zt, lt, wt, ht = torch.split(reg['box'], 1, dim=-1) + + xo = xt + xc + yo = yt + yc + zo = zt #- self.z_offset + + lo = torch.exp(lt) + wo = torch.exp(wt) + ho = torch.exp(ht) + + # decode box directions + scr_max, max_idx = reg['scr'].max(dim=-1) + shape = max_idx.shape + max_idx = max_idx.view(-1) + ii = torch.arange(len(max_idx)) + ra = max_idx.float() * 0.5 * math.pi + ct = reg['dir'][..., :4].view(-1, 4)[ii, max_idx] + torch.cos(ra) + st = reg['dir'][..., 4:].view(-1, 4)[ii, max_idx] + torch.sin(ra) + ro = torch.atan2(st.view(*shape), ct.view(*shape)).unsqueeze(-1) + + if centers.ndim > 2: + # dense tensor + ret = torch.cat([xo, yo, zo, lo, wo, ho, ro], dim=-1) + else: + # sparse tensor with batch indices + ret = torch.cat([centers[..., :1], xo, yo, zo, lo, wo, ho, ro], dim=-1) + + if self.with_velo: + ret = torch.cat([ret, reg['vel']], dim=-1) + + return ret diff --git a/cosense3d/modules/utils/cobevt_utils.py b/cosense3d/modules/utils/cobevt_utils.py new file mode 100644 index 00000000..6de9168a --- /dev/null +++ b/cosense3d/modules/utils/cobevt_utils.py @@ -0,0 +1,424 @@ +""" +torch_transformation_utils.py from CoBEVT +""" +import os + +import torch +import torch.nn.functional as F +import numpy as np +import matplotlib.pyplot as plt + + +def get_roi_and_cav_mask(shape, cav_mask, spatial_correction_matrix, + discrete_ratio, downsample_rate): + """ + Get mask for the combination of cav_mask and rorated ROI mask. + Parameters + ---------- + shape : tuple + Shape of (B, L, H, W, C). + cav_mask : torch.Tensor + Shape of (B, L). + spatial_correction_matrix : torch.Tensor + Shape of (B, L, 4, 4) + discrete_ratio : float + Discrete ratio. + downsample_rate : float + Downsample rate. + + Returns + ------- + com_mask : torch.Tensor + Combined mask with shape (B, H, W, L, 1). + + """ + B, L, H, W, C = shape + C = 1 + # (B,L,4,4) + dist_correction_matrix = get_discretized_transformation_matrix( + spatial_correction_matrix, discrete_ratio, + downsample_rate) + # (B*L,2,3) + T = get_transformation_matrix( + dist_correction_matrix.reshape(-1, 2, 3), (H, W)) + # (B,L,1,H,W) + roi_mask = get_rotated_roi((B, L, C, H, W), T) + # (B,L,1,H,W) + com_mask = combine_roi_and_cav_mask(roi_mask, cav_mask) + # (B,H,W,1,L) + com_mask = com_mask.permute(0,3,4,2,1) + return com_mask + + +def combine_roi_and_cav_mask(roi_mask, cav_mask): + """ + Combine ROI mask and CAV mask + + Parameters + ---------- + roi_mask : torch.Tensor + Mask for ROI region after considering the spatial transformation/correction. + cav_mask : torch.Tensor + Mask for CAV to remove padded 0. + + Returns + ------- + com_mask : torch.Tensor + Combined mask. + """ + # (B, L, 1, 1, 1) + cav_mask = cav_mask.unsqueeze(2).unsqueeze(3).unsqueeze(4) + # (B, L, C, H, W) + cav_mask = cav_mask.expand(roi_mask.shape) + # (B, L, C, H, W) + com_mask = roi_mask * cav_mask + return com_mask + + +def get_rotated_roi(shape, correction_matrix): + """ + Get rorated ROI mask. + + Parameters + ---------- + shape : tuple + Shape of (B,L,C,H,W). + correction_matrix : torch.Tensor + Correction matrix with shape (N,2,3). + + Returns + ------- + roi_mask : torch.Tensor + Roated ROI mask with shape (N,2,3). + + """ + B, L, C, H, W = shape + # To reduce the computation, we only need to calculate the mask for the first channel. 
+ # (B,L,1,H,W) + x = torch.ones((B, L, 1, H, W)).to(correction_matrix.dtype).to( + correction_matrix.device) + # (B*L,1,H,W) + roi_mask = warp_affine(x.reshape(-1, 1, H, W), correction_matrix, + dsize=(H, W), mode="nearest") + # (B,L,C,H,W) + roi_mask = torch.repeat_interleave(roi_mask, C, dim=1).reshape(B, L, C, H, + W) + return roi_mask + + +def get_discretized_transformation_matrix(matrix, discrete_ratio, + downsample_rate): + """ + Get disretized transformation matrix. + Parameters + ---------- + matrix : torch.Tensor + Shape -- (B, L, 4, 4) where B is the batch size, L is the max cav + number. + discrete_ratio : float + Discrete ratio. + downsample_rate : float/int + downsample_rate + + Returns + ------- + matrix : torch.Tensor + Output transformation matrix in 2D with shape (B, L, 2, 3), + including 2D transformation and 2D rotation. + + """ + matrix = matrix[..., [0, 1], :][..., :, [0, 1, 3]] + # normalize the x,y transformation + matrix[..., -1] = matrix[..., -1] \ + / (discrete_ratio * downsample_rate) + + return matrix.type(dtype=torch.float) + + +def _torch_inverse_cast(input): + r""" + Helper function to make torch.inverse work with other than fp32/64. + The function torch.inverse is only implemented for fp32/64 which makes + impossible to be used by fp16 or others. What this function does, + is cast input data type to fp32, apply torch.inverse, + and cast back to the input dtype. + Args: + input : torch.Tensor + Tensor to be inversed. + + Returns: + out : torch.Tensor + Inversed Tensor. + + """ + dtype = input.dtype + if dtype not in (torch.float32, torch.float64): + dtype = torch.float32 + out = torch.inverse(input.to(dtype)).to(input.dtype) + return out + + +def normal_transform_pixel( + height, width, device, dtype, eps=1e-14): + r""" + Compute the normalization matrix from image size in pixels to [-1, 1]. + Args: + height : int + Image height. + width : int + Image width. + device : torch.device + Output tensor devices. + dtype : torch.dtype + Output tensor data type. + eps : float + Epsilon to prevent divide-by-zero errors. + + Returns: + tr_mat : torch.Tensor + Normalized transform with shape :math:`(1, 3, 3)`. + """ + tr_mat = torch.tensor( + [[1.0, 0.0, -1.0], [0.0, 1.0, -1.0], [0.0, 0.0, 1.0]], device=device, + dtype=dtype) # 3x3 + + # prevent divide by zero bugs + width_denom = eps if width == 1 else width - 1.0 + height_denom = eps if height == 1 else height - 1.0 + + tr_mat[0, 0] = tr_mat[0, 0] * 2.0 / width_denom + tr_mat[1, 1] = tr_mat[1, 1] * 2.0 / height_denom + + return tr_mat.unsqueeze(0) # 1x3x3 + + +def eye_like(n, B, device, dtype): + r""" + Return a 2-D tensor with ones on the diagonal and + zeros elsewhere with the same batch size as the input. + Args: + n : int + The number of rows :math:`(n)`. + B : int + Btach size. + device : torch.device + Devices of the output tensor. + dtype : torch.dtype + Data type of the output tensor. + + Returns: + The identity matrix with the shape :math:`(B, n, n)`. + """ + + identity = torch.eye(n, device=device, dtype=dtype) + return identity[None].repeat(B, 1, 1) + + +def normalize_homography(dst_pix_trans_src_pix, dsize_src, dsize_dst=None): + r""" + Normalize a given homography in pixels to [-1, 1]. + Args: + dst_pix_trans_src_pix : torch.Tensor + Homography/ies from source to destination to be normalized with + shape :math:`(B, 3, 3)`. + dsize_src : Tuple[int, int] + Size of the source image (height, width). + dsize_dst : Tuple[int, int] + Size of the destination image (height, width). 
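+            If None, it defaults to ``dsize_src``.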
+ + Returns: + dst_norm_trans_src_norm : torch.Tensor + The normalized homography of shape :math:`(B, 3, 3)`. + """ + if dsize_dst is None: + dsize_dst = dsize_src + # source and destination sizes + src_h, src_w = dsize_src + dst_h, dst_w = dsize_dst + device = dst_pix_trans_src_pix.device + dtype = dst_pix_trans_src_pix.dtype + # compute the transformation pixel/norm for src/dst + src_norm_trans_src_pix = normal_transform_pixel(src_h, src_w, device, + dtype).to( + dst_pix_trans_src_pix) + + src_pix_trans_src_norm = _torch_inverse_cast(src_norm_trans_src_pix) + dst_norm_trans_dst_pix = normal_transform_pixel(dst_h, dst_w, device, + dtype).to( + dst_pix_trans_src_pix) + # compute chain transformations + dst_norm_trans_src_norm: torch.Tensor = dst_norm_trans_dst_pix @ ( + dst_pix_trans_src_pix @ src_pix_trans_src_norm) + return dst_norm_trans_src_norm + + +def get_rotation_matrix2d(M, dsize): + r""" + Return rotation matrix for torch.affine_grid based on transformation matrix. + Args: + M : torch.Tensor + Transformation matrix with shape :math:`(B, 2, 3)`. + dsize : Tuple[int, int] + Size of the source image (height, width). + + Returns: + R : torch.Tensor + Rotation matrix with shape :math:`(B, 2, 3)`. + """ + H, W = dsize + B = M.shape[0] + center = torch.Tensor([W / 2, H / 2]).to(M.dtype).to(M.device).unsqueeze(0) + shift_m = eye_like(3, B, M.device, M.dtype) + shift_m[:, :2, 2] = center + + shift_m_inv = eye_like(3, B, M.device, M.dtype) + shift_m_inv[:, :2, 2] = -center + + rotat_m = eye_like(3, B, M.device, M.dtype) + rotat_m[:, :2, :2] = M[:, :2, :2] + affine_m = shift_m @ rotat_m @ shift_m_inv + return affine_m[:, :2, :] # Bx2x3 + + +def get_transformation_matrix(M, dsize): + r""" + Return transformation matrix for torch.affine_grid. + Args: + M : torch.Tensor + Transformation matrix with shape :math:`(N, 2, 3)`. + dsize : Tuple[int, int] + Size of the source image (height, width). + + Returns: + T : torch.Tensor + Transformation matrix with shape :math:`(N, 2, 3)`. + """ + T = get_rotation_matrix2d(M, dsize) + T[..., 2] += M[..., 2] + return T + + +def convert_affinematrix_to_homography(A): + r""" + Convert to homography coordinates + Args: + A : torch.Tensor + The affine matrix with shape :math:`(B,2,3)`. + + Returns: + H : torch.Tensor + The homography matrix with shape of :math:`(B,3,3)`. + """ + H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", + value=0.0) + H[..., -1, -1] += 1.0 + return H + + +def warp_affine( + src, M, dsize, + mode='bilinear', + padding_mode='zeros', + align_corners=True): + r""" + Transform the src based on transformation matrix M. + Args: + src : torch.Tensor + Input feature map with shape :math:`(B,C,H,W)`. + M : torch.Tensor + Transformation matrix with shape :math:`(B,2,3)`. + dsize : tuple + Tuple of output image H_out and W_out. + mode : str + Interpolation methods for F.grid_sample. + padding_mode : str + Padding methods for F.grid_sample. + align_corners : boolean + Parameter of F.affine_grid. + + Returns: + Transformed features with shape :math:`(B,C,H,W)`. 
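+
+    Example (illustrative, not part of the original code)::
+
+        >>> src = torch.rand(1, 16, 100, 100)
+        >>> M = torch.eye(3)[:2].unsqueeze(0)          # identity 2x3 affine
+        >>> out = warp_affine(src, M, dsize=(100, 100))
+        >>> out.shape
+        torch.Size([1, 16, 100, 100])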
+ """ + + B, C, H, W = src.size() + + # we generate a 3x3 transformation matrix from 2x3 affine + M_3x3 = convert_affinematrix_to_homography(M) + dst_norm_trans_src_norm = normalize_homography(M_3x3, (H, W), dsize) + + # src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm) + src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) + grid = F.affine_grid(src_norm_trans_dst_norm[:, :2, :], + [B, C, dsize[0], dsize[1]], + align_corners=align_corners) + + return F.grid_sample(src.half() if grid.dtype==torch.half else src, grid, align_corners=align_corners, mode=mode, + padding_mode=padding_mode) + + +class Test: + """ + Test the transformation in this file. + The methods in this class are not supposed to be used outside of this file. + """ + + def __init__(self): + pass + + @staticmethod + def load_img(): + torch.manual_seed(0) + x = torch.randn(1, 5, 16, 400, 200) * 100 + # x = torch.ones(1, 5, 16, 400, 200) + return x + + @staticmethod + def load_raw_transformation_matrix(N): + a = 90 / 180 * np.pi + matrix = torch.Tensor([[np.cos(a), -np.sin(a), 10], + [np.sin(a), np.cos(a), 10]]) + matrix = torch.repeat_interleave(matrix.unsqueeze(0).unsqueeze(0), N, + dim=1) + return matrix + + @staticmethod + def load_raw_transformation_matrix2(N, alpha): + a = alpha / 180 * np.pi + matrix = torch.Tensor([[np.cos(a), -np.sin(a), 0, 0], + [np.sin(a), np.cos(a), 0, 0]]) + matrix = torch.repeat_interleave(matrix.unsqueeze(0).unsqueeze(0), N, + dim=1) + return matrix + + @staticmethod + def test(): + img = Test.load_img() + B, L, C, H, W = img.shape + raw_T = Test.load_raw_transformation_matrix(5) + T = get_transformation_matrix(raw_T.reshape(-1, 2, 3), (H, W)) + img_rot = warp_affine(img.reshape(-1, C, H, W), T, (H, W)) + print(img_rot[0, 0, :, :]) + plt.matshow(img_rot[0, 0, :, :]) + plt.show() + + @staticmethod + def test_combine_roi_and_cav_mask(): + B = 2 + L = 5 + C = 16 + H = 300 + W = 400 + # 2, 5 + cav_mask = torch.Tensor([[1, 1, 1, 0, 0], [1, 0, 0, 0, 0]]) + x = torch.zeros(B, L, C, H, W) + correction_matrix = Test.load_raw_transformation_matrix2(5, 10) + correction_matrix = torch.cat([correction_matrix, correction_matrix], + dim=0) + mask = get_roi_and_cav_mask((B, L, H, W, C), cav_mask, correction_matrix, 0.4, 4) + plt.matshow(mask[0, :, :, 0, 0]) + plt.show() + + +if __name__ == "__main__": + os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' + Test.test_combine_roi_and_cav_mask() diff --git a/cosense3d/modules/utils/common.py b/cosense3d/modules/utils/common.py new file mode 100644 index 00000000..8b2da495 --- /dev/null +++ b/cosense3d/modules/utils/common.py @@ -0,0 +1,339 @@ +from importlib import import_module + +import torch +from torch import nn +import numpy as np + +from torch.distributions.multivariate_normal import _batch_mahalanobis +from cosense3d.modules.utils.me_utils import metric2indices + +pi = 3.141592653 + + +def clip_sigmoid(x: torch.Tensor, eps: float=1e-4) -> torch.Tensor: + """Sigmoid function for input feature. + + :param x: Input feature map with the shape of [B, N, H, W]. + :param eps: Lower bound of the range to be clamped to. + Defaults to 1e-4. + :return: Feature map after sigmoid. 
+ """ + y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) + return y + + +def cat_name_str(module_name): + """ + :param module_name: str, format in xxx_yyy_zzz + :returns: class_name: str, format in XxxYyyZzz + """ + cls_name = '' + for word in module_name.split('_'): + cls_name += word[:1].upper() + word[1:] + return cls_name + + +def instantiate(module_name, cls_name=None, module_cfg=None, **kwargs): + package = import_module(f"cosense3d.model.{module_name}") + cls_name = cat_name_str(module_name) if cls_name is None else cls_name + obj_cls = getattr(package, cls_name) + if module_cfg is None: + obj_inst = obj_cls(**kwargs) + else: + obj_inst = obj_cls(module_cfg) + return obj_inst + + +def bias_init_with_prob(prior_prob: float) -> float: + """initialize conv/fc bias value according to a given probability value.""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init + + +def topk_gather(feat, topk_indexes): + if topk_indexes is not None: + feat_shape = feat.shape + topk_shape = topk_indexes.shape + + view_shape = [1 for _ in range(len(feat_shape))] + view_shape[:2] = topk_shape[:2] + topk_indexes = topk_indexes.view(*view_shape) + + feat = torch.gather(feat, 1, topk_indexes.repeat(1, 1, *feat_shape[2:])) + return feat + + +def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + + :param x: (Tensor) The tensor to do the + inverse. + :param eps: (float) EPS avoid numerical + overflow. Defaults 1e-5. + :returns: Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +def xavier_init(module: nn.Module, + gain: float = 1, + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def limit_period(val, offset=0.5, period=2 * pi): + return val - torch.floor(val / period + offset) * period + + +def get_conv2d_layers(conv_name, in_channels, out_channels, n_layers, kernel_size, stride, + padding, relu_last=True, sequential=True, **kwargs): + """ + Build convolutional layers. 
kernel_size, stride and padding should be a list with the + lengths that match n_layers + """ + seq = [] + if 'bias' in kwargs: + bias = kwargs.pop('bias') + else: + bias = False + for i in range(n_layers): + seq.extend([getattr(nn, conv_name)( + in_channels, out_channels, kernel_size[i], stride=stride[i], + padding=padding[i], bias=bias, **{k: v[i] for k, v in kwargs.items()} + ), nn.BatchNorm2d(out_channels, eps=1e-3, momentum=0.01)]) + if i < n_layers - 1 or relu_last: + seq.append(nn.ReLU()) + in_channels = out_channels + if sequential: + return nn.Sequential(*seq) + else: + return seq + + +def get_norm_layer(channels, norm): + if norm == 'LN': + norm_layer = nn.LayerNorm(channels) + elif norm == 'BN': + norm_layer = nn.BatchNorm1d(channels) + else: + raise NotImplementedError + return norm_layer + + +def linear_last(in_channels, mid_channels, out_channels, bias=False, norm='BN'): + return nn.Sequential( + nn.Linear(in_channels, mid_channels, bias=bias), + get_norm_layer(mid_channels, norm), + nn.ReLU(inplace=True), + nn.Linear(mid_channels, out_channels) + ) + + +def linear_layers(in_out, activations=None, norm='BN'): + if activations is None: + activations = ['ReLU'] * (len(in_out) - 1) + elif isinstance(activations, str): + activations = [activations] * (len(in_out) - 1) + else: + assert len(activations) == (len(in_out) - 1) + layers = [] + for i in range(len(in_out) - 1): + layers.append(nn.Linear(in_out[i], in_out[i+1], bias=False)) + layers.append(get_norm_layer(in_out[i+1], norm)) + layers.append(getattr(nn, activations[i])()) + return nn.Sequential(*layers) + + +def meshgrid(xmin, xmax, ymin=None, ymax=None, dim=2, n_steps=None, step=None): + assert dim <= 3, f'dim <= 3, but dim={dim} is given.' + if ymin is not None and ymax is not None: + assert dim == 2 + if n_steps is not None: + x = torch.linspace(xmin, xmax, n_steps) + y = torch.linspace(ymin, ymax, n_steps) + elif step is not None: + x = torch.arange(xmin, xmax, step) + y = torch.arange(ymin, ymax, step) + else: + raise NotImplementedError + xs = (x, y) + else: + if n_steps is not None: + x = torch.linspace(xmin, xmax, n_steps) + if ymin is not None and ymax is not None: + y = torch.linspace(ymin, ymax, n_steps) + elif step is not None: + x = torch.arange(xmin, xmax, step) + else: + raise NotImplementedError + xs = (x, ) * dim + indexing = 'ijk' + indexing = indexing[:dim] + coor = torch.stack( + torch.meshgrid(*xs, indexing=indexing), + dim=-1 + ) + return coor + + +def meshgrid_cross(xmins, xmaxs, n_steps=None, steps=None): + if n_steps is not None: + assert len(xmins) == len(n_steps) + xs = [torch.linspace(xmin, xmax + 1, nstp) for xmin, xmax, nstp \ + in zip(xmins, xmaxs, n_steps)] + elif steps is not None: + xs = [torch.arange(xmin, xmax + 1, stp) for xmin, xmax, stp \ + in zip(xmins, xmaxs, steps)] + else: + raise NotImplementedError + dim = len(xs) + indexing = 'ijk' + indexing = indexing[:dim] + coor = torch.stack( + torch.meshgrid(*xs, indexing=indexing), + dim=-1 + ) + return coor + +def pad_r(tensor, value=0): + tensor_pad = torch.ones_like(tensor[..., :1]) * value + return torch.cat([tensor, tensor_pad], dim=-1) + + +def pad_l(tensor, value=0): + tensor_pad = torch.ones_like(tensor[..., :1]) * value + return torch.cat([tensor_pad, tensor], dim=-1) + + +def cat_coor_with_idx(tensor_list): + out = [] + for i, t in enumerate(tensor_list): + out.append(pad_l(t, i)) + return torch.cat(out, dim=0) + + +def fuse_batch_indices(coords, num_cav): + """ + Fusing voxels of CAVs from the same frame + :param stensor: ME 
sparse tensor + :param num_cav: list of number of CAVs for each frame + :return: fused coordinates and features of stensor + """ + + for i, c in enumerate(num_cav): + idx_start = sum(num_cav[:i]) + mask = torch.logical_and( + coords[:, 0] >= idx_start, + coords[:, 0] < idx_start + c + ) + coords[mask, 0] = i + + return coords + + +def weighted_mahalanobis_dists(reg_evi, reg_var, dists, var0): + log_probs_list = [] + for i in range(reg_evi.shape[1]): + vars = reg_var[:, i, :] + var0[i] + covs = torch.diag_embed(vars.squeeze(), dim1=1) + unbroadcasted_scale_tril = covs.unsqueeze(1) # N 1 2 2 + + # a.shape = (i, 1, n, n), b = (..., i, j, n), + M = _batch_mahalanobis(unbroadcasted_scale_tril, dists) # N M + log_probs = -0.5 * M + log_probs_list.append(log_probs) + + log_probs = torch.stack(log_probs_list, dim=-1) + probs = log_probs.exp() # N M 2 + cls_evi = reg_evi.view(-1, 1, 2) # N 1 2 + probs_weighted = probs * cls_evi + + return probs_weighted + + +def draw_sample_prob(centers, reg, samples, res, distr_r, det_r, batch_size, var0): + # from utils.vislib import draw_points_boxes_plt + # vis_ctrs = centers[centers[:, 0]==0, 1:].cpu().numpy() + # vis_sams = samples[samples[:, 0]==0, 1:].cpu().numpy() + # + # ax = draw_points_boxes_plt(50, vis_ctrs, points_c='det_r', return_ax=True) + # draw_points_boxes_plt(50, vis_sams, points_c='b', ax=ax) + reg_evi = reg[:, :2] + reg_var = reg[:, 2:].view(-1, 2, 2) + + grid_size = int(det_r / res) * 2 + centers_map = torch.ones((batch_size, grid_size, grid_size), + device=reg.device).long() * -1 + ctridx = metric2indices(centers, res).T + ctridx[1:] += int(grid_size / 2) + centers_map[ctridx[0], ctridx[1], ctridx[2]] = torch.arange(ctridx.shape[1], + device=ctridx.device) + + steps = int(distr_r / res) + offset = meshgrid(-steps, steps, 2, n_steps=steps * 2 + 1).to(samples.device) # s s 2 + samidx = metric2indices(samples, res).view(-1, 1, 3) \ + + pad_l(offset).view(1, -1, 3) # n s*s 3 + samidx = samidx.view(-1, 3).T # 3 n*s*s + samidx[1:] = (samidx[1:] + (det_r / res)) + mask1 = torch.logical_and((samidx[1:] >= 0).all(dim=0), + (samidx[1:] < (det_r / res * 2)).all(dim=0)) + + inds = samidx[:, mask1].long() + ctr_idx_of_sam = centers_map[inds[0], inds[1], inds[2]] + mask2 = ctr_idx_of_sam >= 0 + ctr_idx_of_sam = ctr_idx_of_sam[mask2] + ns = offset.shape[0]**2 + new_samples = torch.tile(samples.unsqueeze(1), + (1, ns, 1)).view(-1, 3) # n*s*s 3 + new_centers = centers[ctr_idx_of_sam] + dists_sam2ctr = new_samples[mask1][mask2][:, 1:] - new_centers[:, 1:] + + probs_weighted = weighted_mahalanobis_dists( + reg_evi[ctr_idx_of_sam], + reg_var[ctr_idx_of_sam], + dists_sam2ctr.unsqueeze(1), + var0=var0 + ).squeeze() + + sample_evis = torch.zeros_like(samidx[:2].T) + mask = mask1.clone() + mask[mask1] = mask2 + sample_evis[mask] = probs_weighted + sample_evis = sample_evis.view(-1, ns, 2).sum(dim=1) + + return sample_evis + + +def get_voxel_centers(voxel_coords, + downsample_times, + voxel_size, + point_cloud_range): + """Get centers of spconv voxels. 
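+
+    Input voxel indices are stored in (z, y, x) order and are converted back to
+    metric (x, y, z) centers as
+    ``(index + 0.5) * voxel_size * downsample_times + point_cloud_range[:3]``.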
+ + :param voxel_coords: (N, 3) + :param downsample_times: + :param voxel_size: + :param point_cloud_range: + :return: + """ + assert voxel_coords.shape[1] == 3 + voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz) + voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times + pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float() + voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range + return voxel_centers + + diff --git a/cosense3d/modules/utils/conv.py b/cosense3d/modules/utils/conv.py new file mode 100644 index 00000000..f6fa35ed --- /dev/null +++ b/cosense3d/modules/utils/conv.py @@ -0,0 +1,277 @@ +import warnings +from typing import Dict, Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn.modules import batchnorm, instancenorm + +from cosense3d.modules.utils.norm import build_norm_layer +from cosense3d.modules.utils import build_torch_module +from cosense3d.modules.utils.init import kaiming_init, constant_init + +PADDING_LAYERS = dict( + zero=nn.ZeroPad2d, + reflect=nn.ReflectionPad2d, + replicate=nn.ReplicationPad2d +) + + +def build_conv_layer(cfg: Optional[Dict], *args, **kwargs) -> nn.Module: + """Build convolution layer. Modified from openmmlab. + + Args: + cfg (None or dict): The conv layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate an conv layer. + args (argument list): Arguments passed to the `__init__` + method of the corresponding conv layer. + kwargs (keyword arguments): Keyword arguments passed to the `__init__` + method of the corresponding conv layer. + + Returns: + nn.Module: Created conv layer. + """ + if cfg is None: + cfg_ = dict(type='Conv2d') + else: + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if not hasattr(nn, layer_type): + raise KeyError(f'Unrecognized layer type {layer_type}') + else: + conv_layer = getattr(nn, layer_type) + + layer = conv_layer(*args, **kwargs, **cfg_) + + return layer + + +def build_padding_layer(cfg: Dict, *args, **kwargs) -> nn.Module: + """Build padding layer. + + Args: + cfg (dict): The padding layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate a padding layer. + + Returns: + nn.Module: Created padding layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + + cfg_ = cfg.copy() + padding_type = cfg_.pop('type') + if padding_type not in PADDING_LAYERS: + raise KeyError(f'Unrecognized padding type {padding_type}.') + else: + padding_layer = PADDING_LAYERS.get(padding_type) + + layer = padding_layer(*args, **kwargs, **cfg_) + + return layer + + +class ConvModule(nn.Module): + """A conv block that bundles conv/norm/activation layers. + + This block simplifies the usage of convolution layers, which are commonly + used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). + It is based upon three build methods: `build_conv_layer()`, + `build_norm_layer()` and `build_activation_layer()`. + + Besides, we add some additional features in this module. + 1. Automatically set `bias` of the conv layer. + 2. Spectral norm is supported. + 3. More padding modes are supported. 
Before PyTorch 1.5, nn.Conv2d only + supports zero and circular padding, and we add "reflect" padding mode. + + Args: + in_channels (int): Number of channels in the input feature map. + Same as that in ``nn._ConvNd``. + out_channels (int): Number of channels produced by the convolution. + Same as that in ``nn._ConvNd``. + kernel_size (int | tuple[int]): Size of the convolving kernel. + Same as that in ``nn._ConvNd``. + stride (int | tuple[int]): Stride of the convolution. + Same as that in ``nn._ConvNd``. + padding (int | tuple[int]): Zero-padding added to both sides of + the input. Same as that in ``nn._ConvNd``. + dilation (int | tuple[int]): Spacing between kernel elements. + Same as that in ``nn._ConvNd``. + groups (int): Number of blocked connections from input channels to + output channels. Same as that in ``nn._ConvNd``. + bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise + False. Default: "auto". + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + inplace (bool): Whether to use inplace mode for activation. + Default: True. + with_spectral_norm (bool): Whether use spectral norm in conv module. + Default: False. + padding_mode (str): If the `padding_mode` has not been supported by + current `Conv2d` in PyTorch, we will use our own padding layer + instead. Currently, we support ['zeros', 'circular'] with official + implementation and ['reflect'] with our own implementation. + Default: 'zeros'. + order (tuple[str]): The order of conv/norm/activation layers. It is a + sequence of "conv", "norm" and "act". Common examples are + ("conv", "norm", "act") and ("act", "conv", "norm"). + Default: ('conv', 'norm', 'act'). + """ + + _abbr_ = 'conv_block' + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int, int]], + stride: Union[int, Tuple[int, int]] = 1, + padding: Union[int, Tuple[int, int]] = 0, + dilation: Union[int, Tuple[int, int]] = 1, + groups: int = 1, + bias: Union[bool, str] = 'auto', + conv_cfg: Optional[Dict] = None, + norm_cfg: Optional[Dict] = None, + act_cfg: Optional[Dict] = dict(type='ReLU'), + inplace: bool = True, + with_spectral_norm: bool = False, + padding_mode: str = 'zeros', + order: tuple = ('conv', 'norm', 'act')): + super().__init__() + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + assert act_cfg is None or isinstance(act_cfg, dict) + official_padding_mode = ['zeros', 'circular'] + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.inplace = inplace + self.with_spectral_norm = with_spectral_norm + self.with_explicit_padding = padding_mode not in official_padding_mode + self.order = order + assert isinstance(self.order, tuple) and len(self.order) == 3 + assert set(order) == {'conv', 'norm', 'act'} + + self.with_norm = norm_cfg is not None + self.with_activation = act_cfg is not None + # if the conv layer is before a norm layer, bias is unnecessary. 
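+        # ('auto' therefore resolves to bias=False when a norm layer is
+        # configured, and bias=True otherwise.)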
+ if bias == 'auto': + bias = not self.with_norm + self.with_bias = bias + + if self.with_explicit_padding: + pad_cfg = dict(type=padding_mode) + self.padding_layer = build_padding_layer(pad_cfg, padding) + + # reset padding to 0 for conv module + conv_padding = 0 if self.with_explicit_padding else padding + # build convolution layer + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=conv_padding, + dilation=dilation, + groups=groups, + bias=bias) + # export the attributes of self.conv to a higher level for convenience + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + + if self.with_spectral_norm: + self.conv = nn.utils.spectral_norm(self.conv) + + # build normalization layers + if self.with_norm: + # norm layer is after conv layer + if order.index('norm') > order.index('conv'): + norm_channels = out_channels + else: + norm_channels = in_channels + self.norm_name, norm = build_norm_layer( + norm_cfg, norm_channels) # type: ignore + self.add_module(self.norm_name, norm) + if self.with_bias: + if isinstance(norm, (batchnorm._BatchNorm, + instancenorm._InstanceNorm)): + warnings.warn( + 'Unnecessary conv bias before batch/instance norm') + else: + self.norm_name = None # type: ignore + + # build activation layer + if self.with_activation: + act_cfg_ = act_cfg.copy() # type: ignore + # nn.Tanh has no 'inplace' argument + if act_cfg_['type'] not in [ + 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish', 'GELU' + ]: + act_cfg_.setdefault('inplace', inplace) + self.activate = build_torch_module(act_cfg_) + + # Use msra init by default + self.init_weights() + + @property + def norm(self): + if self.norm_name: + return getattr(self, self.norm_name) + else: + return None + + def init_weights(self): + # 1. It is mainly for customized conv layers with their own + # initialization manners by calling their own ``init_weights()``, + # and we do not want ConvModule to override the initialization. + # 2. For customized conv layers without their own initialization + # manners (that is, they don't have their own ``init_weights()``) + # and PyTorch's conv layers, they will be initialized by + # this method with default ``kaiming_init``. + # Note: For PyTorch's conv layers, they will be overwritten by our + # initialization implementation using default ``kaiming_init``. 
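+        # For LeakyReLU activations the configured negative slope is forwarded
+        # to kaiming_init so the gain matches the nonlinearity; all other
+        # activations fall back to the ReLU gain.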
+ if not hasattr(self.conv, 'init_weights'): + if self.with_activation and self.act_cfg['type'] == 'LeakyReLU': + nonlinearity = 'leaky_relu' + a = self.act_cfg.get('negative_slope', 0.01) + else: + nonlinearity = 'relu' + a = 0 + kaiming_init(self.conv, a=a, nonlinearity=nonlinearity) + if self.with_norm: + constant_init(self.norm, 1, bias=0) + + def forward(self, + x: torch.Tensor, + activate: bool = True, + norm: bool = True) -> torch.Tensor: + for layer in self.order: + if layer == 'conv': + if self.with_explicit_padding: + x = self.padding_layer(x) + x = self.conv(x) + elif layer == 'norm' and norm and self.with_norm: + x = self.norm(x) + elif layer == 'act' and activate and self.with_activation: + x = self.activate(x) + return x diff --git a/cosense3d/modules/utils/edl_utils.py b/cosense3d/modules/utils/edl_utils.py new file mode 100644 index 00000000..ed8d1953 --- /dev/null +++ b/cosense3d/modules/utils/edl_utils.py @@ -0,0 +1,23 @@ +import torch + + +def logit_to_edl(logits): + """ + + Parameters + ---------- + logits: Tensor, (..., C), + + Returns + ------- + + """ + evidence = logits.relu() + alpha = evidence + 1 + S = torch.sum(alpha, dim=-1, keepdim=True) + conf = torch.div(alpha, S) + K = evidence.shape[-1] + unc = torch.div(K, S) + # conf = torch.sqrt(conf * (1 - unc)) + unc = unc.squeeze(dim=-1) + return conf, unc \ No newline at end of file diff --git a/cosense3d/modules/utils/gaussian_utils.py b/cosense3d/modules/utils/gaussian_utils.py new file mode 100644 index 00000000..fcd7fc64 --- /dev/null +++ b/cosense3d/modules/utils/gaussian_utils.py @@ -0,0 +1,156 @@ +from typing import List +import torch +from torch.distributions.multivariate_normal import _batch_mahalanobis +import torch_scatter +import numpy as np + + +def weighted_mahalanobis_dists(vars, dists, weights=None): + """Compute the squared mahalanobis distances. + + :param vars: (N, 2), variances of Gaussian distribution. + :param dists: (N, 2), distances to gaussian center at each axis. + :param weights: weights to be applied to the output probability. + :return: (N), squared mahalanobis + """ + vars = vars.squeeze() + if len(vars.shape) == 1: + vars = torch.stack([vars, vars], dim=-1) + covs = torch.diag_embed(vars.squeeze(), dim1=1) + unbroadcasted_scale_tril = covs.unsqueeze(1) # N 1 2 2 + + # a.shape = (i, 1, n, n), b = (..., i, j, n), + M = _batch_mahalanobis(unbroadcasted_scale_tril, dists) # N M + log_probs = -0.5 * M + probs = log_probs.exp() # N M 2 + if weights is not None: + probs = probs * weights + + return probs + + +def mahalanobis_dists_2d(sigmas, dists): + """Compute the squared mahalanobis distances. 
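+
+    Note: despite the name, the returned value is -0.5 times the batched
+    Mahalanobis term (a Gaussian log-density exponent), so larger values
+    correspond to samples closer to the distribution center.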
+ + :param sigmas: (N, 2), standard deviation of Gaussian distribution + :param dists: (N, 2), distances to gaussian center + :return: (N), squared mahalanobis + """ + vars = sigmas ** 2 + covs = torch.diag_embed(vars, dim1=1) + unbroadcasted_scale_tril = covs.unsqueeze(1) # 1 1 2 2 + M = -0.5 * _batch_mahalanobis(unbroadcasted_scale_tril, dists.unsqueeze(0)) # N M + return M + + +def center_to_img_coor(center_in, lidar_range, pixel_sz): + x, y = center_in[:, 0], center_in[:, 1] + coord_x = (x - lidar_range[0]) / pixel_sz + coord_y = (y - lidar_range[1]) / pixel_sz + map_sz_x = (lidar_range[3] - lidar_range[0]) / pixel_sz + map_sz_y = (lidar_range[4] - lidar_range[1]) / pixel_sz + # clamp to fit image size: 1e-6 does not work for center.int() + coord_x = torch.clamp(coord_x, min=0, max=map_sz_x - 0.5) + coord_y = torch.clamp(coord_y, min=0, max=map_sz_y - 0.5) + center_out = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1) + return center_out + + +def cornernet_gaussian_radius(height, width, min_overlap=0.5): + a1 = 1 + b1 = (height + width) + c1 = width * height * (1 - min_overlap) / (1 + min_overlap) + sq1 = (b1 ** 2 - 4 * a1 * c1).sqrt() + r1 = (b1 + sq1) / 2 + + a2 = 4 + b2 = 2 * (height + width) + c2 = (1 - min_overlap) * width * height + sq2 = (b2 ** 2 - 4 * a2 * c2).sqrt() + r2 = (b2 + sq2) / 2 + + a3 = 4 * min_overlap + b3 = -2 * min_overlap * (height + width) + c3 = (min_overlap - 1) * width * height + sq3 = (b3 ** 2 - 4 * a3 * c3).sqrt() + r3 = (b3 + sq3) / 2 + ret = torch.min(torch.min(r1, r2), r3) + return ret + + +def gaussian_radius(box_dims, pixel_sz, overlap, min_radius=2): + dx, dy = box_dims[:, 0] / pixel_sz[0], box_dims[:, 1] / pixel_sz[1] + + radius = cornernet_gaussian_radius(dx, dy, min_overlap=overlap) + radius = torch.clamp_min(radius.int(), min=min_radius) + + return radius + + +def gaussian_2d(shape: List[int], sigma: float=1.0) -> np.ndarray: + """Generate gaussian map. + + :param shape: Shape of the map. + :param sigma: Sigma to generate gaussian map. + Defaults to 1. + :return: Generated gaussian map. + """ + m, n = [(ss - 1.) / 2. 
for ss in shape] + y, x = np.ogrid[-m:m + 1, -n:n + 1] + + h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) + h[h < np.finfo(h.dtype).eps * h.max()] = 0 + return h + + +def draw_gaussian_map(boxes, lidar_range, pixel_sz, batch_size, radius=None, sigma=1, min_radius=2): + size_x = int((lidar_range[3] - lidar_range[0]) // pixel_sz[0]) + size_y = int((lidar_range[4] - lidar_range[1]) // pixel_sz[1]) + if boxes.shape[0] == 0: + return torch.zeros(batch_size, size_x, size_y, device=boxes.device) + if radius is None: + radius = torch.ones_like(boxes[:, 0]) * 2 + radius_max = radius.max() + center = center_to_img_coor(boxes[:, 1:3], lidar_range, pixel_sz) + ctridx = center.int() + + # sample points for each center point + steps = radius_max * 2 + 1 + x = torch.linspace(- radius_max, radius_max, steps) + offsets = torch.stack(torch.meshgrid(x, x, indexing='ij'), dim=-1).to(center.device) + offsets = offsets[torch.norm(offsets, dim=-1) <= radius_max] + samples = ctridx.unsqueeze(1) + offsets.view(1, -1, 2) + ind = torch.tile(boxes[:, 0].unsqueeze(1), (1, samples.shape[1])).unsqueeze(-1) + samples = torch.cat([ind, samples], dim=-1) + ctr_idx_of_sam = torch.arange(len(center)).unsqueeze(1).tile(1, samples.shape[1]) + + mask = (samples[..., 1] >= 0) & (samples[..., 1] < size_x) & \ + (samples[..., 2] >= 0) & (samples[..., 2] < size_y) + + + new_center = center[ctr_idx_of_sam[mask]] + new_vars = 1 / min_radius * radius[ctr_idx_of_sam[mask]].float() + new_samples = samples[mask] + dists_sam2ctr = new_samples[:, 1:].float() - new_center + + probs = weighted_mahalanobis_dists( + new_vars, + dists_sam2ctr.unsqueeze(1), + ).squeeze() + + # probs = probs / (2 * sigma * sigma) + probs[probs < torch.finfo(probs.dtype).eps * probs.max()] = 0 + + indices = new_samples[:, 0] * size_y * size_x + \ + new_samples[:, 1] * size_x + new_samples[:, 2] + + center_map = torch.zeros(batch_size * size_x * size_y, device=center.device) + torch_scatter.scatter(probs, indices.long(), dim=0, out=center_map, reduce='max') + center_map = center_map.view(batch_size, size_x, size_y) + + # import matplotlib.pyplot as plt + # plt.imshow(center_map[0].cpu().numpy()) + # plt.show() + # plt.close() + + return center_map \ No newline at end of file diff --git a/cosense3d/modules/utils/gevbev_utils.py b/cosense3d/modules/utils/gevbev_utils.py new file mode 100644 index 00000000..d6bd41e0 --- /dev/null +++ b/cosense3d/modules/utils/gevbev_utils.py @@ -0,0 +1,112 @@ +import torch +from torch.distributions.multivariate_normal import _batch_mahalanobis + +from cosense3d.modules.utils.me_utils import metric2indices +from cosense3d.modules.utils.common import meshgrid, pad_l + + +def weighted_mahalanobis_dists(reg_evi, reg_var, dists, var0): + log_probs_list = [] + for i in range(reg_evi.shape[1]): + vars = reg_var[:, i, :] + var0[i] + covs = torch.diag_embed(vars.squeeze(), dim1=1) + unbroadcasted_scale_tril = covs.unsqueeze(1) # N 1 2 2 + + # a.shape = (i, 1, n, n), b = (..., i, j, n), + M = _batch_mahalanobis(unbroadcasted_scale_tril, dists) # N M + log_probs = -0.5 * M + log_probs_list.append(log_probs) + + log_probs = torch.stack(log_probs_list, dim=-1) + probs = log_probs.exp() # N M 2 + cls_evi = reg_evi.view(-1, 1, 2) # N 1 2 + probs_weighted = probs * cls_evi + + return probs_weighted + + +def draw_sample_evis(ctr_pts: dict, samples: torch.Tensor, tag: str, + res: float, distr_r: float, lr: list, + batch_size: int, var0: float)->torch.Tensor: + """ + Given center points and its regression results, generate evidences for new samples. 
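+    Each sample accumulates evidence from all centers within ``distr_r``: the
+    Gaussian density of every neighbouring center (regressed variance plus
+    ``var0``) is evaluated at the sample location, weighted by that center's
+    class evidence, and summed over neighbours.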
+ + Parameters + ---------- + ctr_pts: points in BEV feature map, including its + - (key='ctr') metric center coordinates , + - (key='coor') index coordinates and + - (key='reg') regression values for centers, including the EDL evidence for each class and 2D stds for each class. + reg: + samples: sparse target points that are sampled in the continuous BEV space. + tag: tag for regression key. + res: resolution of the center points. + distr_r: maximum radius of the Gaussian distribution over which to draw samples. + lr: lidar range. + batch_size: batch size. + var0: base variance, to be added to the regressed variances. + + Returns + ------- + Evidences for the given samples. + """ + if len(samples) == 0: + return torch.empty((0, 2), device=ctr_pts[f'reg_{tag}'].device) + mask = (ctr_pts['ctr'].abs() < lr[3]).all(1) + if mask.sum() == 0: + return torch.zeros_like(samples[:, :2]) + reg = ctr_pts[f'reg_{tag}'][mask].relu() + ctr = ctr_pts['ctr'][mask] + coor = ctr_pts['coor'][mask] + assert reg.shape[1] == 6 + reg_evi = reg[:, :2] + reg_var = reg[:, 2:].view(-1, 2, 2) + + # create index map for center points + grid_size = (round((lr[3] - lr[0]) / res), round((lr[4] - lr[1]) / res)) + centers_map = torch.ones((batch_size, grid_size[0], grid_size[1]), + device=reg.device).long() - 1 + ctridx = coor.clone().T + ctridx[1] -= round(lr[0] / res) + ctridx[2] -= round(lr[1] / res) + ctridx = ctridx.long() + centers_map[ctridx[0], ctridx[1], ctridx[2]] = torch.arange(ctridx.shape[1], + device=ctridx.device) + + # get neighboring center indices for sample points + steps = int(distr_r / res) + offset = meshgrid(-steps, steps, 2, n_steps=steps * 2 + 1).to(samples.device) # s s 2 + samidx = metric2indices(samples[:, :3], res).view(-1, 1, 3) \ + + pad_l(offset).view(1, -1, 3) # n s*s 3 + samidx = samidx.view(-1, 3).T # 3 n*s*s + samidx[1] = (samidx[1] - (lr[0] / res)) + samidx[2] = (samidx[2] - (lr[1] / res)) + mask1 = (samidx[1] >= 0) & (samidx[1] < grid_size[0]) & \ + (samidx[2] >= 0) & (samidx[2] < grid_size[1]) + inds = samidx[:, mask1].long() + ctr_idx_of_sam = centers_map[inds[0], inds[1], inds[2]] + mask2 = ctr_idx_of_sam >= 0 + ctr_idx_of_sam = ctr_idx_of_sam[mask2] + ns = offset.shape[0]**2 + new_samples = torch.tile(samples[:, :3].unsqueeze(1), + (1, ns, 1)).view(-1, 3) # n*s*s 3 + new_centers = ctr[ctr_idx_of_sam] + dists_sam2ctr = new_samples[mask1][mask2][:, 1:] - new_centers[:, 1:] + + probs_weighted = weighted_mahalanobis_dists( + reg_evi[ctr_idx_of_sam], + reg_var[ctr_idx_of_sam], + dists_sam2ctr.unsqueeze(1), + var0=[var0] * 2 + ).squeeze() + + sample_evis = torch.zeros_like(samidx[:2].T) + mask = mask1.clone() + mask[mask1] = mask2 + sample_evis[mask] = probs_weighted + sample_evis = sample_evis.view(-1, ns, 2).sum(dim=1) + + if sample_evis.isnan().any(): + print('d') + + return sample_evis \ No newline at end of file diff --git a/cosense3d/modules/utils/init.py b/cosense3d/modules/utils/init.py new file mode 100644 index 00000000..3e7988cc --- /dev/null +++ b/cosense3d/modules/utils/init.py @@ -0,0 +1,79 @@ +from torch import nn +import numpy as np + + +def bias_init_with_prob(prior_prob: float) -> float: + """initialize conv/fc bias value according to a given probability value.""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init + + +def constant_init(module: nn.Module, val: float, bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + nn.init.constant_(module.weight, val) + if hasattr(module, 'bias') and module.bias is 
not None: + nn.init.constant_(module.bias, bias) + + +def xavier_init(module: nn.Module, + gain: float = 1, + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def normal_init(module: nn.Module, + mean: float = 0, + std: float = 1, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + nn.init.normal_(module.weight, mean, std) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def trunc_normal_init(module: nn.Module, + mean: float = 0, + std: float = 1, + a: float = -2, + b: float = 2, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + trunc_normal_(module.weight, mean, std, a, b) # type: ignore + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) # type: ignore + + +def uniform_init(module: nn.Module, + a: float = 0, + b: float = 1, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + nn.init.uniform_(module.weight, a, b) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def kaiming_init(module: nn.Module, + a: float = 0, + mode: str = 'fan_out', + nonlinearity: str = 'relu', + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.kaiming_uniform_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) diff --git a/cosense3d/modules/utils/localization_utils.py b/cosense3d/modules/utils/localization_utils.py new file mode 100644 index 00000000..93b20633 --- /dev/null +++ b/cosense3d/modules/utils/localization_utils.py @@ -0,0 +1,33 @@ +import copy +import open3d as o3d +import numpy as np + + +def register_points(source, target, init_transf=None, thr=[2.0, 0.5]): + source_cloud = o3d.geometry.PointCloud( + o3d.utility.Vector3dVector(source.contiguous().detach().cpu().numpy())) + target_cloud = o3d.geometry.PointCloud( + o3d.utility.Vector3dVector(target.contiguous().detach().cpu().numpy())) + + # Perform ICP registration + if init_transf is None: + icp_result = np.eye(4) + else: + icp_result = init_transf.detach().cpu().numpy() + if not isinstance(thr, list): + thr = [thr] + + icp_result = o3d.pipelines.registration.registration_icp( + source_cloud, target_cloud, thr[0], icp_result, + o3d.pipelines.registration.TransformationEstimationPointToPoint()) + + if len(thr) > 1: + for x in thr[1:]: + icp_result = o3d.pipelines.registration.registration_icp( + source_cloud, target_cloud, x, icp_result.transformation, + o3d.pipelines.registration.TransformationEstimationPointToPoint()) + + transform = copy.deepcopy(icp_result.transformation) + source_transformed = np.array(copy.deepcopy(source_cloud).transform(transform).points) + + return transform, source_transformed \ No newline at end of file diff --git 
a/cosense3d/modules/utils/me_utils.py b/cosense3d/modules/utils/me_utils.py new file mode 100644 index 00000000..391aa284 --- /dev/null +++ b/cosense3d/modules/utils/me_utils.py @@ -0,0 +1,364 @@ +import torch +from torch import nn +import MinkowskiEngine as ME +from MinkowskiEngine.MinkowskiKernelGenerator import KernelGenerator + + +@torch.no_grad() +def metric2indices(coor, voxel_size): + """"Round towards floor""" + indices = coor.clone() + if isinstance(voxel_size, float): + indices[:, 1:3] = indices[:, 1:3] / voxel_size + else: + indices[:, 1] = indices[:, 1] / voxel_size[0] + indices[:, 2] = indices[:, 2] / voxel_size[1] + return torch.floor(indices).long() + + +@torch.no_grad() +def indices2metric(indices, voxel_size): + """Voxel indices to voxel center in meter""" + coor = indices.clone().float() + coor[:, 1] = (coor[:, 1] + 0.5) * voxel_size[0] + coor[:, 2] = (coor[:, 2] + 0.5) * voxel_size[1] + return coor + + +@torch.no_grad() +def mink_coor_limit(lidar_range, voxel_size, stride): + if not isinstance(voxel_size, list): + voxel_size = [voxel_size, voxel_size] + lr = lidar_range + x_max = (round(lr[3] / voxel_size[0]) - 1) // stride * stride # relevant to ME + x_min = (round(lr[0] / voxel_size[0]) + 1) // stride * stride - stride # relevant to ME + y_max = (round(lr[4] / voxel_size[1]) - 1) // stride * stride + y_min = (round(lr[1] / voxel_size[1]) + 1) // stride * stride - stride + return [x_min, x_max, y_min, y_max] + + +def update_me_essentials(self: object, data_info: dict, stride: int=None): + """Update essential variables for ME-based models + + :param self: instance of a python class + :param data_info: + - det_r: float + - lidar_range: [xmin, ymin, zmin, xmax, ymax, zmax] + - voxel_size: [vx, vy, vz] + :param stride: + :return: + """ + for k, v in data_info.items(): + setattr(self, k, v) + + if getattr(self, 'det_r', False): + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif getattr(self, 'lidar_range', False): + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, 'lidar_range', lr) + + if stride is not None: + setattr(self, 'stride', stride) + setattr(self, 'res', (self.stride * self.voxel_size[0], self.stride * self.voxel_size[1])) + setattr(self, 'mink_xylim', mink_coor_limit(lr, self.voxel_size, self.stride)) + setattr(self, 'size_x', round((lr[3] - lr[0]) / self.res[0])) + setattr(self, 'size_y', round((lr[4] - lr[1]) / self.res[1])) + setattr(self, 'offset_sz_x', round(lr[0] / self.res[0])) + setattr(self, 'offset_sz_y', round(lr[1] / self.res[1])) + + +@torch.no_grad() +def me_coor_to_grid_indices(lr, voxel_size, stride, coor): + res_x, res_y = stride * voxel_size[0], stride * voxel_size[1] + size_x = round((lr[3] - lr[0]) / res_x) + size_y = round((lr[4] - lr[1]) / res_y) + offset_sz_x = round(lr[0] / res_x) + offset_sz_y = round(lr[1] / res_y) + inds = coor.clone() + inds[:, 0] -= offset_sz_x + inds[:, 1] -= offset_sz_y + in_range_mask = (inds >= 0).all(dim=-1) & inds[:, 0] < size_x & inds[:, 1] < size_y + return inds, in_range_mask + + +@torch.no_grad() +def bev_sparse_to_dense(self, preds): + conf, unc = preds['conf'], preds['unc'], + ctrs = preds['centers'][:, :3] # N 2 + batch_size = ctrs[:, 0].max().int() + 1 + conf_map = torch.zeros((batch_size, self.size_x, self.size_y, 2), + device=conf.device) + unc_map = torch.ones((batch_size, self.size_x, self.size_y), + device=unc.device) + inds = metric2indices(ctrs, self.res).T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + conf_map[inds[0], inds[1], inds[2]] 
= conf + unc_map[inds[0], inds[1], inds[2]] = unc + return conf_map, unc_map + + +def minkconv_layer(in_dim, out_dim, kernel, stride, d, tr=False): + if not isinstance(kernel, list): + kernel = [kernel] * d + else: + assert len(kernel) == d + if tr: + conv = getattr(ME, 'MinkowskiConvolutionTranspose') + else: + conv = getattr(ME, 'MinkowskiConvolution') + conv_layer = conv( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=kernel, + stride=stride, + dilation=1, + dimension=d + ) + return conv_layer + + +def minkconv_conv_block(in_dim, out_dim, kernel, stride, + d=3, + bn_momentum=0.1, + activation='LeakyReLU', + tr=False, + expand_coordinates=False, + norm_before=False, + distributed=False): + if isinstance(kernel, int): + kernel = [kernel] * d + if isinstance(stride, int): + stride = [stride] * d + if tr: + conv = getattr(ME, 'MinkowskiConvolutionTranspose') + else: + conv = getattr(ME, 'MinkowskiConvolution') + conv_layer = conv( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=kernel, + stride=stride, + dilation=1, + dimension=d, + expand_coordinates=expand_coordinates + ) + activation_fn = getattr(ME, f'Minkowski{activation}')() + if distributed: + norm_layer = ME.MinkowskiSyncBatchNorm(out_dim, momentum=bn_momentum) + else: + norm_layer = ME.MinkowskiBatchNorm(out_dim, momentum=bn_momentum) + if norm_before: + layer = nn.Sequential(conv_layer, norm_layer, activation_fn) + else: + layer = nn.Sequential(conv_layer, activation_fn, norm_layer) + return layer + + +def get_conv_block(nc, k=3, d=3, tr=False, bn_momentum=0.1, distributed=False): + """ + create sparse convolution block + :param nc: number of channels in each layer in [in_layer, mid_layer, out_layer] + :param k: kernel size + :param tr: transposed convolution + :return: conv block + """ + if isinstance(k, int): + k = [k,] * d + else: + assert len(k) == d + bnm = bn_momentum + assert len(nc) == 3 + return nn.Sequential( + minkconv_conv_block(nc[0], nc[1], k, 2, d, bnm, tr=tr, distributed=distributed), + minkconv_conv_block(nc[1], nc[1], k, 1, d, bnm, tr=tr, distributed=distributed), + minkconv_conv_block(nc[1], nc[2], k, 1, d, bnm, tr=tr, distributed=distributed), + ) + + +def sparse_to_dense(stensor, voxel_size, det_r): + b = int(stensor.C[:, 0].max()) + 1 + d = stensor.F.shape[-1] + stride = stensor.tensor_stride + h = int((det_r['x'][1] - det_r['x'][0]) / voxel_size[0]) // stride[0] + w = int((det_r['y'][1] - det_r['y'][0]) / voxel_size[1]) // stride[1] + x_offset = int(det_r['x'][0] / voxel_size[0]) + y_offset = int(det_r['y'][0] / voxel_size[1]) + assert len(stensor.C[:, 3].unique()) == 1 + dtensor = stensor.dense( + shape=torch.Size((b, d, h, w, 1)), + min_coordinate=torch.Tensor([x_offset, y_offset, 0]).int())[0].squeeze(dim=-1) + + return dtensor + + +def prepare_input_data(points_list, voxel_size, QMODE, floor_height, + coor_dim=3, feat_dim=3): + device = points_list[0].device + coords = [] + features = [] + vs = torch.tensor(voxel_size).reshape(1, 3).to(device) + for i, points in enumerate(points_list): + pts = points.clone() + if floor_height is not None: + pts[:, 3] -= floor_height + pts[:, 1:4] = pts[:, 1:4] / vs + features.append(points[:, 1:feat_dim + 1]) + coords.append(pts) + coords = torch.cat(coords, dim=0) + features = torch.cat(features, dim=0) + + x = ME.TensorField( + features=features.contiguous(), + coordinates=coords[:, :coor_dim + 1].contiguous(), + quantization_mode=QMODE, + device=device + ) + # ME rounds to the floor when casting coords to integer + return x + + +def 
voxelize_with_centroids(x: ME.TensorField, enc_mlp, pc_range): + cm = x.coordinate_manager + features = x.F + coords = x.C[:, 1:] + + out = x.sparse() + size = torch.Size([len(out), len(x)]) + tensor_map, field_map = cm.field_to_sparse_map(x.coordinate_key, out.coordinate_key) + coords_p1, count_p1 = downsample_points(coords, tensor_map, field_map, size) + features_p1, _ = downsample_points(features, tensor_map, field_map, size) + if len(features) != len(tensor_map): + print('ME: features != tensor map') + norm_features = normalize_points(features, features_p1, tensor_map) + + features[:, :3] = (features[:, :3] - pc_range[:3]) / (pc_range[3:] - pc_range[:3]) + voxel_embs = enc_mlp(torch.cat([features, norm_features], dim=1)) + down_voxel_embs = downsample_embeddings(voxel_embs, tensor_map, size, mode="avg") + out = ME.SparseTensor(features=down_voxel_embs, + coordinate_map_key=out.coordinate_key, + coordinate_manager=cm) + + norm_points_p1 = normalize_centroids(coords_p1, out.C, out.tensor_stride[0]) + return out, norm_points_p1, features_p1, count_p1, voxel_embs + + +def devoxelize_with_centroids(out: ME.SparseTensor, x: ME.TensorField, h_embs): + feats = torch.cat([out.slice(x).F, h_embs], dim=1) + return feats + + +@torch.no_grad() +def normalize_points(points, centroids, tensor_map): + tensor_map = tensor_map if tensor_map.dtype == torch.int64 else tensor_map.long() + norm_points = points - centroids[tensor_map] + return norm_points + + +@torch.no_grad() +def normalize_centroids(down_points, coordinates, tensor_stride): + norm_points = (down_points - coordinates[:, 1:]) / tensor_stride - 0.5 + return norm_points + + +@torch.no_grad() +def get_kernel_map_and_out_key(stensor, stensor_out=None, + kernel_size=3, stride=1, dilation=1, + kernel_type='cube', kernel_generator=None): + """ + Generate kernel maps for the input stensor. + The hybrid and custom kernel is not implemented in ME v0.5.x, + this function uses a kernel mask to select the kernel maps for + the customized kernel shapes. + :param stensor: ME.SparseTensor, NxC + :param kernel_type: 'cube'(default) | 'hybrid' + :return: masked kernel maps + """ + D = stensor.C.shape[-1] - 1 + if kernel_generator is None: + kernel_generator = KernelGenerator(kernel_size=kernel_size, + stride=stride, + dilation=dilation, + dimension=D) + assert D == len(kernel_generator.kernel_stride) + cm = stensor.coordinate_manager + in_key = stensor.coordinate_key + if stensor_out is None: + out_key = cm.stride(in_key, kernel_generator.kernel_stride) + else: + out_key = stensor_out.coordinate_key + region_type, region_offset, _ = kernel_generator.get_kernel( + stensor.tensor_stride, False) + + kernel_map = cm.kernel_map(in_key, + out_key, + kernel_generator.kernel_stride, + kernel_generator.kernel_size, + kernel_generator.kernel_dilation, + region_type=region_type, + region_offset=region_offset) + if kernel_type=='cube': + kernel_volume = kernel_generator.kernel_volume + elif kernel_type=='hybrid': + assert dilation == 1, "currently, hybrid kernel only support dilation=1." 
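        # Build the full {-1, 0, 1}^D offset cube, then keep only the offsets whose first
        # coordinate is 0, plus the single offset (-1, 0, ..., 0). For D == 2 that leaves
        # (0, -1), (0, 0), (0, 1) and (-1, 0), i.e. kernel_volume == 4. kernel_map is
        # re-indexed to this reduced offset set via kernel_mask_map.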
+ xx = torch.tensor([-1, 0, 1]).int() + xx_list = [xx for i in range(D)] + kernels = torch.meshgrid([*xx_list], indexing='ij') + kernels = torch.stack([t.flatten() for t in kernels], dim=1) + kernel_mask = torch.zeros_like(kernels[:, 0]).bool() + m = torch.logical_or( + kernels[:, 0] == 0, + torch.logical_and(kernels[:, 0]==-1, (kernels[:, 1:]==0).all(dim=1)) + ) + kernel_mask[m] = True + kernel_mask_map = {ic.item(): ih for ih, ic in enumerate(torch.where(kernel_mask)[0])} + kernel_map = {kernel_mask_map[k]: v for k, v in kernel_map.items() if kernel_mask[k]} + kernel_volume = kernel_mask.sum().item() + else: + raise NotImplementedError + + return kernel_map, out_key, kernel_volume + + +@torch.no_grad() +def downsample_points(points, tensor_map, field_map, size): + down_points = ME.MinkowskiSPMMAverageFunction().apply( + tensor_map, field_map, size, points + ) + _, counts = torch.unique(tensor_map, return_counts=True) + return down_points, counts.unsqueeze_(1).type_as(down_points) + + +@torch.no_grad() +def stride_centroids(points, counts, rows, cols, size): + stride_centroids = ME.MinkowskiSPMMFunction().apply(rows, cols, counts, size, points) + ones = torch.ones(size[1], dtype=points.dtype, device=points.device) + stride_counts = ME.MinkowskiSPMMFunction().apply(rows, cols, ones, size, counts) + stride_counts.clamp_(min=1) + return torch.true_divide(stride_centroids, stride_counts), stride_counts + + +def downsample_embeddings(embeddings, inverse_map, size, mode="avg"): + assert len(embeddings) == size[1] + assert mode in ["avg", "max"] + if mode == "max": + in_map = torch.arange(size[1], dtype=inverse_map.dtype, device=inverse_map.device) + down_embeddings = ME.MinkowskiDirectMaxPoolingFunction().apply( + in_map, inverse_map, embeddings, size[0] + ) + else: + cols = torch.arange(size[1], dtype=inverse_map.dtype, device=inverse_map.device) + down_embeddings = ME.MinkowskiSPMMAverageFunction().apply( + inverse_map, cols, size, embeddings + ) + return down_embeddings + + + + + + + + diff --git a/cosense3d/modules/utils/misc.py b/cosense3d/modules/utils/misc.py new file mode 100644 index 00000000..62b19db5 --- /dev/null +++ b/cosense3d/modules/utils/misc.py @@ -0,0 +1,96 @@ +from torch import nn + + +class SELayer_Linear(nn.Module): + def __init__(self, channels, act_layer=nn.ReLU, gate_layer=nn.Sigmoid, norm=False): + super().__init__() + self.conv_reduce = nn.Linear(channels, channels) + self.act1 = act_layer() + self.conv_expand = nn.Linear(channels, channels) + self.gate = gate_layer() + self.norm = norm + + def forward(self, x, x_se): + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se) + + +class MLN(nn.Module): + ''' + Args: + c_dim (int): dimension of latent code c + f_dim (int): feature dimension + ''' + + def __init__(self, c_dim, f_dim=256): + super().__init__() + self.c_dim = c_dim + self.f_dim = f_dim + + self.reduce = nn.Sequential( + nn.Linear(c_dim, f_dim), + nn.ReLU(), + ) + self.gamma = nn.Linear(f_dim, f_dim) + self.beta = nn.Linear(f_dim, f_dim) + self.ln = nn.LayerNorm(f_dim, elementwise_affine=False) + self.reset_parameters() + + def reset_parameters(self): + nn.init.zeros_(self.gamma.weight) + nn.init.zeros_(self.beta.weight) + nn.init.ones_(self.gamma.bias) + nn.init.zeros_(self.beta.bias) + + def forward(self, x, c): + x = self.ln(x) + c = self.reduce(c) + gamma = self.gamma(c) + beta = self.beta(c) + out = gamma * x + beta + return out + + +class MLN2(nn.Module): + ''' + Args: + c_dim (int): 
dimension of latent code c + f_dim (int): feature dimension + ''' + + def __init__(self, c_dim, f_dim=256): + super().__init__() + self.c_dim = c_dim + self.f_dim = f_dim + + self.reduce = nn.Sequential( + nn.Linear(c_dim, f_dim), + nn.LayerNorm(f_dim), + nn.ReLU(), + ) + self.gamma = nn.Sequential( + nn.Linear(f_dim, f_dim), + nn.Sigmoid(), + ) + self.beta = nn.Sequential( + nn.Linear(f_dim, f_dim), + nn.LayerNorm(f_dim), + ) + self.ln = nn.LayerNorm(f_dim, elementwise_affine=False) + self.reset_parameters() + + def reset_parameters(self): + nn.init.zeros_(self.gamma[0].weight) + nn.init.zeros_(self.beta[0].weight) + nn.init.ones_(self.gamma[0].bias) + nn.init.zeros_(self.beta[0].bias) + + def forward(self, x, c): + x = self.ln(x) + c = self.reduce(c) + gamma = self.gamma(c) + beta = self.beta(c) + out = gamma * x + beta + return out \ No newline at end of file diff --git a/cosense3d/modules/utils/nbr_attn.py b/cosense3d/modules/utils/nbr_attn.py new file mode 100644 index 00000000..5a914a2d --- /dev/null +++ b/cosense3d/modules/utils/nbr_attn.py @@ -0,0 +1,62 @@ +import math +import torch +from torch import nn + +from cosense3d.modules.utils.positional_encoding import pos2posemb2d + + +class NeighborhoodAttention(nn.Module): + """Generate reference points and attend neighborhood features.""" + def __init__(self, emb_dim, n_nbr=16, num_pose_feat=64, **kwargs): + super(NeighborhoodAttention, self).__init__(**kwargs) + self.n_nbr = n_nbr + self.emb_dim = emb_dim + self.num_pose_feat = num_pose_feat + self.q_pos_emb = nn.Sequential( + nn.Linear(num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + self.kv_pos_emb = nn.Sequential( + nn.Linear(num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + + def forward(self, memory, mem_coor, q_coor, B): + """ + + Args: + q: (S, D) + kv: (L, D) + q_coor: (S, 3), [idx, x, y] + kv_coor: (L, 3) + + Returns: + + """ + query_pos = self.q_pos_emb(pos2posemb2d(q_coor[:, 1:], self.num_pose_feat)) + memory_pos = self.kv_pos_emb(pos2posemb2d(mem_coor[:, 1:], self.num_pose_feat)) + query = query_pos + kv_pe = memory_pos + memory + + outs = [] + for b in range(B): + qm = q_coor[:, 0] == b + km = mem_coor[:, 0] == b + q = query[qm] + kv = memory[km] + S, D = q.shape + L = kv.shape[0] + dists = torch.norm(q_coor[qm].unsqueeze(1) - mem_coor[km].unsqueeze(0), dim=-1) # (B, S, L) + topk_inds = torch.topk(-dists, k=self.n_nbr, dim=-1) # (B, S, n_nbr) + kv_inds = torch.cat([topk_inds[b] + b * L for b in range(B)], dim=0) # (BS, n_nbr) + q_inds = torch.cat([torch.arange(S) + b * S for b in range(B)], dim=0 + ).view(-1, 1).repeat(1, self.n_nbr) # (BS, n_nbr) + kv_m = kv_pe[km].view(-1, D)[kv_inds.view(-1)] + product = q.view(-1, D)[q_inds.view(-1)] * kv_m # (BS*n_nbr, D) + scaled_product = product / math.sqrt(D) + attn_weights = scaled_product.softmax(dim=-1) + out = (attn_weights * kv.view(-1, D)[kv_inds.view(-1)]).view(B, S, self.n_nbr, D) + outs.append(out) + return out \ No newline at end of file diff --git a/cosense3d/modules/utils/norm.py b/cosense3d/modules/utils/norm.py new file mode 100644 index 00000000..2e760fb6 --- /dev/null +++ b/cosense3d/modules/utils/norm.py @@ -0,0 +1,60 @@ +from typing import Dict, Tuple, Union +from torch import nn + +NORM_LAYERS = dict( + BN=nn.BatchNorm2d, + BN1d=nn.BatchNorm1d, + BN2d=nn.BatchNorm2d, + BN3d=nn.BatchNorm3d, + LN=nn.LayerNorm, + IN=nn.InstanceNorm2d, +) + + +def build_norm_layer(cfg: Dict, + num_features: int, + postfix: Union[int, str] = 
'') -> Tuple[str, nn.Module]: + """Build normalization layer. Modified from openmmlab. + + :param cfg: (dict) The norm layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate a norm layer. + - requires_grad (bool, optional): Whether stop gradient updates. + :param num_features: (int) Number of input channels. + :param postfix: (int | str) The postfix to be appended into norm abbreviation + to create named layer. + + :returns: tuple[str, nn.Module]: The first element is the layer name consisting + of abbreviation and postfix, e.g., bn1, gn. The second element is the + created norm layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in NORM_LAYERS: + raise KeyError(f'Unrecognized norm type {layer_type}') + + norm_layer = NORM_LAYERS.get(layer_type) + abbr = layer_type.lower() + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer \ No newline at end of file diff --git a/cosense3d/modules/utils/positional_encoding.py b/cosense3d/modules/utils/positional_encoding.py new file mode 100644 index 00000000..797be3b8 --- /dev/null +++ b/cosense3d/modules/utils/positional_encoding.py @@ -0,0 +1,135 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from mmdetection (https://github.com/open-mmlab/mmdetection) +# Copyright (c) OpenMMLab. All rights reserved. 
+# ------------------------------------------------------------------------ +# Modified by Shihao Wang +# Modified by Yunshuang Yuan +# ------------------------------------------------------------------------ +import math +import torch +import torch.nn as nn +import numpy as np + + +def ratio2coord(ratio, lidar_range): + return ratio * (lidar_range[3:] - lidar_range[:3]) + lidar_range[:3] + + +def coor2ratio(coor, lidar_range): + return (coor - lidar_range[:3]) / (lidar_range[3:] - lidar_range[:3]) + + +def img_locations(img_size, feat_size=None, stride=None): + H, W = img_size + if feat_size is None: + assert stride is not None + h, w = H // stride, W // stride + elif stride is None: + h, w = feat_size + stride = H // h + + shifts_x = (torch.arange( + 0, stride * w, step=stride, + dtype=torch.float32 + ) + stride // 2) / W + shifts_y = (torch.arange( + 0, h * stride, step=stride, + dtype=torch.float32 + ) + stride // 2) / H + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing='ij') + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + coors = torch.stack((shift_x, shift_y), dim=1) + + coors = coors.reshape(h, w, 2) + return coors + + +def pos2posemb3d(pos, num_pos_feats=128, temperature=10000): + scale = 2 * math.pi + pos = pos * scale + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) + pos_x = pos[..., 0, None] / dim_t + pos_y = pos[..., 1, None] / dim_t + pos_z = pos[..., 2, None] / dim_t + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2) + pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2) + pos_z = torch.stack((pos_z[..., 0::2].sin(), pos_z[..., 1::2].cos()), dim=-1).flatten(-2) + posemb = torch.cat((pos_y, pos_x, pos_z), dim=-1) + return posemb + + +def pos2posemb2d(pos, num_pos_feats=128, temperature=10000): + scale = 2 * math.pi + pos = pos * scale + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) + pos_x = pos[..., 0, None] / dim_t + pos_y = pos[..., 1, None] / dim_t + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2) + pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2) + posemb = torch.cat((pos_y, pos_x), dim=-1) + return posemb + + +def pos2posemb1d(pos, num_pos_feats=256, temperature=10000): + scale = 2 * math.pi + pos = pos * scale + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) + pos_x = pos[..., 0, None] / dim_t + + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2) + + return pos_x + + +def nerf_positional_encoding( + tensor: torch.Tensor, + num_encoding_functions: int=6, + include_input: bool=False, + log_sampling: bool=True +) -> torch.Tensor: + r"""Apply positional encoding to the input. + + :param tensor: Input tensor to be positionally encoded. + :param num_encoding_functions: Number of encoding functions used to compute + a positional encoding (default: 6). + :param include_input: Whether or not to include the input in the + positional encoding (default: True). + :param log_sampling: + :return: Positional encoding of the input tensor. 
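
    Illustrative example (with ``include_input=False``): a point set of shape
    ``(N, 3)`` encoded with 6 frequency bands yields ``3 * 2 * 6 = 36`` channels::

        >>> pts = torch.rand(1024, 3)
        >>> nerf_positional_encoding(pts, num_encoding_functions=6).shape
        torch.Size([1024, 36])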
+ """ + # TESTED + # Trivially, the input tensor is added to the positional encoding. + encoding = [tensor] if include_input else [] + frequency_bands = None + if log_sampling: + frequency_bands = 2.0 ** torch.linspace( + 0.0, + num_encoding_functions - 1, + num_encoding_functions, + dtype=tensor.dtype, + device=tensor.device, + ) + else: + frequency_bands = torch.linspace( + 2.0 ** 0.0, + 2.0 ** (num_encoding_functions - 1), + num_encoding_functions, + dtype=tensor.dtype, + device=tensor.device, + ) + + for freq in frequency_bands: + for func in [torch.sin, torch.cos]: + encoding.append(func(tensor * freq)) + + # Special case, for no positional encoding + if len(encoding) == 1: + return encoding[0] + else: + return torch.cat(encoding, dim=-1) \ No newline at end of file diff --git a/cosense3d/modules/utils/test_flash_attn.py b/cosense3d/modules/utils/test_flash_attn.py new file mode 100644 index 00000000..fa51d523 --- /dev/null +++ b/cosense3d/modules/utils/test_flash_attn.py @@ -0,0 +1,935 @@ +import math +from functools import partial + +import torch +import torch.nn.functional as F + + +from einops import rearrange, repeat + +from flash_attn.flash_attn_interface import (flash_attn_func, flash_attn_unpadded_qkvpacked_func, \ + _get_block_size, flash_attn_unpadded_kvpacked_func, flash_attn_unpadded_func) +from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_split_func +from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis + +try: + from flash_attn.flash_attn_triton import flash_attn_func +except (ImportError, AttributeError): # Older version of Triton doesn't have tl.constexpr + flash_attn_func = None + + +is_sm75 = torch.cuda.get_device_capability('cuda') == (7, 5) +is_sm80 = torch.cuda.get_device_capability('cuda') == (8, 0) + + +def generate_random_padding_mask(max_seqlen, batch_size, device, mode='random'): + assert mode in ['full', 'random', 'third', 'split'] + if mode == 'full': + lengths = torch.full((batch_size, 1), max_seqlen, device=device, dtype=torch.int32) + elif mode == 'random': + lengths = torch.randint(max(1, max_seqlen - 20), max_seqlen + 1, (batch_size, 1), device=device) + elif mode == 'third': + lengths = torch.randint(max_seqlen // 3, max_seqlen + 1, (batch_size, 1), device=device) + elif mode == 'split': + lengths0 = torch.randint(min(128, max_seqlen), max_seqlen + 1, + (batch_size // 4 * 3, 1), device=device) + lengths1 = torch.randint(min(max(1, max_seqlen - 20), 128), min(max_seqlen, 128) + 1, + (batch_size - batch_size // 4 * 3, 1), device=device) + lengths = torch.cat([lengths0, lengths1], dim=0) + padding_mask = repeat(torch.arange(max_seqlen, device=device), 's -> b s', b=batch_size) < lengths + return padding_mask + + +def generate_qkv(x, Wqkv, nheads, query_padding_mask=None, key_padding_mask=None, + kvpacked=False, qkvpacked=False): + """ + :param x: (batch_size, seqlen, nheads * d) + :param Wqkv: nn.Linear(nheads * d, 3 * nheads * d) + :param query_padding_mask: (batch_size, seqlen), bool + :param key_padding_mask: (batch_size, seqlen), bool + """ + assert not (kvpacked and qkvpacked) + batch_size, seqlen, dim = x.shape + q, k, v = Wqkv(x).chunk(3, dim=-1) + + if query_padding_mask is not None: + q_unpad, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, query_padding_mask) + q_unpad = rearrange(q_unpad, 'nnz (h d) -> nnz h d', h=nheads) + output_pad_fn = lambda output_unpad: rearrange( + pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen), + 'b s (h d) -> b s h d', 
h=nheads + ) + else: + q_unpad = rearrange(q, 'b s (h d) -> (b s) h d', h=nheads) + cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, + device=q_unpad.device) + max_seqlen_q = seqlen + output_pad_fn = lambda output_unpad: rearrange(output_unpad, '(b s) h d -> b s h d', b=batch_size) + + if key_padding_mask is not None: + k_unpad, indices_k, cu_seqlens_k, max_seqlen_k = unpad_input(k, key_padding_mask) + k_unpad = rearrange(k_unpad, 'nnz (h d) -> nnz h d', h=nheads) + v_unpad, _, _, _ = unpad_input(v, key_padding_mask) + v_unpad = rearrange(v_unpad, 'nnz (h d) -> nnz h d', h=nheads) + else: + k_unpad = rearrange(k, 'b s (h d) -> (b s) h d', h=nheads) + v_unpad = rearrange(v, 'b s (h d) -> (b s) h d', h=nheads) + cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, + device=q_unpad.device) + max_seqlen_k = seqlen + + if qkvpacked: + assert (query_padding_mask == key_padding_mask).all() + qkv_unpad = torch.stack([q_unpad, k_unpad, v_unpad], dim=1) + qkv = rearrange(torch.stack([q, k, v], dim=2), 'b s t (h d) -> b s t h d', h=nheads) + if query_padding_mask is not None: + dqkv_pad_fn = lambda dqkv_unpad: rearrange( + pad_input(rearrange(dqkv_unpad, 'nnz t h d -> nnz (t h d)'), indices_q, batch_size, seqlen), + 'b s (t h d) -> b s t h d', t=3, h=nheads + ) + else: + dqkv_pad_fn = lambda dqkv_unpad: rearrange(dqkv_unpad, '(b s) t h d -> b s t h d', b=batch_size) + return (qkv_unpad.detach().requires_grad_(), cu_seqlens_q, max_seqlen_q, + qkv.detach().requires_grad_(), output_pad_fn, dqkv_pad_fn) + elif kvpacked: + kv_unpad = torch.stack([k_unpad, v_unpad], dim=1) + q = rearrange(q, 'b s (h d) -> b s h d', h=nheads) + kv = rearrange(torch.stack([k, v], dim=2), 'b s t (h d) -> b s t h d', h=nheads) + dq_pad_fn = output_pad_fn + if key_padding_mask is not None: + dkv_pad_fn = lambda dkv_unpad: rearrange( + pad_input(rearrange(dkv_unpad, 'nnz t h d -> nnz (t h d)'), indices_k, batch_size, seqlen), + 'b s (t h d) -> b s t h d', t=2, h=nheads + ) + else: + dkv_pad_fn = lambda dkv_unpad: rearrange(dkv_unpad, '(b s) t h d -> b s t h d', b=batch_size) + return (q_unpad.detach().requires_grad_(), kv_unpad.detach().requires_grad_(), + cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, + q.detach().requires_grad_(), kv.detach().requires_grad_(), + output_pad_fn, dq_pad_fn, dkv_pad_fn) + else: + q, k, v = [rearrange(z, 'b s (h d) -> b s h d', h=nheads).detach().requires_grad_() + for z in [q, k, v]] + dq_pad_fn = output_pad_fn + if key_padding_mask is not None: + dk_pad_fn = lambda dk_unpad: rearrange( + pad_input(rearrange(dk_unpad, 'nnz h d -> nnz (h d)'), indices_k, batch_size, seqlen), + 'b s (h d) -> b s h d', h=nheads + ) + else: + dk_pad_fn = lambda dk_unpad: rearrange(dk_unpad, '(b s) h d -> b s h d', b=batch_size) + return (q_unpad.detach().requires_grad_(), k_unpad.detach().requires_grad_(), + v_unpad.detach().requires_grad_(), + cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, + q, k, v, + output_pad_fn, dq_pad_fn, dk_pad_fn) + + +def attention_ref(q, k, v, query_padding_mask=None, key_padding_mask=None, dropout_p=0.0, + dropout_mask=None, causal=False, bias=None, upcast=True, reorder_ops=False): + """ + :param q: (batch_size, seqlen_q, nheads, head_dim) + :param k: (batch_size, seqlen_k, nheads, head_dim) + :param v: (batch_size, seqlen_k, nheads, head_dim) + :param query_padding_mask: (batch_size, seqlen_q) + :param key_padding_mask: (batch_size, seqlen_k) + :param dropout_p: float + :param dropout_mask: 
(batch_size, nheads, seqlen_q, seqlen_k) + :param bias: (batch_size, nheads, seqlen_q, seqlen_k) + :param upcast: whether to cast all inputs to fp32, do all computation in fp32, then cast + output back to fp16/bf16. + :param reorder_ops: whether to change the order of operations (scaling k instead of scaling k, etc.) + without changing the math. This is to estimate the numerical error from operation + reordering. + :return: + - output: (batch_size, seqlen_q, nheads, head_dim) + - attention: (batch_size, nheads, seqlen_q, seqlen_k), softmax after dropout + """ + dtype_og = q.dtype + if upcast: + q, k, v = q.float(), k.float(), v.float() + seqlen_q, seqlen_k = q.shape[1], k.shape[1] + d = q.shape[-1] + if not reorder_ops: + scores = torch.einsum('bthd,bshd->bhts', q / math.sqrt(d), k) + else: + scores = torch.einsum('bthd,bshd->bhts', q, k / math.sqrt(d)) + if bias is not None: + scores = (scores + bias).to(dtype=scores.dtype) + if key_padding_mask is not None: + scores.masked_fill_(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), float('-inf')) + if causal: + causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, device=q.device), 1) + scores.masked_fill_(causal_mask, float('-inf')) + attention = torch.softmax(scores, dim=-1) + dropout_scaling = 1.0 / (1 - dropout_p) + # attention_drop = attention.masked_fill(~dropout_mask, 0.0) * dropout_scaling + # output = torch.einsum('bhts,bshd->bthd', attention_drop , v) + if dropout_mask is not None: + attention_drop = attention.masked_fill(~dropout_mask, 0.0) + else: + attention_drop = attention + output = torch.einsum('bhts,bshd->bthd', attention_drop, v * dropout_scaling) + if query_padding_mask is not None: + output.masked_fill_(rearrange(~query_padding_mask, 'b s -> b s 1 1'), 0.0) + attention = attention.masked_fill(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), 0.0) + return output.to(dtype=dtype_og), attention.to(dtype=dtype_og) + + +def attention_kvpacked_ref(q, kv, query_padding_mask=None, key_padding_mask=None, dropout_p=0.0, + dropout_mask=None, causal=False, upcast=True, reorder_ops=False): + return attention_ref(q, kv[:, :, 0], kv[:, :, 1], query_padding_mask, + key_padding_mask, dropout_p, dropout_mask, upcast=upcast, causal=causal, + reorder_ops=reorder_ops) + + +def attention_qkvpacked_ref(qkv, key_padding_mask=None, dropout_p=0.0, + dropout_mask=None, causal=False, upcast=True, reorder_ops=False): + return attention_ref(qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], key_padding_mask, + key_padding_mask, dropout_p, dropout_mask, upcast=upcast, causal=causal, + reorder_ops=reorder_ops) + + +def generate_sparsity_mask(seqlen, sparsity=0.3): + repeats = seqlen // 16 // 2 + # mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda'), + # torch.tensor([0, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1) + # mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda'), + # torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1) + # mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1) + # mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda')], dim=-1) + nrow, ncol = seqlen // 16, seqlen // 256 + mask = torch.rand(nrow, ncol, device='cuda') < sparsity + return mask + + +def attention_blocksparse_ref(qkv, blockmask, attn_mask, dropout_p, dropout_mask): + """ + Arguments: + qkv: (batch_size, seqlen, 3, nheads, head_dim) + blockmask: (seqlen / 16, seqlen / 256) + attn_mask: 
(batch_size, seqlen) + dropout_p: float + dropout_mask: (batch_size, nheads, seqlen, seqlen) + Output: + output: (batch_size, seqlen, nheads, head_dim) + attention: softmax after dropout + """ + q, k, v = qkv.float().unbind(dim=2) + d = qkv.shape[-1] + seqlen = qkv.shape[1] + scores = torch.einsum('bthd,bshd->bhts', q / math.sqrt(d), k) + scores.masked_fill_(rearrange(~attn_mask, 'b s -> b 1 1 s'), float('-inf')) + blockmask = repeat(blockmask, 's_16 s_256 -> (s_16 16) (s_256 256)') + blockmask = blockmask[:seqlen, :seqlen] + scores.masked_fill_(rearrange(~blockmask, 't s -> 1 1 t s'), float('-inf')) + attention = torch.softmax(scores, dim=-1) + attention = attention.masked_fill(rearrange(~attn_mask, 'b s -> b 1 s 1'), 0.0) + attention = attention.masked_fill_(rearrange(~blockmask, 't s -> 1 1 t s'), 0.0) + attention_drop = attention.masked_fill(~dropout_mask, 0.0) / (1 - dropout_p) + output = torch.einsum('bhts,bshd->bthd', attention_drop , v) + output.masked_fill_(rearrange(~attn_mask, 'b s -> b s 1 1'), 0) + return output.to(dtype=qkv.dtype), attention.to(dtype=qkv.dtype) + + +def convert_flash_attn_S_to_softmax(S, query_padding_mask, key_padding_mask, head_dim, is_dropout, + causal=False): + """FlashAttention stores the S matrix in a different way. + Arguments: + S: (batch_size, nheads, seqlen_q, seqlen_k) + query_padding_mask: (batch_size, seqlen_q) + key_padding_mask: (batch_size, seqlen_k) + """ + S_flat = rearrange(S, 'b h t s -> b h (t s)') + seqlen_q, seqlen_k = S.shape[-2:] + block_size = _get_block_size(S.device, head_dim, is_dropout) + loop_steps = (seqlen_k + block_size - 1) // block_size + warps_n = 4 + mmas_n = (seqlen_k // warps_n // 16) if seqlen_k <= block_size else (block_size // warps_n // 16) + S_converted = rearrange(S_flat, 'b h (loop nsteps mmas_n warps_n eight t r c0 c1) -> b h (nsteps r eight) (loop mmas_n warps_n c0 t c1)', + loop=loop_steps, nsteps=seqlen_q // 16, mmas_n=mmas_n, warps_n=warps_n, eight=8, t=4, + r=2, c0=2, c1=2) + + # Need to zero out things not in attention_mask in case S was initialized with random values + # and some of those values aren't overwritten. 
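    # The padding masks can be shorter (or longer) than the block-rounded seqlen_q/seqlen_k
    # used by the kernel, so they are first padded or cropped to the kernel sizes, the masked
    # rows/columns of S_converted are zeroed, and S_converted is cropped/padded back to the
    # original mask lengths at the end.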
+ seqlen_q_og = query_padding_mask.shape[-1] + if seqlen_q_og < seqlen_q: + query_padding_mask = F.pad(query_padding_mask, (0, seqlen_q - seqlen_q_og)) + else: + query_padding_mask = query_padding_mask[:, :seqlen_q] + S_converted = S_converted.masked_fill(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), 0.0) + seqlen_k_og = key_padding_mask.shape[-1] + if seqlen_k_og < seqlen_k: + key_padding_mask = F.pad(key_padding_mask, (0, seqlen_k - seqlen_k_og)) + else: + key_padding_mask = key_padding_mask[:, :seqlen_k] + S_converted = S_converted.masked_fill(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), 0.0) + if causal: + causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, device=S.device), 1) + S_converted.masked_fill_(causal_mask, 0.0) + if seqlen_q_og < seqlen_q: + S_converted = S_converted[:, :, :seqlen_q_og, :] + else: + S_converted = F.pad(S_converted, (0, 0, 0, seqlen_q_og - seqlen_q)) + if seqlen_k_og < seqlen_k: + S_converted = S_converted[:, :, :, :seqlen_k_og] + else: + S_converted = F.pad(S_converted, (0, seqlen_k_og - seqlen_k)) + return S_converted + + +def normalize_flash_attn_S(attn_unnorm, q, k, v, query_padding_mask=None, key_padding_mask=None, + is_dropout=False, causal=False): + """ + Arguments: + q: (batch_size, seqlen_q, nheads, head_dim) + k, v: (batch_size, seqlen_k, nheads, head_dim) + key_padding_mask: (batch_size, seqlen_q) + Output: + softmax_lse: (batch_size, nheads, seqlen_q) + softmax_max: (batch_size, nheads, seqlen_q) + """ + q, k, v = q.float(), k.float(), v.float() + _, seqlen_q, _, head_dim = q.shape + seqlen_k = k.shape[1] + scores = torch.einsum('bthd,bshd->bhts', q / math.sqrt(head_dim), k) + if key_padding_mask is not None: + scores.masked_fill_(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), float('-inf')) + if causal: + causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, device=q.device), 1) + scores.masked_fill_(causal_mask, float('-inf')) + block_size = _get_block_size(scores.device, head_dim, is_dropout) + scores_block = scores.split(block_size, dim=-1) + lse_block = torch.stack([torch.logsumexp(s, dim=-1) for s in scores_block], dim=-1) + lcse_block = torch.logcumsumexp(lse_block, dim=-1).unbind(dim=-1) + scores_max_block = ([torch.amax(scores_block[0], dim=-1)] + + [torch.maximum(torch.amax(s, dim=-1), lcse) + for s, lcse in zip(scores_block[1:], lcse_block[:-1])]) + attn_unnorm_block = attn_unnorm.split(block_size, dim=-1) + attn_norm = torch.cat([a / rearrange(torch.exp(lcse_block[-1] - m), 'b h s -> b h s 1') + for a, m in zip(attn_unnorm_block, scores_max_block)], dim=-1) + if query_padding_mask is not None: + attn_norm.masked_fill_(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), 0.0) + return attn_norm.to(dtype=attn_unnorm.dtype) + + +def get_dropout_fraction(dropout_mask, query_padding_mask=None, key_padding_mask=None, causal=False): + """ + dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k), bool. True means keep, False means drop. 
+ query_padding_mask: (batch_size, seqlen_q) + key_padding_mask: (batch_size, seqlen_k) + """ + batch_size, nheads, seqlen_q, seqlen_k = dropout_mask.shape + dropped = ~dropout_mask + if query_padding_mask is not None: + dropped.masked_fill_(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), False) + if key_padding_mask is not None: + dropped.masked_fill_(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), False) + if causal: + causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, + device=dropout_mask.device), 1) + dropped.masked_fill_(causal_mask, False) + dropped_total = dropped.sum() + query_lengths = (query_padding_mask.sum(dim=-1) if query_padding_mask is not None + else torch.full((batch_size,), seqlen_q, device=dropout_mask.device)) + key_lengths = (key_padding_mask.sum(dim=-1) if key_padding_mask is not None + else torch.full((batch_size,), seqlen_k, device=dropout_mask.device)) + if not causal: + numel_per_batch = query_lengths * key_lengths + else: + numel_per_batch = torch.where( + query_lengths <= key_lengths, + query_lengths * (query_lengths + 1) / 2, + query_lengths * key_lengths - (key_lengths * (key_lengths - 1) / 2) + ) + return dropped_total / (numel_per_batch.sum() * nheads) + + +def test_flash_attn_unpadded_qkvpacked(seqlen, d, dropout_p, causal, dtype): + if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30: + pass # Reference implementation OOM + device = 'cuda' + # if dtype == torch.float16: + # rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3) + # else: # torch.bfloat16 + # rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3) + # set seed + torch.random.manual_seed(0) + # Set smaller batch size so it would trigger num_splits > 1 + batch_size = 8 + nheads = 4 + x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True) + Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype) + + key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random') + # key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full') + + qkv_unpad, cu_seqlens, max_seqlen, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv( + x, Wqkv, nheads, key_padding_mask, key_padding_mask, qkvpacked=True + ) + + output_unpad, sm_lse, S_dmask = flash_attn_unpadded_qkvpacked_func( + qkv_unpad, cu_seqlens, max_seqlen, dropout_p, return_attn_probs=True, causal=causal + ) + output = output_pad_fn(output_unpad) + S_dmask_converted = convert_flash_attn_S_to_softmax( + S_dmask, key_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + dropout_mask = S_dmask_converted >= 0 + attn_unnorm = S_dmask_converted.abs() + attn = normalize_flash_attn_S(attn_unnorm, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], + key_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal) + dropout_fraction = get_dropout_fraction(dropout_mask, key_padding_mask, key_padding_mask, + causal=causal).item() + + output_ref, attn_ref = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask, + causal=causal) + output_pt, attn_pt = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask, + causal=causal, upcast=False, reorder_ops=True) + print(f'Actual dropout fraction: {dropout_fraction}') + print(f'Output max diff: {(output - output_ref).abs().max().item()}') + print(f'Output mean diff: {(output - output_ref).abs().mean().item()}') + print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}') + print(f'Pytorch 
mean diff: {(output_pt - output_ref).abs().mean().item()}') + print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}') + print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}') + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + g = torch.randn_like(output) + dqkv_unpad, = torch.autograd.grad(output, qkv_unpad, g) + dqkv = dqkv_pad_fn(dqkv_unpad) + dqkv_ref, = torch.autograd.grad(output_ref, qkv, g) + dqkv_pt, = torch.autograd.grad(output_pt, qkv, g) + print(f'dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}') + print(f'dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}') + print(f'dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}') + print(f'dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}') + print(f'dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}') + print(f'dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}') + print(f'dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}') + print(f'dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}') + + # Check that FlashAttention's numerical error is at most twice the numerical error + # of a Pytorch implementation. + assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item() + # assert torch.allclose(output, output_ref, rtol=rtol, atol=atol) + assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item() + # assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol) + if dropout_p == 0.0: + assert dropout_mask.all() + else: + assert 0.98 <= dropout_fraction / dropout_p <= 1.02 + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + # Error for dK and dV could be a bit higher if we're splitting along seqlen_q dimension + assert (dqkv - dqkv_ref).abs().max().item() <= 4 * (dqkv_pt - dqkv_ref).abs().max().item() + # assert torch.allclose(dqkv, dqkv_ref, rtol=rtol, atol=atol) + + +def test_flash_attn_unpadded_kvpacked(seqlen_q, seqlen_k, d, dropout_p, causal, dtype): + if seqlen_q >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30: + pass # Reference implementation OOM + device = 'cuda' + # if dtype == torch.float16: + # rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3) + # else: # torch.bfloat16 + # rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3) + # set seed + torch.random.manual_seed(0) + batch_size = 5 + nheads = 8 + x = torch.randn(batch_size, seqlen_k, nheads * d, device=device, dtype=dtype, requires_grad=True) + Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype) + + query_padding_mask = generate_random_padding_mask(seqlen_q, batch_size, device, mode='full') + key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode='full') + # key_padding_mask = None + + (q_unpad, kv_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, q, kv, + output_pad_fn, dq_pad_fn, dkv_pad_fn) = generate_qkv( + x, Wqkv, nheads, query_padding_mask, key_padding_mask, kvpacked=True + ) + + output_unpad, sm_lse, S_dmask = flash_attn_unpadded_kvpacked_func( + q_unpad, kv_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, + dropout_p, return_attn_probs=True, causal=causal + ) + output = output_pad_fn(output_unpad) + S_dmask_converted = convert_flash_attn_S_to_softmax( + S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + 
dropout_mask = S_dmask_converted >= 0 + attn_unnorm = S_dmask_converted.abs() + attn = normalize_flash_attn_S(attn_unnorm, q, kv[:, :, 0], kv[:, :, 1], + query_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal) + dropout_fraction = get_dropout_fraction(dropout_mask, query_padding_mask, key_padding_mask, + causal=causal) + + output_ref, attn_ref = attention_kvpacked_ref(q, kv, query_padding_mask, key_padding_mask, + dropout_p, dropout_mask, causal=causal) + output_pt, attn_pt = attention_kvpacked_ref(q, kv, query_padding_mask, key_padding_mask, + dropout_p, dropout_mask, causal=causal, + upcast=False, reorder_ops=True) + print(f'Actual dropout fraction: {dropout_fraction}') + print(f'Output max diff: {(output - output_ref).abs().max().item()}') + print(f'Output mean diff: {(output - output_ref).abs().mean().item()}') + print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}') + print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}') + print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}') + print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}') + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + g = torch.randn_like(output) + dq_unpad, dkv_unpad, = torch.autograd.grad(output, (q_unpad, kv_unpad), g) + dq = dq_pad_fn(dq_unpad) + dkv = dkv_pad_fn(dkv_unpad) + dq_ref, dkv_ref, = torch.autograd.grad(output_ref, (q, kv), g) + dq_pt, dkv_pt = torch.autograd.grad(output_pt, (q, kv), g) + print(f'dQ max diff: {(dq - dq_ref).abs().max().item()}') + print(f'dK max diff: {(dkv[:, :, 0] - dkv_ref[:, :, 0]).abs().max().item()}') + print(f'dV max diff: {(dkv[:, :, 1] - dkv_ref[:, :, 1]).abs().max().item()}') + print(f'dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}') + print(f'dK Pytorch max diff: {(dkv_pt[:, :, 0] - dkv_ref[:, :, 0]).abs().max().item()}') + print(f'dV Pytorch max diff: {(dkv_pt[:, :, 1] - dkv_ref[:, :, 1]).abs().max().item()}') + + # Check that FlashAttention's numerical error is at most twice the numerical error + # of a Pytorch implementation. 
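    # output_pt / attn_pt rerun the same reference math without upcasting and with reordered
    # operations, so they estimate the numerical error that is unavoidable in fp16/bf16;
    # FlashAttention is only required to stay within a small constant factor of that error.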
+ assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item() + # assert torch.allclose(output, output_ref, rtol=rtol, atol=atol) + assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item() + # assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol) + if dropout_p == 0.0: + assert dropout_mask.all() + else: + assert 0.99 <= dropout_fraction / dropout_p <= 1.01 + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item() + assert (dkv - dkv_ref).abs().max().item() <= 2 * (dkv_pt - dkv_ref).abs().max().item() + # assert torch.allclose(dq, dq_ref, rtol=rtol, atol=atol) + # assert torch.allclose(dkv, dkv_ref, rtol=rtol, atol=atol) + +def test_flash_attn_unpadded(seqlen, d, dropout_p, causal, dtype): + if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30: + pass # Reference implementation OOM + device = 'cuda' + # if dtype == torch.float16: + # rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3) + # else: # torch.bfloat16 + # rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3) + # set seed + torch.random.manual_seed(0) + batch_size = 32 + nheads = 4 + x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True) + Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype) + + query_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random') + key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random') + + (q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, q, k, v, + output_pad_fn, dq_pad_fn, dk_pad_fn) = generate_qkv( + x, Wqkv, nheads, query_padding_mask, key_padding_mask + ) + + output_unpad, sm_lse, S_dmask = flash_attn_unpadded_func( + q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, + dropout_p, return_attn_probs=True, causal=causal + ) + output = output_pad_fn(output_unpad) + S_dmask_converted = convert_flash_attn_S_to_softmax( + S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + dropout_mask = S_dmask_converted >= 0 + attn_unnorm = S_dmask_converted.abs() + attn = normalize_flash_attn_S(attn_unnorm, q, k, v, query_padding_mask, key_padding_mask, + dropout_p > 0.0, causal=causal) + dropout_fraction = get_dropout_fraction(dropout_mask, query_padding_mask, key_padding_mask, + causal=causal) + + output_ref, attn_ref = attention_ref(q, k, v, query_padding_mask, key_padding_mask, + dropout_p, dropout_mask, causal=causal) + output_pt, attn_pt = attention_ref(q, k, v, query_padding_mask, key_padding_mask, + dropout_p, dropout_mask, causal=causal, + upcast=False, reorder_ops=True) + print(f'Actual dropout fraction: {dropout_fraction}') + print(f'Output max diff: {(output - output_ref).abs().max().item()}') + print(f'Output mean diff: {(output - output_ref).abs().mean().item()}') + print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}') + print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}') + print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}') + print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}') + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + g = torch.randn_like(output) + dq_unpad, dk_unpad, dv_unpad, = torch.autograd.grad(output, (q_unpad, k_unpad, v_unpad), g) + dq = 
dq_pad_fn(dq_unpad) + dk = dk_pad_fn(dk_unpad) + dv = dk_pad_fn(dv_unpad) + dq_ref, dk_ref, dv_ref, = torch.autograd.grad(output_ref, (q, k, v), g) + dq_pt, dk_pt, dv_pt, = torch.autograd.grad(output_pt, (q, k, v), g) + print(f'dQ max diff: {(dq - dq_ref).abs().max().item()}') + print(f'dK max diff: {(dk - dk_ref).abs().max().item()}') + print(f'dV max diff: {(dv - dv_ref).abs().max().item()}') + print(f'dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}') + print(f'dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}') + print(f'dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}') + + # Check that FlashAttention's numerical error is at most twice the numerical error + # of a Pytorch implementation. + assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item() + # assert torch.allclose(output, output_ref, rtol=rtol, atol=atol) + assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item() + # assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol) + if dropout_p == 0.0: + assert dropout_mask.all() + else: + assert 0.99 <= dropout_fraction / dropout_p <= 1.01 + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item() + assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item() + assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item() + # assert torch.allclose(dq, dq_ref, rtol=rtol, atol=atol) + # assert torch.allclose(dk, dk_ref, rtol=rtol, atol=atol) + # assert torch.allclose(dv, dv_ref, rtol=rtol, atol=atol) + + +def test_flash_attn_split(seqlen, d, dropout_p, causal, dtype): + if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30: + pass # Reference implementation OOM + device = 'cuda' + # if dtype == torch.float16: + # rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3) + # else: # torch.bfloat16 + # rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3) + # set seed + torch.random.manual_seed(0) + batch_size = 32 + nheads = 4 + x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True) + Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype) + + key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='split') + batch_size0 = batch_size // 4 * 3 # this must match what's in generate_random_padding_mask + # key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full') + + qkv_unpad, cu_seqlens, max_seqlen0, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv( + x, Wqkv, nheads, key_padding_mask, key_padding_mask, qkvpacked=True + ) + max_seqlen1 = 128 + + output_unpad, sm_lse, S_dmask0, S_dmask1 = flash_attn_unpadded_qkvpacked_split_func( + qkv_unpad, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0, dropout_p, + return_attn_probs=True, causal=causal + ) + output = output_pad_fn(output_unpad) + S_dmask0_converted = convert_flash_attn_S_to_softmax( + S_dmask0, key_padding_mask[:batch_size0], key_padding_mask[:batch_size0], d, dropout_p > 0.0, causal=causal + ) + S_dmask1_converted = convert_flash_attn_S_to_softmax( + S_dmask1, key_padding_mask[batch_size0:, :max_seqlen1], key_padding_mask[batch_size0:, :max_seqlen1], d, dropout_p > 0.0, causal=causal + ) + padding = (S_dmask0_converted.shape[-1] - S_dmask1_converted.shape[-1], + S_dmask0_converted.shape[-2] - S_dmask1_converted.shape[-2]) + 
S_dmask_converted = torch.cat([S_dmask0_converted, + F.pad(S_dmask1_converted, (0, padding[0], 0, padding[1]))], dim=0) + dropout_mask = S_dmask_converted >= 0 + attn_unnorm = S_dmask_converted.abs() + attn = normalize_flash_attn_S(attn_unnorm, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], + key_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal) + dropout_fraction = get_dropout_fraction(dropout_mask, key_padding_mask, key_padding_mask, + causal=causal).item() + + output_ref, attn_ref = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask, + causal=causal) + output_pt, attn_pt = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask, + causal=causal, upcast=False, reorder_ops=True) + print(f'Actual dropout fraction: {dropout_fraction}') + print(f'Output max diff: {(output - output_ref).abs().max().item()}') + print(f'Output mean diff: {(output - output_ref).abs().mean().item()}') + print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}') + print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}') + print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}') + print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}') + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + g = torch.randn_like(output) + dqkv_unpad, = torch.autograd.grad(output, qkv_unpad, g) + dqkv = dqkv_pad_fn(dqkv_unpad) + dqkv_ref, = torch.autograd.grad(output_ref, qkv, g) + dqkv_pt, = torch.autograd.grad(output_pt, qkv, g) + print(f'dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}') + print(f'dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}') + print(f'dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}') + print(f'dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}') + print(f'dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}') + print(f'dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}') + print(f'dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}') + print(f'dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}') + + # Check that FlashAttention's numerical error is at most twice the numerical error + # of a Pytorch implementation. 
+ assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item() + # assert torch.allclose(output, output_ref, rtol=rtol, atol=atol) + assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item() + # assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol) + if dropout_p == 0.0: + assert dropout_mask.all() + else: + assert 0.99 <= dropout_fraction / dropout_p <= 1.01 + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item() + # assert torch.allclose(dqkv, dqkv_ref, rtol=rtol, atol=atol) + + +def test_flash_attn_race_condition(seqlen, d, dropout_p, causal, dtype): + if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30: + pass # Reference implementation OOM + device = 'cuda' + # set seed + torch.random.manual_seed(0) + batch_size = 32 + nheads = 4 + x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True) + Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype) + + query_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random') + key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random') + + (q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, q, k, v, + output_pad_fn, dq_pad_fn, dk_pad_fn) = generate_qkv( + x, Wqkv, nheads, query_padding_mask, key_padding_mask + ) + + torch.random.manual_seed(0) + output_unpad_0, sm_lse_0, S_dmask_0 = flash_attn_unpadded_func( + q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, + dropout_p, return_attn_probs=True, causal=causal + ) + S_dmask_converted_0 = convert_flash_attn_S_to_softmax( + S_dmask_0, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + g = torch.randn_like(output_unpad_0) + dq_unpad_0, dk_unpad_0, dv_unpad_0, = torch.autograd.grad(output_unpad_0, + (q_unpad, k_unpad, v_unpad), g) + # Parallelizing over seqlen_k makes dq non-deterministic + deterministic_dq = False + # Numerical error if we just do any arithmetic on dq + dq_atol = ((dq_unpad_0 + 0.3 - 0.3) - dq_unpad_0).abs().max().item() + equal_fn = torch.equal if deterministic_dq else partial(torch.allclose, atol=dq_atol) + + for _ in range(10): + torch.random.manual_seed(0) + output_unpad, sm_lse, S_dmask = flash_attn_unpadded_func( + q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, + dropout_p, return_attn_probs=True, causal=causal + ) + S_dmask_converted = convert_flash_attn_S_to_softmax( + S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + assert torch.equal(output_unpad, output_unpad_0) + # sm_lse has some parts that are uninitialized from torch.empty + # assert torch.equal(sm_lse, sm_lse_0) + assert torch.equal(S_dmask_converted, S_dmask_converted_0) + + if is_sm80 or d <= 64: # Only run backward for d=128 on A100 + dq_unpad, dk_unpad, dv_unpad, = torch.autograd.grad(output_unpad, + (q_unpad, k_unpad, v_unpad), g) + assert equal_fn(dq_unpad, dq_unpad_0) + assert torch.equal(dk_unpad, dk_unpad_0) + assert torch.equal(dv_unpad, dv_unpad_0) + + +def test_flash_attn_multigpu(): + seqlen = 256 + d = 64 + dropout_p = 0.0 + causal = False + dtype = torch.float16 + device = 'cuda:1' + torch.random.manual_seed(0) + batch_size = 32 + nheads = 4 + x 
= torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True) + Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype) + + key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random') + # key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full') + + qkv_unpad, cu_seqlens, max_seqlen, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv( + x, Wqkv, nheads, key_padding_mask, key_padding_mask, qkvpacked=True + ) + + output_unpad, sm_lse, S_dmask = flash_attn_unpadded_qkvpacked_func( + qkv_unpad, cu_seqlens, max_seqlen, dropout_p, return_attn_probs=True, causal=causal + ) + output = output_pad_fn(output_unpad) + S_dmask_converted = convert_flash_attn_S_to_softmax( + S_dmask, key_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + dropout_mask = S_dmask_converted >= 0 + attn_unnorm = S_dmask_converted.abs() + attn = normalize_flash_attn_S(attn_unnorm, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], + key_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal) + dropout_fraction = get_dropout_fraction(dropout_mask, key_padding_mask, key_padding_mask, + causal=causal).item() + + output_ref, attn_ref = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask, + causal=causal) + output_pt, attn_pt = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask, + causal=causal, upcast=False, reorder_ops=True) + print(f'Actual dropout fraction: {dropout_fraction}') + print(f'Output max diff: {(output - output_ref).abs().max().item()}') + print(f'Output mean diff: {(output - output_ref).abs().mean().item()}') + print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}') + print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}') + print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}') + print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}') + + g = torch.randn_like(output) + dqkv_unpad, = torch.autograd.grad(output, qkv_unpad, g) + dqkv = dqkv_pad_fn(dqkv_unpad) + dqkv_ref, = torch.autograd.grad(output_ref, qkv, g) + dqkv_pt, = torch.autograd.grad(output_pt, qkv, g) + print(f'dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}') + print(f'dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}') + print(f'dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}') + print(f'dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}') + print(f'dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}') + print(f'dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}') + print(f'dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}') + print(f'dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}') + + # Check that FlashAttention's numerical error is at most twice the numerical error + # of a Pytorch implementation. 
+ assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item() + # assert torch.allclose(output, output_ref, rtol=rtol, atol=atol) + assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item() + # assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol) + if dropout_p == 0.0: + assert dropout_mask.all() + else: + assert 0.99 <= dropout_fraction / dropout_p <= 1.01 + + assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item() + + +def test_flash_attn_triton_output(seqlen_q, seqlen_k, d, causal, dtype, bias_shape): + if seqlen_q >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30: + pass # Reference implementation OOM + device = 'cuda' + # set seed + torch.random.manual_seed(0) + batch_size = 32 + nheads = 4 + q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype) + k, v = torch.randn(batch_size, seqlen_k, 2, nheads, d, device=device, dtype=dtype).unbind(dim=2) + if bias_shape == '1h1k': + bias = torch.randn(1, nheads, 1, seqlen_k, dtype=torch.float, device=device) + elif bias_shape == '1hqk': + bias = torch.randn(1, nheads, seqlen_q, seqlen_k, dtype=torch.float, device=device) + elif bias_shape == 'b11k': + bias = torch.randn(batch_size, 1, 1, seqlen_k, dtype=torch.float, device=device) + elif bias_shape == 'b1qk': + bias = torch.randn(batch_size, 1, seqlen_q, seqlen_k, dtype=torch.float, device=device) + else: + bias = None + + q, k, v = [x.detach().requires_grad_() for x in [q, k, v]] + output = flash_attn_func(q, k, v, bias, causal) + + output_ref, attn_ref = attention_ref(q, k, v, bias=bias, causal=causal) + output_pt, attn_pt = attention_ref(q, k, v, bias=bias, causal=causal, upcast=False, + reorder_ops=True) + print(f'Output max diff: {(output - output_ref).abs().max().item()}') + print(f'Output mean diff: {(output - output_ref).abs().mean().item()}') + print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}') + print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}') + + g = torch.randn_like(output) + dq, dk, dv = torch.autograd.grad(output, (q, k, v), g) + dq_ref, dk_ref, dv_ref, = torch.autograd.grad(output_ref, (q, k, v), g) + dq_pt, dk_pt, dv_pt, = torch.autograd.grad(output_pt, (q, k, v), g) + print(f'dQ max diff: {(dq - dq_ref).abs().max().item()}') + print(f'dK max diff: {(dk - dk_ref).abs().max().item()}') + print(f'dV max diff: {(dv - dv_ref).abs().max().item()}') + print(f'dQ mean diff: {(dq - dq_ref).abs().mean().item()}') + print(f'dK mean diff: {(dk - dk_ref).abs().mean().item()}') + print(f'dV mean diff: {(dv - dv_ref).abs().mean().item()}') + print(f'dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}') + print(f'dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}') + print(f'dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}') + print(f'dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}') + print(f'dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}') + print(f'dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}') + + # Check that FlashAttention's numerical error is at most twice the numerical error + # of a Pytorch implementation. 
+ assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item() + # assert torch.allclose(output, output_ref, rtol=rtol, atol=atol) + + assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item() + assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item() + assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item() + + +def test_flash_attn_triton_race_condition(seqlen_q, seqlen_k, d, causal, dtype, bias_shape): + if seqlen_q >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30: + pass # Reference implementation OOM + device = 'cuda' + # set seed + torch.random.manual_seed(0) + batch_size = 32 + nheads = 4 + q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype) + k, v = torch.randn(batch_size, seqlen_k, 2, nheads, d, device=device, dtype=dtype).unbind(dim=2) + if bias_shape == '1h1k': + bias = torch.randn(1, nheads, 1, seqlen_k, dtype=torch.float, device=device) + elif bias_shape == '1hqk': + bias = torch.randn(1, nheads, seqlen_q, seqlen_k, dtype=torch.float, device=device) + elif bias_shape == 'b11k': + bias = torch.randn(batch_size, 1, 1, seqlen_k, dtype=torch.float, device=device) + elif bias_shape == 'b1qk': + bias = torch.randn(batch_size, 1, seqlen_q, seqlen_k, dtype=torch.float, device=device) + else: + bias = None + + q, k, v = [x.detach().requires_grad_() for x in [q, k, v]] + output_0 = flash_attn_func(q, k, v, bias, causal) + + g = torch.randn_like(output_0) + dq_0, dk_0, dv_0 = torch.autograd.grad(output_0, (q, k, v), g) + + # The SEQUENCE_PARALLEL option for the bwd to makes dq non-deterministic + deterministic_dq = False + # Numerical error if we just do any arithmetic on dq + dq_atol = ((dq_0 + 0.3 - 0.3) - dq_0).abs().max().item() + equal_fn = torch.equal if deterministic_dq else partial(torch.allclose, atol=dq_atol) + # Run 10000 times and check that the results don't change + for i in range(10000): + output = flash_attn_func(q, k, v, bias, causal) + output_equal = torch.equal(output, output_0) + if not output_equal: # Printing / computing diff sometimes makes the race condition disappear + print(f'{dtype = }, {causal = }, {d = }, {seqlen_q = }, {seqlen_k = }, {bias_shape = }, {i = }') + print(f'Output max diff: {(output - output_0).abs().max().item()}') + assert torch.equal(output, output_0) + dq, dk, dv = torch.autograd.grad(output, (q, k, v), g) + dq_equal = equal_fn(dq, dq_0) + dk_equal = torch.equal(dk, dk_0) + dv_equal = torch.equal(dv, dv_0) + if not (dq_equal and dk_equal and dv_equal): + print(f'{dtype = }, {causal = }, {d = }, {seqlen_q = }, {seqlen_k = }, {bias_shape = }, {i = }') + print(f'dQ max diff: {(dq - dq_0).abs().max().item()}') + print(f'dK max diff: {(dk - dk_0).abs().max().item()}') + print(f'dV max diff: {(dv - dv_0).abs().max().item()}') + assert equal_fn(dq, dq_0) + assert torch.equal(dk, dk_0) + assert torch.equal(dv, dv_0) + + +if __name__=="__main__": + test_flash_attn_unpadded_kvpacked(900, 1280,16, 0, False, torch.float16) \ No newline at end of file diff --git a/cosense3d/ops/__init__.py b/cosense3d/ops/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/ops/iou3d_nms_utils.py b/cosense3d/ops/iou3d_nms_utils.py new file mode 100644 index 00000000..3c085b59 --- /dev/null +++ b/cosense3d/ops/iou3d_nms_utils.py @@ -0,0 +1,301 @@ +""" +3D IoU Calculation and Rotated NMS +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
+""" +import torch + +from cosense3d.ops.utils import check_numpy_to_torch +import cuda_ops +import numpy as np + + +def boxes_bev_iou_cpu(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + + """ + boxes_a, is_numpy = check_numpy_to_torch(boxes_a) + boxes_b, is_numpy = check_numpy_to_torch(boxes_b) + assert not (boxes_a.is_cuda or boxes_b.is_cuda), 'Only support CPU tensors' + assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7 + ans_iou = boxes_a.new_zeros(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) + cuda_ops.boxes_iou_bev_cpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou) + + return ans_iou.numpy() if is_numpy else ans_iou + + +def boxes_iou_bev(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N, M) + """ + assert boxes_a.shape[1] == boxes_b.shape[1] == 7 + ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() + + cuda_ops.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou) + + return ans_iou + + +def decode_boxes_and_iou3d(boxes_a, boxes_b, pc_range, box_mean, box_std): + """ + Transform the boxes format back to [x, y, z, dx, dy, dz, heading] and calculate iou + :param boxes_a: (N, 7) [x_n, y_n, z_n, dx_n, dy_n, dz_n, heading_n] normalized + :param boxes_b: (M, 7) [x_n, y_n, z_n, dx_n, dy_n, dz_n, heading_n] + :param pc_range: point cloud range + :param object_ave_size: average object size + :return: ans_iou: (N, M) + """ + boxes_a_dec = decode_boxes(boxes_a, pc_range, box_mean, box_std) + boxes_b_dec = decode_boxes(boxes_b, pc_range, box_mean, box_std) + iou = boxes_iou3d_gpu(boxes_a_dec, boxes_b_dec) + + return iou + + +def decode_boxes(boxes, pc_range, box_mean, box_std): + assert len(boxes.shape)==2 + assert boxes.shape[1]==8 + if isinstance(box_mean, list): + box_mean = torch.tensor(box_mean, device=boxes.device) + if isinstance(box_std, list): + box_std = torch.tensor(box_std, device=boxes.device) + boxes = boxes * box_std[None, :] + box_mean[None, :] + boxes_out = torch.zeros((boxes.shape[0], 7), dtype=boxes.dtype, device=boxes.device) + for i in range(3): + boxes_out[:, i] = boxes[:, i] * (pc_range[i + 3] - pc_range[i]) + pc_range[i] + boxes_out[:, 3:6] = boxes[:, 3:6].exp() + boxes_out[:, 6] = torch.atan2(boxes[:, 6], boxes[:, 7]) + return boxes_out + + +def decode_boxes_and_giou3d(boxes_a, boxes_b, pc_range, box_mean, box_std): + boxes_a_dec = decode_boxes(boxes_a, pc_range, box_mean, box_std) + boxes_b_dec = decode_boxes(boxes_b, pc_range, box_mean, box_std) + corners_a = centroid_to_corners(boxes_a_dec) + corners_b = centroid_to_corners(boxes_b_dec) + iou, union = boxes_iou3d_gpu(boxes_a_dec, boxes_b_dec, return_union=True) + lwh = torch.max(corners_a.max(dim=1)[0][:, None, :], corners_b.max(dim=1)[0]) \ + -torch.min(corners_a.min(dim=1)[0][:, None, :], corners_b.min(dim=1)[0]) + volume = lwh[..., 0] * lwh[..., 1] * lwh[..., 2] + + giou = iou - (volume - union) / volume + + return giou + + +def giou3d(boxes_a_dec, boxes_b_dec): + corners_a = centroid_to_corners(boxes_a_dec) + corners_b = centroid_to_corners(boxes_b_dec) + iou, union = boxes_iou3d_gpu(boxes_a_dec, boxes_b_dec, return_union=True) + lwh = torch.max(corners_a.max(dim=1)[0][:, None, :], corners_b.max(dim=1)[0]) \ + -torch.min(corners_a.min(dim=1)[0][:, None, :], corners_b.min(dim=1)[0]) + volume = lwh[..., 0] * lwh[..., 1] * lwh[..., 2] + + 
giou = iou - (volume - union) / volume + + return giou + + +def aligned_boxes_iou3d_gpu(boxes_a, boxes_b, return_union=False): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N, 1) + """ + assert boxes_a.shape[1] == boxes_b.shape[1] == 7 + assert boxes_a.shape[0] == boxes_b.shape[0] + # height overlap + boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1) + boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1) + boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(-1, 1) + boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(-1, 1) + + # bev overlap + overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M) + cuda_ops.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev) + overlaps_bev = torch.diagonal(overlaps_bev).reshape(-1, 1) + + max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) + min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max) + overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) + + # 3d iou + overlaps_3d = overlaps_bev * overlaps_h + + vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1) + vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(-1, 1) + union = torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6) + iou3d = overlaps_3d / union + if return_union: + return iou3d, union + return iou3d + + +def boxes_iou3d_gpu(boxes_a, boxes_b, return_union=False): + return boxes_iou3d(boxes_a, boxes_b, return_union, True) + + +def boxes_iou3d_cpu(boxes_a, boxes_b, return_union=False): + return boxes_iou3d(boxes_a, boxes_b, return_union, False) + + +def boxes_iou3d(boxes_a, boxes_b, return_union=False, gpu=True): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N, M) + """ + assert boxes_a.shape[1] == boxes_b.shape[1] == 7 + + # height overlap + boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1) + boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1) + boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1) + boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1) + + # bev overlap + if gpu: + overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M) + cuda_ops.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev) + else: + overlaps_bev = boxes_bev_iou_cpu(boxes_a, boxes_b) + + max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) + min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max) + overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) + + # 3d iou + overlaps_3d = overlaps_bev * overlaps_h + + vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1) + vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1) + union = torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6) + iou3d = overlaps_3d / union + if return_union: + return iou3d, union + return iou3d + + +def centroid_to_corners(boxes): + if isinstance(boxes, np.ndarray): + corners = _centroid_to_corners_np(boxes) + elif isinstance(boxes, torch.Tensor): + corners = _centroid_to_corners_torch(boxes) + else: + raise TypeError('Input boxes should either be numpy array or torch tensor.') + + return corners + + +def _centroid_to_corners_torch(boxes): + '''Convert boxes from centroid format to corners + :param boxes: [N, 
7] + :return: corners: [N, 8, 3] + ''' + corners = torch.zeros((boxes.shape[0], 8, 3), dtype=boxes.dtype, device=boxes.device) + sin_t = torch.sin(boxes[:, -1]) + cos_t = torch.cos(boxes[:, -1]) + corners[:, ::4, 0] = torch.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, dim=1) # lfx + corners[:, ::4, 1] = torch.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, dim=1) # lfy + corners[:, 1::4, 0] = torch.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, dim=1) # lbx + corners[:, 1::4, 1] = torch.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, dim=1) # lby + corners[:, 2::4, 0] = torch.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, dim=1) # rbx + corners[:, 2::4, 1] = torch.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, dim=1) # rby + corners[:, 3::4, 0] = torch.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, dim=1) # rfx + corners[:, 3::4, 1] = torch.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, dim=1) # rfy + corners[:, :, 2] = torch.cat([torch.stack([boxes[:, 2] - boxes[:, 5] / 2] * 4, dim=1), + torch.stack([boxes[:, 2] + boxes[:, 5] / 2] * 4, dim=1)], dim=1) + + return corners + + +def _centroid_to_corners_np(boxes): + '''Convert boxes from centroid format to corners + :param boxes: [N, 7] + :return: corners: [N, 8, 3] + ''' + corners = np.zeros((boxes.shape[0], 8, 3), dtype=boxes.dtype) + sin_t = np.sin(boxes[:, -1]) + cos_t = np.cos(boxes[:, -1]) + corners[:, ::4, 0] = np.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, axis=1) # lfx + corners[:, ::4, 1] = np.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, axis=1) # lfy + corners[:, 1::4, 0] = np.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, axis=1) # lbx + corners[:, 1::4, 1] = np.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, axis=1) # lby + corners[:, 2::4, 0] = np.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, axis=1) # rbx + corners[:, 2::4, 1] = np.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, axis=1) # rby + corners[:, 3::4, 0] = np.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, axis=1) # rfx + corners[:, 3::4, 1] = np.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, axis=1) # rfy + corners[:, :, 2] = np.concatenate([np.stack([boxes[:, 2] - boxes[:, 5] / 2] * 4, axis=1), + np.stack([boxes[:, 2] + boxes[:, 5] / 2] * 4, axis=1)], axis=1) + + return corners + + +def rotate_weighted_nms_gpu( + box_preds, + rbboxes, + dir_labels, + labels_preds, + scores, + iou_preds, + anchors, + pre_max_size=None, + post_max_size=None, + iou_threshold=0.5, +): + """Original definition can be found in CIA_SSD paper""" + if pre_max_size is not None: + num_keeped_scores = scores.shape[0] + + +def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs): + """ + Operate on rotated bev boxes[x,y,dx,dy,heading] + :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + :param scores: (N) + :param thresh: + :return: + """ + assert boxes.shape[1] == 7 + order = scores.sort(0, descending=True)[1] + if pre_maxsize is not None: + order = order[:pre_maxsize] + + boxes = boxes[order].contiguous() + keep = torch.LongTensor(boxes.size(0)) + num_out = cuda_ops.nms_gpu(boxes, keep, thresh) + return 
order[keep[:num_out].cuda()].contiguous() + + +def nms_normal_gpu(boxes, scores, thresh, **kwargs): + """ + Ignore heading and operate on bev boxes[x,y,dx,dy] + :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + :param scores: (N) + :param thresh: + :return: + """ + assert boxes.shape[1] == 7 + order = scores.sort(0, descending=True)[1] + + boxes = boxes[order].contiguous() + + keep = torch.LongTensor(boxes.size(0)) + num_out = cuda_ops.nms_normal_gpu(boxes, keep, thresh) + return order[keep[:num_out].cuda()].contiguous(), None diff --git a/cosense3d/ops/pointnet2_utils.py b/cosense3d/ops/pointnet2_utils.py new file mode 100644 index 00000000..51c2c25c --- /dev/null +++ b/cosense3d/ops/pointnet2_utils.py @@ -0,0 +1,395 @@ +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Function, Variable + +import cuda_ops as pointnet2 + + +class BallQuery(Function): + + @staticmethod + def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor, + new_xyz: torch.Tensor, new_xyz_batch_cnt): + """ + Args: + ctx: + radius: float, radius of the balls + nsample: int, maximum number of features in the balls + xyz: (N1 + N2 ..., 3) xyz coordinates of the features + xyz_batch_cnt: (batch_size), [N1, N2, ...] + new_xyz: (M1 + M2 ..., 3) centers of the ball query + new_xyz_batch_cnt: (batch_size), [M1, M2, ...] + + Returns: + idx: (M1 + M2, nsample) tensor with the indicies of the features that form the query balls + """ + assert new_xyz.is_contiguous() + assert new_xyz_batch_cnt.is_contiguous() + assert xyz.is_contiguous() + assert xyz_batch_cnt.is_contiguous() + + B = xyz_batch_cnt.shape[0] + M = new_xyz.shape[0] + idx = torch.cuda.IntTensor(M, nsample).zero_() + + pointnet2.ball_query_wrapper(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx) + empty_ball_mask = (idx[:, 0] == -1) + idx[empty_ball_mask] = 0 + return idx, empty_ball_mask + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply + + +class GroupingOperation(Function): + + @staticmethod + def forward(ctx, features: torch.Tensor, features_batch_cnt: torch.Tensor, + idx: torch.Tensor, idx_batch_cnt: torch.Tensor): + """ + Args: + ctx: + features: (N1 + N2 ..., C) tensor of features to group + features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with + idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + idx_batch_cnt: (batch_size) [M1 + M2 ...] 
tensor containing the indicies of features to group with + + Returns: + output: (M1 + M2, C, nsample) tensor + """ + assert features.is_contiguous() + assert features_batch_cnt.is_contiguous() + assert idx.is_contiguous() + assert idx_batch_cnt.is_contiguous() + + assert features.shape[0] == features_batch_cnt.sum(), \ + 'features: %s, features_batch_cnt: %s' % (str(features.shape), str(features_batch_cnt)) + assert idx.shape[0] == idx_batch_cnt.sum(), \ + 'idx: %s, idx_batch_cnt: %s' % (str(idx.shape), str(idx_batch_cnt)) + + M, nsample = idx.size() + N, C = features.size() + B = idx_batch_cnt.shape[0] + output = torch.cuda.FloatTensor(M, C, nsample) + + pointnet2.group_points_wrapper(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, output) + + ctx.for_backwards = (B, N, idx, features_batch_cnt, idx_batch_cnt) + return output + + @staticmethod + def backward(ctx, grad_out: torch.Tensor): + """ + Args: + ctx: + grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward + + Returns: + grad_features: (N1 + N2 ..., C) gradient of the features + """ + B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards + + M, C, nsample = grad_out.size() + grad_features = Variable(torch.cuda.FloatTensor(N, C).zero_()) + + grad_out_data = grad_out.data.contiguous() + pointnet2.group_points_grad_wrapper(B, M, C, N, nsample, grad_out_data, idx, + idx_batch_cnt, features_batch_cnt, grad_features.data) + return grad_features, None, None, None + + +grouping_operation = GroupingOperation.apply + + +class QueryAndGroup(nn.Module): + def __init__(self, radius: float, nsample: int, use_xyz: bool = True): + """ + Args: + radius: float, radius of ball + nsample: int, maximum number of features to gather in the ball + use_xyz: + """ + super().__init__() + self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz + + def forward(self, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor, + new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor, + features: torch.Tensor = None): + """ + Args: + xyz: (N1 + N2 ..., 3) xyz coordinates of the features + xyz_batch_cnt: (batch_size), [N1, N2, ...] + new_xyz: (M1 + M2 ..., 3) centers of the ball query + new_xyz_batch_cnt: (batch_size), [M1, M2, ...] + features: (N1 + N2 ..., C) tensor of features to group + + Returns: + new_features: (M1 + M2, C, nsample) tensor + """ + assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(new_xyz_batch_cnt)) + assert new_xyz.shape[0] == new_xyz_batch_cnt.sum(), \ + 'new_xyz: %s, new_xyz_batch_cnt: %s' % (str(new_xyz.shape), str(new_xyz_batch_cnt)) + + # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...) + idx, empty_ball_mask = ball_query(self.radius, self.nsample, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt) + grouped_xyz = grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt) # (M1 + M2, 3, nsample) + grouped_xyz -= new_xyz.unsqueeze(-1) + + grouped_xyz[empty_ball_mask] = 0 + + if features is not None: + grouped_features = grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt) # (M1 + M2, C, nsample) + grouped_features[empty_ball_mask] = 0 + if self.use_xyz: + new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (M1 + M2 ..., C + 3, nsample) + else: + new_features = grouped_features + else: + assert self.use_xyz, "Cannot have not features and not use xyz as a feature!" 
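+            # No point features were provided, so the grouped relative xyz offsets themselves
+            # serve as the (M1 + M2 ..., 3, nsample) output features.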
+ new_features = grouped_xyz + + return new_features, idx + + +class FurthestPointSampling(Function): + @staticmethod + def forward(ctx, xyz: torch.Tensor, npoint: int): + """ + Args: + ctx: + xyz: (B, N, 3) where N > npoint + npoint: int, number of features in the sampled set + + Returns: + output: (B, npoint) tensor containing the set + """ + assert xyz.is_contiguous() + + B, N, _ = xyz.size() + output = torch.cuda.IntTensor(B, npoint) + temp = torch.cuda.FloatTensor(B, N).fill_(1e10) + + pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output) + return output + + @staticmethod + def backward(xyz, a=None): + return None, None + + +furthest_point_sample = FurthestPointSampling.apply + + +class ThreeNN(Function): + @staticmethod + def forward(ctx, unknown, unknown_batch_cnt, known, known_batch_cnt): + """ + Args: + ctx: + unknown: (N1 + N2..., 3) + unknown_batch_cnt: (batch_size), [N1, N2, ...] + known: (M1 + M2..., 3) + known_batch_cnt: (batch_size), [M1, M2, ...] + + Returns: + dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors + idx: (N1 + N2 ..., 3) index of the three nearest neighbors, range [0, M1+M2+...] + """ + assert unknown.shape.__len__() == 2 and unknown.shape[1] == 3 + assert known.shape.__len__() == 2 and known.shape[1] == 3 + assert unknown_batch_cnt.__len__() == known_batch_cnt.__len__() + + dist2 = unknown.new_zeros(unknown.shape) + idx = unknown_batch_cnt.new_zeros(unknown.shape).int() + + pointnet2.three_nn_wrapper( + unknown.contiguous(), unknown_batch_cnt.contiguous(), + known.contiguous(), known_batch_cnt.contiguous(), dist2, idx + ) + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + + @staticmethod + def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor): + """ + Args: + ctx: + features: (M1 + M2 ..., C) + idx: [N1 + N2 ..., 3] + weight: [N1 + N2 ..., 3] + + Returns: + out_tensor: (N1 + N2 ..., C) + """ + assert idx.shape[0] == weight.shape[0] and idx.shape[1] == weight.shape[1] == 3 + + ctx.three_interpolate_for_backward = (idx, weight, features.shape[0]) + output = features.new_zeros((idx.shape[0], features.shape[1])) + pointnet2.three_interpolate_wrapper(features.contiguous(), idx.contiguous(), weight.contiguous(), output) + return output + + @staticmethod + def backward(ctx, grad_out: torch.Tensor): + """ + Args: + ctx: + grad_out: (N1 + N2 ..., C) + + Returns: + grad_features: (M1 + M2 ..., C) + """ + idx, weight, M = ctx.three_interpolate_for_backward + grad_features = grad_out.new_zeros((M, grad_out.shape[1])) + pointnet2.three_interpolate_grad_wrapper( + grad_out.contiguous(), idx.contiguous(), weight.contiguous(), grad_features + ) + return grad_features, None, None + + +three_interpolate = ThreeInterpolate.apply + + +class StackSAModuleMSG(nn.Module): + + def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]], + use_xyz: bool = True, pool_method='max_pool'): + """ + Args: + radii: list of float, list of radii to group with + nsamples: list of int, number of samples in each ball query + mlps: list of list of int, spec of the pointnet before the global pooling for each scale + use_xyz: + pool_method: max_pool / avg_pool + """ + super().__init__() + + assert len(radii) == len(nsamples) == len(mlps) + + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + 
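+            # One QueryAndGroup + shared point-wise MLP is built per radius; forward() pools each
+            # scale separately and concatenates the results along the channel dimension
+            # (multi-scale grouping in the style of PointNet++ MSG).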
self.groupers.append(QueryAndGroup(radius, nsample, use_xyz=use_xyz)) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + shared_mlps = [] + for k in range(len(mlp_spec) - 1): + shared_mlps.extend([ + nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False), + nn.BatchNorm2d(mlp_spec[k + 1]), + nn.ReLU() + ]) + self.mlps.append(nn.Sequential(*shared_mlps)) + self.pool_method = pool_method + + self.init_weights() + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + if isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.0) + nn.init.constant_(m.bias, 0) + + def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True): + """ + :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features + :param xyz_batch_cnt: (batch_size), [N1, N2, ...] + :param new_xyz: (M1 + M2 ..., 3) + :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] + :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features + :return: + new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz + new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + new_features_list = [] + for k in range(len(self.groupers)): + new_features, ball_idxs = self.groupers[k]( + xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features + ) # (M1 + M2, C, nsample) + new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample) + new_features = self.mlps[k](new_features) # (1, C, M1 + M2 ..., nsample) + + if self.pool_method == 'max_pool': + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ).squeeze(dim=-1) # (1, C, M1 + M2 ...) + elif self.pool_method == 'avg_pool': + new_features = F.avg_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ).squeeze(dim=-1) # (1, C, M1 + M2 ...) 
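+            # Both pooling modes reduce over the nsample neighbours, leaving one feature vector
+            # per query location; any other pool_method falls through to the error below.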
+ else: + raise NotImplementedError + new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C) + new_features_list.append(new_features) + + new_features = torch.cat(new_features_list, dim=1) # (M1 + M2 ..., C) + + return new_xyz, new_features + + +class StackPointnetFPModule(nn.Module): + def __init__(self, *, mlp: List[int]): + """ + Args: + mlp: list of int + """ + super().__init__() + shared_mlps = [] + for k in range(len(mlp) - 1): + shared_mlps.extend([ + nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False), + nn.BatchNorm2d(mlp[k + 1]), + nn.ReLU() + ]) + self.mlp = nn.Sequential(*shared_mlps) + + def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None): + """ + Args: + unknown: (N1 + N2 ..., 3) + known: (M1 + M2 ..., 3) + unknow_feats: (N1 + N2 ..., C1) + known_feats: (M1 + M2 ..., C2) + + Returns: + new_features: (N1 + N2 ..., C_out) + """ + dist, idx = three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt) + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=-1, keepdim=True) + weight = dist_recip / norm + + interpolated_feats = three_interpolate(known_feats, idx, weight) + + if unknown_feats is not None: + new_features = torch.cat([interpolated_feats, unknown_feats], dim=1) # (N1 + N2 ..., C2 + C1) + else: + new_features = interpolated_feats + new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1) + new_features = self.mlp(new_features) + + new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C) + return new_features diff --git a/cosense3d/ops/setup.py b/cosense3d/ops/setup.py new file mode 100644 index 00000000..239b0935 --- /dev/null +++ b/cosense3d/ops/setup.py @@ -0,0 +1,39 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + + +setup( + name='cuda_ops', + author='xxx', + version="0.1.0", + ext_modules=[ + CUDAExtension('cuda_ops', [ + 'src/cuda_ops_api.cpp', + 'src/dot_product/dot_product.cpp', + 'src/dot_product/dot_product_kernel.cu', + 'src/scalar_attention/scalar_attention.cpp', + 'src/scalar_attention/scalar_attention_kernel.cu', + 'src/index_pooling/index_pooling.cpp', + 'src/index_pooling/index_pooling_kernel.cu', + 'src/utils/boxes.cpp', + 'src/utils/boxes_kernel.cu', + 'src/iou_nms/iou3d_cpu.cpp', + 'src/iou_nms/iou3d_nms.cpp', + 'src/iou_nms/iou3d_nms_kernel.cu', + # pointnet2 stack + 'src/pointnet2_stack/ball_query.cpp', + 'src/pointnet2_stack/ball_query_gpu.cu', + 'src/pointnet2_stack/group_points.cpp', + 'src/pointnet2_stack/group_points_gpu.cu', + 'src/pointnet2_stack/sampling.cpp', + 'src/pointnet2_stack/sampling_gpu.cu', + 'src/pointnet2_stack/interpolate.cpp', + 'src/pointnet2_stack/interpolate_gpu.cu', + ], + extra_compile_args={ + 'cxx': ['-g'], + 'nvcc': ['-O2'] + }), + + ], + cmdclass={'build_ext': BuildExtension}) diff --git a/cosense3d/ops/sparse_ops.py b/cosense3d/ops/sparse_ops.py new file mode 100644 index 00000000..dab0b8a5 --- /dev/null +++ b/cosense3d/ops/sparse_ops.py @@ -0,0 +1,85 @@ +import torch +from torch.autograd import Function + +import cuda_ops + + +class DotProduct(Function): + @staticmethod + def forward(ctx, query, pos_enc, out_F, kq_map): + assert (query.is_contiguous() and pos_enc.is_contiguous() and out_F.is_contiguous()) + ctx.m = kq_map.shape[1] + _, ctx.h, ctx.c = query.shape + ctx.kkk = pos_enc.shape[0] + ctx.save_for_backward(query, pos_enc, kq_map) + cuda_ops.dot_product_forward(ctx.m, ctx.h, ctx.kkk, ctx.c, query, 
pos_enc,
+                                     out_F, kq_map)
+        return out_F
+
+    @staticmethod
+    def backward(ctx, grad_out_F):
+        query, pos_enc, kq_map = ctx.saved_tensors
+        grad_query = torch.zeros_like(query)
+        grad_pos = torch.zeros_like(pos_enc)
+        cuda_ops.dot_product_backward(ctx.m, ctx.h, ctx.kkk, ctx.c, query, pos_enc,
+                                      kq_map, grad_query, grad_pos, grad_out_F)
+        return grad_query, grad_pos, None, None
+
+dot_product_cuda = DotProduct.apply
+
+
+class ScalarAttention(Function):
+    @staticmethod
+    def forward(ctx, weight, value, out_F, kq_indices):
+        assert (weight.is_contiguous() and value.is_contiguous() and out_F.is_contiguous())
+        ctx.m = kq_indices.shape[1]
+        _, ctx.h, ctx.c = value.shape
+        ctx.save_for_backward(weight, value, kq_indices)
+        cuda_ops.scalar_attention_forward(ctx.m, ctx.h, ctx.c, weight, value, out_F,
+                                          kq_indices)
+        return out_F
+
+    @staticmethod
+    def backward(ctx, grad_out_F):
+        weight, value, kq_indices = ctx.saved_tensors
+        grad_weight = torch.zeros_like(weight)
+        grad_value = torch.zeros_like(value)
+        cuda_ops.scalar_attention_backward(ctx.m, ctx.h, ctx.c, weight, value,
+                                           kq_indices, grad_weight, grad_value,
+                                           grad_out_F)
+        return grad_weight, grad_value, None, None
+
+
+scalar_attention_cuda = ScalarAttention.apply
+
+
+class IndexPooling(Function):
+    @staticmethod
+    def forward(ctx, x, c_indices, out, out_indices):
+        assert (x.is_contiguous() and c_indices.is_contiguous() and out.is_contiguous()), 'inputs should be contiguous.'
+        assert len(x.shape)==1 and len(c_indices.shape)==1 and len(out_indices.shape)==1, 'input tensors dim error.'
+        assert len(out.shape) == 2, 'out tensor dim error.'
+        assert x.shape[0] == c_indices.shape[0] and x.shape[0] == out_indices.shape[0], 'shape doesn\'t match.'
+        ctx.m = x.shape[0]
+        assert c_indices.max() < ctx.m, 'c_indices max value larger than out dim.'
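+        # Semantics (see index_pooling_kernel.cu): for every mapping i this op scatter-adds
+        #     out[out_indices[i], c_indices[i]] += x[i]
+        # i.e. c_indices select the channel and out_indices select the row of the pre-allocated
+        # 2D `out` tensor (usage: out = index_pooling_cuda(x, c_indices, out, out_indices)).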
+ assert c_indices.min() >= 0, 'indices should >= 0' + assert out_indices.min() >= 0, 'indices should >= 0' + _, ctx.c = out.shape + ctx.save_for_backward(x, c_indices, out_indices) + cuda_ops.index_pooling_forward(ctx.m, ctx.c, x, c_indices, out, out_indices) + return out + + @staticmethod + def backward(ctx, grad_out): + # print(torch.isnan(grad_out).sum()) + # print(grad_out.type()) + # print(grad_out.shape) + x, c_indices, out_indices = ctx.saved_tensors + assert c_indices.min() >= 0, 'indices should >= 0' + assert out_indices.min() >= 0, 'indices should >= 0' + grad_x = torch.zeros_like(x) + cuda_ops.index_pooling_backward(ctx.m, ctx.c, c_indices, out_indices, + grad_x, grad_out) + return grad_x, None, None, None + +index_pooling_cuda = IndexPooling.apply diff --git a/cosense3d/ops/src/boxes/boxes.cpp b/cosense3d/ops/src/boxes/boxes.cpp new file mode 100644 index 00000000..143e7b97 --- /dev/null +++ b/cosense3d/ops/src/boxes/boxes.cpp @@ -0,0 +1,78 @@ +/* +Reference paper: https://arxiv.org/abs/1907.03670 +Written by Shaoshuai Shi +Modified by Yunshuang Yuan +*/ + +#include +#include +#include +#include +#include "boxes_kernel.h" + + +void points_in_boxes_gpu(AT boxes_tensor, AT pts_tensor, AT box_idx_of_points_tensor){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] + // params boxes_idx_of_points: (B, npoints), default -1 + +// CHECK_INPUT(boxes_tensor); +// CHECK_INPUT(pts_tensor); +// CHECK_INPUT(box_idx_of_points_tensor); + + int batch_size = boxes_tensor.size(0); + int boxes_num = boxes_tensor.size(1); + int pts_num = pts_tensor.size(1); + + const float *boxes = boxes_tensor.data(); + const float *pts = pts_tensor.data(); + int *box_idx_of_points = box_idx_of_points_tensor.data(); + + points_in_boxes_launcher(batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points); + +} + + +inline void lidar_to_local_coords_cpu(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){ + float cosa = cos(-rot_angle), sina = sin(-rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + + +inline int check_pt_in_box3d_cpu(const float *pt, const float *box3d, float &local_x, float &local_y){ + // param pt: (x, y, z) + // param box3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + const float MARGIN = 1e-2; + float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6]; + + if (fabsf(z - cz) > dz / 2.0) return 0; + lidar_to_local_coords_cpu(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN); + return in_flag; +} + + +void points_in_boxes_cpu(AT boxes_tensor, AT pts_tensor, AT pts_indices_tensor){ + // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps + // params pts: (num_points, 3) [x, y, z] + // params pts_indices: (N, num_points) + + int boxes_num = boxes_tensor.size(0); + int pts_num = pts_tensor.size(0); + + const float *boxes = boxes_tensor.data(); + const float *pts = pts_tensor.data(); + int *pts_indices = pts_indices_tensor.data(); + + float local_x = 0, local_y = 0; + for (int i = 0; i < boxes_num; i++){ + for (int j = 0; j < pts_num; j++){ + int cur_in_flag = check_pt_in_box3d_cpu(pts + j * 3, boxes + i * 7, local_x, local_y); + pts_indices[i * pts_num + j] = cur_in_flag; + } + } + 
+} diff --git a/cosense3d/ops/src/boxes/boxes_kernel.cu b/cosense3d/ops/src/boxes/boxes_kernel.cu new file mode 100644 index 00000000..f209af9b --- /dev/null +++ b/cosense3d/ops/src/boxes/boxes_kernel.cu @@ -0,0 +1,83 @@ +/* +Written by Shaoshuai Shi +Modified by Yunshuang Yuan +*/ + +#include +#include +#include "boxes_kernel.h" + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) + + +__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){ + float cosa = cos(-rot_angle), sina = sin(-rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + + +__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, float &local_x, float &local_y){ + // param pt: (x, y, z) + // param box3d: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + + const float MARGIN = 1e-5; + float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6]; + + if (fabsf(z - cz) > dz / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN); + return in_flag; +} + + +__global__ void points_in_boxes_kernel(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate + // params boxes_idx_of_points: (B, npoints), default -1 + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= batch_size || pt_idx >= pts_num) return; + + boxes += bs_idx * boxes_num * 7; + pts += bs_idx * pts_num * 3 + pt_idx * 3; + box_idx_of_points += bs_idx * pts_num + pt_idx; + + float local_x = 0, local_y = 0; + int cur_in_flag = 0; + for (int k = 0; k < boxes_num; k++){ + cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y); + if (cur_in_flag){ + box_idx_of_points[0] = k; + break; + } + } +} + + +void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] + // params boxes_idx_of_points: (B, npoints), default -1 + cudaError_t err; + + dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size); + dim3 threads(THREADS_PER_BLOCK); + points_in_boxes_kernel<<>>(batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} \ No newline at end of file diff --git a/cosense3d/ops/src/boxes/boxes_kernel.h b/cosense3d/ops/src/boxes/boxes_kernel.h new file mode 100644 index 00000000..1327ee04 --- /dev/null +++ b/cosense3d/ops/src/boxes/boxes_kernel.h @@ -0,0 +1,23 @@ +#pragma once +#ifndef _boxes_KERNEL +#define _boxes_KERNEL +#include +#include +#include + +#define AT at::Tensor + +void points_in_boxes_cpu(AT boxes_tensor, AT pts_tensor, AT pts_indices_tensor); +void points_in_boxes_gpu(AT boxes_tensor, AT pts_tensor, AT box_idx_of_points_tensor); + +#ifdef __cplusplus +extern "C" { 
+#endif + +void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/cosense3d/ops/src/cuda_ops_api.cpp b/cosense3d/ops/src/cuda_ops_api.cpp new file mode 100644 index 00000000..9d4c15c9 --- /dev/null +++ b/cosense3d/ops/src/cuda_ops_api.cpp @@ -0,0 +1,38 @@ +#include +#include + +#include "dot_product/dot_product_kernel.h" +#include "scalar_attention/scalar_attention_kernel.h" +#include "utils/boxes_kernel.h" +#include "index_pooling/index_pooling_kernel.h" +#include "iou_nms/iou3d_cpu.h" +#include "iou_nms/iou3d_nms.h" +#include "pointnet2_stack/ball_query_gpu.h" +#include "pointnet2_stack/group_points_gpu.h" +#include "pointnet2_stack/sampling_gpu.h" +#include "pointnet2_stack/interpolate_gpu.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("dot_product_forward", &dot_product_forward, "dot_product_forward"); + m.def("dot_product_backward", &dot_product_backward, "dot_product_backward"); + m.def("scalar_attention_forward", &scalar_attention_forward, "scalar_attention_forward"); + m.def("scalar_attention_backward", &scalar_attention_backward, "scalar_attention_backward"); + m.def("index_pooling_forward", &index_pooling_forward, "index_pooling_forward"); + m.def("index_pooling_backward", &index_pooling_backward, "index_pooling_backward"); + m.def("points_in_boxes_gpu", &points_in_boxes_gpu, "points_in_boxes_gpu forward (CUDA)"); + m.def("points_in_boxes_cpu", &points_in_boxes_cpu, "points_in_boxes_cpu forward (CUDA)"); + m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap"); + m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou"); + m.def("nms_gpu", &nms_gpu, "oriented nms gpu"); + m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu"); + m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou"); + + // pointnet2 stack + m.def("ball_query_wrapper", &ball_query_wrapper_stack, "ball_query_wrapper_stack"); + m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper"); + m.def("group_points_wrapper", &group_points_wrapper_stack, "group_points_wrapper_stack"); + m.def("group_points_grad_wrapper", &group_points_grad_wrapper_stack, "group_points_grad_wrapper_stack"); + m.def("three_nn_wrapper", &three_nn_wrapper_stack, "three_nn_wrapper_stack"); + m.def("three_interpolate_wrapper", &three_interpolate_wrapper_stack, "three_interpolate_wrapper_stack"); + m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_stack, "three_interpolate_grad_wrapper_stack"); +} \ No newline at end of file diff --git a/cosense3d/ops/src/cuda_utils.h b/cosense3d/ops/src/cuda_utils.h new file mode 100644 index 00000000..4ef0e263 --- /dev/null +++ b/cosense3d/ops/src/cuda_utils.h @@ -0,0 +1,24 @@ +#ifndef _CUDA_UTILS_H +#define _CUDA_UTILS_H + +#include +#include + +#define TOTAL_THREADS 1024 +#define THREADS_PER_BLOCK 256 +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + + +inline int opt_n_threads(int work_size) { + const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); + return std::max(std::min(1 << pow_2, TOTAL_THREADS), 1); +} + +inline dim3 opt_block_config(int x, int y) { + const int x_threads = opt_n_threads(x); + const int y_threads = std::max(std::min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1); + dim3 block_config(x_threads, y_threads, 1); + return block_config; +} + +#endif \ No newline at end of file diff --git 
a/cosense3d/ops/src/dot_product/dot_product.cpp b/cosense3d/ops/src/dot_product/dot_product.cpp new file mode 100644 index 00000000..03d70d4a --- /dev/null +++ b/cosense3d/ops/src/dot_product/dot_product.cpp @@ -0,0 +1,38 @@ +#include +#include +#include +#include +#include "dot_product_kernel.h" + +void dot_product_forward( + int m, int h, int kkk, int c, AT query_tensor, AT pos_tensor, AT out_F_tensor, AT kq_map_tensor + ) +{ + const float* query = query_tensor.data_ptr(); + const float* pos = pos_tensor.data_ptr(); + float* out_F = out_F_tensor.data_ptr(); + const int* kq_map = kq_map_tensor.data_ptr(); + + dot_product_forward_launcher( + m, h, kkk, c, query, pos, out_F, kq_map + ); +} + +void dot_product_backward( + int m, int h, int kkk, int c, AT query_tensor, AT pos_tensor, AT kq_map_tensor, + AT grad_query_tensor, AT grad_pos_tensor, AT grad_out_F_tensor + ) +{ + const float* query = query_tensor.data_ptr(); + const float* pos = pos_tensor.data_ptr(); + const int* kq_map = kq_map_tensor.data_ptr(); + + float* grad_query = grad_query_tensor.data_ptr(); + float* grad_pos = grad_pos_tensor.data_ptr(); + const float* grad_out_F = grad_out_F_tensor.data_ptr(); + + dot_product_backward_launcher( + m, h, kkk, c, query, pos, kq_map, + grad_query, grad_pos, grad_out_F + ); +} \ No newline at end of file diff --git a/cosense3d/ops/src/dot_product/dot_product_kernel.cu b/cosense3d/ops/src/dot_product/dot_product_kernel.cu new file mode 100644 index 00000000..3f879738 --- /dev/null +++ b/cosense3d/ops/src/dot_product/dot_product_kernel.cu @@ -0,0 +1,84 @@ +#include "../cuda_utils.h" +#include "dot_product_kernel.h" + + +__global__ void dot_product_forward_kernel( + int m, int h, int kkk, int c, const float* query, const float* pos, float* out_F, const int* kq_map +) +{ + // m: # of total mappings + // h: # of attention heads + // kkk: # of keys (kernel volume) + // c: # of attention channels + + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= m * h) return; + + int map_idx = index / h; + int head_idx = index % h; + + int query_idx_ = kq_map[m + map_idx]; // kq_map[1][map_idx], kq_map: 2xm + int kernel_idx = kq_map[map_idx] % kkk; + + for(int i = 0; i < c; i++){ + + int query_idx = query_idx_ * h * c + head_idx * c + i; + int pos_idx = kernel_idx * h * c + head_idx * c + i; + + out_F[index] += query[query_idx] * pos[pos_idx]; + } +} + +__global__ void dot_product_backward_kernel( + int m, int h, int kkk, int c, const float* query, const float* pos, const int* kq_map, + float* grad_query, float* grad_pos, const float* grad_out_F +) +{ + // m: # of total mappings + // h: # of attention heads + // kkk: # of keys (kernel volume) + // c: # of attention channels + + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= m * c) return; + + int map_idx = index / c; + int i = index % c; + + int query_idx_ = kq_map[m + map_idx]; // kq_map[1][map_idx], kq_map: 2xm + int kernel_idx = kq_map[map_idx] % kkk; + + for(int head_idx = 0; head_idx < h; head_idx++){ + + int out_F_idx = map_idx * h + head_idx; + int query_idx = query_idx_ * h * c + head_idx * c + i; + int pos_idx = kernel_idx * h * c + head_idx * c + i; + + atomicAdd(grad_query + query_idx, grad_out_F[out_F_idx] * pos[pos_idx]); + atomicAdd(grad_pos + pos_idx, grad_out_F[out_F_idx] * query[query_idx]); + } +} + +void dot_product_forward_launcher( + int m, int h, int kkk, int c, const float* query, const float* pos, float* out_F, const int* kq_map +) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 
blocks(DIVUP(m * h, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + dot_product_forward_kernel<<>>( + m, h, kkk, c, query, pos, out_F, kq_map + ); +} + +void dot_product_backward_launcher( + int m, int h, int kkk, int c, const float* query, const float* pos, const int* kq_map, + float* grad_query, float* grad_pos, const float* grad_out_F +) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(DIVUP(m * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + dot_product_backward_kernel<<>>( + m, h, kkk, c, query, pos, kq_map, + grad_query, grad_pos, grad_out_F + ); +} \ No newline at end of file diff --git a/cosense3d/ops/src/dot_product/dot_product_kernel.h b/cosense3d/ops/src/dot_product/dot_product_kernel.h new file mode 100644 index 00000000..f01119d0 --- /dev/null +++ b/cosense3d/ops/src/dot_product/dot_product_kernel.h @@ -0,0 +1,33 @@ +#pragma once +#ifndef _dot_product_KERNEL +#define _dot_product_KERNEL +#include +#include +#include + +#define AT at::Tensor + +void dot_product_forward( + int m, int h, int kkk, int c, AT query_tensor, AT pos_tensor, AT out_F_tensor, AT kq_map_tensor + ); +void dot_product_backward( + int m, int h, int kkk, int c, AT query_tensor, AT pos_tensor, AT kq_map_tensor, + AT grad_query_tensor, AT grad_pos_tensor, AT grad_out_F_tensor + ); + +#ifdef __cplusplus +extern "C" { +#endif + +void dot_product_forward_launcher( + int m, int h, int kkk, int c, const float* query, const float* pos, float* out_F, const int* kq_map + ); +void dot_product_backward_launcher( + int m, int h, int kkk, int c, const float* query, const float* pos, const int* kq_map, + float* grad_query, float* grad_pos, const float* grad_out_F + ); + +#ifdef __cplusplus +} +#endif +#endif \ No newline at end of file diff --git a/cosense3d/ops/src/index_pooling/index_pooling.cpp b/cosense3d/ops/src/index_pooling/index_pooling.cpp new file mode 100644 index 00000000..5bcb23fc --- /dev/null +++ b/cosense3d/ops/src/index_pooling/index_pooling.cpp @@ -0,0 +1,37 @@ +/* +Written by Yunshuang Yuan +*/ +#include +#include +#include +#include +#include "index_pooling_kernel.h" + +void index_pooling_forward( + int m, int c, AT x_tensor, AT c_indices_tensor, AT out_tensor, AT out_indices_tensor + ) +{ + const float* x = x_tensor.data_ptr(); + const int* c_indices = c_indices_tensor.data_ptr(); + float* out = out_tensor.data_ptr(); + const int* out_indices = out_indices_tensor.data_ptr(); + + index_pooling_forward_launcher( + m, c, x, c_indices, out, out_indices + ); +} + +void index_pooling_backward( + int m, int c, AT c_indices_tensor, AT out_indices_tensor, AT grad_x_tensor, AT grad_out_tensor + ) +{ + const int* c_indices = c_indices_tensor.data_ptr(); + const int* out_indices = out_indices_tensor.data_ptr(); + + float* grad_x = grad_x_tensor.data_ptr(); + const float* grad_out = grad_out_tensor.data_ptr(); + + index_pooling_backward_launcher( + m, c, c_indices, out_indices, grad_x, grad_out + ); +} \ No newline at end of file diff --git a/cosense3d/ops/src/index_pooling/index_pooling_kernel.cu b/cosense3d/ops/src/index_pooling/index_pooling_kernel.cu new file mode 100644 index 00000000..5cecfa50 --- /dev/null +++ b/cosense3d/ops/src/index_pooling/index_pooling_kernel.cu @@ -0,0 +1,68 @@ +/* +Written by Yunshuang Yuan +*/ +#include "../cuda_utils.h" +#include "index_pooling_kernel.h" + + +__global__ void index_pooling_forward_kernel( + int m, int c, const float* x, const int* c_indices, float* out, const int* out_indices +) +{ + // m: # of total mappings + // c: # 
of channels + + int map_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (map_idx >= m) return; + + int c_idx = c_indices[map_idx]; + int out_idx_ = out_indices[map_idx]; + int out_idx = out_idx_ * c + c_idx; + +// out[out_idx] += x[map_idx]; + atomicAdd(out + out_idx, x[map_idx]); // atomic operation can avoid race condition + +// if (map_idx>6421 && map_idx<6422+250) {printf("out_after: %f, x: %f, c_idx: %d, out_idx_: %d, out_idx: %d, map_idx: %d\n", +// out[out_idx], x[map_idx], c_idx, out_idx_, out_idx, map_idx);} +} + +__global__ void index_pooling_backward_kernel( + int m, int c, const int* c_indices, const int* out_indices, float* grad_x, const float* grad_out +) +{ + // m: # of total mappings + // c: # of channels + + int map_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (map_idx >= m) return; + + int c_idx = c_indices[map_idx]; + int out_idx_ = out_indices[map_idx]; + int out_idx = out_idx_ * c + c_idx; +// if (map_idx < 10) {printf("grad_out:%d, %f, grad_x:%d, %f, c_idx: %d, out_idx_: %d, map_idx: %d\n", +// out_idx, grad_out[out_idx], map_idx, grad_x[map_idx], c_idx, out_idx_, map_idx);} + atomicAdd(grad_x + map_idx, grad_out[out_idx]); +// grad_x[map_idx] += grad_out[out_idx]; +} + +void index_pooling_forward_launcher( + int m, int c, const float* x, const int* c_indices, float* out, const int* out_indices +) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + index_pooling_forward_kernel<<>>( + m, c, x, c_indices, out, out_indices + ); +} + +void index_pooling_backward_launcher( + int m, int c, const int* c_indices, const int* out_indices, float* grad_x, const float* grad_out +) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + index_pooling_backward_kernel<<>>( + m, c, c_indices, out_indices, grad_x, grad_out + ); +} diff --git a/cosense3d/ops/src/index_pooling/index_pooling_kernel.h b/cosense3d/ops/src/index_pooling/index_pooling_kernel.h new file mode 100644 index 00000000..2baf81df --- /dev/null +++ b/cosense3d/ops/src/index_pooling/index_pooling_kernel.h @@ -0,0 +1,34 @@ +/* +Written by Yunshuang Yuan +*/ +#pragma once +#ifndef _index_pooling_KERNEL +#define _index_pooling_KERNEL +#include +#include +#include + +#define AT at::Tensor + +void index_pooling_forward( + int m, int c, AT x_tensor, AT c_indices_tensor, AT out_tensor, AT out_indices_tensor + ); +void index_pooling_backward( + int m, int c, AT c_indices_tensor, AT out_indices_tensor, AT grad_x_tensor, AT grad_out_tensor + ); + +#ifdef __cplusplus +extern "C" { +#endif + +void index_pooling_forward_launcher( + int m, int c, const float* x, const int* c_indices, float* out, const int* out_indices + ); +void index_pooling_backward_launcher( + int m, int c, const int* c_indices, const int* out_indices, float* grad_x, const float* grad_out + ); + +#ifdef __cplusplus +} +#endif +#endif \ No newline at end of file diff --git a/cosense3d/ops/src/iou_nms/iou3d_cpu.cpp b/cosense3d/ops/src/iou_nms/iou3d_cpu.cpp new file mode 100644 index 00000000..d528ad91 --- /dev/null +++ b/cosense3d/ops/src/iou_nms/iou3d_cpu.cpp @@ -0,0 +1,252 @@ +/* +3D Rotated IoU Calculation (CPU) +Written by Shaoshuai Shi +All Rights Reserved 2020. 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include "iou3d_cpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + +inline float min(float a, float b){ + return a > b ? b : a; +} + +inline float max(float a, float b){ + return a > b ? a : b; +} + +const float EPS = 1e-8; +struct Point { + float x, y; + __device__ Point() {} + __device__ Point(double _x, double _y){ + x = _x, y = _y; + } + + __device__ void set(float _x, float _y){ + x = _x; y = _y; + } + + __device__ Point operator +(const Point &b)const{ + return Point(x + b.x, y + b.y); + } + + __device__ Point operator -(const Point &b)const{ + return Point(x - b.x, y - b.y); + } +}; + +inline float cross(const Point &a, const Point &b){ + return a.x * b.y - a.y * b.x; +} + +inline float cross(const Point &p1, const Point &p2, const Point &p0){ + return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); +} + +inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ + int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && + min(q1.x,q2.x) <= max(p1.x,p2.x) && + min(p1.y,p2.y) <= max(q1.y,q2.y) && + min(q1.y,q2.y) <= max(p1.y,p2.y); + return ret; +} + +inline int check_in_box2d(const float *box, const Point &p){ + //params: (7) [x, y, z, dx, dy, dz, heading] + const float MARGIN = 1e-2; + + float center_x = box[0], center_y = box[1]; + float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box + float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); + float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; + + return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); +} + +inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ + // fast exclusion + if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; + + // check cross standing + float s1 = cross(q0, p1, p0); + float s2 = cross(p1, q1, p0); + float s3 = cross(p0, q1, q0); + float s4 = cross(q1, p1, q0); + + if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; + + // calculate intersection of two lines + float s5 = cross(q1, p1, p0); + if(fabs(s5 - s1) > EPS){ + ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); + ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); + + } + else{ + float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; + float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; + float D = a0 * b1 - a1 * b0; + + ans.x = (b0 * c1 - b1 * c0) / D; + ans.y = (a1 * c0 - a0 * c1) / D; + } + + return 1; +} + +inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){ + float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; + float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; + p.set(new_x, new_y); +} + +inline int point_cmp(const Point &a, const Point &b, const Point ¢er){ + return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); +} + +inline float box_overlap(const float *box_a, const float *box_b){ + // params: 
box_a (7) [x, y, z, dx, dy, dz, heading] + // params: box_b (7) [x, y, z, dx, dy, dz, heading] + +// float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = box_a[4]; +// float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = box_b[4]; + float a_angle = box_a[6], b_angle = box_b[6]; + float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; + float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; + float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; + float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; + float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; + + Point center_a(box_a[0], box_a[1]); + Point center_b(box_b[0], box_b[1]); + + Point box_a_corners[5]; + box_a_corners[0].set(a_x1, a_y1); + box_a_corners[1].set(a_x2, a_y1); + box_a_corners[2].set(a_x2, a_y2); + box_a_corners[3].set(a_x1, a_y2); + + Point box_b_corners[5]; + box_b_corners[0].set(b_x1, b_y1); + box_b_corners[1].set(b_x2, b_y1); + box_b_corners[2].set(b_x2, b_y2); + box_b_corners[3].set(b_x1, b_y2); + + // get oriented corners + float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); + float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); + + for (int k = 0; k < 4; k++){ + rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); + rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); + } + + box_a_corners[4] = box_a_corners[0]; + box_b_corners[4] = box_b_corners[0]; + + // get intersection of lines + Point cross_points[16]; + Point poly_center; + int cnt = 0, flag = 0; + + poly_center.set(0, 0); + for (int i = 0; i < 4; i++){ + for (int j = 0; j < 4; j++){ + flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); + if (flag){ + poly_center = poly_center + cross_points[cnt]; + cnt++; + } + } + } + + // check corners + for (int k = 0; k < 4; k++){ + if (check_in_box2d(box_a, box_b_corners[k])){ + poly_center = poly_center + box_b_corners[k]; + cross_points[cnt] = box_b_corners[k]; + cnt++; + } + if (check_in_box2d(box_b, box_a_corners[k])){ + poly_center = poly_center + box_a_corners[k]; + cross_points[cnt] = box_a_corners[k]; + cnt++; + } + } + + poly_center.x /= cnt; + poly_center.y /= cnt; + + // sort the points of polygon + Point temp; + for (int j = 0; j < cnt - 1; j++){ + for (int i = 0; i < cnt - j - 1; i++){ + if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ + temp = cross_points[i]; + cross_points[i] = cross_points[i + 1]; + cross_points[i + 1] = temp; + } + } + } + + // get the overlap areas + float area = 0; + for (int k = 0; k < cnt - 1; k++){ + area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); + } + + return fabs(area) / 2.0; +} + +inline float iou_bev(const float *box_a, const float *box_b){ + // params: box_a (7) [x, y, z, dx, dy, dz, heading] + // params: box_b (7) [x, y, z, dx, dy, dz, heading] + float sa = box_a[3] * box_a[4]; + float sb = box_b[3] * box_b[4]; + float s_overlap = box_overlap(box_a, box_b); + return s_overlap / fmaxf(sa + sb - s_overlap, EPS); +} + + +int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor){ + // params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading] + // params ans_iou_tensor: (N, M) + + CHECK_CONTIGUOUS(boxes_a_tensor); + 
CHECK_CONTIGUOUS(boxes_b_tensor); + + int num_boxes_a = boxes_a_tensor.size(0); + int num_boxes_b = boxes_b_tensor.size(0); + const float *boxes_a = boxes_a_tensor.data(); + const float *boxes_b = boxes_b_tensor.data(); + float *ans_iou = ans_iou_tensor.data(); + + for (int i = 0; i < num_boxes_a; i++){ + for (int j = 0; j < num_boxes_b; j++){ + ans_iou[i * num_boxes_b + j] = iou_bev(boxes_a + i * 7, boxes_b + j * 7); + } + } + return 1; +} diff --git a/cosense3d/ops/src/iou_nms/iou3d_cpu.h b/cosense3d/ops/src/iou_nms/iou3d_cpu.h new file mode 100644 index 00000000..891c8177 --- /dev/null +++ b/cosense3d/ops/src/iou_nms/iou3d_cpu.h @@ -0,0 +1,11 @@ +#ifndef IOU3D_CPU_H +#define IOU3D_CPU_H + +#include +#include +#include +#include + +int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor); + +#endif \ No newline at end of file diff --git a/cosense3d/ops/src/iou_nms/iou3d_nms.cpp b/cosense3d/ops/src/iou_nms/iou3d_nms.cpp new file mode 100644 index 00000000..d41da8ad --- /dev/null +++ b/cosense3d/ops/src/iou_nms/iou3d_nms.cpp @@ -0,0 +1,188 @@ +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + +#include +#include +#include +#include +#include +#include "iou3d_nms.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) + +#define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); } +inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) +{ + if (code != cudaSuccess) + { + fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); + if (abort) exit(code); + } +} + +const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; + + +void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap); +void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou); +void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh); +void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh); + + +int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + // params ans_overlap: (N, M) + + CHECK_INPUT(boxes_a); + CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_overlap); + + int num_a = boxes_a.size(0); + int num_b = boxes_b.size(0); + + const float * boxes_a_data = boxes_a.data(); + const float * boxes_b_data = boxes_b.data(); + float * ans_overlap_data = ans_overlap.data(); + + boxesoverlapLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_overlap_data); + + return 1; +} + +int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + // params ans_overlap: (N, M) + CHECK_INPUT(boxes_a); + 
CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_iou); + + int num_a = boxes_a.size(0); + int num_b = boxes_b.size(0); + + const float * boxes_a_data = boxes_a.data(); + const float * boxes_b_data = boxes_b.data(); + float * ans_iou_data = ans_iou.data(); + + boxesioubevLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_iou_data); + + return 1; +} + +int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh){ + // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + // params keep: (N) + CHECK_INPUT(boxes); + CHECK_CONTIGUOUS(keep); + + int boxes_num = boxes.size(0); + const float * boxes_data = boxes.data(); + long * keep_data = keep.data(); + + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + + unsigned long long *mask_data = NULL; + CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long))); + nmsLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); + + // unsigned long long mask_cpu[boxes_num * col_blocks]; + // unsigned long long *mask_cpu = new unsigned long long [boxes_num * col_blocks]; + std::vector mask_cpu(boxes_num * col_blocks); + +// printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); + CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * sizeof(unsigned long long), + cudaMemcpyDeviceToHost)); + + cudaFree(mask_data); + + unsigned long long remv_cpu[col_blocks]; + memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); + + int num_to_keep = 0; + + for (int i = 0; i < boxes_num; i++){ + int nblock = i / THREADS_PER_BLOCK_NMS; + int inblock = i % THREADS_PER_BLOCK_NMS; + + if (!(remv_cpu[nblock] & (1ULL << inblock))){ + keep_data[num_to_keep++] = i; + unsigned long long *p = &mask_cpu[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++){ + remv_cpu[j] |= p[j]; + } + } + } + if ( cudaSuccess != cudaGetLastError() ) printf( "Error!\n" ); + + return num_to_keep; +} + + +int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh){ + // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + // params keep: (N) + + CHECK_INPUT(boxes); + CHECK_CONTIGUOUS(keep); + + int boxes_num = boxes.size(0); + const float * boxes_data = boxes.data(); + long * keep_data = keep.data(); + + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + + unsigned long long *mask_data = NULL; + CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long))); + nmsNormalLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); + + // unsigned long long mask_cpu[boxes_num * col_blocks]; + // unsigned long long *mask_cpu = new unsigned long long [boxes_num * col_blocks]; + std::vector mask_cpu(boxes_num * col_blocks); + +// printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); + CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * sizeof(unsigned long long), + cudaMemcpyDeviceToHost)); + + cudaFree(mask_data); + + unsigned long long remv_cpu[col_blocks]; + memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); + + int num_to_keep = 0; + + for (int i = 0; i < boxes_num; i++){ + int nblock = i / THREADS_PER_BLOCK_NMS; + int inblock = i % THREADS_PER_BLOCK_NMS; + + if (!(remv_cpu[nblock] & (1ULL << inblock))){ + keep_data[num_to_keep++] = i; + unsigned long long *p = &mask_cpu[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++){ + remv_cpu[j] |= p[j]; + } + } + } + if ( cudaSuccess != cudaGetLastError() ) printf( "Error!\n" ); + + return num_to_keep; +} + + diff --git 
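The keep-loop in nms_gpu and nms_normal_gpu above can be hard to parse at first: every box owns col_blocks 64-bit words, and bit j of word b means that box 64*b + j overlaps it above the threshold. The standalone C++ sketch below reproduces only that host-side reduction with a hand-written mask in place of the CUDA kernel output; the boxes, mask bits, and counts are toy values chosen for illustration and are not taken from the repository.

// Toy reproduction of the host-side suppression loop used by nms_gpu above.
// The mask here is hand-crafted instead of coming from nms_kernel.
#include <cstdio>
#include <vector>

int main() {
    const int THREADS_PER_BLOCK_NMS = 64;          // bits per unsigned long long
    const int boxes_num = 5;                       // toy example: score-sorted boxes 0..4
    const int col_blocks = (boxes_num + THREADS_PER_BLOCK_NMS - 1) / THREADS_PER_BLOCK_NMS;

    // mask[i * col_blocks + b]: bit j set => box (64*b + j) overlaps box i above threshold.
    // Here box 0 suppresses boxes 1 and 3, box 2 suppresses box 4.
    std::vector<unsigned long long> mask(boxes_num * col_blocks, 0ULL);
    mask[0] = (1ULL << 1) | (1ULL << 3);
    mask[2] = (1ULL << 4);

    std::vector<unsigned long long> remv(col_blocks, 0ULL);
    std::vector<long> keep;
    for (int i = 0; i < boxes_num; i++) {
        int nblock = i / THREADS_PER_BLOCK_NMS;
        int inblock = i % THREADS_PER_BLOCK_NMS;
        if (!(remv[nblock] & (1ULL << inblock))) {   // box i not suppressed yet -> keep it
            keep.push_back(i);
            for (int j = nblock; j < col_blocks; j++)
                remv[j] |= mask[i * col_blocks + j]; // suppress everything box i overlaps
        }
    }
    for (long k : keep) std::printf("keep %ld\n", k); // prints 0 and 2; boxes 1, 3, 4 are suppressed
    return 0;
}

Because the suppression state lives in a bitmask per 64-box block, the GPU side only has to compute pairwise IoUs, and the sequential greedy pass stays cheap on the CPU.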
a/cosense3d/ops/src/iou_nms/iou3d_nms.h b/cosense3d/ops/src/iou_nms/iou3d_nms.h new file mode 100644 index 00000000..86cd473e --- /dev/null +++ b/cosense3d/ops/src/iou_nms/iou3d_nms.h @@ -0,0 +1,14 @@ +#ifndef IOU3D_NMS_H +#define IOU3D_NMS_H + +#include +#include +#include +#include + +int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); +int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou); +int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); +int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); + +#endif \ No newline at end of file diff --git a/cosense3d/ops/src/iou_nms/iou3d_nms_kernel.cu b/cosense3d/ops/src/iou_nms/iou3d_nms_kernel.cu new file mode 100644 index 00000000..e5e305cd --- /dev/null +++ b/cosense3d/ops/src/iou_nms/iou3d_nms_kernel.cu @@ -0,0 +1,414 @@ +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#define THREADS_PER_BLOCK 16 +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +// #define DEBUG +const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; +const float EPS = 1e-8; +struct Point { + float x, y; + __device__ Point() {} + __device__ Point(double _x, double _y){ + x = _x, y = _y; + } + + __device__ void set(float _x, float _y){ + x = _x; y = _y; + } + + __device__ Point operator +(const Point &b)const{ + return Point(x + b.x, y + b.y); + } + + __device__ Point operator -(const Point &b)const{ + return Point(x - b.x, y - b.y); + } +}; + +__device__ inline float cross(const Point &a, const Point &b){ + return a.x * b.y - a.y * b.x; +} + +__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ + return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); +} + +__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ + int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && + min(q1.x,q2.x) <= max(p1.x,p2.x) && + min(p1.y,p2.y) <= max(q1.y,q2.y) && + min(q1.y,q2.y) <= max(p1.y,p2.y); + return ret; +} + +__device__ inline int check_in_box2d(const float *box, const Point &p){ + //params: (7) [x, y, z, dx, dy, dz, heading] + const float MARGIN = 1e-2; + + float center_x = box[0], center_y = box[1]; + float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box + float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); + float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; + + return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); +} + +__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ + // fast exclusion + if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; + + // check cross standing + float s1 = cross(q0, p1, p0); + float s2 = cross(p1, q1, p0); + float s3 = cross(p0, q1, q0); + float s4 = cross(q1, p1, q0); + + if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; + + // calculate intersection of two lines + float s5 = cross(q1, p1, p0); + if(fabs(s5 - s1) > EPS){ + ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); + ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); + + } + else{ + float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; + float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; + float D = a0 * b1 - a1 * b0; + + ans.x = (b0 * c1 - b1 * c0) / D; + ans.y = 
(a1 * c0 - a0 * c1) / D; + } + + return 1; +} + +__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){ + float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; + float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; + p.set(new_x, new_y); +} + +__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){ + return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); +} + +__device__ inline float box_overlap(const float *box_a, const float *box_b){ + // params box_a: [x, y, z, dx, dy, dz, heading] + // params box_b: [x, y, z, dx, dy, dz, heading] + + float a_angle = box_a[6], b_angle = box_b[6]; + float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; + float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; + float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; + float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; + float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; + + Point center_a(box_a[0], box_a[1]); + Point center_b(box_b[0], box_b[1]); + +#ifdef DEBUG + printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, + b_x1, b_y1, b_x2, b_y2, b_angle); + printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); +#endif + + Point box_a_corners[5]; + box_a_corners[0].set(a_x1, a_y1); + box_a_corners[1].set(a_x2, a_y1); + box_a_corners[2].set(a_x2, a_y2); + box_a_corners[3].set(a_x1, a_y2); + + Point box_b_corners[5]; + box_b_corners[0].set(b_x1, b_y1); + box_b_corners[1].set(b_x2, b_y1); + box_b_corners[2].set(b_x2, b_y2); + box_b_corners[3].set(b_x1, b_y2); + + // get oriented corners + float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); + float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); + + for (int k = 0; k < 4; k++){ +#ifdef DEBUG + printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); +#endif + rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); + rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); +#ifdef DEBUG + printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); +#endif + } + + box_a_corners[4] = box_a_corners[0]; + box_b_corners[4] = box_b_corners[0]; + + // get intersection of lines + Point cross_points[16]; + Point poly_center; + int cnt = 0, flag = 0; + + poly_center.set(0, 0); + for (int i = 0; i < 4; i++){ + for (int j = 0; j < 4; j++){ + flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); + if (flag){ + poly_center = poly_center + cross_points[cnt]; + cnt++; +#ifdef DEBUG + printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n", + cross_points[cnt - 1].x, cross_points[cnt - 1].y, + box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, + box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); +#endif + } + } + } + + // check corners + for (int k = 0; k < 4; k++){ + if (check_in_box2d(box_a, box_b_corners[k])){ + poly_center = poly_center + box_b_corners[k]; + cross_points[cnt] = box_b_corners[k]; 
+ cnt++; +#ifdef DEBUG + printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); +#endif + } + if (check_in_box2d(box_b, box_a_corners[k])){ + poly_center = poly_center + box_a_corners[k]; + cross_points[cnt] = box_a_corners[k]; + cnt++; +#ifdef DEBUG + printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); +#endif + } + } + + poly_center.x /= cnt; + poly_center.y /= cnt; + + // sort the points of polygon + Point temp; + for (int j = 0; j < cnt - 1; j++){ + for (int i = 0; i < cnt - j - 1; i++){ + if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ + temp = cross_points[i]; + cross_points[i] = cross_points[i + 1]; + cross_points[i + 1] = temp; + } + } + } + +#ifdef DEBUG + printf("cnt=%d\n", cnt); + for (int i = 0; i < cnt; i++){ + printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); + } +#endif + + // get the overlap areas + float area = 0; + for (int k = 0; k < cnt - 1; k++){ + area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); + } + + return fabs(area) / 2.0; +} + +__device__ inline float iou_bev(const float *box_a, const float *box_b){ + // params box_a: [x, y, z, dx, dy, dz, heading] + // params box_b: [x, y, z, dx, dy, dz, heading] + float sa = box_a[3] * box_a[4]; + float sb = box_b[3] * box_b[4]; + float s_overlap = box_overlap(box_a, box_b); + return s_overlap / fmaxf(sa + sb - s_overlap, EPS); +} + +__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b){ + return; + } + const float * cur_box_a = boxes_a + a_idx * 7; + const float * cur_box_b = boxes_b + b_idx * 7; + float s_overlap = box_overlap(cur_box_a, cur_box_b); + ans_overlap[a_idx * num_b + b_idx] = s_overlap; +} + +__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b){ + return; + } + + const float * cur_box_a = boxes_a + a_idx * 7; + const float * cur_box_b = boxes_b + b_idx * 7; + float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); + ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; +} + +__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, + const float *boxes, unsigned long long *mask){ + //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] + //params: mask (N, N/THREADS_PER_BLOCK_NMS) + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + + __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; + + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; + 
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; + block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; + block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; + block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; + block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; + block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; + const float *cur_box = boxes + cur_box_idx * 7; + + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + mask[cur_box_idx * col_blocks + col_start] = t; + } +} + + +__device__ inline float iou_normal(float const * const a, float const * const b) { + //params: a: [x, y, z, dx, dy, dz, heading] + //params: b: [x, y, z, dx, dy, dz, heading] + + float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2); + float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2); + float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); + float interS = width * height; + float Sa = a[3] * a[4]; + float Sb = b[3] * b[4]; + return interS / fmaxf(Sa + Sb - interS, EPS); +} + + +__global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, + const float *boxes, unsigned long long *mask){ + //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] + //params: mask (N, N/THREADS_PER_BLOCK_NMS) + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + + __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; + + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; + block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; + block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; + block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; + block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; + block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; + block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; + const float *cur_box = boxes + cur_box_idx * 7; + + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (iou_normal(cur_box, block_boxes + i * 7) > 
nms_overlap_thresh){ + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + mask[cur_box_idx * col_blocks + col_start] = t; + } +} + + + + + +void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ + + dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); + + boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap); +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + +void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ + + dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); + + boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_iou); +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + + +void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){ + dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), + DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); + dim3 threads(THREADS_PER_BLOCK_NMS); + nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask); +} + + +void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){ + dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), + DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); + dim3 threads(THREADS_PER_BLOCK_NMS); + nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask); +} diff --git a/cosense3d/ops/src/pointnet2_stack/ball_query.cpp b/cosense3d/ops/src/pointnet2_stack/ball_query.cpp new file mode 100755 index 00000000..6722ecf0 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/ball_query.cpp @@ -0,0 +1,48 @@ +/* +Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020.
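As a sanity check on the [x, y, z, dx, dy, dz, heading] box encoding used by the IoU and NMS kernels above, the BEV IoU of two axis-aligned boxes (heading = 0) reduces to the plain rectangle-overlap formula, which is easy to verify by hand. The standalone C++ sketch below is illustrative only: iou_bev_axis_aligned is a hypothetical helper and the box values are made up; it mirrors what iou_bev computes in this special case, not the rotated polygon-clipping path.

// Minimal BEV-IoU sanity check for axis-aligned boxes (heading = 0).
// Box layout matches the kernels above: [x, y, z, dx, dy, dz, heading].
#include <algorithm>
#include <cstdio>

float iou_bev_axis_aligned(const float *a, const float *b) {
    // BEV footprints: centers (x, y) with extents (dx, dy).
    float left   = std::max(a[0] - a[3] / 2, b[0] - b[3] / 2);
    float right  = std::min(a[0] + a[3] / 2, b[0] + b[3] / 2);
    float bottom = std::max(a[1] - a[4] / 2, b[1] - b[4] / 2);
    float top    = std::min(a[1] + a[4] / 2, b[1] + b[4] / 2);
    float inter  = std::max(right - left, 0.f) * std::max(top - bottom, 0.f);
    float sa = a[3] * a[4], sb = b[3] * b[4];
    return inter / std::max(sa + sb - inter, 1e-8f);
}

int main() {
    // Two 4 m x 2 m boxes shifted by 2 m along x: intersection 2*2 = 4, union 8 + 8 - 4 = 12.
    float box_a[7] = {0.f, 0.f, 0.f, 4.f, 2.f, 1.5f, 0.f};
    float box_b[7] = {2.f, 0.f, 0.f, 4.f, 2.f, 1.5f, 0.f};
    std::printf("BEV IoU = %.4f (expected 0.3333)\n", iou_bev_axis_aligned(box_a, box_b));
    return 0;
}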
+*/ + + +#include +#include +#include +#include +#include +#include +#include +#include "ball_query_gpu.h" + + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + +int ball_query_wrapper_stack(int B, int M, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, + at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor) { + CHECK_INPUT(new_xyz_tensor); + CHECK_INPUT(xyz_tensor); + CHECK_INPUT(new_xyz_batch_cnt_tensor); + CHECK_INPUT(xyz_batch_cnt_tensor); + + const float *new_xyz = new_xyz_tensor.data(); + const float *xyz = xyz_tensor.data(); + const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data(); + const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data(); + int *idx = idx_tensor.data(); + + ball_query_kernel_launcher_stack(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); + return 1; +} diff --git a/cosense3d/ops/src/pointnet2_stack/ball_query_gpu.cu b/cosense3d/ops/src/pointnet2_stack/ball_query_gpu.cu new file mode 100755 index 00000000..87bd8223 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/ball_query_gpu.cu @@ -0,0 +1,90 @@ +/* +Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include + +#include "ball_query_gpu.h" +#include "../cuda_utils.h" + + +__global__ void ball_query_kernel_stack(int B, int M, float radius, int nsample, \ + const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx) { + // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features + // :param xyz_batch_cnt: (batch_size), [N1, N2, ...] + // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query + // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] 
+ // output: + // idx: (M, nsample) + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= M) return; + + int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0]; + for (int k = 1; k < B; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += new_xyz_batch_cnt[k]; + bs_idx = k; + } + + int xyz_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k]; + // for (int k = 0; k < bs_idx; k++) new_xyz_batch_start_idx += new_xyz_batch_cnt[k]; + + new_xyz += pt_idx * 3; + xyz += xyz_batch_start_idx * 3; + idx += pt_idx * nsample; + + float radius2 = radius * radius; + float new_x = new_xyz[0]; + float new_y = new_xyz[1]; + float new_z = new_xyz[2]; + int n = xyz_batch_cnt[bs_idx]; + + int cnt = 0; + for (int k = 0; k < n; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); + if (d2 < radius2){ + if (cnt == 0){ + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + } + if (cnt == 0) idx[0] = -1; +} + + +void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample, + const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx){ + // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features + // :param xyz_batch_cnt: (batch_size), [N1, N2, ...] + // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query + // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] + // output: + // idx: (M, nsample) + + cudaError_t err; + + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + ball_query_kernel_stack<<>>(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/cosense3d/ops/src/pointnet2_stack/ball_query_gpu.h b/cosense3d/ops/src/pointnet2_stack/ball_query_gpu.h new file mode 100755 index 00000000..c74f1201 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/ball_query_gpu.h @@ -0,0 +1,25 @@ +/* +Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#ifndef _STACK_BALL_QUERY_GPU_H +#define _STACK_BALL_QUERY_GPU_H + +#include +#include +#include +#include + +int ball_query_wrapper_stack(int B, int M, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, + at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor); + + +void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample, + const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx); + + +#endif diff --git a/cosense3d/ops/src/pointnet2_stack/group_points.cpp b/cosense3d/ops/src/pointnet2_stack/group_points.cpp new file mode 100755 index 00000000..aa0d8553 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/group_points.cpp @@ -0,0 +1,71 @@ +/* +Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
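The "stacked-batch" convention used by all of these PointNet++ ops deserves a short illustration: point clouds of different sizes are concatenated along the first dimension, and the *_batch_cnt arrays record how many rows belong to each sample, so a flat index is mapped back to its sample by walking the counts exactly as ball_query_kernel_stack does. The plain C++ sketch below uses toy counts (4/2/5 queries and 100/80/120 support points are invented for illustration) and no CUDA.

// CPU illustration of the stacked-batch bookkeeping used by ball_query_kernel_stack:
// given a flat query index, find its sample id and the start offset of that
// sample's support points. Counts below are toy values.
#include <cstdio>

int main() {
    const int B = 3;
    int new_xyz_batch_cnt[B] = {4, 2, 5};      // queries per sample (M = 11 stacked rows)
    int xyz_batch_cnt[B]     = {100, 80, 120}; // support points per sample (N = 300 stacked rows)

    int pt_idx = 7;  // flat index into the stacked queries

    // Same scan as the CUDA kernel: accumulate counts until pt_idx falls inside.
    int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
    for (int k = 1; k < B; k++) {
        if (pt_idx < pt_cnt) break;
        pt_cnt += new_xyz_batch_cnt[k];
        bs_idx = k;
    }

    // Offset of this sample's support points inside the stacked xyz array.
    int xyz_batch_start_idx = 0;
    for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];

    // Query 7 lives in sample 2 (queries 0-3 -> sample 0, 4-5 -> sample 1, 6-10 -> sample 2),
    // whose support points start at row 180 of the stacked xyz tensor.
    std::printf("query %d -> sample %d, xyz offset %d\n", pt_idx, bs_idx, xyz_batch_start_idx);
    return 0;
}

The same prefix-sum walk appears again in the grouping and interpolation kernels below, which is what lets every op run on variable-sized point clouds without padding.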
+*/ + + +#include +#include +#include +#include +#include +#include +#include +#include "group_points_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, + at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor) { + + CHECK_INPUT(grad_out_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(idx_batch_cnt_tensor); + CHECK_INPUT(features_batch_cnt_tensor); + CHECK_INPUT(grad_features_tensor); + + const float *grad_out = grad_out_tensor.data(); + const int *idx = idx_tensor.data(); + const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); + const int *features_batch_cnt = features_batch_cnt_tensor.data(); + float *grad_features = grad_features_tensor.data(); + + group_points_grad_kernel_launcher_stack(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); + return 1; +} + + +int group_points_wrapper_stack(int B, int M, int C, int nsample, + at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, + at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor) { + + CHECK_INPUT(features_tensor); + CHECK_INPUT(features_batch_cnt_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(idx_batch_cnt_tensor); + CHECK_INPUT(out_tensor); + + const float *features = features_tensor.data(); + const int *idx = idx_tensor.data(); + const int *features_batch_cnt = features_batch_cnt_tensor.data(); + const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); + float *out = out_tensor.data(); + + group_points_kernel_launcher_stack(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); + return 1; +} \ No newline at end of file diff --git a/cosense3d/ops/src/pointnet2_stack/group_points_gpu.cu b/cosense3d/ops/src/pointnet2_stack/group_points_gpu.cu new file mode 100755 index 00000000..29a1de91 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/group_points_gpu.cu @@ -0,0 +1,125 @@ +/* +Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include + +#include "../cuda_utils.h" +#include "group_points_gpu.h" + + +__global__ void group_points_grad_kernel_stack(int B, int M, int C, int N, int nsample, + const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { + // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] 
tensor containing the indicies of features to group with + // :return: + // grad_features: (N1 + N2 ..., C) gradient of the features + int index = blockIdx.x * blockDim.x + threadIdx.x; + int sample_idx = index % nsample; + int C_idx = (index / nsample) % C; + int pt_idx = (index / nsample / C); + + if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; + + int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; + for (int k = 1; k < B; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += idx_batch_cnt[k]; + bs_idx = k; + } + + int features_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; + + grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx; + idx += pt_idx * nsample + sample_idx; + grad_features += (features_batch_start_idx + idx[0]) * C + C_idx; + + atomicAdd(grad_features, grad_out[0]); +} + +void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, + const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { + // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with + // :return: + // grad_features: (N1 + N2 ..., C) gradient of the features + + cudaError_t err; + // dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_grad_kernel_stack<<>>(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void group_points_kernel_stack(int B, int M, int C, int nsample, + const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { + // :param features: (N1 + N2 ..., C) tensor of features to group + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] 
tensor containing the indicies of features to group with + // :return: + // output: (M1 + M2, C, nsample) tensor + int index = blockIdx.x * blockDim.x + threadIdx.x; + int sample_idx = index % nsample; + int C_idx = (index / nsample) % C; + int pt_idx = (index / nsample / C); + + if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; + + int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; + for (int k = 1; k < B; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += idx_batch_cnt[k]; + bs_idx = k; + } + + int features_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; + features += features_batch_start_idx * C; + + idx += pt_idx * nsample + sample_idx; + int in_idx = idx[0] * C + C_idx; + int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx; + + out[out_idx] = features[in_idx]; +} + + +void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, + const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { + // :param features: (N1 + N2 ..., C) tensor of features to group + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with + // :return: + // output: (M1 + M2, C, nsample) tensor + + cudaError_t err; + dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_kernel_stack<<>>(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/cosense3d/ops/src/pointnet2_stack/group_points_gpu.h b/cosense3d/ops/src/pointnet2_stack/group_points_gpu.h new file mode 100755 index 00000000..4a266216 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/group_points_gpu.h @@ -0,0 +1,31 @@ +/* +Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
+*/ + + +#ifndef _STACK_GROUP_POINTS_GPU_H +#define _STACK_GROUP_POINTS_GPU_H + +#include +#include +#include +#include + + +int group_points_wrapper_stack(int B, int M, int C, int nsample, + at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, + at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor); + +void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, + const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out); + +int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, + at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor); + +void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, + const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features); + +#endif diff --git a/cosense3d/ops/src/pointnet2_stack/interpolate.cpp b/cosense3d/ops/src/pointnet2_stack/interpolate.cpp new file mode 100755 index 00000000..8dfc1869 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/interpolate.cpp @@ -0,0 +1,109 @@ +/* +Stacked-batch-data version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include "interpolate_gpu.h" + + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +void three_nn_wrapper_stack(at::Tensor unknown_tensor, + at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor, + at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor){ + // unknown: (N1 + N2 ..., 3) + // unknown_batch_cnt: (batch_size), [N1, N2, ...] + // known: (M1 + M2 ..., 3) + // known_batch_cnt: (batch_size), [M1, M2, ...] 
+ // Return: + // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors + // idx: (N1 + N2 ..., 3) index of the three nearest neighbors + CHECK_INPUT(unknown_tensor); + CHECK_INPUT(unknown_batch_cnt_tensor); + CHECK_INPUT(known_tensor); + CHECK_INPUT(known_batch_cnt_tensor); + CHECK_INPUT(dist2_tensor); + CHECK_INPUT(idx_tensor); + + int batch_size = unknown_batch_cnt_tensor.size(0); + int N = unknown_tensor.size(0); + int M = known_tensor.size(0); + const float *unknown = unknown_tensor.data(); + const int *unknown_batch_cnt = unknown_batch_cnt_tensor.data(); + const float *known = known_tensor.data(); + const int *known_batch_cnt = known_batch_cnt_tensor.data(); + float *dist2 = dist2_tensor.data(); + int *idx = idx_tensor.data(); + + three_nn_kernel_launcher_stack(batch_size, N, M, unknown, unknown_batch_cnt, known, known_batch_cnt, dist2, idx); +} + + +void three_interpolate_wrapper_stack(at::Tensor features_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor) { + // features_tensor: (M1 + M2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // out_tensor: (N1 + N2 ..., C) + CHECK_INPUT(features_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(weight_tensor); + CHECK_INPUT(out_tensor); + + int N = out_tensor.size(0); + int channels = features_tensor.size(1); + const float *features = features_tensor.data(); + const float *weight = weight_tensor.data(); + const int *idx = idx_tensor.data(); + float *out = out_tensor.data(); + + three_interpolate_kernel_launcher_stack(N, channels, features, idx, weight, out); +} + + +void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor, + at::Tensor weight_tensor, at::Tensor grad_features_tensor) { + // grad_out_tensor: (N1 + N2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // grad_features_tensor: (M1 + M2 ..., C) + CHECK_INPUT(grad_out_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(weight_tensor); + CHECK_INPUT(grad_features_tensor); + + int N = grad_out_tensor.size(0); + int channels = grad_out_tensor.size(1); + const float *grad_out = grad_out_tensor.data(); + const float *weight = weight_tensor.data(); + const int *idx = idx_tensor.data(); + float *grad_features = grad_features_tensor.data(); + + // printf("N=%d, channels=%d\n", N, channels); + three_interpolate_grad_kernel_launcher_stack(N, channels, grad_out, idx, weight, grad_features); +} \ No newline at end of file diff --git a/cosense3d/ops/src/pointnet2_stack/interpolate_gpu.cu b/cosense3d/ops/src/pointnet2_stack/interpolate_gpu.cu new file mode 100755 index 00000000..21e2533b --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/interpolate_gpu.cu @@ -0,0 +1,195 @@ +/* +Stacked-batch-data version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include + +#include "../cuda_utils.h" +#include "interpolate_gpu.h" + + +__global__ void three_nn_kernel_stack(int batch_size, int N, int M, const float *unknown, + const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, + float *dist2, int *idx) { + // unknown: (N1 + N2 ..., 3) + // unknown_batch_cnt: (batch_size), [N1, N2, ...] + // known: (M1 + M2 ..., 3) + // known_batch_cnt: (batch_size), [M1, M2, ...] 
+ // Return: + // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors + // idx: (N1 + N2 ..., 3) index of the three nearest neighbors + + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= N) return; + + int bs_idx = 0, pt_cnt = unknown_batch_cnt[0]; + for (int k = 1; k < batch_size; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += unknown_batch_cnt[k]; + bs_idx = k; + } + + int cur_num_known_points = known_batch_cnt[bs_idx]; + + int known_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) known_batch_start_idx += known_batch_cnt[k]; + + known += known_batch_start_idx * 3; + unknown += pt_idx * 3; + dist2 += pt_idx * 3; + idx += pt_idx * 3; + + float ux = unknown[0]; + float uy = unknown[1]; + float uz = unknown[2]; + + double best1 = 1e40, best2 = 1e40, best3 = 1e40; + int besti1 = 0, besti2 = 0, besti3 = 0; + for (int k = 0; k < cur_num_known_points; ++k) { + float x = known[k * 3 + 0]; + float y = known[k * 3 + 1]; + float z = known[k * 3 + 2]; + float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); + if (d < best1) { + best3 = best2; besti3 = besti2; + best2 = best1; besti2 = besti1; + best1 = d; besti1 = k; + } + else if (d < best2) { + best3 = best2; besti3 = besti2; + best2 = d; besti2 = k; + } + else if (d < best3) { + best3 = d; besti3 = k; + } + } + dist2[0] = best1; dist2[1] = best2; dist2[2] = best3; + idx[0] = besti1 + known_batch_start_idx; + idx[1] = besti2 + known_batch_start_idx; + idx[2] = besti3 + known_batch_start_idx; +} + + +void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, + const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, + float *dist2, int *idx) { + // unknown: (N1 + N2 ..., 3) + // unknown_batch_cnt: (batch_size), [N1, N2, ...] + // known: (M1 + M2 ..., 3) + // known_batch_cnt: (batch_size), [M1, M2, ...] 
+ // Return: + // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors + // idx: (N1 + N2 ..., 3) index of the three nearest neighbors + + cudaError_t err; + dim3 blocks(DIVUP(N, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + three_nn_kernel_stack<<>>( + batch_size, N, M, unknown, unknown_batch_cnt, + known, known_batch_cnt, dist2, idx + ); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + + +__global__ void three_interpolate_kernel_stack(int N, int channels, const float *features, + const int *idx, const float *weight, float *out) { + // features: (M1 + M2 ..., C) + // idx: [N1 + N2 ..., 3] + // weight: [N1 + N2 ..., 3] + // Return: + // out: (N1 + N2 ..., C) + + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= N || c_idx >= channels) return; + + weight += pt_idx * 3; + idx += pt_idx * 3; + out += pt_idx * channels + c_idx; + + out[0] = weight[0] * features[idx[0] * channels + c_idx] + + weight[1] * features[idx[1] * channels + c_idx] + + weight[2] * features[idx[2] * channels + c_idx]; +} + + + +void three_interpolate_kernel_launcher_stack(int N, int channels, + const float *features, const int *idx, const float *weight, float *out) { + // features: (M1 + M2 ..., C) + // idx: [N1 + N2 ..., 3] + // weight: [N1 + N2 ..., 3] + // Return: + // out: (N1 + N2 ..., C) + + cudaError_t err; + dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_kernel_stack<<>>(N, channels, features, idx, weight, out); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void three_interpolate_grad_kernel_stack(int N, int channels, const float *grad_out, + const int *idx, const float *weight, float *grad_features) { + // grad_out_tensor: (N1 + N2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // grad_features_tensor: (M1 + M2 ..., C) + + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= N || c_idx >= channels) return; + + grad_out += pt_idx * channels + c_idx; + weight += pt_idx * 3; + idx += pt_idx * 3; + + // printf("pt_idx=%d, c_idx=%d, idx=(%d, %d, %d), grad_out=%f\n", pt_idx, c_idx, idx[0], idx[1], idx[2], grad_out[0]); + + atomicAdd(grad_features + idx[0] * channels + c_idx, grad_out[0] * weight[0]); + atomicAdd(grad_features + idx[1] * channels + c_idx, grad_out[0] * weight[1]); + atomicAdd(grad_features + idx[2] * channels + c_idx, grad_out[0] * weight[2]); +} + + +void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, + const int *idx, const float *weight, float *grad_features) { + // grad_out_tensor: (N1 + N2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // grad_features_tensor: (M1 + M2 ..., C) + + cudaError_t err; + dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_grad_kernel_stack<<>>( + N, channels, grad_out, idx, weight, grad_features + ); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} \ No newline at end of file diff --git 
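The three-NN kernel above only returns squared distances and indices; the interpolation weights that three_interpolate consumes are typically derived outside these kernels as normalized inverse distances. The CPU sketch below strings the two steps together for a single unknown point. It is a sketch under stated assumptions: the known points, features, and query are toy values, and the inverse-distance normalization is the usual PointNet++-style recipe, assumed here rather than taken from this repository.

// Three-nearest-neighbour interpolation for one query point, mirroring
// three_nn_kernel_stack + three_interpolate_kernel_stack on the CPU.
// The inverse-distance weight normalization is assumed (standard PointNet++
// practice); in the real pipeline it is computed outside these kernels.
#include <cstdio>

int main() {
    const int M = 4, C = 2;
    float known[M][3] = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {3, 3, 0}};
    float feats[M][C] = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
    float unknown[3]  = {0.2f, 0.2f, 0.0f};

    // Track the three smallest squared distances (same bookkeeping as the kernel).
    double best[3] = {1e40, 1e40, 1e40};
    int besti[3] = {0, 0, 0};
    for (int k = 0; k < M; k++) {
        float dx = unknown[0] - known[k][0], dy = unknown[1] - known[k][1], dz = unknown[2] - known[k][2];
        double d = dx * dx + dy * dy + dz * dz;
        if (d < best[0])      { best[2]=best[1]; besti[2]=besti[1]; best[1]=best[0]; besti[1]=besti[0]; best[0]=d; besti[0]=k; }
        else if (d < best[1]) { best[2]=best[1]; besti[2]=besti[1]; best[1]=d; besti[1]=k; }
        else if (d < best[2]) { best[2]=d; besti[2]=k; }
    }

    // Inverse-distance weights, normalized to sum to one.
    double w[3], wsum = 0;
    for (int j = 0; j < 3; j++) { w[j] = 1.0 / (best[j] + 1e-8); wsum += w[j]; }
    for (int j = 0; j < 3; j++) w[j] /= wsum;

    // Weighted sum of neighbour features == what three_interpolate computes.
    for (int c = 0; c < C; c++) {
        double out = 0;
        for (int j = 0; j < 3; j++) out += w[j] * feats[besti[j]][c];
        std::printf("channel %d: %.3f\n", c, out);
    }
    return 0;
}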
a/cosense3d/ops/src/pointnet2_stack/interpolate_gpu.h b/cosense3d/ops/src/pointnet2_stack/interpolate_gpu.h new file mode 100755 index 00000000..12775ec3 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/interpolate_gpu.h @@ -0,0 +1,39 @@ +#ifndef _INTERPOLATE_GPU_H +#define _INTERPOLATE_GPU_H + +#include +#include +#include +#include + + +void three_nn_wrapper_stack(at::Tensor unknown_tensor, + at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor, + at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); + + +void three_interpolate_wrapper_stack(at::Tensor features_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); + + + +void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor, + at::Tensor weight_tensor, at::Tensor grad_features_tensor); + + +void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, + const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, + float *dist2, int *idx); + + +void three_interpolate_kernel_launcher_stack(int N, int channels, + const float *features, const int *idx, const float *weight, float *out); + + + +void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, + const int *idx, const float *weight, float *grad_features); + + + +#endif \ No newline at end of file diff --git a/cosense3d/ops/src/pointnet2_stack/sampling.cpp b/cosense3d/ops/src/pointnet2_stack/sampling.cpp new file mode 100755 index 00000000..75cf9181 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/sampling.cpp @@ -0,0 +1,36 @@ +#include +#include +#include +#include + +#include "sampling_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int furthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { + + CHECK_INPUT(points_tensor); + CHECK_INPUT(temp_tensor); + CHECK_INPUT(idx_tensor); + + const float *points = points_tensor.data(); + float *temp = temp_tensor.data(); + int *idx = idx_tensor.data(); + + furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); + return 1; +} diff --git a/cosense3d/ops/src/pointnet2_stack/sampling_gpu.cu b/cosense3d/ops/src/pointnet2_stack/sampling_gpu.cu new file mode 100755 index 00000000..35b7d613 --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/sampling_gpu.cu @@ -0,0 +1,176 @@ +#include +#include + +#include "../cuda_utils.h" +#include "sampling_gpu.h" + + +__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ + const float v1 = dists[idx1], v2 = dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = max(v1, v2); + dists_i[idx1] = v2 > v1 ? 
i2 : i1; +} + + +template +__global__ void furthest_point_sampling_kernel(int b, int n, int m, + const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * 3; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) + idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + // if (mag <= 1e-3) + // continue; + + float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + float d2 = min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 1024) { + if (tid < 512) { + __update(dists, dists_i, tid, tid + 512); + } + __syncthreads(); + } + + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) + idxs[j] = old; + } +} + +void furthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + cudaError_t err; + unsigned int n_threads = opt_n_threads(n); + + switch (n_threads) { + case 1024: + furthest_point_sampling_kernel<1024><<>>(b, n, m, dataset, temp, idxs); break; + case 512: + furthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); break; + case 256: + furthest_point_sampling_kernel<256><<>>(b, n, m, dataset, temp, idxs); break; + case 128: + furthest_point_sampling_kernel<128><<>>(b, n, m, dataset, temp, idxs); break; + case 64: + furthest_point_sampling_kernel<64><<>>(b, n, m, dataset, temp, idxs); break; + case 32: + furthest_point_sampling_kernel<32><<>>(b, n, m, dataset, temp, idxs); break; + case 16: + furthest_point_sampling_kernel<16><<>>(b, n, m, dataset, temp, idxs); break; + case 8: + furthest_point_sampling_kernel<8><<>>(b, n, m, dataset, temp, idxs); break; + case 4: + furthest_point_sampling_kernel<4><<>>(b, 
n, m, dataset, temp, idxs); break; + case 2: + furthest_point_sampling_kernel<2><<>>(b, n, m, dataset, temp, idxs); break; + case 1: + furthest_point_sampling_kernel<1><<>>(b, n, m, dataset, temp, idxs); break; + default: + furthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); + } + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/cosense3d/ops/src/pointnet2_stack/sampling_gpu.h b/cosense3d/ops/src/pointnet2_stack/sampling_gpu.h new file mode 100755 index 00000000..2ad8aa5b --- /dev/null +++ b/cosense3d/ops/src/pointnet2_stack/sampling_gpu.h @@ -0,0 +1,15 @@ +#ifndef _SAMPLING_GPU_H +#define _SAMPLING_GPU_H + +#include +#include +#include + + +int furthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); + +void furthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs); + +#endif diff --git a/cosense3d/ops/src/scalar_attention/scalar_attention.cpp b/cosense3d/ops/src/scalar_attention/scalar_attention.cpp new file mode 100644 index 00000000..a58fa771 --- /dev/null +++ b/cosense3d/ops/src/scalar_attention/scalar_attention.cpp @@ -0,0 +1,41 @@ +/* +Written by Yunshuang Yuan +*/ +#include +#include +#include +#include +#include "scalar_attention_kernel.h" + +void scalar_attention_forward( + int m, int h, int c, AT weight_tensor, AT value_tensor, AT out_F_tensor, AT kq_indices_tensor + ) +{ + const float* weight = weight_tensor.data_ptr(); + const float* value = value_tensor.data_ptr(); + float* out_F = out_F_tensor.data_ptr(); + const int* kq_indices = kq_indices_tensor.data_ptr(); + + scalar_attention_forward_launcher( + m, h, c, weight, value, out_F, kq_indices + ); +} + +void scalar_attention_backward( + int m, int h, int c, AT weight_tensor, AT value_tensor, AT kq_indices_tensor, + AT grad_weight_tensor, AT grad_value_tensor, AT grad_out_F_tensor + ) +{ + const float* weight = weight_tensor.data_ptr(); + const float* value = value_tensor.data_ptr(); + const int* kq_indices = kq_indices_tensor.data_ptr(); + + float* grad_weight = grad_weight_tensor.data_ptr(); + float* grad_value = grad_value_tensor.data_ptr(); + const float* grad_out_F = grad_out_F_tensor.data_ptr(); + + scalar_attention_backward_launcher( + m, h, c, weight, value, kq_indices, + grad_weight, grad_value, grad_out_F + ); +} \ No newline at end of file diff --git a/cosense3d/ops/src/scalar_attention/scalar_attention_kernel.cu b/cosense3d/ops/src/scalar_attention/scalar_attention_kernel.cu new file mode 100644 index 00000000..fe868e22 --- /dev/null +++ b/cosense3d/ops/src/scalar_attention/scalar_attention_kernel.cu @@ -0,0 +1,86 @@ +/* +Written by Yunshuang Yuan +*/ +#include "../cuda_utils.h" +#include "scalar_attention_kernel.h" + + +__global__ void scalar_attention_forward_kernel( + int m, int h, int c, const float* weight, const float* value, float* out_F, const int* kq_indices +) +{ + // m: # of total mappings + // h: # of attention heads + // c: # of attention channels + + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= m * c) return; + + int map_idx = index / c; + int i = index % c; + + int out_F_idx_ = kq_indices[m + map_idx]; // kq_indices[1][map_idx] + int value_idx_ = kq_indices[map_idx]; // kq_indices[0][map_idx] + + for(int head_idx = 0; head_idx < h; head_idx++){ + + int weight_idx = map_idx * h + head_idx; + int out_F_idx = out_F_idx_ * h * c + 
head_idx * c + i; + int value_idx = value_idx_ * h * c + head_idx * c + i; + + atomicAdd(out_F + out_F_idx, weight[weight_idx] * value[value_idx]); // atomic operation can avoid race condition + } +} + +__global__ void scalar_attention_backward_kernel( + int m, int h, int c, const float* weight, const float* value, const int* kq_indices, + float* grad_weight, float* grad_value, const float* grad_out_F +) +{ + // m: # of total mappings + // h: # of attention heads + // c: # of attention channels + + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= m * c) return; + + int map_idx = index / c; + int i = index % c; + + int out_F_idx_ = kq_indices[m + map_idx]; // kq_indices[1][map_idx] + int value_idx_ = kq_indices[map_idx]; // kq_indices[0][map_idx] + + for(int head_idx = 0; head_idx < h; head_idx++){ + + int weight_idx = map_idx * h + head_idx; + int out_F_idx = out_F_idx_ * h * c + head_idx * c + i; + int value_idx = value_idx_ * h * c + head_idx * c + i; + + atomicAdd(grad_weight + weight_idx, grad_out_F[out_F_idx] * value[value_idx]); + atomicAdd(grad_value + value_idx, grad_out_F[out_F_idx] * weight[weight_idx]); + } +} + +void scalar_attention_forward_launcher( + int m, int h, int c, const float* weight, const float* value, float* out_F, const int* kq_indices +) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(DIVUP(m * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + scalar_attention_forward_kernel<<>>( + m, h, c, weight, value, out_F, kq_indices + ); +} + +void scalar_attention_backward_launcher( + int m, int h, int c, const float* weight, const float* value, const int* kq_indices, + float* grad_weight, float* grad_value, const float* grad_out_F +) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(DIVUP(m * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + scalar_attention_backward_kernel<<>>( + m, h, c, weight, value, kq_indices, + grad_weight, grad_value, grad_out_F + ); +} \ No newline at end of file diff --git a/cosense3d/ops/src/scalar_attention/scalar_attention_kernel.h b/cosense3d/ops/src/scalar_attention/scalar_attention_kernel.h new file mode 100644 index 00000000..a2b0e778 --- /dev/null +++ b/cosense3d/ops/src/scalar_attention/scalar_attention_kernel.h @@ -0,0 +1,36 @@ +/* +Written by Yunshuang Yuan +*/ +#pragma once +#ifndef _scalar_attention_KERNEL +#define _scalar_attention_KERNEL +#include +#include +#include + +#define AT at::Tensor + +void scalar_attention_forward( + int m, int h, int c, AT weight_tensor, AT value_tensor, AT out_F_tensor, AT kq_indices_tensor + ); +void scalar_attention_backward( + int m, int h, int c, AT weight_tensor, AT value_tensor, AT kq_indices_tensor, + AT grad_weight_tensor, AT grad_value_tensor, AT grad_out_F_tensor + ); + +#ifdef __cplusplus +extern "C" { +#endif + +void scalar_attention_forward_launcher( + int m, int h, int c, const float* weight, const float* value, float* out_F, const int* kq_indices + ); +void scalar_attention_backward_launcher( + int m, int h, int c, const float* weight, const float* value, const int* kq_indices, + float* grad_weight, float* grad_value, const float* grad_out_F + ); + +#ifdef __cplusplus +} +#endif +#endif \ No newline at end of file diff --git a/cosense3d/ops/src/utils/boxes.cpp b/cosense3d/ops/src/utils/boxes.cpp new file mode 100644 index 00000000..076a9a1c --- /dev/null +++ b/cosense3d/ops/src/utils/boxes.cpp @@ -0,0 +1,82 @@ +/* +Reference paper: https://arxiv.org/abs/1907.03670 +Written by Shaoshuai Shi +Modified 
by Yunshuang Yuan +*/ + +#include +#include +#include +#include +#include "boxes_kernel.h" + + +void points_in_boxes_gpu(AT boxes_tensor, AT pts_tensor, AT box_idx_of_points_tensor){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] + // params boxes_idx_of_points: (B, npoints), default -1 + +// CHECK_INPUT(boxes_tensor); +// CHECK_INPUT(pts_tensor); +// CHECK_INPUT(box_idx_of_points_tensor); + + int batch_size = boxes_tensor.size(0); + int boxes_num = boxes_tensor.size(1); + int pts_num = pts_tensor.size(1); + + const float *boxes = boxes_tensor.data(); + const float *pts = pts_tensor.data(); + int *box_idx_of_points = box_idx_of_points_tensor.data(); + + points_in_boxes_launcher(batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points); + +} + + +inline void lidar_to_local_coords_cpu(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){ + float cosa = cos(-rot_angle), sina = sin(-rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + + +inline int check_pt_in_box3d_cpu(const float *pt, const float *box3d, float &local_x, float &local_y){ + // param pt: (x, y, z) + // param box3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + const float MARGIN = 1e-2; + float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6]; + + if (fabsf(z - cz) > dz / 2.0) return 0; + lidar_to_local_coords_cpu(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN); + return in_flag; +} + + +void points_in_boxes_cpu(AT boxes_tensor, AT pts_tensor, AT pts_indices_tensor){ + // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps + // params pts: (num_points, 3) [x, y, z] + // params pts_indices: (N, num_points) + +// CHECK_CONTIGUOUS(boxes_tensor); +// CHECK_CONTIGUOUS(pts_tensor); +// CHECK_CONTIGUOUS(pts_indices_tensor); + + int boxes_num = boxes_tensor.size(0); + int pts_num = pts_tensor.size(0); + + const float *boxes = boxes_tensor.data(); + const float *pts = pts_tensor.data(); + int *pts_indices = pts_indices_tensor.data(); + + float local_x = 0, local_y = 0; + for (int i = 0; i < boxes_num; i++){ + for (int j = 0; j < pts_num; j++){ + int cur_in_flag = check_pt_in_box3d_cpu(pts + j * 3, boxes + i * 7, local_x, local_y); + pts_indices[i * pts_num + j] = cur_in_flag; + } + } + +} diff --git a/cosense3d/ops/src/utils/boxes_kernel.cu b/cosense3d/ops/src/utils/boxes_kernel.cu new file mode 100644 index 00000000..f209af9b --- /dev/null +++ b/cosense3d/ops/src/utils/boxes_kernel.cu @@ -0,0 +1,83 @@ +/* +Written by Shaoshuai Shi +Modified by Yunshuang Yuan +*/ + +#include +#include +#include "boxes_kernel.h" + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) + + +__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){ + float cosa = cos(-rot_angle), sina = sin(-rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + + +__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, float &local_x, float &local_y){ + // param pt: (x, y, z) + // param box3d: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + + const float MARGIN = 1e-5; + 
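// MARGIN adds a small tolerance in the x-y plane so points lying exactly on a box edge still count as inside. +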
float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6]; + + if (fabsf(z - cz) > dz / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN); + return in_flag; +} + + +__global__ void points_in_boxes_kernel(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate + // params boxes_idx_of_points: (B, npoints), default -1 + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= batch_size || pt_idx >= pts_num) return; + + boxes += bs_idx * boxes_num * 7; + pts += bs_idx * pts_num * 3 + pt_idx * 3; + box_idx_of_points += bs_idx * pts_num + pt_idx; + + float local_x = 0, local_y = 0; + int cur_in_flag = 0; + for (int k = 0; k < boxes_num; k++){ + cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y); + if (cur_in_flag){ + box_idx_of_points[0] = k; + break; + } + } +} + + +void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] + // params boxes_idx_of_points: (B, npoints), default -1 + cudaError_t err; + + dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size); + dim3 threads(THREADS_PER_BLOCK); + points_in_boxes_kernel<<>>(batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} \ No newline at end of file diff --git a/cosense3d/ops/src/utils/boxes_kernel.h b/cosense3d/ops/src/utils/boxes_kernel.h new file mode 100644 index 00000000..1327ee04 --- /dev/null +++ b/cosense3d/ops/src/utils/boxes_kernel.h @@ -0,0 +1,23 @@ +#pragma once +#ifndef _boxes_KERNEL +#define _boxes_KERNEL +#include +#include +#include + +#define AT at::Tensor + +void points_in_boxes_cpu(AT boxes_tensor, AT pts_tensor, AT pts_indices_tensor); +void points_in_boxes_gpu(AT boxes_tensor, AT pts_tensor, AT box_idx_of_points_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/cosense3d/ops/utils.py b/cosense3d/ops/utils.py new file mode 100644 index 00000000..33fd17d9 --- /dev/null +++ b/cosense3d/ops/utils.py @@ -0,0 +1,103 @@ +import copy +import torch +import numpy as np +import cuda_ops +from cosense3d.utils.misc import check_numpy_to_torch + + +def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False + + +@torch.no_grad() +def decompose_batch_indices(tensor, batch_size, batch_indices): + if batch_indices is None: + batch_indices = range(batch_size) + points_decomposed = [tensor[tensor[:, 0] == b] for b in batch_indices] + decomposed_tensor = torch.cat(points_decomposed, dim=0) + cnts = [len(pts) for pts in points_decomposed] + out_tensor = 
torch.zeros((batch_size, max(cnts), tensor.shape[-1] - 1), + dtype=tensor.dtype, device=tensor.device) + for b, (c, points) in enumerate(zip(cnts, points_decomposed)): + out_tensor[b, :c, :] = points_decomposed[b][:, 1:] + return decomposed_tensor, out_tensor, cnts + + +def points_in_boxes_cpu(points, boxes): + """ + Args: + points: (num_points, 3) + boxes: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps + Returns: + point_indices: (N, num_points) + """ + assert boxes.shape[1] == 7 + assert points.shape[1] == 3 + points, is_numpy = check_numpy_to_torch(points) + boxes, is_numpy = check_numpy_to_torch(boxes) + + point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int) + cuda_ops.points_in_boxes_cpu(boxes.float().contiguous(), points.float().contiguous(), point_indices) + + return point_indices.numpy() if is_numpy else point_indices + + +@torch.no_grad() +def points_in_boxes_gpu(points, boxes, batch_size=None, batch_indices=None): + """ + :param points: (B, M, 3) or (M, 4) + :param boxes: (B, T, 7) or (T, 8), num_valid_boxes <= T + :return box_idxs_of_pts: (B, M), default background = -1 + """ + src_idx = points[:, 0] + batch_flag = False + if len(points.shape)==2: + assert batch_size is not None + assert boxes[:, 0].max() < batch_size and points[:, 0].max() < batch_size, \ + f"Box shape: {', '.join([str(s) for s in boxes.shape])}" + assert boxes.shape[1] == 8 and points.shape[1] == 4 + batch_flag = True + n_box = len(boxes) + _, points, point_cnts = decompose_batch_indices(points, batch_size, batch_indices) + boxes_decomposed, boxes, box_cnts = decompose_batch_indices(boxes, batch_size, batch_indices) + assert boxes.shape[0] == points.shape[0], \ + f"boxes and point batch size does not match! 
boxes ({boxes.shape[0]}), points ({points.shape[0]})" + assert boxes.shape[2] == 7 and points.shape[2] == 3 + + batch_size, num_points, _ = points.shape + + box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1) + cuda_ops.points_in_boxes_gpu(boxes.contiguous(), points.contiguous(), box_idxs_of_pts) + + if batch_flag: + box_idxs_composed = torch.zeros(len(src_idx), dtype=torch.int, + device=points.device).fill_(-1) + cnt_p = 0 + cnt_b = 0 + for b, (cp, cb) in enumerate(zip(point_cnts, box_cnts)): + indices = box_idxs_of_pts[b, :cp] + if cb == 0: + # zero points are assigned to padded zeros boxes --> remove them + indices = -1 + else: + indices[indices >= 0] += cnt_b + box_idxs_composed[src_idx==b] = indices + cnt_p += cp + cnt_b += cb + return boxes_decomposed, box_idxs_composed.long() + return box_idxs_of_pts.long() + + +@torch.no_grad() +def points_in_boxes_gpu_2d(points, boxes, batch_size=None, batch_indices=None): + cur_boxes = copy.deepcopy(boxes) + cur_boxes[:, 3] = 0 + bev_pts = copy.deepcopy(points) + bev_pts[:, 3] = 0 + _, box_idx_of_pts = points_in_boxes_gpu(bev_pts, cur_boxes, + batch_size=batch_size, + batch_indices=batch_indices) + + return box_idx_of_pts \ No newline at end of file diff --git a/cosense3d/ops/version_diff.md b/cosense3d/ops/version_diff.md new file mode 100644 index 00000000..45d2a453 --- /dev/null +++ b/cosense3d/ops/version_diff.md @@ -0,0 +1,12 @@ + +``` +//Comment Out +//#include +//extern THCState *state; +//cudaStream_t stream = THCState_getCurrentStream(state); + +//Replace with +#include +#include +cudaStream_t stream = at::cuda::getCurrentCUDAStream(); +``` \ No newline at end of file diff --git a/cosense3d/tools/__init__.py b/cosense3d/tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/tools/agent_runner.py b/cosense3d/tools/agent_runner.py new file mode 100644 index 00000000..909b0272 --- /dev/null +++ b/cosense3d/tools/agent_runner.py @@ -0,0 +1,151 @@ +import os, sys +import argparse +import logging + +import numpy as np +import torch + +from cosense3d.dataset import get_dataloader +from cosense3d.utils.misc import setup_logger +from cosense3d.config import load_config, save_config +from cosense3d.utils.train_utils import seed_everything +from cosense3d.agents.center_controller import CenterController +from cosense3d.agents.core.train_runner import TrainRunner +from cosense3d.agents.core.test_runner import TestRunner +from cosense3d.agents.core.vis_runner import VisRunner +from cosense3d.tools.path_cfgs import parse_paths + + +def ddp_setup(): + from torch.distributed import init_process_group + init_process_group(backend="nccl") + torch.cuda.set_device(int(os.environ["LOCAL_RANK"])) + + +class AgentRunner: + def __init__(self, args, cfgs): + self.visualize = args.visualize or 'vis' in args.mode + self.mode = args.mode + if args.gpus > 0: + self.dist = True + ddp_setup() + else: + self.dist = False + if self.visualize: + from cosense3d.agents.core.gui import GUI + from PyQt5.QtWidgets import QApplication + self.app = QApplication(sys.argv) + self.gui = GUI(args.mode, cfgs['VISUALIZATION']) + + self.build_runner(args, cfgs) + + def build_runner(self, args, cfgs): + dataloader = get_dataloader(cfgs['DATASET'], + args.mode.replace('vis_', ''), + self.dist) + center_controller = CenterController(cfgs['CONTROLLER'], dataloader, self.dist) + if args.mode == 'train': + self.runner = TrainRunner(dataloader=dataloader, + controller=center_controller, + **cfgs['TRAIN']) + elif args.mode == 
'test': + self.runner = TestRunner(dataloader=dataloader, + controller=center_controller, + **cfgs['TEST']) + else: + self.runner = VisRunner(dataloader=dataloader, + controller=center_controller,) + + def visible_run(self): + self.gui.setRunner(self.runner) + self.app.installEventFilter(self.gui) + + # self.app.setStyle("Fusion") + from PyQt5.QtWidgets import QDesktopWidget + desktop = QDesktopWidget().availableGeometry() + width = (desktop.width() - self.gui.width()) / 2 + height = (desktop.height() - self.gui.height()) / 2 + + self.gui.move(int(width), int(height)) + self.gui.initGUI() + # Start GUI + self.gui.show() + + logging.info("Showing GUI...") + sys.exit(self.app.exec_()) + + def run(self): + try: + if self.visualize: + self.visible_run() + else: + self.runner.run() + finally: + if self.dist: + from torch.distributed import destroy_process_group + destroy_process_group() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, default="./config/config.yaml") + parser.add_argument("--mode", type=str, default="test", + help="train | test | vis_train | vis_test") + parser.add_argument("--visualize", action="store_true") + parser.add_argument("--resume-from", type=str) + parser.add_argument("--load-from", type=str) + parser.add_argument("--log-dir", type=str, default=f"{os.path.dirname(__file__)}/../../logs") + parser.add_argument("--run-name", type=str, default="default") + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--debug", action="store_true") + parser.add_argument("--gpus", type=int, default=0) + parser.add_argument("--data-path", type=str) + parser.add_argument("--meta-path", type=str) + parser.add_argument("--batch-size", type=int) + parser.add_argument("--n-workers", type=int) + parser.add_argument("--data-latency", type=int, + help="-1: random latency selected from (0, 1, 2)*100ms;\n" + " 0: coop. data has no additional latency relative to ego frame;\n" + " n>0: coop. data has n*100ms latency relative to ego frame.") + parser.add_argument("--loc-err", type=str, + help="localization errors for x, y translation " + "and rotation angle along z-axis." 
+ "example: `0.5,0.5,1` for 0.5m deviation at x and y axis " + "and 1 degree rotation angle") + parser.add_argument("--cnt-cpm-size", action="store_true") + parser.add_argument("--cpm-thr", type=float, default=0.0) + args = parser.parse_args() + + setup_logger(args.run_name, args.debug) + # for ME + os.environ['OMP_NUM_THREADS'] = "16" + # if 'vis' in args.mode: + # args.config = "./config/defaults/base_cav.yaml" + + seed_everything(2023) + cfgs = load_config(args) + if args.gpus: + cfgs['TRAIN']['gpus'] = args.gpus + if args.batch_size is not None: + cfgs['DATASET']['batch_size_train'] = args.batch_size + if args.n_workers is not None: + cfgs['DATASET']['n_workers'] = args.n_workers + if args.meta_path is not None: + cfgs['DATASET']['meta_path'] = args.meta_path + if args.data_path is not None: + cfgs['DATASET']['data_path'] = args.data_path + if args.data_latency is not None: + cfgs['DATASET']['latency'] = args.data_latency + if args.loc_err is not None: + loc_err = [float(x) for x in args.loc_err.split(',')] + cfgs['DATASET']['loc_err'] = [loc_err[0], loc_err[1], np.deg2rad(loc_err[2])] + if args.cnt_cpm_size: + cfgs['TEST']['hooks'].append({'type': 'CPMStatisticHook'}) + cfgs['CONTROLLER']['cav_manager']['cpm_statistic'] = True + if args.cpm_thr is not None: + cfgs['CONTROLLER']['cav_manager']['share_score_thr'] = args.cpm_thr + + agent_runner = AgentRunner(args, cfgs) + if args.mode == "train": + save_config(cfgs, agent_runner.runner.logdir) + agent_runner.run() diff --git a/cosense3d/tools/download.sh b/cosense3d/tools/download.sh new file mode 100644 index 00000000..33d188dd --- /dev/null +++ b/cosense3d/tools/download.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +url="https://data.uni-hannover.de:8080/dataset/upload/users/ikg/yuan/cosense3d/" + +# Check if the input file containing URLs is provided +if [ "$#" -ne 2 ]; then + echo "Usage: $0 $1 " + exit 1 +fi + +# Input file with the list of URLs +url_dir=$url$1 +output_dir=$2 + +# Check if the output directory exists, if not, create it +if [ ! 
-d "$output_dir" ]; then + mkdir -p "$output_dir" +fi + +if [ "$1" == "OPV2Vt" ]; then + files=("opv2vt_meta.zip" "opv2vt_test.zip" "opv2vt_train.zip" "opv2vt_train.z01" "opv2vt_train.z02" "opv2vt_train.z03") + for f in "${files[@]}"; do + url_file="$url_dir/$f" + wget -P "$output_dir" "$url_file" + done + + cd "$output_dir" + cat opv2vt_train.z01 opv2vt_train.z02 opv2vt_train.z03 opv2vt_train.zip > train.zip + unzip train.zip + rm opv2vt_train.z01 opv2vt_train.z02 opv2vt_train.z03 opv2vt_train.zip train.zip + unzip opv2vt_test.zip + rm opv2vt_test.zip + unzip opv2vt_meta.zip + rm opv2vt_meta.zip + +elif [ "$1" == "DairV2Xt" ]; then + url_file="$url_dir/dairv2xt_meta.zip" + wget -P "$output_dir" "$url_file" + cd "$output_dir" + unzip dairv2xt_meta.zip + rm dairv2xt_meta.zip + +elif [ "$1" == "OPV2V" ]; then + wget -P "$output_dir" https://data.uni-hannover.de/dataset/678827e9-bb64-44b8-b8fd-e583c740b5f5/resource/eade1879-e67b-4112-a088-2a92ca76e004/download/opv2v_meta.zip + files=("test.z01" "test.zip" "train.z01" "train.z02" "train.z03" "train.z04" "train.z05" "train.z06" "train.zip" ) + for f in "${files[@]}"; do + url_file="$url_dir/$f" + wget -P "$output_dir" "$url_file" + done + + cd "$output_dir" + cat train.z01 train.z02 train.z03 train.z04 train.z05 train.z06 train.zip > combined.zip + unzip combined.zip + rm train.z01 train.z02 train.z03 train.z04 train.z05 train.z06 train.zip combined.zip + cat test.z01 test.zip > combined.zip + unzip combined.zip + rm test.z01 test.zip combined.zip + unzip opv2v_meta.zip + rm opv2v_meta.zip +fi + +echo "Download completed." \ No newline at end of file diff --git a/cosense3d/tools/eval.py b/cosense3d/tools/eval.py new file mode 100644 index 00000000..46c1c623 --- /dev/null +++ b/cosense3d/tools/eval.py @@ -0,0 +1,337 @@ +import copy +import os, glob, tqdm +import shutil + +import torch +from matplotlib import colormaps +from cosense3d.utils.eval_detection_utils import * +from cosense3d.utils.box_utils import corners_to_boxes_3d +from cosense3d.utils.vislib import draw_points_boxes_plt,plt +from cosense3d.utils.pclib import load_pcd + +lidar_range_opv2v = [-140.8, -40, -3, 140.8, 40, 1] +lidar_range_dairv2x = [-100, -38.4, -3.0, 100, 38.4, 1.0] + +def filter_box_ranges(boxes, lidar_range): + mask = boxes.new_ones((len(boxes),)).bool() + if boxes.ndim == 3: + centers = boxes.mean(dim=1) + else: + centers = boxes[:, :3] + for i in range(3): + mask = mask & (centers[:, i] > lidar_range[1]) & (centers[:, i] < lidar_range[i+3]) + return mask + + +def eval_detection_opv2v(test_dir, iou_thr=[0.5, 0.7], global_sort_detections=True): + result_stat = {iou: {'tp': [], 'fp': [], 'gt': 0, 'score': []} for iou in iou_thr} + filenames = sorted(glob.glob(os.path.join(test_dir, '*.pth'))) + for i in tqdm.tqdm(range(len(filenames))): + if os.path.exists(os.path.join(test_dir, f"{i}.pth")): + data = torch.load(os.path.join(test_dir, f"{i}.pth")) + else: + data = torch.load(filenames[i]) + + if 'pred' in data: + pred_boxes = data['pred'] + pred_scores = data['score'] + gt_boxes = data['gt'] + else: + pred_boxes = data['detection']['box'] + pred_scores = data['detection']['scr'] + gt_boxes = data['gt_boxes'] + + gt_boxes = gt_boxes[filter_box_ranges(gt_boxes, lidar_range_opv2v)] + mask = filter_box_ranges(pred_boxes, lidar_range_opv2v) + pred_boxes = pred_boxes[mask] + pred_scores = pred_scores[mask] + for iou in iou_thr: + caluclate_tp_fp( + pred_boxes, pred_scores, gt_boxes, result_stat, iou + ) + result = eval_final_results(result_stat, iou_thr, 
global_sort_detections=global_sort_detections) + + +def eval_detection_opv2v_with_opencood_gt(test_dir_opencood, test_dir_cosense3d, iou_thr=[0.5, 0.7], global_sort_detections=True): + result_stat = {iou: {'tp': [], 'fp': [], 'gt': 0, 'score': []} for iou in iou_thr} + filenames_opencood = sorted(glob.glob(os.path.join(test_dir_opencood, '*.pth'))) + filenames_cosense3d = sorted(glob.glob(os.path.join(test_dir_cosense3d, '*.pth'))) + for i in tqdm.tqdm(range(len(filenames_opencood))): + data_opencood = torch.load(os.path.join(test_dir_opencood, f"{i}.pth")) + data_cosense3d = torch.load(filenames_cosense3d[i]) + + gt_boxes = data_opencood['gt'] + pred_boxes = data_cosense3d['detection']['box'] + pred_scores = data_cosense3d['detection']['scr'] + + gt_boxes = gt_boxes[filter_box_ranges(gt_boxes, lidar_range_opv2v)] + mask = filter_box_ranges(pred_boxes, lidar_range_opv2v) + pred_boxes = pred_boxes[mask] + pred_scores = pred_scores[mask] + for iou in iou_thr: + caluclate_tp_fp( + pred_boxes, pred_scores, gt_boxes, result_stat, iou + ) + eval_final_results(result_stat, iou_thr, global_sort_detections=global_sort_detections) + + +def eval_detection_cosense3d(test_dir, iou_thr=[0.5, 0.7], mode='bev'): + result_stat = {iou: {'tp': [], 'gt': 0, 'scr': []} for iou in iou_thr} + filenames = glob.glob(os.path.join(test_dir, '*.pth')) + for f in tqdm.tqdm(filenames): + data = torch.load(f) + preds = data['detection'] + gt_boxes = data['gt_boxes'] + for iou in iou_thr: + tp = ops_cal_tp( + preds['box'].cpu(), gt_boxes.cpu(), IoU_thr=iou, iou_mode=mode + ) + result_stat[iou]['tp'].append(tp.cpu()) + result_stat[iou]['gt'] += len(gt_boxes) + result_stat[iou]['scr'].append(preds['scr'].cpu()) + + result = {} + for iou in iou_thr: + scores = torch.cat(result_stat[iou]['scr'], dim=0) + tps = torch.cat(result_stat[iou]['tp'], dim=0) + n_pred = len(scores) + n_gt = result_stat[iou]['gt'] + + ap, mpre, mrec, _ = cal_ap_all_point(scores, tps, n_pred, n_gt) + iou_str = f"{int(iou * 100)}" + result[iou] = {f'ap_{iou_str}': ap, + f'mpre_{iou_str}': mpre, + f'mrec_{iou_str}': mrec, + } + print(f"AP@{iou}: {ap:.3f}") + + +def compare_detection(test_dir1, test_dir2, out_dir, pc_range): + filenames = sorted(glob.glob(os.path.join(test_dir2, '*.pth'))) + for i, f in enumerate(filenames): + if not i % 10 == 0: # or '35_1.018951' not in f: + continue + data1 = torch.load(os.path.join(test_dir1, os.path.basename(f))) + data2 = torch.load(f) + + pred_boxes1 = data1['detection']['box'][:, :7] + pred_scores1 = data1['detection']['scr'] + gt_boxes1 = data1['gt_boxes'][:, :7] + points1 = torch.cat([v for v in data1['points'].values()], dim=0).cpu().numpy() + points1 = points1[points1[:, 3] > 0] + time_norm = (points1[:, -1] - points1[:, -1].min()) / (points1[:, -1].max() - points1[:, -1].min()) + colors = colormaps['jet'](time_norm) + colors[:, -1] = 0.1 + + pred_boxes2 = data2['detection']['box'][:, :7] + pred_scores2 = data2['detection']['scr'] + gt_boxes2 = data2['gt_boxes'][:, :7] + + fig = plt.figure(figsize=(24, 8)) + axs = fig.subplots(1, 2) + axs[0].scatter(points1[:, 0], points1[:, 1], c=colors, s=1, alpha=0.5, edgecolors='none') + axs[1].scatter(points1[:, 0], points1[:, 1], c=colors, s=1, alpha=0.5, edgecolors='none') + axs[0].set_title('StreamLTS', fontsize=20) + axs[1].set_title('StreamLTS without RoI regression',fontsize=20) + + draw_points_boxes_plt( + pc_range=pc_range, + boxes_pred=pred_boxes1.cpu().numpy(), + boxes_gt=gt_boxes1.cpu().numpy(), + linewidth_scale=1.5, + ax=axs[0], + ) + draw_points_boxes_plt( + 
pc_range=pc_range, + boxes_pred=pred_boxes2.detach().cpu().numpy(), + boxes_gt=gt_boxes2.detach().cpu().numpy(), + linewidth_scale=1.5, + ax=axs[1] + ) + + scenario, frame, _, _ = os.path.basename(f).split('.') + plt.savefig(f"{out_dir}/{scenario}_{frame}.png", bbox_inches='tight') + plt.tight_layout() + plt.close() + # data_path = "/koko/OPV2V/test" + # cavs = os.listdir(f"{data_path}/{scenario}") + # pcds = [] + # for cav in cavs: + # if 'yaml' in cav: + # continue + # points = load_pcd(f"{data_path}/{scenario}/{cav}/{frame}.pcd")['xyz'] + + +def format_final_result(out_dict, iou_thr): + fmt_str = "" + for iou in iou_thr: + iou_str = f"{int(iou * 100)}" + fmt_str += f"AP@{iou_str}: {out_dict[f'ap_{iou_str}']:.3f}\n" + return fmt_str + + +def eval_cosense_detection_with_pth(result_path, pc_range, iou_thr=[0.5, 0.7], metrics=['OPV2V', 'CoSense3D']): + iou_thr = iou_thr + res_dict = {} + for m in metrics: + assert m in ['OPV2V', 'CoSense3D'] + res_dict[f'{m.lower()}_result'] = \ + {iou: {'tp': [], 'fp': [], 'gt': 0, 'scr': []} for iou in iou_thr} + files = glob.glob(os.path.join(result_path, "*.pth")) + for f in tqdm.tqdm(files[::30]): + res = torch.load(f) + preds = res['detection'] + cur_gt_boxes = res['gt_boxes'] + + for iou in iou_thr: + if 'OPV2V' in metrics: + result_dict = res_dict['opv2v_result'] + caluclate_tp_fp( + preds['box'][..., :7], preds['scr'], cur_gt_boxes[..., :7], result_dict, iou + ) + if 'CoSense3D' in metrics: + result_dict = res_dict['cosense3d_result'] + tp = ops_cal_tp( + preds['box'][..., :7].detach(), cur_gt_boxes[..., :7].detach(), IoU_thr=iou + ) + result_dict[iou]['tp'].append(tp.cpu()) + result_dict[iou]['gt'] += len(cur_gt_boxes) + result_dict[iou]['scr'].append(preds['scr'].detach().cpu()) + + fmt_str = ("################\n" + "DETECTION RESULT\n" + "################\n") + if 'OPV2V' in metrics: + result_dict = res_dict['opv2v_result'] + out_dict = eval_final_results( + result_dict, + iou_thr, + global_sort_detections=True + ) + fmt_str += "OPV2V BEV Global sorted:\n" + fmt_str += format_final_result(out_dict, iou_thr) + fmt_str += "----------------\n" + + out_dict = eval_final_results( + result_dict, + iou_thr, + global_sort_detections=False + ) + fmt_str += "OPV2V BEV Local sorted:\n" + fmt_str += format_final_result(out_dict, iou_thr) + fmt_str += "----------------\n" + if 'CoSense3D' in metrics: + out_dict = {} + result_dict = res_dict['cosense3d_result'] + for iou in iou_thr: + scores = torch.cat(result_dict[iou]['scr'], dim=0) + tps = torch.cat(result_dict[iou]['tp'], dim=0) + n_pred = len(scores) + n_gt = result_dict[iou]['gt'] + + ap, mpre, mrec, _ = cal_ap_all_point(scores, tps, n_pred, n_gt) + iou_str = f"{int(iou * 100)}" + out_dict.update({f'ap_{iou_str}': ap, + f'mpre_{iou_str}': mpre, + f'mrec_{iou_str}': mrec}) + fmt_str += "CoSense3D Global sorted:\n" + fmt_str += format_final_result(out_dict, iou_thr) + fmt_str += "----------------\n" + print(fmt_str) + with open(os.path.join(os.path.dirname(result_path), "test.log"), 'a') as fh: + fh.writelines(fmt_str) + + +def tmp(ckpt_path, ckpt_path_template, out_path): + ckpt = torch.load(ckpt_path) + ckpt_template = torch.load(ckpt_path_template) + ckpt_template['model'] = ckpt['model'] + ckpt_template['optimizer']['param_groups'][0]['params'] = ckpt['optimizer']['param_groups'][0]['params'] + step = ckpt_template['optimizer']['state'][0]['step'] + ckpt_template['optimizer']['state'] = ckpt['optimizer']['state'] + for k, v in ckpt_template['optimizer']['state'].items(): + 
ckpt_template['optimizer']['state'][k]['step'] = step + + torch.save(ckpt_template, out_path) + + +def plot_model_efficiency(): + import matplotlib.ticker as ticker + data = { + 'fcooper_opv2vt': {'0.5': 54.4, '0.7': 17.0, 'mem': 16.132, 'time': 0.7904}, + 'fcooper_dairv2xt': {'0.5': 41.8, '0.7': 17.7, 'mem': 11.811, 'time': 0.5920}, + 'fpvrcnn_opv2vt': {'0.5': 70.8, '0.7': 41.2, 'mem': 17.678, 'time': 1.6071}, + 'fpvrcnn_dairv2xt': {'0.5': 51.8, '0.7': 23.9, 'mem': 11.971, 'time': 0.9908}, + 'attnfusion_opv2vt': {'0.5': 78.7, '0.7': 41.4, 'mem': 20.021, 'time': 0.8335}, + 'attnfusion_dairv2xt': {'0.5': 62.1, '0.7': 34.0, 'mem': 15.224, 'time': 0.5890}, + 'streamlts_opv2vt': {'0.5': 81.2, '0.7': 59.5, 'mem': 12.587, 'time': 1.3012}, + 'streamlts_dairv2xt': {'0.5': 61.2, '0.7': 33.4, 'mem': 10.420, 'time': 0.8400}, + } + + models = ['Fcooper', 'FPVRCNN', 'AttnFusion', 'StreamLTS'] + markers = ['^', '*', 'o', 's'] + fig, axs = plt.subplots(1, 4, figsize=(10, 3)) + # opv2vt memory + for i in range(4): + axs[0].plot(data[f'{models[i].lower()}_opv2vt']['mem'], data[f'{models[i].lower()}_opv2vt']['0.5'], + color='green', marker=markers[i], markersize=12, label=f'{models[i]}@IoU0.5', linestyle='') + axs[0].plot(data[f'{models[i].lower()}_opv2vt']['mem'], data[f'{models[i].lower()}_opv2vt']['0.7'], + color='orange', marker=markers[i], markersize=12, label=f'{models[i]}@IoU0.7', linestyle='') + axs[0].set_title('OPV2Vt: AP vs Memory', fontsize=11) + axs[0].set_xlabel('Memory usage peak (GB)') + axs[0].set_ylabel('AP (%)') + axs[0].set_xlim(11, 21) + axs[0].set_ylim(10, 90) + axs[0].xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0f')) + + # dairv2xt memory + for i in range(4): + axs[1].plot(data[f'{models[i].lower()}_dairv2xt']['mem'], data[f'{models[i].lower()}_dairv2xt']['0.5'], + color='green', marker=markers[i], markersize=12, linestyle='') + axs[1].plot(data[f'{models[i].lower()}_dairv2xt']['mem'], data[f'{models[i].lower()}_dairv2xt']['0.7'], + color='orange', marker=markers[i], markersize=12, linestyle='') + axs[1].set_title('DairV2Xt: AP vs Memory', fontsize=11) + axs[1].set_xlabel('Memory usage peak (GB)') + axs[1].set_xlim(9.5, 16) + axs[1].xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0f')) + axs[1].set_ylim(10, 70) + + # opv2vt time + for i in range(4): + axs[2].plot(data[f'{models[i].lower()}_opv2vt']['time'], data[f'{models[i].lower()}_opv2vt']['0.5'], + color='green', marker=markers[i], markersize=12, linestyle='') + axs[2].plot(data[f'{models[i].lower()}_opv2vt']['time'], data[f'{models[i].lower()}_opv2vt']['0.7'], + color='orange', marker=markers[i], markersize=12, linestyle='') + axs[2].set_xlim(0.7, 1.7) + axs[2].set_ylim(10, 90) + axs[2].xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f')) + axs[2].set_title('OPV2Vt: AP vs Time', fontsize=11) + axs[2].set_xlabel('Epochal training time (Hour)') + + # dairv2xt time + for i in range(4): + axs[3].plot(data[f'{models[i].lower()}_dairv2xt']['time'], data[f'{models[i].lower()}_dairv2xt']['0.5'], + color='green', marker=markers[i], markersize=12, linestyle='') + axs[3].plot(data[f'{models[i].lower()}_dairv2xt']['time'], data[f'{models[i].lower()}_dairv2xt']['0.7'], + color='orange', marker=markers[i], markersize=12, linestyle='') + axs[3].set_xlim(0.5, 1.1) + axs[3].set_ylim(10, 70) + axs[3].set_title('DairV2Xt: AP vs Time', fontsize=11) + axs[3].set_xlabel('Epochal training time (Hour)') + + handles, labels = axs[0].get_legend_handles_labels() + fig.legend(handles, labels, loc='lower center', 
bbox_to_anchor=(0.5, 0.0), ncol=4) + plt.subplots_adjust(bottom=0.35) + # plt.tight_layout() + plt.savefig("/home/yys/Pictures/streamLTS/LTS_mem_time_compare.pdf") + plt.close() + + +if __name__=="__main__": + # compare_detection( + # "/media/yuan/luna/streamLTS/LTS_opv2v/epoch3/detection_eval", + # "/media/yuan/luna/streamLTS/LTS_opv2v_no_reg/epoch50_v1/detection_eval", + # "/media/yuan/luna/images/opv2v", + # [-20, -38.4, -3.0, 80, 38.4, 1.0] + # ) + plot_model_efficiency() \ No newline at end of file diff --git a/cosense3d/tools/server-train.sh b/cosense3d/tools/server-train.sh new file mode 100644 index 00000000..f31b299c --- /dev/null +++ b/cosense3d/tools/server-train.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +export OMP_NUM_THREADS=16 +export PYTORCH_CUDA_ALLOC_CONF=MAX_SPLIT_SIZE_MB=256 + +torchrun \ +--nproc_per_node=4 \ +cosense3d/tools/agent_runner.py \ +--config ./cosense3d/config/streamLTS_fcooper_dairv2x.yaml \ +--mode train \ +--gpus 4 \ +--log-dir /koko/yunshuang/train_out \ +--run-name StreamLTS_fcooper_dairv2x \ +--batch-size 1 \ +--n-workers 8 \ +#--resume-from /koko/yunshuang/train_out/StreamLTS_fcooper_dairv2x/epoch30.pth + diff --git a/cosense3d/tools/simulation_runner.py b/cosense3d/tools/simulation_runner.py new file mode 100644 index 00000000..a3902469 --- /dev/null +++ b/cosense3d/tools/simulation_runner.py @@ -0,0 +1,73 @@ +import argparse, sys +import os + +from cosense3d.config import load_yaml +from cosense3d.utils.misc import save_json +from cosense3d.utils.train_utils import seed_everything +from cosense3d.carla.map_manager import CarlaMapManager +from cosense3d.carla.scene_manager import get_scene_manager + +sys.path.append("/opt/carla-simulator/PythonAPI/carla/dist/carla-0.9.13-py3.7-linux-x86_64.egg") +import carla + + +class SimulationRunner: + def __init__(self, args, cfgs): + self.mode = args.mode + self.cfgs = cfgs + self.start_simulator() + + def start_simulator(self): + # setup the carla client + self.client = carla.Client('localhost', self.cfgs.get('client_port', 2000)) + self.client.set_timeout(10.0) + self.world = self.client.get_world() + if not self.world: + sys.exit('World loading failed') + + # setup the new setting + self.origin_settings = self.world.get_settings() + new_settings = self.world.get_settings() + new_settings.synchronous_mode = self.cfgs.get('sync_mode', True) + new_settings.fixed_delta_seconds = self.cfgs.get('fixed_delta_seconds', 0.1) + self.world.apply_settings(new_settings) + + # get managers + self.spectator = self.world.get_spectator() + + def run(self): + if self.mode == 'map': + maps = os.listdir('../carla/assets/maps/png') + bound_dict = {} + for m in maps: + town = m.split('.')[0] + self.client.load_world(town) + map_manager = CarlaMapManager(self.world, self.cfgs['map']) + map_manager.generate_map_mata() + bound_dict[town] = map_manager.global_bounds + print(town, map_manager.global_bounds) + save_json(bound_dict, '../carla/assets/map_bounds.json') + elif self.mode == 'open_drive_map': + maps = os.listdir('../carla/assets/maps/png') + for m in maps: + town = m.split('.')[0] + self.client.load_world(town) + open_drive_map = self.world.get_map().to_opendrive() + with open(f'../carla/assets/maps/xodr/{town}.xodr', 'w') as fh: + fh.write(open_drive_map) + else: + raise NotImplementedError + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, default="../config/carla.yaml") + parser.add_argument("--mode", type=str, default="open_drive_map", + help="data | sim | map_meta 
| open_drive_map") + parser.add_argument("--visualize", action="store_true") + args = parser.parse_args() + + seed_everything(2024) + cfgs = load_yaml(args.config) + sim_runner = SimulationRunner(args, cfgs) + sim_runner.run() \ No newline at end of file diff --git a/cosense3d/utils/__init__.py b/cosense3d/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cosense3d/utils/box_utils.py b/cosense3d/utils/box_utils.py new file mode 100644 index 00000000..c85d85fb --- /dev/null +++ b/cosense3d/utils/box_utils.py @@ -0,0 +1,407 @@ +import numpy as np +import torch +from typing import Union + +from shapely.geometry import Polygon + +from cosense3d.utils.misc import check_numpy_to_torch +from cosense3d.ops.utils import points_in_boxes_cpu +from cosense3d.utils.pclib import rotate_points_batch, rotation_mat2euler_torch + + + +def limit_period(val, offset=0.5, period=2 * np.pi): + return val - np.floor(val / period + offset) * period + + +def decode_boxes(reg, points, lwh_mean): + assert len(reg)==len(points) + if not isinstance(lwh_mean, torch.Tensor): + lwh_mean = torch.Tensor(lwh_mean).view(1, 3) + points = points.to(reg.device) + lwh_mean = lwh_mean.to(reg.device) + + diagonal = torch.norm(lwh_mean[0, :2]) + # encode with diagonal length + xy = reg[:, :2] * diagonal + points[:, :2] + z = reg[:, 2:3] * lwh_mean[0, 2] + points[:, 2:3] + lwh = reg[:, 3:6].exp() * lwh_mean + r = torch.atan2(reg[:, 6:7], reg[:, 7:]) + + return torch.cat([xy, z, lwh, r], dim=-1) + + +def boxes_to_corners_2d(boxes_np): + """ + Convert boxes to 4 corners in xy plane + :param boxes_np: np.ndarray [N, 7], cols - (x,y,z,dx,dy,dz,det_r) + :return: corners: np.ndarray [N, 4, 2], corner order is + back left, front left, front back, back left + """ + x = boxes_np[:, 0] + y = boxes_np[:, 1] + dx = boxes_np[:, 2] + dy = boxes_np[:, 3] + + x1 = - dx / 2 + y1 = - dy / 2 + x2 = + dx / 2 + y2 = + dy / 2 + theta = boxes_np[:, 6:7] + # bl, fl, fr, br + corners = np.array([[x1, y2],[x2,y2], [x2,y1], [x1, y1]]).transpose(2, 0, 1) + new_x = corners[:, :, 0] * np.cos(theta) + \ + corners[:, :, 1] * -np.sin(theta) + x[:, None] + new_y = corners[:, :, 0] * np.sin(theta) + \ + corners[:, :, 1] * (np.cos(theta)) + y[:, None] + corners = np.stack([new_x, new_y], axis=2) + + return corners + + +def boxes_to_corners_3d(boxes3d: Union[np.ndarray, torch.Tensor], + order: str='lwh' + ) -> Union[np.ndarray, torch.Tensor]: + r""" + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + + :param boxes3d: (N, 7 + (2: optional)) [x, y, z, dx, dy, dz, yaw] + or [x, y, z, dx, dy, dz, roll, pitch, yaw], (x, y, z) is the box center. + :param order: 'lwh' or 'hwl'. + :return: (N, 8, 3), the 8 corners of the bounding box. + """ + assert isinstance(boxes3d, np.ndarray) \ + or isinstance(boxes3d, torch.Tensor),\ + "input boxes should be numpy array or torch tensor." 
+ boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + + if order == 'hwl': + boxes3d[:, 3:6] = boxes3d[:, [5, 4, 3]] + elif order == 'lwh': + pass + + template = boxes3d.new_tensor(( + [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1], + [1, -1, 1], [1, 1, 1], [-1, 1, 1], [-1, -1, 1], + )) / 2 + + corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :] + if boxes3d[:, 6:].shape[1] == 1: + rot_order = 'z' + elif boxes3d[:, 6:].shape[1] == 3: + rot_order = 'xyz' + else: + raise IOError("box input shape should be (N, 7) for (N, 9).") + + corners3d = rotate_points_batch(corners3d.view(-1, 8, 3), + boxes3d[:, 6:], order=rot_order).view(-1, 8, 3) + corners3d += boxes3d[:, None, 0:3] + + return corners3d.numpy() if is_numpy else corners3d + + +def corners_to_boxes_3d(corners: Union[np.ndarray, torch.Tensor], + mode: int=9 + ) -> Union[np.ndarray, torch.Tensor]: + r""" + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + + :param corners: (N, 8, 3) + :param mode: 9 | 7 + :return: boxes, (N, 9 | 7) + """ + corners, is_numpy = check_numpy_to_torch(corners) + xyz = corners.mean(axis=1) + corners_reduced = corners - xyz.reshape(-1, 1, 3) + diff_x = corners[:, [0, 1, 5, 4], :] - corners[:, [3, 2, 6, 7], :] + diff_y = corners[:, [1, 5, 6, 2], :] - corners[:, [0, 4, 7, 3], :] + diff_z = corners[:, [4, 5, 6, 7], :] - corners[:, [0, 1, 2, 3], :] + l = torch.norm(diff_x, dim=2).mean(dim=1).reshape(-1, 1) + w = torch.norm(diff_y, dim=2).mean(dim=1).reshape(-1, 1) + h = torch.norm(diff_z, dim=2).mean(dim=1).reshape(-1, 1) + + template = corners.new_tensor(( + [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1], + [1, -1, 1], [1, 1, 1], [-1, 1, 1], [-1, -1, 1], + )).reshape(1, 8, 3) * torch.cat([l, w, h], dim=1)[:, None, :] / 2 + R, _ = find_rigid_alignment(template, corners_reduced) + euler = rotation_mat2euler_torch(R) + # yaw = torch.arctan2(dir_x[:, 1], dir_x[:, 0]).reshape(-1, 1) + if mode == 9: + boxes = torch.cat([xyz, l, w, h, euler], dim=1) + elif mode == 7: + boxes = torch.cat([xyz, l, w, h, euler[:, -1:]], dim=1) + else: + raise NotImplementedError + return boxes.numpy() if is_numpy else boxes + + +def boxes3d_to_standup_bboxes(boxes): + """ + :param boxes: Tensor(N, 7) + :return: Tenosr(N, 4): [x_min, y_min, x_max, y_max) + """ + corners = boxes_to_corners_3d(boxes) + standup_boxes = torch.zeros_like(boxes[:, :4]) + standup_boxes[:, :2] = corners[..., :2].min(dim=1)[0] + standup_boxes[:, 2:] = corners[..., :2].max(dim=1)[0] + return standup_boxes + + +def find_rigid_alignment(A, B): + """Find rotation and translation from A to B. 
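+    Uses the SVD-based (Kabsch) solution for the rotation and translation between two corresponding point sets.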
+ Parameters + + :param A: (B, N, 3) + :param B: (B, N, 3) + :return: + """ + A_mean = A.mean(dim=1, keepdim=True) + B_mean = B.mean(dim=1, keepdim=True) + A_c = A - A_mean + B_c = B - B_mean + # Covariance matrix + H = torch.bmm(A_c.permute(0, 2, 1), B_c) # (B, 3, N) @ (B, N, 3) = (B, 3, 3) + U, S, V = torch.svd(H) + # Rotation matrix + R = torch.bmm(V, U.permute(0, 2, 1)) + # Translation vector + t = B_mean[:, None, :] - torch.bmm(R, A_mean.permute(0, 2, 1)).permute(0, 2, 1) + return R, t + + +def mask_boxes_outside_range_numpy(boxes: np.ndarray, + limit_range: list, + order: str, + min_num_corners: int=2) -> np.ndarray: + """ + + :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + :param limit_range: [minx, miny, minz, maxx, maxy, maxz] + :param order: 'lwh' or 'hwl' + :param min_num_corners: The required minimum number of corners to be considered as in range. + :return: The filtered boxes. + """ + assert boxes.shape[1] == 8 or boxes.shape[1] == 7 + + new_boxes = boxes.copy() + if boxes.shape[1] == 7: + new_boxes = boxes_to_corners_3d(new_boxes, order) + + mask = ((new_boxes >= limit_range[0:3]) & + (new_boxes <= limit_range[3:6])).all(axis=2) + mask = mask.sum(axis=1) >= min_num_corners # (N) + + return boxes[mask], mask + + +def mask_boxes_outside_range_torch(boxes, lidar_range): + in_range = (boxes[:, 0] > lidar_range[0]) & \ + (boxes[:, 0] < lidar_range[3]) & \ + (boxes[:, 1] > lidar_range[1]) & \ + (boxes[:, 1] < lidar_range[4]) + return in_range + + +def remove_points_in_boxes3d(points, boxes3d, x_idx=0): + """ + :param points: (num_points, x_idx + 3 + C) + :param boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps + + :return: + """ + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + points, is_numpy = check_numpy_to_torch(points) + point_masks = points_in_boxes_cpu(points[:, x_idx:x_idx+3], boxes3d) + points = points[point_masks.sum(dim=0) == 0] + + return points.numpy() if is_numpy else points + + +def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)): + """ + :param boxes3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + :param extra_width: [extra_x, extra_y, extra_z] + + Returns: + + """ + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + large_boxes3d = boxes3d.clone() + + large_boxes3d[:, 3:6] += boxes3d.new_tensor(extra_width)[None, :] + return large_boxes3d + + +def convert_box_to_polygon(boxes_array): + """ + Convert boxes array to shapely.geometry.Polygon format. + + :param boxes_array : np.ndarray + (N, 4, 2) or (N, 8, 3). + + :return: + list of converted shapely.geometry.Polygon object. + + """ + polygons = [Polygon([(box[i, 0], box[i, 1]) for i in range(4)]) for box in + boxes_array] + return np.array(polygons) + + +def compute_iou(box, boxes): + """ + Compute iou between box and boxes list + + :param box: shapely.geometry.Polygon + Bounding box Polygon. + + :param boxes: list + List of shapely.geometry.Polygon. + + :return: iou : np.ndarray + Array of iou between box and boxes. + + """ + # Calculate intersection areas + iou = [box.intersection(b).area / box.union(b).area for b in boxes] + + return np.array(iou, dtype=np.float32) + + +def bbox_cxcywh_to_xyxy(bbox): + """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). + + :param bbox (Tensor): Shape (n, 4) for bboxes. + :return: Tensor: Converted bboxes. 
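+    Example: an input row (5., 5., 10., 10.) maps to (0., 0., 10., 10.).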
+ """ + cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) + bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] + return torch.cat(bbox_new, dim=-1) + + +def bbox_xyxy_to_cxcywh(bbox): + """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). + + :param bbox (Tensor): Shape (n, 4) for bboxes. + + :return: Tensor, Converted bboxes. + """ + x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) + bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] + return torch.cat(bbox_new, dim=-1) + + +def transform_boxes_3d(boxes_in, transform, mode=7): + """ + :param boxes_in: (N, 7) + :param transform: (4, 4) + :param mode: 7 | 9 + """ + is_numpy = isinstance(boxes_in, np.ndarray) + assert mode == 11 or mode == 9 or mode == 7 + assert boxes_in.shape[-1] == 11 or boxes_in.shape[-1] == 9 or boxes_in.shape[-1] == 7 + if boxes_in.shape[-1] == 11: + boxes = boxes_in[:, [2, 3, 4, 5, 6, 7, 10]] + elif boxes_in.shape[-1] == 9: + boxes = boxes_in[:, [0, 1, 2, 3, 4, 5, 8]] + else: + boxes = boxes_in + boxes_corner = boxes_to_corners_3d(boxes[:, :7]) # (N, 8, 3) + boxes_corner = boxes_corner.reshape(-1, 3).T # (N*8, 3) + if is_numpy: + boxes_corner = np.concatenate([boxes_corner, np.ones_like(boxes_corner[:1])], axis=0) + else: + boxes_corner = torch.cat([boxes_corner, torch.ones_like(boxes_corner[:1])], dim=0) + # rotate bbx to augmented coords + boxes_corner = (transform @ boxes_corner)[:3].T.reshape(len(boxes), 8, 3) + if mode == 11: + boxes_ = corners_to_boxes_3d(boxes_corner, mode=9) + if is_numpy: + boxes = np.concatenate([boxes_in[:, :2], boxes_], axis=-1) + else: + boxes = torch.cat([boxes_in[:, :2], boxes_], dim=-1) + else: + boxes = corners_to_boxes_3d(boxes_corner, mode=mode) + if is_numpy and isinstance(boxes, torch.Tensor): + boxes = boxes.cpu().numpy() + return boxes + + +def normalize_bbox(bboxes): + cx = bboxes[..., 0:1] + cy = bboxes[..., 1:2] + cz = bboxes[..., 2:3] + w = bboxes[..., 3:4].log() + l = bboxes[..., 4:5].log() + h = bboxes[..., 5:6].log() + + rot = bboxes[..., 6:7] + if bboxes.size(-1) > 7: + vx = bboxes[..., 7:8] + vy = bboxes[..., 8:9] + normalized_bboxes = torch.cat( + (cx, cy, cz, w, l, h, rot.sin(), rot.cos(), vx, vy), dim=-1 + ) + else: + normalized_bboxes = torch.cat( + (cx, cy, cz, w, l, h, rot.sin(), rot.cos()), dim=-1 + ) + return normalized_bboxes + + +def denormalize_bbox(normalized_bboxes): + # rotation + rot_sine = normalized_bboxes[..., 6:7] + + rot_cosine = normalized_bboxes[..., 7:8] + rot = torch.atan2(rot_sine, rot_cosine) + + # center in the bev + cx = normalized_bboxes[..., 0:1] + cy = normalized_bboxes[..., 1:2] + cz = normalized_bboxes[..., 2:3] + + # size + w = normalized_bboxes[..., 3:4] + l = normalized_bboxes[..., 4:5] + h = normalized_bboxes[..., 5:6] + + w = w.exp() + l = l.exp() + h = h.exp() + if normalized_bboxes.size(-1) > 8: + # velocity + vx = normalized_bboxes[:, 8:9] + vy = normalized_bboxes[:, 9:10] + denormalized_bboxes = torch.cat([cx, cy, cz, w, l, h, rot, vx, vy], dim=-1) + else: + denormalized_bboxes = torch.cat([cx, cy, cz, w, l, h, rot], dim=-1) + return denormalized_bboxes + + +if __name__=="__main__": + boxes = np.random.random((1, 9)) + boxes[:, 3] *= 4 + boxes[:, 4] *= 1.8 + boxes[:, 5] *= 1.6 + boxes[:, 8] *= 3.14 + + boxes_corner = boxes_to_corners_3d(boxes) + boxes_center = corners_to_boxes_3d(boxes_corner) + print(boxes) + print(boxes_center) + print('------------------------------') + print(boxes_center - boxes) diff --git a/cosense3d/utils/data_statistics.py 
b/cosense3d/utils/data_statistics.py new file mode 100644 index 00000000..295f18c2 --- /dev/null +++ b/cosense3d/utils/data_statistics.py @@ -0,0 +1,55 @@ +# Code source: https://notmatthancock.github.io/2017/03/23/simple-batch-stat-updates.html +import numpy as np + +class StatsRecorder: + def __init__(self, data=None): + """ + data: ndarray, shape (nobservations, ndimensions) + """ + if data is not None: + assert len(data.shape) >= 2 + self.mean = data.mean(axis=0) + self.std = data.std(axis=0) + self.nobservations = data.shape[0] + self.ndimensions = data.shape[1] + else: + self.nobservations = 0 + + def update(self, data): + """ + data: ndarray, shape (nobservations, ndimensions) + """ + if self.nobservations == 0: + self.__init__(data) + else: + assert len(data.shape) >= 2 + if data.shape[1] != self.ndimensions: + raise ValueError("Data dims don't match prev observations.") + + newmean = data.mean(axis=0) + newstd = data.std(axis=0) + + m = self.nobservations * 1.0 + n = data.shape[0] + + tmp = self.mean + + self.mean = m/(m+n)*tmp + n/(m+n)*newmean + self.std = m/(m+n)*self.std**2 + n/(m+n)*newstd**2 +\ + m*n/(m+n)**2 * (tmp - newmean)**2 + self.std = np.sqrt(self.std) + + self.nobservations += n + + +if __name__=="__main__": + from tqdm import tqdm + data = np.random.random((100, 10, 10, 3)) + stats = StatsRecorder() + for i, d in tqdm(enumerate(data)): + d = d.reshape(-1, 3) + stats.update(d) + if i > 200: + break + print(f"Input Means: [{', '.join(['{:.5f}'] * len(stats.mean))}]".format(*stats.mean)) + print(f"Input Stds: [{', '.join(['{:.5f}'] * len(stats.std))}]".format(*stats.std)) \ No newline at end of file diff --git a/cosense3d/utils/eval_detection_utils.py b/cosense3d/utils/eval_detection_utils.py new file mode 100644 index 00000000..603e003d --- /dev/null +++ b/cosense3d/utils/eval_detection_utils.py @@ -0,0 +1,227 @@ +import os + +import numpy as np +import torch + +from cosense3d.utils.misc import torch_tensor_to_numpy +from cosense3d.utils.box_utils import convert_box_to_polygon, compute_iou, boxes_to_corners_3d +from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu, boxes_iou_bev, boxes_iou3d_cpu, boxes_bev_iou_cpu + + +def voc_ap(rec, prec): + """ + VOC 2010 Average Precision. + """ + rec.insert(0, 0.0) + rec.append(1.0) + mrec = rec[:] + + prec.insert(0, 0.0) + prec.append(0.0) + mpre = prec[:] + + for i in range(len(mpre) - 2, -1, -1): + mpre[i] = max(mpre[i], mpre[i + 1]) + + i_list = [] + for i in range(1, len(mrec)): + if mrec[i] != mrec[i - 1]: + i_list.append(i) + + ap = 0.0 + for i in i_list: + ap += ((mrec[i] - mrec[i - 1]) * mpre[i]) + return ap, mrec, mpre + + +def caluclate_tp_fp(det_boxes, det_score, gt_boxes, result_stat, iou_thresh, + det_range=None): + """ + Calculate the true positive and false positive numbers of the current + frames. + + Parameters + ---------- + det_boxes : torch.Tensor + The detection bounding box, shape (N, 8, 3) or (N, 4, 2) or (N, 7). + det_score :torch.Tensor + The confidence score for each preditect bounding box. + gt_boxes : torch.Tensor + The groundtruth bounding box. + result_stat: dict + A dictionary contains fp, tp and gt number. + iou_thresh : float + The iou thresh. 
+ range : list, [left_range, right_range] + The evaluation range left bound + """ + # fp, tp and gt in the current frame + fp = [] + tp = [] + gt = gt_boxes.shape[0] + if det_boxes is not None: + # convert bounding boxes to numpy array + det_boxes = torch_tensor_to_numpy(det_boxes) + det_score = torch_tensor_to_numpy(det_score) + gt_boxes = torch_tensor_to_numpy(gt_boxes) + # convert center format to corners + if det_boxes.ndim==2 and det_boxes.shape[1] == 7: + det_boxes = boxes_to_corners_3d(det_boxes) + if gt_boxes.ndim==2 and gt_boxes.shape[1] == 7: + gt_boxes = boxes_to_corners_3d(gt_boxes) + + # remove the bbx out of range + if det_range is not None: + pass + + # sort the prediction bounding box by score + score_order_descend = np.argsort(-det_score) + det_score = det_score[score_order_descend] # from high to low + det_polygon_list = list(convert_box_to_polygon(det_boxes)) + gt_polygon_list = list(convert_box_to_polygon(gt_boxes)) + + # match prediction and gt bounding box + for i in range(score_order_descend.shape[0]): + det_polygon = det_polygon_list[score_order_descend[i]] + ious = compute_iou(det_polygon, gt_polygon_list) + + if len(gt_polygon_list) == 0 or np.max(ious) < iou_thresh: + fp.append(1) + tp.append(0) + continue + + fp.append(0) + tp.append(1) + + gt_index = np.argmax(ious) + gt_polygon_list.pop(gt_index) + result_stat[iou_thresh]['scr'] += det_score.tolist() + else: + gt = gt_boxes.shape[0] + result_stat[iou_thresh]['fp'] += fp + result_stat[iou_thresh]['tp'] += tp + result_stat[iou_thresh]['gt'] += gt + + +def calculate_ap(result_stat, iou, global_sort_detections): + """ + Calculate the average precision and recall, and save them into a txt. + + Parameters + ---------- + result_stat : dict + A dictionary contains fp, tp and gt number. + + iou : float + + global_sort_detections : bool + Whether to sort the detection results globally. 
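+        If True, detections from all frames are ranked by one global score ordering before accumulating TP/FP; otherwise the accumulated per-frame order is kept.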
+ """ + iou_5 = result_stat[iou] + + if global_sort_detections: + fp = np.array(iou_5['fp']) + tp = np.array(iou_5['tp']) + score = np.array(iou_5['scr']) + + assert len(fp) == len(tp) and len(tp) == len(score) + sorted_index = np.argsort(-score) + fp = fp[sorted_index].tolist() + tp = tp[sorted_index].tolist() + + else: + fp = iou_5['fp'] + tp = iou_5['tp'] + assert len(fp) == len(tp) + + gt_total = iou_5['gt'] + + cumsum = 0 + for idx, val in enumerate(fp): + fp[idx] += cumsum + cumsum += val + + cumsum = 0 + for idx, val in enumerate(tp): + tp[idx] += cumsum + cumsum += val + + rec = tp[:] + for idx, val in enumerate(tp): + rec[idx] = float(tp[idx]) / gt_total + + prec = tp[:] + for idx, val in enumerate(tp): + prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx]) + + ap, mrec, mprec = voc_ap(rec[:], prec[:]) + + return ap, mrec, mprec + + +def eval_final_results(result_stat, iou_thrs, global_sort_detections=False): + dump_dict = {} + for iou in iou_thrs: + ap, mrec, mpre = calculate_ap(result_stat, iou, global_sort_detections) + iou_str = f"{int(iou * 100)}" + dump_dict.update({f'ap_{iou_str}': ap, + f'mpre_{iou_str}': mpre, + f'mrec_{iou_str}': mrec, + }) + return dump_dict + + +def ops_cal_tp(pred_boxes, gt_boxes, iou_mode='3d', IoU_thr=0.7): + if len(pred_boxes) == 0: + return torch.zeros(pred_boxes.shape[0], device=pred_boxes.device) + elif len(gt_boxes) == 0: + return torch.zeros(len(pred_boxes), device=pred_boxes.device).bool() + else: + if pred_boxes.is_cuda: + iou_func = boxes_iou3d_gpu if iou_mode == '3d' else boxes_iou_bev + else: + iou_func = boxes_iou3d_cpu if iou_mode == '3d' else boxes_bev_iou_cpu + ious = iou_func(pred_boxes, gt_boxes) + max_iou_pred_to_gts = ious.max(dim=1) + max_iou_gt_to_preds = ious.max(dim=0) + tp = max_iou_pred_to_gts[0] > IoU_thr + is_best_match = max_iou_gt_to_preds[1][max_iou_pred_to_gts[1]] \ + == torch.tensor([i for i in range(len(tp))], device=tp.device) + tp[torch.logical_not(is_best_match)] = False + return tp + + +def cal_precision_recall(scores, tps, n_pred, n_gt): + order_inds = scores.argsort(descending=True) + tp_all = tps[order_inds] + list_accTP = tp_all.cumsum(dim=0) + precision = list_accTP.float() / torch.arange(1, n_pred + 1) + recall = list_accTP.float() / n_gt + return precision, recall + + +def cal_ap_all_point(scores, tps, n_pred, n_gt): + ''' + source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L292 + ''' + + prec, rec = cal_precision_recall(scores, tps, n_pred, n_gt) + mrec = [] + mrec.append(0) + [mrec.append(e.item()) for e in rec] + mrec.append(1) + mpre = [] + mpre.append(0) + [mpre.append(e.item()) for e in prec] + mpre.append(0) + for i in range(len(mpre) - 1, 0, -1): + mpre[i - 1] = max(mpre[i - 1], mpre[i]) + ii = [] + for i in range(len(mrec) - 1): + if mrec[1:][i] != mrec[0:-1][i]: + ii.append(i + 1) + ap = 0 + for i in ii: + ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i]) + # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii] + return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii] \ No newline at end of file diff --git a/cosense3d/utils/iou2d_calculator.py b/cosense3d/utils/iou2d_calculator.py new file mode 100644 index 00000000..cf9618fd --- /dev/null +++ b/cosense3d/utils/iou2d_calculator.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + + +def cast_tensor_type(x, scale=1., dtype=None): + if dtype == 'fp16': + # scale is for preventing overflows + x = (x / scale).half() + return x + + +def fp16_clamp(x, min=None, max=None): + if not x.is_cuda and x.dtype == torch.float16: + # clamp for cpu float16, tensor fp16 has no clamp implementation + return x.float().clamp(min, max).half() + + return x.clamp(min, max) + + +def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6): + r"""Calculate overlap between two set of bboxes. + + FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 + Note: + Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', + there are some new generated variable when calculating IOU + using bbox_overlaps function: + + 1) is_aligned is False + area1: M x 1 + area2: N x 1 + lt: M x N x 2 + rb: M x N x 2 + wh: M x N x 2 + overlap: M x N x 1 + union: M x N x 1 + ious: M x N x 1 + + Total memory: + S = (9 x N x M + N + M) * 4 Byte, + + When using FP16, we can reduce: + R = (9 x N x M + N + M) * 4 / 2 Byte + R large than (N + M) * 4 * 2 is always true when N and M >= 1. + Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, + N + 1 < 3 * N, when N or M is 1. + + Given M = 40 (ground truth), N = 400000 (three anchor boxes + in per grid, FPN, R-CNNs), + R = 275 MB (one times) + + A special case (dense detection), M = 512 (ground truth), + R = 3516 MB = 3.43 GB + + When the batch size is B, reduce: + B x R + + Therefore, CUDA memory runs out frequently. + + Experiments on GeForce RTX 2080Ti (11019 MiB): + + | dtype | M | N | Use | Real | Ideal | + +----+----+----+----+----+----+ + | FP32 | 512 | 400000 | 8020 MiB | -- | -- | + | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | + | FP32 | 40 | 400000 | 1540 MiB | -- | -- | + | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | + + 2) is_aligned is True + area1: N x 1 + area2: N x 1 + lt: N x 2 + rb: N x 2 + wh: N x 2 + overlap: N x 1 + union: N x 1 + ious: N x 1 + + Total memory: + S = 11 x N * 4 Byte + + When using FP16, we can reduce: + R = 11 x N * 4 / 2 Byte + + So do the 'giou' (large than 'iou'). + + Time-wise, FP16 is generally faster than FP32. + + When gpu_assign_thr is not -1, it takes more time on cpu + but not reduce memory. + There, we can reduce half the memory and keep the speed. + + If ``is_aligned`` is ``False``, then calculate the overlaps between each + bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned + pair of bboxes1 and bboxes2. + + + :param bboxes1: (Tensor) shape (B, m, 4) in format or empty. + :param bboxes2: (Tensor) shape (B, n, 4) in format or empty. + B indicates the batch dim, in shape (B1, B2, ..., Bn). + If ``is_aligned`` is ``True``, then m and n must be equal. + :param mode: (str) "iou" (intersection over union), "iof" (intersection over + foreground) or "giou" (generalized intersection over union). + Default "iou". + :param is_aligned: (bool, optional) If True, then m and n must be equal. + Default False. + :param eps: (float, optional) A value added to the denominator for numerical + stability. Default 1e-6. 
+ + :return: + Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) + + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 10, 10], + >>> [10, 10, 20, 20], + >>> [32, 32, 38, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 10, 20], + >>> [0, 10, 10, 19], + >>> [10, 10, 20, 20], + >>> ]) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + + Example: + >>> empty = torch.empty(0, 4) + >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' + # Either the boxes are empty or the length of boxes' last dimension is 4 + assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) + + # Batch dim must be the same + # Batch dim: (B1, B2, ... Bn) + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( + bboxes1[..., 3] - bboxes1[..., 1]) + area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( + bboxes2[..., 3] - bboxes2[..., 1]) + + if is_aligned: + lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] + rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) + enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) + else: + lt = torch.max(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) # [B, rows, cols, 2] + rb = torch.min(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + else: + union = area1[..., None] + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) + enclosed_rb = torch.max(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) + + eps = union.new_tensor([eps]) + union = torch.max(union, eps) + ious = overlap / union + if mode in ['iou', 'iof']: + return ious + # calculate gious + enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) + enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] + enclose_area = torch.max(enclose_area, eps) + gious = ious - (enclose_area - union) / enclose_area + return gious \ No newline at end of file diff --git a/cosense3d/utils/logger.py b/cosense3d/utils/logger.py new file mode 100644 index 00000000..ae7aa95f --- /dev/null +++ b/cosense3d/utils/logger.py @@ -0,0 +1,156 @@ +import os, pathlib +from datetime import datetime +from collections import defaultdict, deque +import logging + +import torch +from functools import partial +from rich.logging import RichHandler + +from cosense3d.utils.misc import ensure_dir + + +def setup_logger(exp_name, debug): + 
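+    """Configure the root logger with a ``RichHandler``.
+
+    Each record is prefixed with the visible CUDA devices
+    (``CUDA_VISIBLE_DEVICES``, default ``"0"``) and the experiment name;
+    the level is DEBUG when ``debug`` is True, otherwise INFO.
+    """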
from imp import reload + + reload(logging) + # reload() reloads a previously imported module. This is useful if you have edited the module source file using an + # external editor and want to try out the new version without leaving the Python interpreter. + + CUDA_TAG = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + EXP_TAG = exp_name + + logger_config = dict( + level=logging.DEBUG if debug else logging.INFO, + format=f"{CUDA_TAG}:[{EXP_TAG}] %(message)s", + handlers=[RichHandler()], + datefmt="[%X]", + ) + logging.basicConfig(**logger_config) + + +class SmoothedValue(object): + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{avg:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.count = 0 + self.total = 0.0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class LogMeter(object): + def __init__(self, total_iter, logdir, delimiter="\t", log_every=20, wandb_project=None): + self.meters = defaultdict(partial(SmoothedValue, fmt="{avg:.4f}")) + file_name = datetime.now().strftime("%d_%m_%H_%M_%S") + ".log" + self.logdir = logdir + if not isinstance(logdir, pathlib.Path): + logdir = pathlib.Path(logdir) + self.log_fh = (logdir / file_name).open('a') + self.delimiter = delimiter + self.log_every = log_every + self.log_msg = self.delimiter.join([ + 'E:{epoch:2d}', + 'I:[{itr:4d}/' + str(total_iter) + ']', + 'lr:{lr:.6f}', + '{meters}' + ]) + if wandb_project is not None: + import wandb + wandb.init(project=wandb_project) + wandb.config.log_histo = True + wandb.config.step = 0 + wandb_project = wandb + self.wandb = wandb_project + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int, str)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.log_fh.close() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log(self, epoch, iteration, lr, **kwargs): + self.update(**kwargs) + if iteration % self.log_every == 0: + msg = self.log_msg.format( + epoch=epoch, + itr=iteration, + lr=lr, + meters=str(self) + ) + print(msg) + self.log_fh.write(msg + "\n") + if self.wandb is not None: + self.wandb.log({('avg/' + name): meter.avg for name, meter in self.meters.items()}) + self.wandb.log({('global_avg/' + name): meter.global_avg for name, meter in self.meters.items()}) + + +class TestLogger(object): + def __init__(self, logdir): + self.logdir = logdir + ensure_dir(self.logdir) + self.log_fh = 
(pathlib.Path(self.logdir) / "test.log").open('a') + + def log(self, msg): + self.log_fh.writelines(msg) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.log_fh.close() \ No newline at end of file diff --git a/cosense3d/utils/lr_scheduler.py b/cosense3d/utils/lr_scheduler.py new file mode 100644 index 00000000..d23baa63 --- /dev/null +++ b/cosense3d/utils/lr_scheduler.py @@ -0,0 +1,130 @@ +from torch.optim import lr_scheduler as torch_lr +from torch.optim import Optimizer + + +def build_lr_scheduler(optimizer, cfg, total_iter): + return LRUpdater(optimizer, total_iter, **cfg) + + +class TransformerAdaptiveScheduler(torch_lr._LRScheduler): + def __init__(self, + optimizer: Optimizer, + dim_embed: int, + warmup_steps: int, + itrs_per_epoch: int, + last_epoch: int = -1, + global_fade_ratio: float = 1, + verbose: bool = False) -> None: + self.dim_embed = dim_embed + self.warmup_steps = warmup_steps + self.num_param_groups = len(optimizer.param_groups) + self.global_fade_ratio = global_fade_ratio + super().__init__(optimizer, last_epoch, verbose) + if last_epoch > 0: + self._step_count = itrs_per_epoch * last_epoch + + def get_lr(self) -> float: + lr = self.calc_lr(self._step_count, self.dim_embed, self.warmup_steps) * self.global_fade_ratio + return [lr] * self.num_param_groups + + def calc_lr(self, step, dim_embed, warmup_steps): + return dim_embed ** (-0.5) * min(step ** (-0.5), step * warmup_steps ** (-1.5)) + + +class LRUpdater: + """ + Unified API for updating LR with different LR schedulers. + """ + def __init__(self, optimizer, total_iter, policy, **kwargs): + self.policy = policy + self.total_itr = total_iter + if policy == 'MultiStepLR': + # construct a learning rate scheduler + self.lr_scheduler = torch_lr.MultiStepLR(optimizer, **kwargs) + elif policy == 'CosineAnnealingWarm': + from timm.scheduler.cosine_lr import CosineLRScheduler + num_steps = kwargs['epochs'] * total_iter + warmup_lr = kwargs['warmup_lr'] + warmup_steps = kwargs['warmup_epochs'] * total_iter + lr_min = kwargs['lr_min'] + decay_rate = kwargs.get('decay_rate', 0.5) + + self.lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_steps, + lr_min=lr_min, + warmup_lr_init=warmup_lr, + warmup_t=warmup_steps, + cycle_limit=1, + t_in_epochs=False, + cycle_decay=decay_rate + ) + elif policy == 'TransformerAdaptiveScheduler': + kwargs['itrs_per_epoch'] = total_iter + self.lr_scheduler = TransformerAdaptiveScheduler(optimizer, **kwargs) + else: + raise NotImplementedError + + self.optimizer = self.lr_scheduler.optimizer + + def step_epoch(self, epoch): + if self.policy == 'TransformerAdaptiveScheduler': + pass + elif self.policy in ['CosineAnnealingWarm',]: + self.lr_scheduler.step(epoch) + else: + self.lr_scheduler.step() + + def step_itr(self, itr): + if self.policy == 'TransformerAdaptiveScheduler': + self.lr_scheduler.step() + + def state_dict(self): + return self.lr_scheduler.state_dict() + + def load_state_dict(self, state_dict): + self.lr_scheduler.load_state_dict(state_dict) + + def get_last_lr(self): + return self.lr_scheduler.get_last_lr() + + +if __name__=="__main__": + import torch + import matplotlib.pyplot as plt + params = torch.nn.Parameter(torch.rand(10, 10)) + optimizer = torch.optim.AdamW([params], + lr=0.0001, + weight_decay=1e-2, + betas=(0.9, 0.98), + eps=1.0e-9, + # init_lr=0.001, + ) + lr_scheduler = TransformerAdaptiveScheduler( + optimizer, + dim_embed=256, + warmup_steps=2000, + itrs_per_epoch=2000, + last_epoch=-1, + global_fade_ratio=0.5 + ) + + # 
torch.save(optimizer.state_dict(), 'optimizer_checkpoint.pth') + # optimizer.load_state_dict(torch.load('optimizer_checkpoint.pth')) + # lr_scheduler = TransformerAdaptiveScheduler( + # optimizer, + # dim_embed=256, + # warmup_steps=4000, + # itrs_per_epoch=2000, + # last_epoch=3, + # ) + + lrs = [] + for epoch in range(50 * 2000): + lrs.append(lr_scheduler.get_lr()[0]) + optimizer.step() + lr_scheduler.step() + + plt.plot(torch.arange(len(lrs)).numpy(), lrs) + plt.show() + plt.close() diff --git a/cosense3d/utils/metrics.py b/cosense3d/utils/metrics.py new file mode 100644 index 00000000..bca0327d --- /dev/null +++ b/cosense3d/utils/metrics.py @@ -0,0 +1,398 @@ +import os, logging +import torch +import torch.nn.functional as F +import matplotlib.pyplot as plt +import numpy as np +from sklearn.metrics import precision_recall_curve + +from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu, boxes_iou_bev +from cosense3d.ops.utils import points_in_boxes_gpu +from cosense3d.utils.misc import save_json, update_dict +from cosense3d.utils.box_utils import mask_boxes_outside_range_torch + + +class Metric: + def __init__(self, cfg, log_dir): + self.cfg = cfg + self.log_dir = log_dir + + def add_samples(self, data_dict): + raise NotImplementedError + + def save_detections(self, filename): + raise NotImplementedError + + def summary(self): + raise NotImplementedError + + +class MetricObjDet(Metric): + def __init__(self, cfg, log_dir, logger, bev=False): + super(MetricObjDet, self).__init__(cfg, log_dir) + self.eval_func = cfg['eval_func'] + self.lidar_range = cfg.get('lidar_range', None) + self.score_metric = cfg.get('score_metric', 'scr') + self.score_thr = cfg.get('score_thr', 0.0) + self.logger = logger + self.samples = [] + self.pred_boxes = {} + self.gt_boxes = {} + self.confidences = {} + self.v_ids = {} + self.bev = bev + self.iou_fn = boxes_iou_bev if self.bev else boxes_iou3d_gpu + self.file_test = os.path.join(log_dir, 'pred.json') + self.has_test_detections = False + self.result = {} + + def add_sample(self, name, pred_boxes, gt_boxes, confidences, ids=None): + self.samples.append(name) + valid = confidences > self.score_thr + if self.lidar_range is not None: + in_range_gt = mask_boxes_outside_range_torch(gt_boxes, self.lidar_range), + in_range_pred = mask_boxes_outside_range_torch(pred_boxes, self.lidar_range) + valid = torch.logical_and(valid, in_range_pred) + gt_boxes = gt_boxes[in_range_gt] + self.pred_boxes[name] = pred_boxes[valid] + self.gt_boxes[name] = gt_boxes + self.confidences[name] = confidences[valid] + if ids is not None: + self.v_ids[name] = ids + ss = name.split("/") + scenario = ss[0] + frame = ss[1] + pred_boxes_np = pred_boxes[valid].cpu().numpy() + bbx_out = np.zeros((len(pred_boxes_np), 11)) + bbx_out[:, [2, 3, 4, 5, 6, 7, 10]] = pred_boxes_np + bbx_out[:, 0] = -1 # box id not set + conf_out = confidences[valid].cpu().numpy() + if '.' 
in frame: + frame, agent_id = frame.split('.') + fdict = {'agents': { + agent_id: { + 'gt_boxes': bbx_out.tolist(), + 'box_confidences': conf_out.tolist() + } + }} + else: + fdict = {'meta': {'bbx_center_global': bbx_out.tolist()}} + update_dict( + self.result, + {scenario: {frame: fdict}} + ) + + @torch.no_grad() + def add_samples(self, out_dict): + data_dict = out_dict['detections'] + names = data_dict['name'] + for i in range(len(names)): + self.add_sample(names[i], + data_dict['pred_boxes'][i]['box'].float(), + data_dict['gt_boxes'][i].float(), + data_dict['pred_boxes'][i][self.score_metric]) + + def save_detections(self, filename): + dict_detections = { + 'samples': self.samples, + 'pred_boxes': self.pred_boxes, + 'gt_boxes': self.gt_boxes, + 'confidences': self.confidences, + 'ids': self.v_ids + } + torch.save(dict_detections, filename) + self.has_test_detections = True + + def cal_precision_recall(self, IoU_thr=0.5): + list_sample = [] + list_confidence = [] + list_tp = [] + N_gt = 0 + + for sample in self.samples: + if len(self.pred_boxes[sample])>0 and len(self.gt_boxes[sample])>0: + ious = self.iou_fn(self.pred_boxes[sample], self.gt_boxes[sample]) + n, m = ious.shape + list_sample.extend([sample] * n) + list_confidence.extend(self.confidences[sample]) + N_gt += len(self.gt_boxes[sample]) + max_iou_pred_to_gts = ious.max(dim=1) + max_iou_gt_to_preds = ious.max(dim=0) + tp = max_iou_pred_to_gts[0] > IoU_thr + is_best_match = max_iou_gt_to_preds[1][max_iou_pred_to_gts[1]] \ + ==torch.tensor([i for i in range(len(tp))], device=tp.device) + tp[torch.logical_not(is_best_match)] = False + list_tp.extend(tp) + elif len(self.pred_boxes[sample])==0: + N_gt += len(self.gt_boxes[sample]) + elif len(self.gt_boxes[sample])==0: + tp = torch.zeros(len(self.pred_boxes[sample]), device=self.pred_boxes[sample].device) + list_tp.extend(tp.bool()) + order_inds = torch.tensor(list_confidence).argsort(descending=True) + tp_all = torch.tensor(list_tp)[order_inds] + list_accTP = tp_all.cumsum(dim=0) + # list_accFP = torch.logical_not(tp_all).cumsum(dim=0) + list_precision = list_accTP.float() / torch.arange(1, len(list_sample) + 1) + list_recall = list_accTP.float() / N_gt + # plt.plot(list_recall.numpy(), list_precision.numpy(), 'k.') + # plt.savefig(str(model.run_path / 'auc_thr{}_ncoop{}.png' + # .format(model.cfg['score_threshold'], model.n_coop))) + # plt.close() + + return list_precision, list_recall + + def cal_ap_all_point(self, IoU_thr=0.5): + ''' + source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L292 + ''' + + prec, rec = self.cal_precision_recall(IoU_thr=IoU_thr) + mrec = [] + mrec.append(0) + [mrec.append(e.item()) for e in rec] + mrec.append(1) + mpre = [] + mpre.append(0) + [mpre.append(e.item()) for e in prec] + mpre.append(0) + for i in range(len(mpre) - 1, 0, -1): + mpre[i - 1] = max(mpre[i - 1], mpre[i]) + ii = [] + for i in range(len(mrec) - 1): + if mrec[1:][i] != mrec[0:-1][i]: + ii.append(i + 1) + ap = 0 + for i in ii: + ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i]) + # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii] + return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii] + + def cal_ap_11_point(self, IoU_thr=0.5): + ''' + source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L315 + ''' + # 11-point interpolated average precision + prec, rec = self.cal_precision_recall(IoU_thr=IoU_thr) + mrec = [] + # 
mrec.append(0) + [mrec.append(e.item()) for e in rec] + # mrec.append(1) + mpre = [] + # mpre.append(0) + [mpre.append(e.item()) for e in prec] + # mpre.append(0) + recallValues = np.linspace(0, 1, 11) + recallValues = list(recallValues[::-1]) + rhoInterp = [] + recallValid = [] + # For each recallValues (0, 0.1, 0.2, ... , 1) + for r in recallValues: + # Obtain all recall values higher or equal than det_r + argGreaterRecalls = np.argwhere(mrec[:] >= r) + pmax = 0 + # If there are recalls above det_r + if argGreaterRecalls.size != 0: + pmax = max(mpre[argGreaterRecalls.min():]) + recallValid.append(r) + rhoInterp.append(pmax) + # By definition AP = sum(max(precision whose recall is above det_r))/11 + ap = sum(rhoInterp) / 11 + # Generating values for the plot + rvals = [] + rvals.append(recallValid[0]) + [rvals.append(e) for e in recallValid] + rvals.append(0) + pvals = [] + pvals.append(0) + [pvals.append(e) for e in rhoInterp] + pvals.append(0) + # rhoInterp = rhoInterp[::-1] + cc = [] + for i in range(len(rvals)): + p = (rvals[i], pvals[i - 1]) + if p not in cc: + cc.append(p) + p = (rvals[i], pvals[i]) + if p not in cc: + cc.append(p) + recallValues = [i[0] for i in cc] + rhoInterp = [i[1] for i in cc] + return [ap, rhoInterp, recallValues, None] + + def summary(self): + thrs = [0.3, 0.5, 0.7] + ss = [] + for thr in thrs: + ap = getattr(self, self.eval_func)(thr)[0] + ss.append(f"AP@{thr}: {ap:.4f}") + ss = (f"Score metric: {self.score_metric}\n " + f"Score thr: {self.score_thr:.2f}\n" + f"--------------\n" + + "\n".join(ss) + "\n") + print(ss) + self.logger.write(ss) + + os.makedirs(os.path.join(self.log_dir, "jsons"), exist_ok=True) + for s, sdict in self.result.items(): + save_json(sdict, os.path.join(self.log_dir, "jsons", f'{s}.json')) + + +class MetricSemSeg(Metric): + def __init__(self, cfg, run_path, name='test'): + super(MetricSemSeg, self).__init__(cfg, run_path) + self.filename = os.path.join(run_path, name) + self.n_cls = cfg['n_cls'] + # model.result = { + # 'tp': [], + # 'tn': [], + # 'fp': [], + # 'fn': [], + # 'N': 0 + # } + self.result = { + 'area_intersect': torch.zeros(self.n_cls), + 'area_label': torch.zeros(self.n_cls), + 'area_pred': torch.zeros(self.n_cls), + 'area_union': torch.zeros(self.n_cls) + } + + def add_samples(self, data_dict): + preds = torch.argmax(data_dict['pred_cls'], dim=1).view(-1, 1) + tgts = data_dict['tgt_cls'].view(-1, 1) + # mask = (tgts != 0) + # preds = preds[mask] + # tgts = tgts[mask] + classes = torch.arange(self.n_cls, dtype=preds.dtype, device=preds.device).view(1, -1) + intersect = preds[preds == tgts] + area_intersect = (intersect.view(-1, 1) == (classes)).sum(0) + area_pred = (preds.view(-1, 1) == (classes)).sum(0) + area_label = (tgts.view(-1, 1) == (classes)).sum(0) + area_union = area_label + area_label - area_intersect + self.result['area_intersect'] = self.result['area_intersect'] + area_intersect.cpu() + self.result['area_label'] = self.result['area_label'] + area_label.cpu() + self.result['area_pred'] = self.result['area_pred'] + area_pred.cpu() + self.result['area_union'] = self.result['area_union'] + area_union.cpu() + # pred_pos = preds.int() == classes + # pred_neg = torch.logical_not(pred_pos) + # tgt_pos = tgts.int() == classes + # tgt_neg = torch.logical_not(tgt_pos) + # tp = torch.logical_and(pred_pos, tgt_pos).sum(0) + # tn = torch.logical_and(pred_neg, tgt_neg).sum(0) + # fp = torch.logical_and(pred_pos, tgt_neg).sum(0) + # fn = torch.logical_and(pred_neg, tgt_pos).sum(0) + # acc_ = tp.sum() / len(tgts) + # 
model.result['tp'].append(tp) + # model.result['tn'].append(tn) + # model.result['fp'].append(fp) + # model.result['fn'].append(fn) + # model.result['N'] += len(tgts) + + def cal_ious_and_accs(self): + area_intersect = self.result['area_intersect'].sum(0) + area_label = self.result['area_label'].sum(0) + area_union = self.result['area_union'].sum(0) + all_acc = area_intersect.sum() / area_label.sum() + acc = area_intersect / area_label + iou = area_intersect / area_union + + result = { + 'all_acc': all_acc, + 'acc': acc, + 'iou': iou + } + for k, v in result.items(): + print(k, v) + return result + + def save_detections(self, filename): + torch.save(self.result, filename) + + +class MetricBev(Metric): + def __init__(self, cfg, run_path, logger, name='test'): + super(MetricBev, self).__init__(cfg, run_path) + self.filename = os.path.join(run_path, name) + self.filename_prefix = '' + self.logger = logger + self.cfg = cfg + self.thrs = torch.arange(0.1, 1.1, 0.1) + self.iou_sum = 0 + self.iou_cnt = 0 + self.result = {} + + def add_samples(self, out_dict): + """ + Args: + out_dict: + bev: + conf: Tensor, (B, H, W, C) or (N, C) + unc: Tensor (optional), (B, H, W, C) or (N, C) + gt: Tensor, (B, H, W, C) or (N, C) + """ + self.iou(**out_dict['bev']) + + def iou(self, conf, gt, unc=None): + """ + Compare the thresholded pred BEV map with the full gt BEV map (including non + observable area) + """ + if unc is None: + pred = conf[..., 1] > 0.5 + mi = torch.logical_and(pred, gt).sum() + mu = torch.logical_or(pred, gt).sum() + self.iou_sum += mi / mu + self.iou_cnt += 1 + else: + pos_mask = conf[..., 1] > 0.5 + pos_mask = torch.logical_and(pos_mask, unc < 1.0) + mi = torch.logical_and(pos_mask, gt).sum() + mu = torch.logical_or(pos_mask, gt).sum() + + self.iou_sum += mi.item() / mu.item() + self.iou_cnt += 1 + + # import matplotlib.pyplot as plt + # plt.imshow(conf[0, ..., 1].cpu().numpy()) + # plt.show() + # plt.close() + # plt.imshow(gt[0].cpu().numpy()) + # plt.show() + # plt.close() + + def summary(self): + iou_mean = self.iou_sum / self.iou_cnt * 100 + + self.summary_hook() + + self.result = { + 'BEV.iou': iou_mean + } + ss = self.format_str(self.result) + print(ss) + self.logger.write(ss) + + def summary_hook(self): + pass + + def format_str(self, result_dict): + ss = "==================================================================================\n" + for k, vs in result_dict.items(): + s1 = f"{k:20s} : " + if isinstance(vs, float): + s2 = f"{vs:4.1f} \n" + else: + s2 = " ".join([f"{v:4.1f} " for v in vs]) + "\n" + ss += s1 + s2 + return ss + + + +class MetricMOT(Metric): + def __init__(self, cfg, log_dir): + super().__init__(cfg, log_dir) + + def add_samples(self, data_dict): + pass + + + + diff --git a/cosense3d/utils/misc.py b/cosense3d/utils/misc.py new file mode 100644 index 00000000..08853419 --- /dev/null +++ b/cosense3d/utils/misc.py @@ -0,0 +1,218 @@ +import os +import json +import logging +import re +from functools import partial + +import yaml +import torch +import numpy as np +from rich.logging import RichHandler + +PI = 3.14159265358979323846 + + +def multi_apply(func, *args, **kwargs): + """Apply function to a list of arguments. + + Note: + This function applies the ``func`` to multiple inputs and + map the multiple outputs of the ``func`` into different + list. Each list contains the same type of outputs corresponding + to different inputs. 
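+
+    Example (illustrative):
+        >>> def square_and_cube(x):
+        ...     return x ** 2, x ** 3
+        >>> multi_apply(square_and_cube, [1, 2, 3])
+        ([1, 4, 9], [1, 8, 27])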
+ + Args: + func (Function): A function that will be applied to a list of + arguments + + Returns: + tuple(list): A tuple containing multiple list, each list contains \ + a kind of returned results by the function + """ + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) + + +def setup_logger(exp_name, debug): + from imp import reload + + reload(logging) + # reload() reloads a previously imported module. This is useful if you have edited the module source file using an + # external editor and want to try out the new version without leaving the Python interpreter. + + CUDA_TAG = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + EXP_TAG = exp_name + + logger_config = dict( + level=logging.DEBUG if debug else logging.INFO, + format=f"{CUDA_TAG}:[{EXP_TAG}] %(message)s", + handlers=[RichHandler()], + datefmt="[%X]", + ) + logging.basicConfig(**logger_config) + + +def update_dict(dict_out, dict_add): + """ + Merge config_add into config_out. + Existing values in config_out will be overwritten by the config_add. + + Parameters + ---------- + dict_out: dict + dict_add: dict + + Returns + ------- + config_out: dict + Updated config_out + """ + for add_key, add_content in dict_add.items(): + if add_key not in dict_out or not isinstance(add_content, dict): + dict_out[add_key] = add_content + else: + update_dict(dict_out[add_key], add_content) + + return dict_out + + +def load_json(filename): + with open(filename, 'r') as fh: + data = json.load(fh) + return data + + +def save_json(data, filename): + with open(filename, 'w') as fh: + json.dump(data, fh, indent=3) + + +def load_yaml(filename, cloader=False): + """ + Load yaml file into dictionary. + + Parameters + ---------- + filename : str + Full path of yaml file. + + Returns + ------- + params : dict + A dictionary that contains defined parameters. + """ + with open(filename, 'r') as stream: + if cloader: + loader = yaml.CLoader + else: + loader = yaml.Loader + loader.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(u'''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\\.[0-9_]+(?:[eE][-+][0-9]+)? 
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + params = yaml.load(stream, Loader=loader) + return params + + +def save_yaml(data, filename, cdumper=False): + with open(filename, 'w') as fid: + if cdumper: + yaml.dump(data, fid, Dumper=yaml.CDumper, + default_flow_style=False) + else: + yaml.dump(data, fid, default_flow_style=False) + + +def ensure_dir(path): + if not os.path.exists(path): + os.makedirs(path, mode=0o777, exist_ok=True) + + +def list_dirs(path): + return sorted([x for x in os.listdir(path) if + os.path.isdir(os.path.join(path, x))]) + + +# @gin.configurable +# def logged_hparams(keys): +# C = dict() +# for k in keys: +# C[k] = gin.query_parameter(f"{k}") +# return C + + +def load_from_pl_state_dict(model, pl_state_dict): + state_dict = {} + for k, v in pl_state_dict.items(): + state_dict[k[6:]] = v + model.load_state_dict(state_dict) + return model + + +def pad_list_to_array_np(data): + """ + Pad list of numpy data to one single numpy array + :param data: list of np.ndarray + :return: np.ndarray + """ + B = len(data) + cnt = [len(d) for d in data] + max_cnt = max(cnt) + out = np.zeros(B, max_cnt, *data[0].shape[1:]) + for b in range(B): + out[b, :cnt[b]] = data[b] + return out + + +def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False + + +def multi_apply(func, *args, **kwargs): + """Apply function to a list of arguments. + + Note: + This function applies the ``func`` to multiple inputs and + map the multiple outputs of the ``func`` into different + list. Each list contains the same type of outputs corresponding + to different inputs. + + Args: + func (Function): A function that will be applied to a list of + arguments + + Returns: + tuple(list): A tuple containing multiple list, each list contains \ + a kind of returned results by the function + """ + pfunc = partial(func, **kwargs) if kwargs else func + map_results = list(map(pfunc, *args)) + if isinstance(map_results[0], tuple): + return tuple(map(list, zip(*map_results))) + else: + return map_results + + +def torch_tensor_to_numpy(torch_tensor): + """ + Convert a torch tensor to numpy. + + Parameters + ---------- + torch_tensor : torch.Tensor + + Returns + ------- + A numpy array. 
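+
+    Example
+    -------
+    >>> torch_tensor_to_numpy(torch.ones(2, 3)).shape
+    (2, 3)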
+ """ + return torch_tensor.numpy() if not torch_tensor.is_cuda else \ + torch_tensor.cpu().detach().numpy() \ No newline at end of file diff --git a/cosense3d/utils/module_utils.py b/cosense3d/utils/module_utils.py new file mode 100644 index 00000000..4242b012 --- /dev/null +++ b/cosense3d/utils/module_utils.py @@ -0,0 +1,78 @@ +import copy +import warnings +from importlib import import_module +from packaging.version import parse +from torch import nn + + +def build_norm_layer(cfgs, shape): + if cfgs['type'] == 'LN': + _cfgs = copy.copy(cfgs) + _cfgs.pop('type') + norm = nn.LayerNorm(shape, **_cfgs) + else: + raise NotImplementedError + return norm + + +def build_dropout(cfgs): + if cfgs['type'] == 'Dropout': + dropout = nn.Dropout(cfgs['drop_prob']) + else: + raise NotImplementedError + return dropout + + +def get_target_module(target): + module, cls_name = target.rsplit('.', 1) + module = import_module(module) + cls_obj = getattr(module, cls_name) + return cls_obj + + +def instantiate_target_module(target, cfg=None, **kwargs): + if cfg is not None: + return get_target_module(target)(cfg) + else: + return get_target_module(target)(**kwargs) + + +def digit_version(version_str: str, length: int = 4): + """Convert a version string into a tuple of integers. + + This method is usually used for comparing two versions. For pre-release + versions: alpha < beta < rc. + + Args: + version_str (str): The version string. + length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int]: The version info in digits (integers). + """ + assert 'parrots' not in version_str + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) # type: ignore + else: + release.extend([0, 0]) + return tuple(release) \ No newline at end of file diff --git a/cosense3d/utils/pcdio.py b/cosense3d/utils/pcdio.py new file mode 100644 index 00000000..5d52c632 --- /dev/null +++ b/cosense3d/utils/pcdio.py @@ -0,0 +1,802 @@ +""" +Read and write PCL .pcd files in python. +dimatura@cmu.edu, 2013-2018 + +- TODO better API for wacky operations. +- TODO add a cli for common operations. 
+- TODO deal properly with padding +- TODO deal properly with multicount fields +- TODO better support for rgb nonsense +""" + +import re +import struct +import copy +from io import StringIO as sio +import numpy as np +import warnings +import lzf + +HAS_SENSOR_MSGS = True +try: + from sensor_msgs.msg import PointField + import numpy_pc2 # needs sensor_msgs +except ImportError: + HAS_SENSOR_MSGS = False + +__all__ = ['PointCloud', + 'point_cloud_to_path', + 'point_cloud_to_buffer', + 'point_cloud_to_fileobj', + 'point_cloud_from_path', + 'point_cloud_from_buffer', + 'point_cloud_from_fileobj', + 'make_xyz_point_cloud', + 'make_xyz_rgb_point_cloud', + 'make_xyz_label_point_cloud', + 'save_txt', + 'cat_point_clouds', + 'add_fields', + 'update_field', + 'build_ascii_fmtstr', + 'encode_rgb_for_pcl', + 'decode_rgb_from_pcl', + 'save_point_cloud', + 'save_point_cloud_bin', + 'save_point_cloud_bin_compressed', + 'pcd_type_to_numpy_type', + 'numpy_type_to_pcd_type', + ] + +if HAS_SENSOR_MSGS: + pc2_pcd_type_mappings = [(PointField.INT8, ('I', 1)), + (PointField.UINT8, ('U', 1)), + (PointField.INT16, ('I', 2)), + (PointField.UINT16, ('U', 2)), + (PointField.INT32, ('I', 4)), + (PointField.UINT32, ('U', 4)), + (PointField.FLOAT32, ('F', 4)), + (PointField.FLOAT64, ('F', 8))] + pc2_type_to_pcd_type = dict(pc2_pcd_type_mappings) + pcd_type_to_pc2_type = dict((q, p) for (p, q) in pc2_pcd_type_mappings) + __all__.extend(['pcd_type_to_pc2_type', 'pc2_type_to_pcd_type']) + +numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)), + (np.dtype('float64'), ('F', 8)), + (np.dtype('uint8'), ('U', 1)), + (np.dtype('uint16'), ('U', 2)), + (np.dtype('uint32'), ('U', 4)), + (np.dtype('uint64'), ('U', 8)), + (np.dtype('int16'), ('I', 2)), + (np.dtype('int32'), ('I', 4)), + (np.dtype('int64'), ('I', 8))] +numpy_type_to_pcd_type = dict(numpy_pcd_type_mappings) +pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings) + + +def parse_header(lines): + """ Parse header of PCD files. + """ + metadata = {} + for ln in lines: + if ln.startswith(b'#') or len(ln) < 2: + continue + match = re.match('(\w+)\s+([\w\s\.]+)', ln.decode('ascii')) + if not match: + warnings.warn("warning: can't understand line: %s" % ln) + continue + key, value = match.group(1).lower(), match.group(2) + if key == 'version': + metadata[key] = value + elif key in ('fields', 'type'): + metadata[key] = value.split() + elif key in ('size', 'count'): + metadata[key] = list(map(int, value.split())) + elif key in ('width', 'height', 'points'): + metadata[key] = int(value) + elif key == 'viewpoint': + metadata[key] = list(map(float, value.split())) + elif key == 'data': + metadata[key] = value.strip().lower() + # TODO apparently count is not required? + # add some reasonable defaults + if 'count' not in metadata: + metadata['count'] = [1]*len(metadata['fields']) + if 'viewpoint' not in metadata: + metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] + if 'version' not in metadata: + metadata['version'] = '.7' + return metadata + + +def write_header(metadata, rename_padding=False): + """ Given metadata as dictionary, return a string header. 
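+
+    The metadata dict is expected to provide the keys: version, fields, size,
+    type, count, width, height, viewpoint, points and data.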
+ """ + template = """\ +VERSION {version} +FIELDS {fields} +SIZE {size} +TYPE {type} +COUNT {count} +WIDTH {width} +HEIGHT {height} +VIEWPOINT {viewpoint} +POINTS {points} +DATA {data} +""" + str_metadata = metadata.copy() + + if not rename_padding: + str_metadata['fields'] = ' '.join(metadata['fields']) + else: + new_fields = [] + for f in metadata['fields']: + if f == '_': + new_fields.append('padding') + else: + new_fields.append(f) + str_metadata['fields'] = ' '.join(new_fields) + str_metadata['size'] = ' '.join(map(str, metadata['size'])) + str_metadata['type'] = ' '.join(metadata['type']) + str_metadata['count'] = ' '.join(map(str, metadata['count'])) + str_metadata['width'] = str(metadata['width']) + str_metadata['height'] = str(metadata['height']) + str_metadata['viewpoint'] = ' '.join(map(str, metadata['viewpoint'])) + str_metadata['points'] = str(metadata['points']) + tmpl = template.format(**str_metadata) + return tmpl + + +def _metadata_is_consistent(metadata): + """ Sanity check for metadata. Just some basic checks. + """ + checks = [] + required = ('version', 'fields', 'size', 'width', 'height', 'points', + 'viewpoint', 'data') + for f in required: + if f not in metadata: + print('%s required' % f) + checks.append((lambda m: all([k in m for k in required]), + 'missing field')) + checks.append((lambda m: len(m['type']) == len(m['count']) == + len(m['fields']), + 'length of type, count and fields must be equal')) + checks.append((lambda m: m['height'] > 0, + 'height must be greater than 0')) + checks.append((lambda m: m['width'] > 0, + 'width must be greater than 0')) + checks.append((lambda m: m['points'] > 0, + 'points must be greater than 0')) + checks.append((lambda m: m['data'].lower() in ('ascii', 'binary', + 'binary_compressed'), + 'unknown data type:' + 'should be ascii/binary/binary_compressed')) + ok = True + for check, msg in checks: + if not check(metadata): + print('error:', msg) + ok = False + return ok + +# def pcd_type_to_numpy(pcd_type, pcd_sz): +# """ convert from a pcd type string and size to numpy dtype.""" +# typedict = {'F' : { 4:np.float32, 8:np.float64 }, +# 'I' : { 1:np.int8, 2:np.int16, 4:np.int32, 8:np.int64 }, +# 'U' : { 1:np.uint8, 2:np.uint16, 4:np.uint32 , 8:np.uint64 }} +# return typedict[pcd_type][pcd_sz] + + +def _build_dtype(metadata): + """ Build numpy structured array dtype from pcl metadata. + + Note that fields with count > 1 are 'flattened' by creating multiple + single-count fields. + + *TODO* allow 'proper' multi-count fields. + """ + types = [] + for f, c, t, s in zip(metadata['fields'], + metadata['count'], + metadata['type'], + metadata['size']): + np_type = pcd_type_to_numpy_type[(t, s)] + if c == 1: + types.append((f, np_type)) + else: + for i in range(c): + types.append(('%s_%04d' % (f, i), np_type)) + dtype = np.dtype(types) + return dtype + + +def build_ascii_fmtstr(pc): + """ Make a format string for printing to ascii. + + Note %.8f is minimum for rgb. + """ + fmtstr = [] + for t, cnt in zip(pc.type, pc.count): + if t == 'F': + fmtstr.extend(['%.10f']*cnt) + elif t == 'I': + fmtstr.extend(['%d']*cnt) + elif t == 'U': + fmtstr.extend(['%u']*cnt) + else: + raise ValueError("don't know about type %s" % t) + return fmtstr + + +def parse_ascii_pc_data(f, dtype, metadata): + """ Use numpy to parse ascii pointcloud data. 
+ """ + return np.loadtxt(f, dtype=dtype, delimiter=' ') + + +def parse_binary_pc_data(f, dtype, metadata): + rowstep = metadata['points']*dtype.itemsize + # for some reason pcl adds empty space at the end of files + buf = f.read(rowstep) + return np.fromstring(buf, dtype=dtype) + + +def parse_binary_compressed_pc_data(f, dtype, metadata): + """ Parse lzf-compressed data. + Format is undocumented but seems to be: + - compressed size of data (uint32) + - uncompressed size of data (uint32) + - compressed data + - junk + """ + fmt = 'II' + compressed_size, uncompressed_size =\ + struct.unpack(fmt, f.read(struct.calcsize(fmt))) + compressed_data = f.read(compressed_size) + # TODO what to use as second argument? if buf is None + # (compressed > uncompressed) + # should we read buf as raw binary? + buf = lzf.decompress(compressed_data, uncompressed_size) + if len(buf) != uncompressed_size: + raise IOError('Error decompressing data') + # the data is stored field-by-field + pc_data = np.zeros(metadata['width'], dtype=dtype) + ix = 0 + for dti in range(len(dtype)): + dt = dtype[dti] + bytes = dt.itemsize * metadata['width'] + column = np.fromstring(buf[ix:(ix+bytes)], dt) + pc_data[dtype.names[dti]] = column + ix += bytes + return pc_data + + +def point_cloud_from_fileobj(f): + """ Parse pointcloud coming from file object f + """ + header = [] + while True: + ln = f.readline().strip() + header.append(ln) + if ln.startswith(b'DATA'): + metadata = parse_header(header) + dtype = _build_dtype(metadata) + break + if metadata['data'] == 'ascii': + pc_data = parse_ascii_pc_data(f, dtype, metadata) + elif metadata['data'] == 'binary': + pc_data = parse_binary_pc_data(f, dtype, metadata) + elif metadata['data'] == 'binary_compressed': + pc_data = parse_binary_compressed_pc_data(f, dtype, metadata) + else: + print('DATA field is neither "ascii" or "binary" or\ + "binary_compressed"') + return PointCloud(metadata, pc_data) + + +def point_cloud_from_path(fname): + """ load point cloud in binary format + """ + with open(fname, 'rb') as f: + pc = point_cloud_from_fileobj(f) + return pc + + +def point_cloud_from_buffer(buf): + fileobj = sio.StringIO(buf) + pc = point_cloud_from_fileobj(fileobj) + fileobj.close() # necessary? + return pc + + +def point_cloud_to_fileobj(pc, fileobj, data_compression=None): + """ Write pointcloud as .pcd to fileobj. + If data_compression is not None it overrides pc.data. + """ + metadata = pc.get_metadata() + if data_compression is not None: + data_compression = data_compression.lower() + assert(data_compression in ('ascii', 'binary', 'binary_compressed')) + metadata['data'] = data_compression + + header = write_header(metadata) + fileobj.write(header) + if metadata['data'].lower() == 'ascii': + fmtstr = build_ascii_fmtstr(pc) + np.savetxt(fileobj, pc.pc_data, fmt=fmtstr) + elif metadata['data'].lower() == 'binary': + fileobj.write(pc.pc_data.tostring('C')) + elif metadata['data'].lower() == 'binary_compressed': + # TODO + # a '_' field is ignored by pcl and breakes compressed point clouds. + # changing '_' to '_padding' or other name fixes this. + # admittedly padding shouldn't be compressed in the first place. 
+ # reorder to column-by-column + uncompressed_lst = [] + for fieldname in pc.pc_data.dtype.names: + column = np.ascontiguousarray(pc.pc_data[fieldname]).tostring('C') + uncompressed_lst.append(column) + uncompressed = ''.join(uncompressed_lst) + uncompressed_size = len(uncompressed) + # print("uncompressed_size = %r"%(uncompressed_size)) + buf = lzf.compress(uncompressed) + if buf is None: + # compression didn't shrink the file + # TODO what do to do in this case when reading? + buf = uncompressed + compressed_size = uncompressed_size + else: + compressed_size = len(buf) + fmt = 'II' + fileobj.write(struct.pack(fmt, compressed_size, uncompressed_size)) + fileobj.write(buf) + else: + raise ValueError('unknown DATA type') + # we can't close because if it's stringio buf then we can't get value after + + +def point_cloud_to_path(pc, fname): + with open(fname, 'w') as f: + point_cloud_to_fileobj(pc, f) + + +def point_cloud_to_buffer(pc, data_compression=None): + fileobj = sio.StringIO() + point_cloud_to_fileobj(pc, fileobj, data_compression) + return fileobj.getvalue() + + +def save_point_cloud(pc, fname): + """ Save pointcloud to fname in ascii format. + """ + with open(fname, 'w') as f: + point_cloud_to_fileobj(pc, f, 'ascii') + + +def save_point_cloud_bin(pc, fname): + """ Save pointcloud to fname in binary format. + """ + with open(fname, 'w') as f: + point_cloud_to_fileobj(pc, f, 'binary') + + +def save_point_cloud_bin_compressed(pc, fname): + """ Save pointcloud to fname in binary compressed format. + """ + with open(fname, 'w') as f: + point_cloud_to_fileobj(pc, f, 'binary_compressed') + + +def save_xyz_label(pc, fname, use_default_lbl=False): + """ Save a simple (x y z label) pointcloud, ignoring all other features. + Label is initialized to 1000, for an obscure program I use. + """ + md = pc.get_metadata() + if not use_default_lbl and ('label' not in md['fields']): + raise Exception('label is not a field in this point cloud') + with open(fname, 'w') as f: + for i in range(pc.points): + x, y, z = ['%.4f' % d for d in ( + pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i] + )] + lbl = '1000' if use_default_lbl else pc.pc_data['label'][i] + f.write(' '.join((x, y, z, lbl))+'\n') + + +def save_xyz_intensity_label(pc, fname, use_default_lbl=False): + """ Save XYZI point cloud. + """ + md = pc.get_metadata() + if not use_default_lbl and ('label' not in md['fields']): + raise Exception('label is not a field in this point cloud') + if 'intensity' not in md['fields']: + raise Exception('intensity is not a field in this point cloud') + with open(fname, 'w') as f: + for i in range(pc.points): + x, y, z = ['%.4f' % d for d in ( + pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i] + )] + intensity = '%.4f' % pc.pc_data['intensity'][i] + lbl = '1000' if use_default_lbl else pc.pc_data['label'][i] + f.write(' '.join((x, y, z, intensity, lbl))+'\n') + + +def save_txt(pc, fname, header=True): + """ Save to csv-style text file, separated by spaces. + + TODO: + - support multi-count fields. + - other delimiters. + """ + with open(fname, 'w') as f: + if header: + header_lst = [] + for field_name, cnt in zip(pc.fields, pc.count): + if cnt == 1: + header_lst.append(field_name) + else: + for c in range(cnt): + header_lst.append('%s_%04d' % (field_name, c)) + f.write(' '.join(header_lst)+'\n') + fmtstr = build_ascii_fmtstr(pc) + np.savetxt(f, pc.pc_data, fmt=fmtstr) + + +def update_field(pc, field, pc_data): + """ Updates field in-place. 
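+
+    Example (illustrative):
+        >>> pc = update_field(pc, 'x', pc.pc_data['x'] + 1.0)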
+ """ + pc.pc_data[field] = pc_data + return pc + + +def add_fields(pc, metadata, pc_data): + """ Builds copy of pointcloud with extra fields. + + Multi-count fields are sketchy, yet again. + """ + if len(set(metadata['fields']).intersection(set(pc.fields))) > 0: + raise Exception("Fields with that name exist.") + + if pc.points != len(pc_data): + raise Exception("Mismatch in number of points.") + + new_metadata = pc.get_metadata() + new_metadata['fields'].extend(metadata['fields']) + new_metadata['count'].extend(metadata['count']) + new_metadata['size'].extend(metadata['size']) + new_metadata['type'].extend(metadata['type']) + + # parse metadata to add + # TODO factor this + fieldnames, typenames = [], [] + for f, c, t, s in zip(metadata['fields'], + metadata['count'], + metadata['type'], + metadata['size']): + np_type = pcd_type_to_numpy_type[(t, s)] + if c == 1: + fieldnames.append(f) + typenames.append(np_type) + else: + fieldnames.extend(['%s_%04d' % (f, i) for i in range(c)]) + typenames.extend([np_type]*c) + dtype = zip(fieldnames, typenames) + # new dtype. could be inferred? + new_dtype = [(f, pc.pc_data.dtype[f]) + for f in pc.pc_data.dtype.names] + dtype + + new_data = np.empty(len(pc.pc_data), new_dtype) + for n in pc.pc_data.dtype.names: + new_data[n] = pc.pc_data[n] + for n, n_tmp in zip(fieldnames, pc_data.dtype.names): + new_data[n] = pc_data[n_tmp] + + # TODO maybe just all the metadata in the dtype. + # TODO maybe use composite structured arrays for fields with count > 1 + newpc = PointCloud(new_metadata, new_data) + return newpc + + +def cat_point_clouds(pc1, pc2): + """ Concatenate two point clouds into bigger point cloud. + Point clouds must have same metadata. + """ + if len(pc1.fields) != len(pc2.fields): + raise ValueError("Pointclouds must have same fields") + new_metadata = pc1.get_metadata() + new_data = np.concatenate((pc1.pc_data, pc2.pc_data)) + # TODO this only makes sense for unstructured pc? + new_metadata['width'] = pc1.width+pc2.width + new_metadata['points'] = pc1.points+pc2.points + pc3 = PointCloud(new_metadata, new_data) + return pc3 + + +def make_xyz_point_cloud(xyz, metadata=None): + """ Make a pointcloud object from xyz array. + xyz array is cast to float32. + """ + md = {'version': .7, + 'fields': ['x', 'y', 'z'], + 'size': [4, 4, 4], + 'type': ['F', 'F', 'F'], + 'count': [1, 1, 1], + 'width': len(xyz), + 'height': 1, + 'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + 'points': len(xyz), + 'data': 'binary'} + if metadata is not None: + md.update(metadata) + xyz = xyz.astype(np.float32) + pc_data = xyz.view(np.dtype([('x', np.float32), + ('y', np.float32), + ('z', np.float32)])) + # pc_data = np.rec.fromarrays([xyz[:,0], xyz[:,1], xyz[:,2]], dtype=dt) + # data = np.rec.fromarrays([xyz.T], dtype=dt) + pc = PointCloud(md, pc_data) + return pc + + +def make_xyz_rgb_point_cloud(xyz_rgb, metadata=None): + """ Make a pointcloud object from xyz array. + xyz array is assumed to be float32. + rgb is assumed to be encoded as float32 according to pcl conventions. 
+ """ + md = {'version': .7, + 'fields': ['x', 'y', 'z', 'rgb'], + 'count': [1, 1, 1, 1], + 'width': len(xyz_rgb), + 'height': 1, + 'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + 'points': len(xyz_rgb), + 'type': ['F', 'F', 'F', 'F'], + 'size': [4, 4, 4, 4], + 'data': 'binary'} + if xyz_rgb.dtype != np.float32: + raise ValueError('array must be float32') + if metadata is not None: + md.update(metadata) + pc_data = xyz_rgb.view(np.dtype([('x', np.float32), + ('y', np.float32), + ('z', np.float32), + ('rgb', np.float32)])).squeeze() + # pc_data = np.rec.fromarrays([xyz[:,0], xyz[:,1], xyz[:,2]], dtype=dt) + # data = np.rec.fromarrays([xyz.T], dtype=dt) + pc = PointCloud(md, pc_data) + return pc + + +def encode_rgb_for_pcl(rgb): + """ Encode bit-packed RGB for use with PCL. + + :param rgb: Nx3 uint8 array with RGB values. + :rtype: Nx1 float32 array with bit-packed RGB, for PCL. + """ + assert(rgb.dtype == np.uint8) + assert(rgb.ndim == 2) + assert(rgb.shape[1] == 3) + rgb = rgb.astype(np.uint32) + rgb = np.array((rgb[:, 0] << 16) | (rgb[:, 1] << 8) | (rgb[:, 2] << 0), + dtype=np.uint32) + rgb.dtype = np.float32 + return rgb + + +def decode_rgb_from_pcl(rgb): + """ Decode the bit-packed RGBs used by PCL. + + :param rgb: An Nx1 array. + :rtype: Nx3 uint8 array with one column per color. + """ + + rgb = rgb.copy() + rgb.dtype = np.uint32 + r = np.asarray((rgb >> 16) & 255, dtype=np.uint8) + g = np.asarray((rgb >> 8) & 255, dtype=np.uint8) + b = np.asarray(rgb & 255, dtype=np.uint8) + rgb_arr = np.zeros((len(rgb), 3), dtype=np.uint8) + rgb_arr[:, 0] = r + rgb_arr[:, 1] = g + rgb_arr[:, 2] = b + return rgb_arr + + +def make_xyz_label_point_cloud(xyzl, label_type='f'): + """ Make XYZL point cloud from numpy array. + + TODO i labels? + """ + md = {'version': .7, + 'fields': ['x', 'y', 'z', 'label'], + 'count': [1, 1, 1, 1], + 'width': len(xyzl), + 'height': 1, + 'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + 'points': len(xyzl), + 'data': 'ASCII'} + if label_type.lower() == 'f': + md['size'] = [4, 4, 4, 4] + md['type'] = ['F', 'F', 'F', 'F'] + elif label_type.lower() == 'u': + md['size'] = [4, 4, 4, 1] + md['type'] = ['F', 'F', 'F', 'U'] + else: + raise ValueError('label type must be F or U') + # TODO use .view() + xyzl = xyzl.astype(np.float32) + dt = np.dtype([('x', np.float32), ('y', np.float32), ('z', np.float32), + ('label', np.float32)]) + pc_data = np.rec.fromarrays([xyzl[:, 0], xyzl[:, 1], xyzl[:, 2], + xyzl[:, 3]], dtype=dt) + pc = PointCloud(md, pc_data) + return pc + + +class PointCloud(object): + """ Wrapper for point cloud data. + + The variable members of this class parallel the ones used by + the PCD metadata (and similar to PCL and ROS PointCloud2 messages), + + ``pc_data`` holds the actual data as a structured numpy array. + + The other relevant metadata variables are: + + - ``version``: Version, usually .7 + - ``fields``: Field names, e.g. ``['x', 'y' 'z']``. + - ``size.`: Field sizes in bytes, e.g. ``[4, 4, 4]``. + - ``count``: Counts per field e.g. ``[1, 1, 1]``. NB: Multi-count field + support is sketchy. + - ``width``: Number of points, for unstructured point clouds (assumed by + most operations). + - ``height``: 1 for unstructured point clouds (again, what we assume most + of the time. + - ``viewpoint``: A pose for the viewpoint of the cloud, as + x y z qw qx qy qz, e.g. ``[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]``. + - ``points``: Number of points. + - ``type``: Data type of each field, e.g. ``[F, F, F]``. + - ``data``: Data storage format. 
One of ``ascii``, ``binary`` or ``binary_compressed``. + + See `PCL docs `__ + for more information. + """ + + def __init__(self, metadata, pc_data): + self.metadata_keys = metadata.keys() + self.__dict__.update(metadata) + self.pc_data = pc_data + self.check_sanity() + + def get_metadata(self): + """ returns copy of metadata """ + metadata = {} + for k in self.metadata_keys: + metadata[k] = copy.copy(getattr(self, k)) + return metadata + + def check_sanity(self): + # pdb.set_trace() + md = self.get_metadata() + assert(_metadata_is_consistent(md)) + assert(len(self.pc_data) == self.points) + assert(self.width*self.height == self.points) + assert(len(self.fields) == len(self.count)) + assert(len(self.fields) == len(self.type)) + + def save(self, fname): + self.save_pcd(fname, 'ascii') + + def save_pcd(self, fname, compression=None, **kwargs): + if 'data_compression' in kwargs: + warnings.warn('data_compression keyword is deprecated for' + ' compression') + compression = kwargs['data_compression'] + with open(fname, 'w') as f: + point_cloud_to_fileobj(self, f, compression) + + def save_pcd_to_fileobj(self, fileobj, compression=None, **kwargs): + if 'data_compression' in kwargs: + warnings.warn('data_compression keyword is deprecated for' + ' compression') + compression = kwargs['data_compression'] + point_cloud_to_fileobj(self, fileobj, compression) + + def save_pcd_to_buffer(self, compression=None, **kwargs): + if 'data_compression' in kwargs: + warnings.warn('data_compression keyword is deprecated for' + ' compression') + compression = kwargs['data_compression'] + return point_cloud_to_buffer(self, compression) + + def save_txt(self, fname): + save_txt(self, fname) + + def save_xyz_label(self, fname, **kwargs): + save_xyz_label(self, fname, **kwargs) + + def save_xyz_intensity_label(self, fname, **kwargs): + save_xyz_intensity_label(self, fname, **kwargs) + + def copy(self): + new_pc_data = np.copy(self.pc_data) + new_metadata = self.get_metadata() + return PointCloud(new_metadata, new_pc_data) + + def to_msg(self): + if not HAS_SENSOR_MSGS: + raise Exception('ROS sensor_msgs not found') + # TODO is there some metadata we want to attach? + return numpy_pc2.array_to_pointcloud2(self.pc_data) + + @staticmethod + def from_path(fname): + return point_cloud_from_path(fname) + + @staticmethod + def from_fileobj(fileobj): + return point_cloud_from_fileobj(fileobj) + + @staticmethod + def from_buffer(buf): + return point_cloud_from_buffer(buf) + + @staticmethod + def from_array(arr): + """ create a PointCloud object from an array. 
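+
+        Example (illustrative):
+            >>> arr = np.zeros(10, dtype=[('x', np.float32),
+            ...                           ('y', np.float32),
+            ...                           ('z', np.float32)])
+            >>> pc = PointCloud.from_array(arr)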
+ """ + pc_data = arr.copy() + md = {'version': .7, + 'fields': [], + 'size': [], + 'count': [], + 'width': 0, + 'height': 1, + 'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + 'points': 0, + 'type': [], + 'data': 'binary_compressed'} + md['fields'] = pc_data.dtype.names + for field in md['fields']: + type_, size_ =\ + numpy_type_to_pcd_type[pc_data.dtype.fields[field][0]] + md['type'].append(type_) + md['size'].append(size_) + # TODO handle multicount + md['count'].append(1) + md['width'] = len(pc_data) + md['points'] = len(pc_data) + pc = PointCloud(md, pc_data) + return pc + + @staticmethod + def from_msg(msg, squeeze=True): + """ from pointcloud2 msg + squeeze: fix when clouds get 1 as first dim + """ + if not HAS_SENSOR_MSGS: + raise NotImplementedError('ROS sensor_msgs not found') + md = {'version': .7, + 'fields': [], + 'size': [], + 'count': [], + 'width': msg.width, + 'height': msg.height, + 'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + 'points': 0, + 'type': [], + 'data': 'binary_compressed'} + for field in msg.fields: + md['fields'].append(field.name) + t, s = pc2_type_to_pcd_type[field.datatype] + md['type'].append(t) + md['size'].append(s) + # TODO handle multicount correctly + if field.count > 1: + warnings.warn('fields with count > 1 are not well tested') + md['count'].append(field.count) + pc_array = numpy_pc2.pointcloud2_to_array(msg) + pc_data = pc_array.reshape(-1) + md['height'], md['width'] = pc_array.shape + md['points'] = len(pc_data) + pc = PointCloud(md, pc_data) + return pc \ No newline at end of file diff --git a/cosense3d/utils/pclib.py b/cosense3d/utils/pclib.py new file mode 100644 index 00000000..24e67dac --- /dev/null +++ b/cosense3d/utils/pclib.py @@ -0,0 +1,502 @@ +import os +import shutil + +import numpy as np +import torch +import torch.nn.functional as F +import open3d as o3d +from plyfile import PlyData, PlyElement +from scipy.spatial.transform import Rotation as R + +from cosense3d.utils.misc import check_numpy_to_torch +from cosense3d.utils.pcdio import point_cloud_from_path + +ply_fields = {'x': 'f4', 'y': 'f4', 'z': 'f4', 'ObjIdx': 'u4', 'ObjTag': 'u4', 'ring': 'u1', 'time': 'f4'} +np_types = {'f4': np.float32, 'u4': np.uint32, 'u1': np.uint8} + + +def header(points): + return f"""\ + VERSION 0.7 + FIELDS x y z rgb + SIZE 4 4 4 4 + TYPE F F F F + COUNT 1 1 1 1 + WIDTH {len(points)} + HEIGHT 1 + VIEWPOINT 0 0 0 1 0 0 0 + POINTS {len(points)} + DATA ascii + """ + + +def pose_to_transformation(pose): + """ + + :param pose: list, [x, y, z, roll, pitch, yaw] + + :return: + transformation: np.ndarray, (4, 4) + """ + transformation = np.eye(4) + r = R.from_euler('xyz', pose[3:]).as_matrix() + transformation[:3, :3] = r + transformation[:3, 3] = np.array(pose[:3]) + return transformation + + +def read_ply(filename): + ply = PlyData.read(filename) + data = ply['vertex'] + properties = [prop.name for prop in data.properties] + property_types = [prop.val_dtype for prop in data.properties] + + return {name: np.array(data[name]) for name in properties}, property_types + + +def save_cosense_ply(data, output_file_name): + data = { + 'x': data['x'].astype(np_types[ply_fields['x']]), + 'y': data['y'].astype(np_types[ply_fields['y']]), + 'z': data['z'].astype(np_types[ply_fields['z']]), + 'ObjIdx': data['ObjIdx'].astype(np_types[ply_fields['ObjIdx']]), + 'ObjTag': data['ObjTag'].astype(np_types[ply_fields['ObjTag']]), + 'ring': data['ring'].astype(np_types[ply_fields['ring']]), + 'time': data['time'].astype(np_types[ply_fields['time']]) + } + vertex_data = 
list(zip(*[data[k] for k, v in ply_fields.items()])) + vertex_type = [(k, v) for k, v in ply_fields.items()] + vertex = np.array(vertex_data, dtype=vertex_type) + el = PlyElement.describe(vertex, 'vertex') + PlyData([el]).write(output_file_name) + + +def lidar_ply2bin(ply_file, bin_file, + fields=['x', 'y', 'z', 'intensity'], + replace=False): + """ + Read ply and save to the cosense3d binary format. + + :param ply_file: str, input file name + :param bin_file: str, output file name + :param fields: list of str, names that indicates 'x', 'y', 'z' and 'intensity' + :param replace: replace the exisiting file if True + """ + if not replace and os.path.exists(bin_file): + return + pointcloud, property_types = read_ply(ply_file) + pcd_out = np.stack([pointcloud[k] for k in fields], axis=1) + pcd_out.tofile(bin_file) + + +def lidar_bin2pcd_o3d(bin_file, out_file, replace=False): + if not replace and os.path.exists(out_file): + return + bin_pcd = np.fromfile(bin_file, dtype=np.float32) + + # reshape + points = bin_pcd.reshape(-1, 4) + # remove nan points + mask = np.logical_not(np.isnan(points[:, :3]).any(axis=1)) + points = points[mask] + + o3d_pcd = o3d.geometry.PointCloud() + o3d_pcd.points = o3d.utility.Vector3dVector(points[:, :-1]) + + point_intensity = np.zeros_like(points[:, :-1]) + point_intensity[:, 0] = points[:, -1] / 255. + o3d_pcd.colors = o3d.utility.Vector3dVector(point_intensity) + + # write to pcd file + o3d.io.write_point_cloud(out_file, + pointcloud=o3d_pcd, + write_ascii=True) + + +def lidar_bin2pcd(bin_file, out_file, replace=False): + if not replace and os.path.exists(out_file): + return + bin_pcd = np.fromfile(bin_file, dtype=np.float32) + # reshape + points = bin_pcd.reshape(-1, 4) + points[:, 3] /= 255 + mask = np.logical_not(np.isnan(points[:, :3]).any(axis=1)) + points = points[mask] + header_str = header(points) + with open(out_file, 'w') as fh: + # fh.write() + np.savetxt(fh, points, fmt='%f', header=header_str) + # shutil.copy(out_file.replace('pcd', 'txt'), out_file) + + +def lidar_bin2bin(bin_file, out_file): + shutil.copy(bin_file, out_file) + + +def load_pcd(pcd_file: str, return_o3d: bool=False): + """ + Read pcd and return numpy array. + + :param pcd_file: The pcd file that contains the point cloud. + :param return_o3d: Default returns numpy array, set True to return pcd as o3d PointCloud object + + :return: lidar_dict, + xyz: (pcd_np | pcd : np.ndarray | o3d.geometry.PointCloud) the lidar xyz coordinates in numpy format, shape:(n, 3); + intensity: (optional) np.ndarray, (n,). + label: (optional) np.ndarray, (n,). + time: (optional) np.ndarray, (n,). + ray: (optional) np.ndarray, (n,). 
+ """ + lidar_dict = {} + ext = os.path.splitext(pcd_file)[-1] + if ext == '.pcd': + if return_o3d: + return o3d.io.read_point_cloud(pcd_file) + else: + pcd = point_cloud_from_path(pcd_file) + lidar_dict['xyz'] = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1).astype(float) + # we save the intensity in the first channel + if 'intensity' in pcd.fields: + lidar_dict['intensity'] = pcd.pc_data['intensity'] + if 'timestamp' in pcd.fields: + lidar_dict['time'] = pcd.pc_data['timestamp'] + + elif ext == '.bin': + pcd_np = np.fromfile(pcd_file, dtype=np.float32).reshape(-1, 4) + if return_o3d: + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(pcd_np) + return pcd + lidar_dict['xyz'] = pcd_np[:, :3] + # check attribute of last column, + # num of unique labels for the datasets in this projects is less than 50, + # unique intensities is normally larger then 50 + if len(np.unique(pcd_np[:, -1])) < 50: + lidar_dict['label'] = pcd_np[:, -1] + elif pcd_np[:, -1].max() > 1: + lidar_dict['intensity'] = pcd_np[:, -1] / 255 + else: + lidar_dict['intensity'] = pcd_np[:, -1] + + elif ext == '.ply': + data = read_ply(pcd_file)[0] + xyz = np.stack([data.pop(x) for x in 'xyz'], axis=1) + lidar_dict['xyz'] = xyz + lidar_dict.update(data) + else: + raise NotImplementedError + + return lidar_dict + + +def tf2pose(tf_matrix): + euler = R.from_matrix(tf_matrix[:3, :3]).as_euler('xyz') + translation = tf_matrix[:3, 3] + return translation.tolist() + euler.tolist() + + +def pose2tf(pose): + tf_matrix = np.eye(4) + tf_matrix[:3, :3] = rotation_matrix(pose[3:]) + tf_matrix[:3, 3] = np.array(pose[:3]) + return tf_matrix + + +def rotation_matrix(euler, degrees=True): + """ + Construct rotation matrix with the given pose. + + :param euler: list or np.ndarray + [roll, pitch, yaw] + :return: rot: np.ndarray, 3x3 + rotation matrix + """ + return R.from_euler('xyz', euler, degrees=degrees).as_matrix() + + +def rotate3d(points, euler): + """ + Rotate point cloud with the euler angles given in pose. + + :param points: np.ndarray, N x (3 + C) + each point in the row has the format [x, y, z, ...] 
+ :param euler: list or np.ndarray + [roll, pitch, yaw] + + :return: points: np.ndarray + rotated point cloud + """ + assert len(euler) == 3 + rot = rotation_matrix(euler) + points[:, :3] = (rot @ points[:, :3].T).T + return points + + +def cart2cyl(input_xyz): + rho = np.sqrt(input_xyz[..., 0] ** 2 + input_xyz[..., 1] ** 2) + phi = np.arctan2(input_xyz[..., 1], input_xyz[..., 0]) + return np.concatenate((rho.reshape(-1, 1), phi.reshape(-1, 1), input_xyz[..., 2:]), axis=-1) + + +def cyl2cart(input_xyz_polar): + x = input_xyz_polar[..., 0] * np.cos(input_xyz_polar[..., 1]) + y = input_xyz_polar[..., 0] * np.sin(input_xyz_polar[..., 1]) + return np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), input_xyz_polar[..., 2:]), axis=-1) + + +def mat_yaw(cosa, sina, zeros=0, ones=1): + return [ + cosa, -sina, zeros, + sina, cosa, zeros, + zeros, zeros, ones + ] + + +def mat_pitch(cosa, sina, zeros=0, ones=1): + return [ + cosa, zeros, sina, + zeros, ones, zeros, + -sina, zeros, cosa, + ] + + +def mat_roll(cosa, sina, zeros=0, ones=1): + return [ + ones, zeros, zeros, + zeros, cosa, -sina, + zeros, sina, cosa, + ] + + +def rotate_points_along_z_np(points, angle): + """ + :param points: (N, 3 + C or 2 + C) + :param angle: float, angle along z-axis, angle increases x ==> y + + """ + cosa = np.cos(angle) + sina = np.sin(angle) + rot_matrix = np.array([ + [cosa, sina, 0], + [-sina, cosa, 0], + [0, 0, 1] + ]).astype(np.float) + if points.shape[1]==2: + points_rot = np.matmul(points, rot_matrix[:2, :2]) + elif points.shape[1]>2: + points_rot = np.matmul(points[:, 0:3], rot_matrix) + points_rot = np.concatenate((points_rot, points[:, 3:]), axis=-1) + else: + raise IOError('Input points should have the shape: (N, 3 + C or 2 + C).') + return points_rot + + +def rotate_points_batch(points, angles, order='xyz'): + """ + :param points: (B, N, 3 + C) + :param angles: (B, 1|3), radians + rotation = R(3)R(2)R(1) if angles shape in (B, 3) + :return: points_rot: (B, N, 3 + C) + """ + assert angles.shape[1] == len(order), \ + "angles should has the shape (len(points), len(order))." + + points, is_numpy = check_numpy_to_torch(points) + angles, _ = check_numpy_to_torch(angles) + + cosas = torch.cos(angles) + sinas = torch.sin(angles) + zeros = angles[:, 0].new_zeros(points.shape[0]) + ones = angles[:, 0].new_ones(points.shape[0]) + rot_matrix = torch.eye(3, dtype=points.dtype, device=points.device) + rot_matrix = rot_matrix.reshape((1, 3, 3)).repeat(angles.shape[0], 1, 1) + for cosa, sina, ax in zip(cosas.T, sinas.T, order): + if ax == 'z': + rot = torch.stack(mat_yaw( + cosa, sina, zeros, ones + ), dim=1).view(-1, 3, 3).float() + elif ax == 'y': + rot = torch.stack(mat_pitch( + cosa, sina, zeros, ones + ), dim=1).view(-1, 3, 3).float() + elif ax == 'x': + rot = torch.stack(mat_roll( + cosa, sina, zeros, ones + ), dim=1).view(-1, 3, 3).float() + else: + raise NotImplementedError + rot_matrix = torch.bmm(rot, rot_matrix) + points_rot = torch.bmm(rot_matrix, points[:, :, 0:3].float(). 
+                               permute(0, 2, 1)).permute(0, 2, 1)
+    points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
+    return points_rot.numpy() if is_numpy else points_rot
+
+
+def rotate_points_along_z_torch(points, angle):
+    """
+    :param points: (N, 2 + C) or (B, 2 + C)
+    :param angle: float or tensor of shape (B), angle along z-axis, angle increases x ==> y
+    """
+    if len(points.shape) == 2:
+        points = points.unsqueeze(0)
+    if isinstance(angle, float):
+        angle = torch.tensor([angle], device=points.device)
+    else:
+        assert isinstance(angle, torch.Tensor)
+        assert points.shape[0] == 1 or angle.shape[0] == points.shape[0]
+    cosa = torch.cos(angle)
+    sina = torch.sin(angle)
+    rot_matrix = torch.stack([
+        torch.stack([cosa, sina], dim=-1),
+        torch.stack([-sina, cosa], dim=-1)
+    ], dim=1).float().to(points.device)
+    if points.shape[0] == 1 and angle.shape[0] > 1:
+        points = torch.tile(points, (len(rot_matrix), 1, 1))
+    points_rot = torch.bmm(points[..., 0:2], rot_matrix)
+    points_rot = torch.cat((points_rot, points[..., 2:]), dim=-1)
+    return points_rot
+
+
+def rotate_points_with_tf_np(points: np.ndarray, tf_np: np.ndarray) -> np.ndarray:
+    """
+    Rotate points with transformation matrix.
+
+    :param points (np.ndarray): Nx3 points array
+    :param tf_np (np.ndarray): 4x4 transformation matrix
+    :return: points (np.ndarray): Nx3 points array
+    """
+    points_homo = np.concatenate([points, np.ones_like(points[:, :1])], axis=-1).T
+    points = (tf_np @ points_homo)[:3].T
+    return points
+
+
+def rotate_box_corners_with_tf_np(corners: np.ndarray, tf_np: np.ndarray) -> np.ndarray:
+    """
+    Rotate box corners with transformation matrix.
+
+    :param corners: Nx8x3 corners array
+    :param tf_np: 4x4 transformation matrix
+    :return: corners, Nx8x3 corners array
+    """
+    points = rotate_points_with_tf_np(corners.reshape(-1, 3), tf_np)
+    corners = points.reshape(corners.shape)
+    return corners
+
+
+def mask_values_in_range(values, min, max):
+    return np.logical_and(values > min, values < max)
+
+
+def mask_points_in_range(points: np.array, dist: float) -> np.array:
+    """
+    :rtype: np.array
+    """
+    return np.linalg.norm(points[:, :2], axis=1) < dist
+
+
+def get_tf_matrix_torch(vectors, inv=False):
+    device = vectors.device
+    n, _ = vectors.shape
+    xs = vectors[:, 0]
+    ys = vectors[:, 1]
+    angles = vectors[:, 2]
+    cosa = torch.cos(angles)
+    sina = torch.sin(angles)
+    ones = torch.ones_like(angles)
+    zeros = torch.zeros_like(angles)
+    rot_matrix = torch.zeros((n, 3, 3), device=device, requires_grad=True)
+    rot_matrix[:, 0, 0] = cosa
+    rot_matrix[:, 0, 1] = -sina
+    rot_matrix[:, 1, 0] = sina
+    rot_matrix[:, 1, 1] = cosa
+    shift_matrix = torch.zeros_like(rot_matrix, requires_grad=True)
+    shift_matrix[:, 0, 1] = xs
+    shift_matrix[:, 1, 0] = ys
+    shift_matrix[:, [0, 1, 2], [0, 1, 2]] = 1.0
+    if inv:
+        mat = torch.einsum('...ij, ...jk', rot_matrix, shift_matrix)
+    else:
+        mat = torch.einsum('...ij, ...jk', shift_matrix, rot_matrix)
+    return mat, rot_matrix, shift_matrix
+
+
+def rotation_mat2euler_torch(mat):
+    sy = torch.norm(mat[:, :2, 0], dim=1)
+    singular = sy < 1e-6
+    not_singular = torch.logical_not(singular)
+    euler = torch.zeros_like(mat[:, 0])
+
+    if not_singular.sum() > 0:
+        euler[not_singular, 0] = torch.atan2(mat[not_singular, 2, 1], mat[not_singular, 2, 2])
+        euler[not_singular, 1] = torch.atan2(-mat[not_singular, 2, 0], sy)
+        euler[not_singular, 2] = torch.atan2(mat[not_singular, 1, 0], mat[not_singular, 0, 0])
+    if singular.sum() > 0:
+        euler[singular, 0] = torch.atan2(-mat[singular, 1, 2], mat[singular, 1, 1])
+        euler[singular, 1] = torch.atan2(-mat[singular, 2, 0], sy)
+
+    return euler
+
+
+def
pose_err_global2relative_torch(poses, errs): + """ + Calculate relative pose transformation based on the errorneous global positioning + :param poses: Nx2 or Nx3, first row is ego pose, other rows are the coop poses + :param errs: Nx3, first row is ego pose error and other rows for coop pose errors + :return: (N-1)x3, relative localization errors between ego and coop vehicles + """ + if poses.shape[-1]==2: + poses = torch.cat([poses, torch.zeros_like(poses[:, 0:1])], dim=-1) + poses_err = poses + errs + + R01, _, _ = get_tf_matrix_torch(-poses[:1], inv=True) + R10_hat, _, _ = get_tf_matrix_torch(poses_err[:1]) + R20, _, _ = get_tf_matrix_torch(poses[1:]) + R02_hat, _, _ = get_tf_matrix_torch(-poses_err[1:], inv=True) + + delta_R21 = torch.einsum('...ij, ...jk', R01, R20) + delta_R21 = torch.einsum('...ij, ...jk', delta_R21, R02_hat) + delta_R21 = torch.einsum('...ij, ...jk', delta_R21, R10_hat) + + x = delta_R21[0, 2] + y = delta_R21[1, 2] + theta = torch.atan2(delta_R21[1, 0], delta_R21[0, 0]) + return torch.stack([x, y, theta], dim=-1) + + +def project_points_by_matrix_torch(points, transformation_matrix): + """ + Project the points to another coordinate system based on the + transformation matrix. + + :param points: torch.Tensor, 3D points, (N, 3) + :param transformation_matrix: torch.Tensor, Transformation matrix, (4, 4) + :return: projected_points : torch.Tensor, The projected points, (N, 3) + """ + points, is_numpy = \ + check_numpy_to_torch(points) + transformation_matrix, _ = \ + check_numpy_to_torch(transformation_matrix) + + # convert to homogeneous coordinates via padding 1 at the last dimension. + # (N, 4) + points_homogeneous = F.pad(points, (0, 1), mode="constant", value=1) + # (N, 4) + projected_points = torch.einsum("ik, jk->ij", points_homogeneous, + transformation_matrix) + + return projected_points[:, :3] if not is_numpy \ + else projected_points[:, :3].numpy() + +if __name__=="__main__": + for i in range(0, 300): + frame = f"{i:06d}" + ply_file = f"/koko/LUMPI/train/measurement5/lidar/{frame}.ply" + bin_file = f"/media/hdd/projects/TAL/data/lumpi_m5/lidar0/{frame}.bin" + lidar_ply2bin(ply_file, bin_file) diff --git a/cosense3d/utils/plot_dirichlet.py b/cosense3d/utils/plot_dirichlet.py new file mode 100644 index 00000000..a7f5c97b --- /dev/null +++ b/cosense3d/utils/plot_dirichlet.py @@ -0,0 +1,126 @@ +import math +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.tri as tri +import seaborn as sns +from math import gamma +from operator import mul +from functools import reduce + +sns.set(style='white', font_scale=1.2) +sns.color_palette("Blues", as_cmap=True) + + +def plot_mesh(corners): + """Subdivide the triangle into a triangular mesh and plot the original and subdivided triangles.""" + triangle = tri.Triangulation(corners[:, 0], corners[:, 1]) + + refiner = tri.UniformTriRefiner(triangle) + trimesh = refiner.refine_triangulation(subdiv=4) + + plt.figure(figsize=(6, 4)) + for i, mesh in enumerate((triangle, trimesh)): + plt.subplot(1, 2, i + 1) + plt.triplot(mesh) + plt.axis('off') + plt.axis('equal') + + +class Dirichlet: + """Define the Dirichlet distribution with vector parameter alpha.""" + + def __init__(self, alpha): + self._alpha = np.array(alpha) + self._coef = gamma(np.sum(self._alpha)) / reduce(mul, [gamma(a) for a in self._alpha]) + + def pdf(self, x): + """Returns pdf value for `x`. 
""" + return self._coef * reduce(mul, [xx ** (aa - 1) for (xx, aa) in zip(x, self._alpha)]) + + +class PlotDirichlet: + """ + Plot the Dirichlet distribution as a contour plot on a 2-Simplex. + """ + + def __init__(self, corners): + self._corners = corners + self._triangle = tri.Triangulation(corners[:, 0], corners[:, 1]) + # Midpoints of triangle sides opposite of each corner + self._midpoints = [(corners[(i + 1) % 3] + corners[(i + 2) % 3]) / 2.0 for i in range(3)] + + def xy2bc(self, xy, tol=1.e-3): + """Map the x-y coordinates of the mesh vertices to the simplex coordinate space (aka barycentric coordinates). + Here we use a simple method that uses vector algebra. For some values of alpha, calculation of the Dirichlet pdf + can become numerically unstable at the boundaries of the simplex so our conversion function will take an optional + tolerance that will avoid barycentric coordinate values directly on the simplex boundary. + """ + s = [(self._corners[i] - self._midpoints[i]).dot(xy - self._midpoints[i]) / 0.75 for i in range(3)] + return np.clip(s, tol, 1.0 - tol) + + def draw_pdf_contours(self, ax, dist, label=None, nlevels=50, subdiv=8, **kwargs): + """Draw pdf contours for a Dirichlet distribution""" + # Subdivide the triangle into a triangular mesh + refiner = tri.UniformTriRefiner(self._triangle) + trimesh = refiner.refine_triangulation(subdiv=subdiv) + + # convert to barycentric coordinates and compute probabilities of the given distribution + pvals = [dist.pdf(self.xy2bc(xy)) for xy in zip(trimesh.x, trimesh.y)] + + ax.tricontour(trimesh, pvals, 10, linewidths=0.1, colors="gray") + tcf = ax.tricontourf(trimesh, pvals, nlevels, cmap="jet", **kwargs) + ax.plot([0, 0.5], [0, 0.75 ** 0.5], 'k') + ax.plot([0, 1], [0, 0]) + ax.plot([0.5, 1], [0.75 ** 0.5 , 0], 'k') + # plt.axis('equal') + ax.set_xlim(0, 1) + ax.set_ylim(0, 0.75 ** 0.5) + ax.set_title("$\\alpha$ = " + str(label)) + ax.axis('off') + + return ax, tcf + + +if __name__ == '__main__': + kwargs = {} + corners = np.array([[0, 0], [1, 0], [0.5, 0.75 ** 0.5]]) + plot_dirichlet = PlotDirichlet(corners) + + f, axes = plt.subplots(2, 3, figsize=(14, 6)) + ax = axes[0, 0] + alpha = (0.9, 0.9, 0.9) + dist = Dirichlet(alpha) + ax, tcf = plot_dirichlet.draw_pdf_contours(ax, dist, alpha, **kwargs) + f.colorbar(tcf, ax=ax, format="%.1f") + + ax = axes[0, 1] + alpha = (1, 1, 1) + dist = Dirichlet(alpha) + ax, tcf = plot_dirichlet.draw_pdf_contours(ax, dist, alpha, **kwargs) + f.colorbar(tcf, ax=ax, format="%2.1f") + + ax = axes[0, 2] + alpha = (3, 3, 3) + dist = Dirichlet(alpha) + ax, tcf = plot_dirichlet.draw_pdf_contours(ax, dist, alpha, **kwargs) + f.colorbar(tcf, ax=ax, format="%2.1f") + + ax = axes[1, 0] + alpha = (1, 3, 3) + dist = Dirichlet(alpha) + ax, tcf = plot_dirichlet.draw_pdf_contours(ax, dist, alpha, **kwargs) + f.colorbar(tcf, ax=ax, format="%2.1f") + + ax = axes[1, 1] + alpha = (2, 2, 10) + dist = Dirichlet(alpha) + ax, tcf = plot_dirichlet.draw_pdf_contours(ax, dist, alpha, **kwargs) + f.colorbar(tcf, ax=ax, format="%2.1f") + + ax = axes[1, 2] + alpha = (20, 20, 20) + dist = Dirichlet(alpha) + ax, tcf = plot_dirichlet.draw_pdf_contours(ax, dist, alpha, **kwargs) + f.colorbar(tcf, ax=ax, format="%2.1f") + + f.savefig('/home/yys/Downloads/Dirichlet.pdf', bbox_inches='tight', transparent=True) \ No newline at end of file diff --git a/cosense3d/utils/tensor_utils.py b/cosense3d/utils/tensor_utils.py new file mode 100644 index 00000000..411a314c --- /dev/null +++ b/cosense3d/utils/tensor_utils.py @@ -0,0 +1,26 @@ +import 
torch +import numpy as np + + +def pad_list_to_array_torch(data): + """ + Pad list of numpy data to one single numpy array + :param data: list of np.ndarray + :return: np.ndarray + """ + B = len(data) + cnt = [len(d) for d in data] + max_cnt = max(cnt) + out = torch.zeros((B, max_cnt,) + tuple(data[0].shape[1:]), + device=data[0].device, dtype=data[0].dtype) + for b in range(B): + out[b, :cnt[b]] = data[b] + return out + + +def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False + + diff --git a/cosense3d/utils/train_utils.py b/cosense3d/utils/train_utils.py new file mode 100644 index 00000000..1decbcab --- /dev/null +++ b/cosense3d/utils/train_utils.py @@ -0,0 +1,128 @@ +import copy +import random +import warnings + +import numpy as np +import torch +from torch.nn.utils.clip_grad import clip_grad_norm_ + + +def get_gpu_architecture(): + if torch.cuda.is_available(): + device = torch.device("cuda") + gpu_props = torch.cuda.get_device_properties(device) + return gpu_props.major * 10 + gpu_props.minor + else: + return 0 + + +def seed_everything(seed): + torch.manual_seed(seed) + random.seed(seed) + np.random.seed(seed) + + +def build_optimizer(model, cfg): + # construct an optimizer + params = [p for p in model.parameters() if p.requires_grad] + optimizer = torch.optim.AdamW(params, lr=cfg['lr'], + weight_decay=cfg['weight_decay'], + betas=tuple(cfg['betas'])) + + return optimizer + + +def build_lr_scheduler(optimizer, cfg, steps_per_epoch): + cfg_ = copy.copy(cfg) + policy = cfg_.pop('policy', 'MultiStepLR') + if policy == 'MultiStepLR': + # construct a learning rate scheduler + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, + milestones=cfg['milestones'], + gamma=cfg['gamma']) + elif policy == 'CosineAnnealingWarm': + from timm.scheduler.cosine_lr import CosineLRScheduler + num_steps = cfg['epochs'] * steps_per_epoch + warmup_lr = cfg['warmup_lr'] + warmup_steps = cfg['warmup_epochs'] * steps_per_epoch + lr_min = cfg['lr_min'] + + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_steps, + lr_min=lr_min, + warmup_lr_init=warmup_lr, + warmup_t=warmup_steps, + cycle_limit=1, + t_in_epochs=False, + ) + else: + raise NotImplementedError + + return lr_scheduler + + +def is_tensor_to_cuda(data, device=0): + if isinstance(data, dict): + for k, v in data.items(): + data[k] = is_tensor_to_cuda(v, device) + return data + elif isinstance(data, torch.Tensor): + return data.to(device) + elif isinstance(data, list) or isinstance(data, tuple): + data_t = [] + for i in range(len(data)): + data_t.append(is_tensor_to_cuda(data[i], device)) + return data_t + else: + return data + + +def load_tensors_to_gpu(batch_dict, device=0): + """ + Load all tensors in batch_dict to gpu + """ + + for k, v in batch_dict.items(): + batch_dict[k] = is_tensor_to_cuda(v, device=device) + + +def load_model_dict(model, pretrained_dict): + try: + model.load_state_dict(pretrained_dict) + except: + UnmatchedParams = "" + # 1. 
filter out unnecessary keys + model_dict = model.state_dict() + matched_dict = {} + + pretrained_keys = list() + for k, v in pretrained_dict.items(): + if 'module' in k: + k = k.replace('module.', '') + if k in model_dict and v.shape == model_dict[k].shape: + matched_dict[k] = v + elif v.shape != model_dict[k].shape: + UnmatchedParams += f"{k} : Unmatched shape ({v.shape} -> {model_dict[k].shape})\n" + else: + UnmatchedParams += f"{k} : Pretrained parameters not in model dict\n" + pretrained_keys.append(k) + for k in set(model_dict.keys()) - set(pretrained_keys): + UnmatchedParams += f"{k} : Model parameters not in pretrained dict\n" + if len(UnmatchedParams) > 0: + warnings.warn("Model state dict does not match pretrained state dict. Unmatched parameters are:\n" + + UnmatchedParams) + # 2. overwrite entries in the existing state dict + model_dict.update(matched_dict) + # 3. load the new state dict + model.load_state_dict(model_dict) + return model + + +def clip_grads(params, max_norm=35, norm_type=2): + params = list( + filter(lambda p: p.requires_grad and p.grad is not None, params)) + if len(params) > 0: + total_norm = clip_grad_norm_(params, max_norm=max_norm, norm_type=norm_type) + return total_norm + diff --git a/cosense3d/utils/vislib.py b/cosense3d/utils/vislib.py new file mode 100644 index 00000000..d0d22346 --- /dev/null +++ b/cosense3d/utils/vislib.py @@ -0,0 +1,598 @@ +import random +import sys +import os +import time + +import torch +import numpy as np +import matplotlib.pyplot as plt +import open3d as o3d + +from cosense3d.utils import pclib +from cosense3d.utils.box_utils import corners_to_boxes_3d, boxes_to_corners_3d +from matplotlib.patches import Polygon + + +COLOR_PALETTES = { + 'pastels_rock': { + 'DesertSand': [238, 185, 161], + 'DeepChampagne': [241, 213, 170], + 'Champagne': [242, 237, 207], + 'JetStream': [186, 224, 195], + 'LightPeriwinkle':[190, 198, 225], + }, + 'calm_afternoon': { + 'MiddleBlueGreen': [137, 204, 202], + 'Khaki': [245, 222, 145], + 'MacaroniAndCheese': [245, 193, 129], + 'Middle Red': [232, 132, 107], + 'Rose Gold': [189, 93, 115], + 'Rackley': [101, 135, 168], + }, + 'objects': { + 'vehicle': [0, 0, 142], + 'cyclist': [200, 100, 0], + 'pedestrian': [220, 20, 60], + 'truck': [0, 0, 0], + 'motorcycle': [100, 200, 0], + 'bus': [100, 100, 0] + }, + 'random': {i: [random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255)] for i in range(20)} +} + + +def get_palette_colors(palette): + return np.array( + list(COLOR_PALETTES[palette].values()) + ) / 255 + + +def visualization(func_list, batch_data): + for func_str in func_list: + getattr(sys.modules[__name__], func_str)(batch_data) + + +def draw_box_plt(boxes_dec, ax, color=None, linewidth_scale=2.0, linestyle='solid'): + """ + draw boxes in a given plt ax + :param boxes_dec: (N, 5) or (N, 7) in metric + :param ax: + :return: ax with drawn boxes + """ + if not len(boxes_dec)>0: + return ax + boxes_np= boxes_dec + if isinstance(boxes_np, torch.Tensor): + boxes_np = boxes_np.cpu().detach().numpy() + elif isinstance(boxes_np, list): + boxes_np = np.array(boxes_np) + if boxes_np.shape[-1]>5: + boxes_np = boxes_np[:, [0, 1, 3, 4, 6]] + x = boxes_np[:, 0] + y = boxes_np[:, 1] + dx = boxes_np[:, 2] + dy = boxes_np[:, 3] + + x1 = x - dx / 2 + y1 = y - dy / 2 + x2 = x + dx / 2 + y2 = y + dy / 2 + theta = boxes_np[:, 4:5] + # bl, fl, fr, br + corners = np.array([[x1, y1],[x1,y2], [x2,y2], [x2, y1]]).transpose(2, 0, 1) + new_x = (corners[:, :, 0] - x[:, None]) * np.cos(theta) + (corners[:, :, 1] 
+ - y[:, None]) * (-np.sin(theta)) + x[:, None] + new_y = (corners[:, :, 0] - x[:, None]) * np.sin(theta) + (corners[:, :, 1] + - y[:, None]) * (np.cos(theta)) + y[:, None] + corners = np.stack([new_x, new_y], axis=2) + for corner in corners: + ax.plot(corner[[0,1,2,3,0], 0], corner[[0,1,2,3,0], 1], color=color, + linewidth=linewidth_scale, linestyle=linestyle) + # draw direction + # front = corner[[2, 3]].mean(axis=0) + # center = corner.mean(axis=0) + # ax.plot([front[0], center[0]], [front[1], center[1]], color=color, + # linewidth=linewidth_scale) + ax.plot(corner[[2, 3], 0], corner[[2, 3], 1], color=color, linewidth=1.5*linewidth_scale) + return ax + + +def draw_points_boxes_plt(pc_range=None, points=None, boxes_pred=None, boxes_gt=None, wandb_name=None, + points_c='gray', bbox_gt_c='green', bbox_pred_c='red', linewidth_scale=0.75, + bbox_pred_label=None, bbox_gt_label=None, + return_ax=False, ax=None, marker_size=2.0, filename=None): + if pc_range is not None: + if isinstance(pc_range, int) or isinstance(pc_range, float): + pc_range = [-pc_range, -pc_range, pc_range, pc_range] + elif isinstance(pc_range, list) and len(pc_range)==6: + pc_range = [pc_range[i] for i in [0, 1, 3, 4]] + else: + assert isinstance(pc_range, list) and len(pc_range)==4, \ + "pc_range should be a int, float or list of lenth 6 or 4" + if ax is None: + ax = plt.figure(figsize=((pc_range[2] - pc_range[0]) / 20, + (pc_range[3] - pc_range[1]) / 20)).add_subplot(1, 1, 1) + ax.set_aspect('equal', 'box') + if pc_range is not None: + ax.set(xlim=(pc_range[0], pc_range[2]), + ylim=(pc_range[1], pc_range[3])) + if points is not None: + ax.plot(points[:, 0], points[:, 1], '.', + color=points_c, markersize=marker_size) + if (boxes_pred is not None) and len(boxes_pred) > 0: + ax = draw_box_plt(boxes_pred, ax, color=bbox_pred_c, linewidth_scale=linewidth_scale) + if bbox_pred_label is not None: + assert len(boxes_pred) == len(bbox_pred_label) + for box, label in zip(boxes_pred, bbox_pred_label): + ax.annotate(label, (box[0], box[1]), textcoords="offset points", xytext=(0, 10), ha='center', color='r') + if (boxes_gt is not None) and len(boxes_gt) > 0: + ax = draw_box_plt(boxes_gt, ax, color=bbox_gt_c, linewidth_scale=linewidth_scale) + if bbox_gt_label is not None: + assert len(boxes_gt) == len(bbox_gt_label) + for box, label in zip(boxes_gt, bbox_gt_label): + ax.annotate(label, (box[0], box[1]), textcoords="offset points", xytext=(0, 10), ha='center', color='g') + plt.xlabel('x') + plt.ylabel('y') + + if return_ax: + return ax + if filename is not None: + plt.savefig(filename) + plt.close() + + +def update_axis_linset(line_set, axis_len=5): + points = [ + [0, 0, 0], + [axis_len, 0, 0], + [0, axis_len, 0], + [0, 0, axis_len] + ] + lines = [[0, 1], [0, 2], [0, 3]] + colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] + line_set.points = o3d.utility.Vector3dVector(points) + line_set.lines = o3d.utility.Vector2iVector(lines) + line_set.colors = o3d.utility.Vector3dVector(colors) + return line_set + + +def bbx2linset(bbx, color=(0, 1, 0)): + """ + Convert the bounding box to o3d lineset for visualization. + + :param bbx : np.ndarray + shape: (n, 7) or (n, 11) or (n, 8, 3). + :param color : tuple + The bounding box color. 
+ + :return: line_set : open3d.LineSet + """ + if len(bbx) > 0 and len(bbx[0]) == 11: + bbx = bbx[:, 2:] + bbx_corner = boxes_to_corners_3d(bbx, 'lwh') + elif len(bbx) > 0 and len(bbx[0]) == 7: + bbx_tmp = np.zeros((len(bbx), 9)) + bbx_tmp[:, :6] = bbx[:, :6] + bbx_tmp[:, -1] = bbx[:, -1] + bbx_corner = boxes_to_corners_3d(bbx_tmp, 'lwh') + else: + bbx_corner = bbx + bbx_corner = np.array(bbx_corner) + # Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc... + lines = [[0, 1], [1, 2], [2, 3], [0, 3], + [4, 5], [5, 6], [6, 7], [4, 7], + [0, 4], [1, 5], [2, 6], [3, 7]] + + # Use the same color for all lines + colors = [list(color) for _ in range(len(lines))] + bbx_linset = [] + + for i in range(len(bbx_corner)): + bbx = bbx_corner[i] + # o3d use right-hand coordinate + bbx[:, :1] = - bbx[:, :1] + + line_set = o3d.geometry.LineSet() + line_set.points = o3d.utility.Vector3dVector(bbx) + line_set.lines = o3d.utility.Vector2iVector(lines) + line_set.colors = o3d.utility.Vector3dVector(colors) + bbx_linset.append(line_set) + + return bbx_linset + + +def update_lineset_vbo(vbo, bbx, color=None): + if len(bbx) > 0 and len(bbx[0]) == 9: + bbx = bbx[:, 2:] + bbx_corner = boxes_to_corners_3d(bbx, 'lwh') + else: + bbx_corner = bbx + bbx_corner = np.array(bbx_corner) + # Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc... + lines = [[0, 1], [1, 2], [2, 3], [0, 3], + [4, 5], [5, 6], [6, 7], [4, 7], + [0, 4], [1, 5], [2, 6], [3, 7]] + lines = np.array(lines) + if isinstance(color, np.ndarray): + color = color.squeeze().tolist() + + points_all = [] + lines_all = [] + colors_all = [] + for i in range(len(bbx_corner)): + bbx = bbx_corner[i] + # o3d use right-hand coordinate + bbx[:, :1] = - bbx[:, :1] + points_all.extend(bbx) + lines_all.extend((lines + 8 * i).tolist()) + # if no color given, use green for all lines + if color is None: + box_color = [[0, 1, 0] for _ in range(len(lines))] + elif isinstance(color[0], float): + box_color = [color for _ in range(len(lines))] + else: + box_color = [color[i] for _ in range(len(lines))] + + colors_all.extend(box_color) + vbo.points = o3d.utility.Vector3dVector(points_all) + vbo.lines = o3d.utility.Vector2iVector(lines_all) + vbo.colors = o3d.utility.Vector3dVector(colors_all) + return vbo + + +def o3d_draw_pcds_bbxs(pcds: list, + bbxs: list, + bbxs_colors: list=None, + pcds_colors: list=None): + """ + :param pcds: list of np array + :param bbxs: list of np array, + bounding boxes in corner format + :param bbxs_colors: list of tuples + :param pcds_colors: list of np array, shape same as pcds + """ + pcds_vis = [] + linsets = [] + for i, bbx in enumerate(bbxs): + bbx_color = (0, 1, 0) + if bbxs_colors is not None: + assert len(bbxs_colors) == len(bbxs) + bbx_color = bbxs_colors[i] + linset = bbx2linset(bbx, bbx_color) + linsets.extend(linset) + for i, points in enumerate(pcds): + points[:, 0] *= -1 + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points[:, :3]) + if pcds_colors is not None: + assert pcds_colors[i].shape == points[:, :3].shape + pcd.colors = o3d.utility.Vector3dVector(pcds_colors[i]) + else: + colors = get_palette_colors('random') + pcd.paint_uniform_color(colors[i]) + pcds_vis.append(pcd) + o3d.visualization.draw_geometries(pcds_vis + linsets) + + +def o3d_draw_frame_data(frame_dict, data_path): + pcds = [] + bbx_global = frame_dict['meta']['bbx_center_global'] + bbx_corners = boxes_to_corners_3d(np.array(bbx_global[:, 2:])) + linsets = [] + bbx_colors = get_palette_colors('objects') + for l in 
np.unique(bbx_global[:, 1]): + assert l < 3 + linsets.extend(bbx2linset(bbx_corners, bbx_colors[int(l)])) + for ai, acontent in frame_dict['agents'].items(): + for li, lidar_dict in acontent['lidar0'].items(): + lidar_file = os.path.join(data_path, lidar_dict['filename']) + points = pclib.load_pcd(lidar_file)[:, :3] + points = pclib.rotate3d(points, lidar_dict['pose'][3:]) + points = points + np.array(lidar_dict['pose'][:3]).reshape(1, 3) + # o3d use right hand: left -> right hand + points[:, 0] *= -1 + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points) + colors = get_palette_colors('calm_afternoon') + pcd.paint_uniform_color(colors[ai]) + pcds.append(pcd) + o3d.visualization.draw_geometries(pcds + linsets) + + +def o3d_draw_agent_data(agent_dict, data_path): + pcds = [] + bbx_lensets = [] + for li, lidar_dict in agent_dict['lidar0'].items(): + lidar_file = os.path.join(data_path, lidar_dict['filename']) + points = pclib.load_pcd(lidar_file)[:, :3] + # o3d use right hand: left -> right hand + points[:, 0] *= -1 + bbx = np.array(agent_dict['bbx_center']) + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points) + pcd.paint_uniform_color([0.5] * 3) + linsets = bbx2linset(bbx, (0, 1, 0)) + pcds.append(pcd) + bbx_lensets.extend(linsets) + o3d.visualization.draw_geometries(pcds + bbx_lensets) + + +def o3d_play_sequence(meta_dict, data_path): + vis = o3d.visualization.Visualizer() + vis.create_window() + vis.get_render_option().background_color = [0.05, 0.05, 0.05] + vis.get_render_option().point_size = 1.0 + vis.get_render_option().show_coordinate_frame = True + + vbo_pcd = o3d.geometry.PointCloud() + vbo_lineset = o3d.geometry.LineSet() + painter = get_palette_colors('pastels_rock') + + idx = 0 + while True: + for scenario, scenario_dict in meta_dict.items(): + for frame, frame_dict in scenario_dict.items(): + pcds = [] + colors = [] + for i, (ai, agent_dict) in enumerate(frame_dict['agents'].items()): + for li, lidar_dict in agent_dict['lidar0'].items(): + points = pclib.load_pcd(os.path.join( + data_path, + lidar_dict['filename']) + )[:, :3] + points = pclib.rotate3d(points, lidar_dict['pose'][3:]) + points = points + np.array(lidar_dict['pose'][:3]).reshape(1, 3) + pcds.append(points) + colors.append(np.ones_like(points) * + np.array(painter[i]).reshape(1, 3)) + pcds = np.concatenate(pcds, axis=0) + pcds[:, 0] *= -1 + colors = np.concatenate(colors, axis=0) + vbo_pcd.points = o3d.utility.Vector3dVector(pcds) + vbo_pcd.colors = o3d.utility.Vector3dVector(colors) + + # add boxes + bbxs = frame_dict['meta']['bbx_center_global'] + if len(bbxs) > 0: + bbxs = boxes_to_corners_3d(np.array(bbxs)[:, 2:]) + vbo_lineset = update_lineset_vbo(vbo_lineset, bbxs) + if idx == 0: + vis.add_geometry(vbo_lineset) + else: + vis.update_geometry(vbo_lineset) + # add pcds + if idx == 0: + vis.add_geometry(vbo_pcd) + else: + vis.update_geometry(vbo_pcd) + + vis.poll_events() + vis.update_renderer() + time.sleep(0.1) + idx += 1 + + +def plt_draw_frame_data(frame_dict, data_path): + fig = plt.figure(figsize=(10, 10)) + ax = fig.add_subplot() + for ai, acontent in frame_dict.items(): + for li, lidar_dict in acontent['lidar0'].items(): + lidar_file = os.path.join(data_path, lidar_dict['filename']) + points = pclib.load_pcd(lidar_file)[:, :3] + points = pclib.rotate3d(points, lidar_dict['pose']) + points = points + np.array(lidar_dict['pose'][:3]).reshape(1, 3) + # points = np.r_[points, [np.ones(points.shape[1])]] + # points = np.dot(lidar_dict['pose'], 
points).T[:, :3] + bbx = np.array(acontent['objects']) + assert len(bbx.shape) == 2 + bbx = bbx[:, 2:] + + ax.plot(points[:, 0], points[:, 1], '.', markersize=.5) + ax = draw_box_plt(bbx, ax) + plt.show() + plt.close() + + +def draw_3d_points_boxes_on_img(img, lidar2cam, I, points=None, boxes=None): + """ + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + + :param img: np.ndarray + :param lidar2cam: np.ndarray, (4, 4), transformation matrix from lidar to camera coordinates + :param I: np.ndarray, (3, 3), intrinsic parameters + :param points: np.ndarray, (N, 3+C) + :param boxes: np.ndarray, (N, 8, 3), corners are in lidar coordinates + """ + assert lidar2cam.shape == (4, 4) + assert I.shape == (3, 3) + # Create a figure and axis + fig, ax = plt.subplots(1) + + if points is not None: + points_homo = np.concatenate([points[:, :3], np.ones_like(points[:, :1])], axis=1).T + points_homo = lidar2cam @ points_homo + pixels = I @ points_homo[:3] + pixels[:2] = pixels[:2] / pixels[2:] + px = pixels[0].astype(int) + py = pixels[1].astype(int) + mask = (px >= 0) & (px<800) & (py >= 0) & (py < 600) & (pixels[2] > 0) + px, py = px[mask], py[mask] + dist = np.linalg.norm(points_homo[:2].T[mask], axis=1) + dist_norm = np.clip(dist, a_min=0, a_max=100) / 100. + # Create a colormap based on the numbers + cmap = plt.get_cmap('jet') + + # Convert the numbers to colors using the colormap + colors = np.array([cmap(num) for num in dist_norm])[:, :3] * 255 + img[py, px] = colors + + ax.imshow(img) + # Loop through the boxes and draw them on the image + if boxes is not None: + n_box = len(boxes) + box_points = boxes.reshape(-1, 3) + box_points_homo = np.concatenate([box_points[:, :3], np.ones_like(box_points[:, :1])], axis=1).T + box_points_homo = lidar2cam @ box_points_homo + box_pixels = I @ box_points_homo[:3] + box_pixels[:2] = box_pixels[:2] / box_pixels[2:] + box_pixels = box_pixels.T.reshape(n_box, 8, 3) + box_pixels = box_pixels[(box_pixels[:, :, 2] > 0).all(axis=1)] + for box in box_pixels: + faces = [ + [0, 1, 2, 3, 0], + [4, 5, 6, 7, 4], + [0, 1, 5, 4, 0], + [2, 3, 7, 6, 2] + ] + for face in faces: + vertices = [(box[i][0], box[i][1]) for i in face] + polygon = Polygon(vertices, fill=None, edgecolor='g') + ax.add_patch(polygon) + + plt.show() + plt.close() + + +def draw_2d_bboxes_on_img(img, boxes2d, ax_in=None): + """ + :param img: np.ndarray + :param boxes2d: np.ndarray, (N, 4, 2) for 4 corners or (N, 2, 2) for left top and right bottom corners inn pixel metric + """ + if ax_in is None: + fig, ax = plt.subplots(1) + else: + ax = ax_in + ax.imshow(img) + + if boxes2d is not None and len(boxes2d) > 0: + assert len(boxes2d.shape) == 3 + if boxes2d.shape[1] == 2: + box_4corners = [] + for box in boxes2d: + box_4corners.append([ + box[0], # left top + [box[1, 0], box[0, 1]], # right top + box[1], # right bottom + [box[0, 0], box[1, 1]], # left bottom + ]) + else: + box_4corners = boxes2d + + for box in box_4corners: + vertices = [(box[i][0], box[i][1]) for i in [0, 1, 2, 3, 0]] + polygon = Polygon(vertices, fill=None, edgecolor='lime') + ax.add_patch(polygon) + + if ax_in is None: + plt.show() + plt.close() + else: + return ax + + +def draw_3d_points_boxes_on_img(ax, img, lidar2img, points=None, boxes=None): + """ + 1 -------- 6 ^ z + /| /| | + 2 -------- 5. | + | | | | | . x + . 
0 -------- 7           |/
+        |/         |/    +-------> y
+        3 -------- 4
+
+    :param ax: plt plot axis
+    :param img: np.ndarray, (H, W, 3)
+    :param lidar2img: np.ndarray, (4, 4), transformation matrix from lidar to camera coordinates
+    :param points: np.ndarray, (N, 3+C)
+    :param boxes: np.ndarray, (N, 8, 3) or (N, 7), in lidar coordinates
+    """
+    H, W = img.shape[:2]
+    if points is not None:
+        points_homo = np.concatenate([points[:, :3], np.ones_like(points[:, :1])], axis=1).T
+        points_homo = lidar2img @ points_homo
+        pixels = points_homo[:3]
+        pixels[:2] = pixels[:2] / pixels[2:]
+        px = pixels[0].astype(int)
+        py = pixels[1].astype(int)
+        mask = (px >= 0) & (px < W) & (py >= 0) & (py < H) & (pixels[2] > 0)
+        if mask.sum() > 0:
+            px, py = px[mask], py[mask]
+            dist = np.linalg.norm(points_homo[:2].T[mask], axis=1)
+            dist_norm = np.clip(dist, a_min=0, a_max=100) / 100.
+            # Create a colormap based on the numbers
+            cmap = plt.get_cmap('cool')
+
+            # Convert the numbers to colors using the colormap
+            colors = np.array([cmap(num) for num in dist_norm])
+            colors = colors[:, :3] * 255
+            img[py, px] = colors
+
+    ax.imshow(img)
+    # Loop through the boxes and draw them on the image
+    if boxes is not None:
+        n_box = len(boxes)
+        if boxes.shape[1] == 7:
+            boxes = boxes_to_corners_3d(boxes)
+        box_points = boxes.reshape(-1, 3)
+        box_points_homo = np.concatenate([box_points[:, :3], np.ones_like(box_points[:, :1])], axis=1).T
+        box_points_homo = lidar2img @ box_points_homo
+
+        box_pixels = box_points_homo[:3]
+        box_pixels[:2] = box_pixels[:2] / box_pixels[2:]
+        box_pixels = box_pixels.T.reshape(n_box, 8, 3)
+        box_pixels = box_pixels[(box_pixels[:, :, 2] > 0).all(axis=1)]
+        for box in box_pixels:
+            faces = [
+                [0, 1, 2, 3, 0],
+                [4, 5, 6, 7, 4],
+                [0, 1, 5, 4, 0],
+                [2, 3, 7, 6, 2]
+            ]
+            for face in faces:
+                vertices = [(box[i][0], box[i][1]) for i in face]
+                polygon = Polygon(vertices, fill=None, edgecolor='lime')
+                ax.add_patch(polygon)
+
+
+def draw_matched_boxes(boxes1, boxes2, match, out_file=None):
+    fig = plt.figure(figsize=(10, 10))
+    ax = fig.add_subplot()
+    ax.axis('equal')
+
+    ax = draw_box_plt(boxes1, ax=ax, color='b')
+    ax = draw_box_plt(boxes2, ax=ax, color='r')
+
+    for p1, p2 in match:
+        ax.plot([boxes1[p1][0], boxes2[p2][0]], [boxes1[p1][1], boxes2[p2][1]], c='k', markersize=3)
+
+    if out_file is None:
+        plt.show()
+        plt.close()
+    else:
+        plt.savefig(out_file)
+
+
+def plot_cavs_points(cavs, points_key='points'):
+    lidar_range = cavs[0].lidar_range.tolist()
+    ax = draw_points_boxes_plt(
+        pc_range=lidar_range,
+        return_ax=True
+    )
+    colors = ['green', 'blue', 'orange', 'magenta', 'cyan']
+    for i, cav in enumerate(cavs):
+        points = cav.data[points_key].detach().cpu().numpy()
+        roadline = cav.data['roadline_pred'].detach().cpu().numpy()
+        ax.plot(points[:, 0], points[:, 1], '.', markersize=1, color=colors[i])
+        ax.plot(roadline[:, 0], roadline[:, 1], 'ro', markersize=1)
+    plt.savefig("/home/yys/Downloads/tmp.jpg")
+    plt.close()
+
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..d4bb2cbb
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_build/doctrees/cosense3d.agents.cav_prototype.doctree b/docs/_build/doctrees/cosense3d.agents.cav_prototype.doctree new file mode 100644 index 00000000..cabdcc04 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.agents.cav_prototype.doctree differ diff --git a/docs/_build/doctrees/cosense3d.agents.core.doctree b/docs/_build/doctrees/cosense3d.agents.core.doctree new file mode 100644 index 00000000..45df6052 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.agents.core.doctree differ diff --git a/docs/_build/doctrees/cosense3d.agents.doctree b/docs/_build/doctrees/cosense3d.agents.doctree new file mode 100644 index 00000000..5002b1b6 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.agents.doctree differ diff --git a/docs/_build/doctrees/cosense3d.agents.utils.doctree b/docs/_build/doctrees/cosense3d.agents.utils.doctree new file mode 100644 index 00000000..3a01692d Binary files /dev/null and b/docs/_build/doctrees/cosense3d.agents.utils.doctree differ diff --git a/docs/_build/doctrees/cosense3d.agents.viewer.doctree b/docs/_build/doctrees/cosense3d.agents.viewer.doctree new file mode 100644 index 00000000..a5d32499 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.agents.viewer.doctree differ diff --git a/docs/_build/doctrees/cosense3d.agents.viewer.items.doctree b/docs/_build/doctrees/cosense3d.agents.viewer.items.doctree new file mode 100644 index 00000000..00949419 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.agents.viewer.items.doctree differ diff --git a/docs/_build/doctrees/cosense3d.dataset.doctree b/docs/_build/doctrees/cosense3d.dataset.doctree new file mode 100644 index 00000000..b21c4cf2 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.dataset.doctree differ diff --git a/docs/_build/doctrees/cosense3d.dataset.pipeline.doctree b/docs/_build/doctrees/cosense3d.dataset.pipeline.doctree new file mode 100644 index 00000000..34c90efd Binary files /dev/null and b/docs/_build/doctrees/cosense3d.dataset.pipeline.doctree differ diff --git a/docs/_build/doctrees/cosense3d.dataset.toolkit.doctree b/docs/_build/doctrees/cosense3d.dataset.toolkit.doctree new file mode 100644 index 00000000..fa7476b2 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.dataset.toolkit.doctree differ diff --git a/docs/_build/doctrees/cosense3d.doctree b/docs/_build/doctrees/cosense3d.doctree new file mode 100644 index 00000000..213bbf98 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.backbone2d.doctree b/docs/_build/doctrees/cosense3d.modules.backbone2d.doctree new file mode 100644 index 00000000..12288aa2 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.backbone2d.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.backbone3d.doctree b/docs/_build/doctrees/cosense3d.modules.backbone3d.doctree new file mode 100644 index 00000000..4a8c1659 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.backbone3d.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.doctree b/docs/_build/doctrees/cosense3d.modules.doctree new file mode 100644 index 00000000..76539acf 
Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.fusion.doctree b/docs/_build/doctrees/cosense3d.modules.fusion.doctree new file mode 100644 index 00000000..a2383ed7 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.fusion.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.heads.doctree b/docs/_build/doctrees/cosense3d.modules.heads.doctree new file mode 100644 index 00000000..92270210 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.heads.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.losses.doctree b/docs/_build/doctrees/cosense3d.modules.losses.doctree new file mode 100644 index 00000000..46132406 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.losses.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.necks.doctree b/docs/_build/doctrees/cosense3d.modules.necks.doctree new file mode 100644 index 00000000..28581c8e Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.necks.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.plugin.doctree b/docs/_build/doctrees/cosense3d.modules.plugin.doctree new file mode 100644 index 00000000..3c60563d Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.plugin.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.projection.doctree b/docs/_build/doctrees/cosense3d.modules.projection.doctree new file mode 100644 index 00000000..0dc1cad9 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.projection.doctree differ diff --git a/docs/_build/doctrees/cosense3d.modules.utils.doctree b/docs/_build/doctrees/cosense3d.modules.utils.doctree new file mode 100644 index 00000000..db79302a Binary files /dev/null and b/docs/_build/doctrees/cosense3d.modules.utils.doctree differ diff --git a/docs/_build/doctrees/cosense3d.utils.doctree b/docs/_build/doctrees/cosense3d.utils.doctree new file mode 100644 index 00000000..8f3a2cd1 Binary files /dev/null and b/docs/_build/doctrees/cosense3d.utils.doctree differ diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle new file mode 100644 index 00000000..5f24d6c8 Binary files /dev/null and b/docs/_build/doctrees/environment.pickle differ diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree new file mode 100644 index 00000000..5b749032 Binary files /dev/null and b/docs/_build/doctrees/index.doctree differ diff --git a/docs/_build/doctrees/md/installation.doctree b/docs/_build/doctrees/md/installation.doctree new file mode 100644 index 00000000..1e5b5b0d Binary files /dev/null and b/docs/_build/doctrees/md/installation.doctree differ diff --git a/docs/_build/doctrees/md/prepare_data.doctree b/docs/_build/doctrees/md/prepare_data.doctree new file mode 100644 index 00000000..2e19e2f3 Binary files /dev/null and b/docs/_build/doctrees/md/prepare_data.doctree differ diff --git a/docs/_build/doctrees/md/structure.doctree b/docs/_build/doctrees/md/structure.doctree new file mode 100644 index 00000000..48df404f Binary files /dev/null and b/docs/_build/doctrees/md/structure.doctree differ diff --git a/docs/_build/doctrees/modules.doctree b/docs/_build/doctrees/modules.doctree new file mode 100644 index 00000000..f00ed7fd Binary files /dev/null and b/docs/_build/doctrees/modules.doctree differ diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo new file mode 100644 index 
00000000..519715b8 --- /dev/null +++ b/docs/_build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 491fd18f495589bb12a3c547120c4862 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_build/html/_images/buffer_based_sampling.png b/docs/_build/html/_images/buffer_based_sampling.png new file mode 100644 index 00000000..6678d0f4 Binary files /dev/null and b/docs/_build/html/_images/buffer_based_sampling.png differ diff --git a/docs/_build/html/_images/center_controller.png b/docs/_build/html/_images/center_controller.png new file mode 100644 index 00000000..3d758489 Binary files /dev/null and b/docs/_build/html/_images/center_controller.png differ diff --git a/docs/_build/html/_images/framework-structure.png b/docs/_build/html/_images/framework-structure.png new file mode 100644 index 00000000..78c5136a Binary files /dev/null and b/docs/_build/html/_images/framework-structure.png differ diff --git a/docs/_build/html/_images/glviewer.png b/docs/_build/html/_images/glviewer.png new file mode 100644 index 00000000..ef75085f Binary files /dev/null and b/docs/_build/html/_images/glviewer.png differ diff --git a/docs/_build/html/_images/imganno2dviewer.png b/docs/_build/html/_images/imganno2dviewer.png new file mode 100644 index 00000000..dbd4a6cc Binary files /dev/null and b/docs/_build/html/_images/imganno2dviewer.png differ diff --git a/docs/_build/html/_images/imgviewer.png b/docs/_build/html/_images/imgviewer.png new file mode 100644 index 00000000..0eb6a691 Binary files /dev/null and b/docs/_build/html/_images/imgviewer.png differ diff --git a/docs/_build/html/_images/outputviewer.png b/docs/_build/html/_images/outputviewer.png new file mode 100644 index 00000000..66efa4cf Binary files /dev/null and b/docs/_build/html/_images/outputviewer.png differ diff --git a/docs/_build/html/_modules/cosense3d/agents/cav_prototype.html b/docs/_build/html/_modules/cosense3d/agents/cav_prototype.html new file mode 100644 index 00000000..2a497ecd --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/cav_prototype.html @@ -0,0 +1,121 @@ + + + + + + cosense3d.agents.cav_prototype — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.cav_prototype

+# This module provides prototypes for CAVs/agents.
+# A prototype has the following features:
+# 1. It defines the data processing logic of the prototyped agent/CAV.
+# 2. All intermediate processed data are stored locally in the prototype instance.
+# 3. It specifies the requested and responded CPMs.
+
+import importlib
+
+
+
+def get_prototype(module_full_path: str):
+    module_name, cls_name = module_full_path.rsplit('.', 1)
+    module = importlib.import_module(f'cosense3d.agents.cav_prototype.{module_name}')
+    cls_obj = getattr(module, cls_name, None)
+    assert cls_obj is not None, f'Class \'{cls_name}\' not found in module \'{module_name}\'.'
+    return cls_obj
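As a usage sketch (the argument string below is only an example; any '<module>.<ClassName>' pair living under cosense3d.agents.cav_prototype resolves the same way):

    from cosense3d.agents.cav_prototype import get_prototype

    # 'base_cav.BaseCAV' -> module 'cosense3d.agents.cav_prototype.base_cav', class 'BaseCAV'
    cav_cls = get_prototype('base_cav.BaseCAV')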
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/cav_prototype/base_cav.html b/docs/_build/html/_modules/cosense3d/agents/cav_prototype/base_cav.html new file mode 100644 index 00000000..25048ced --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/cav_prototype/base_cav.html @@ -0,0 +1,371 @@ + + + + + + cosense3d.agents.cav_prototype.base_cav — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.cav_prototype.base_cav

+import torch
+from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP
+
+
+
[docs]class BaseCAV: + def __init__(self, id: str, mapped_id: int, is_ego: bool, + lidar_range: torch.Tensor, memory_len: int, + lidar_pose: torch.Tensor=None, require_grad: bool=False, + seq_len: int=1, **kwargs): + """ + Base class for CAV prototype. + + :param id: agent id. + :param mapped_id: remapped id. + :param is_ego: if the agent is an ego agent. + :param lidar_range: visible lidar range, + :param memory_len: memory length for memory queue. + :param lidar_pose: lidar pose in shape (4, 4). + :param require_grad: if True, the gradients will be calculated for this agent during training. + :param seq_len: sequence length of the input data. + :param kwargs: additional key-value arguments. + """ + self.id = id + self.mapped_id = mapped_id + self.is_ego = is_ego + self.lidar_pose = lidar_pose + self.lidar_range = lidar_range + self.memory_len = memory_len + self.require_grad = require_grad + self.seq_len = seq_len + for k, v in kwargs.items(): + setattr(self, k, v) + self.data = {} # memory FIFO + self.prepare_data_keys = ['img', 'points', 'annos_global', 'annos_local'] + +
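For illustration, a minimal instantiation following the constructor signature above; the concrete values (id, lidar range, pose) are made up, and lidar_range is assumed here to follow the usual [x_min, y_min, z_min, x_max, y_max, z_max] convention:

    import torch
    from cosense3d.agents.cav_prototype.base_cav import BaseCAV

    cav = BaseCAV(
        id='cav_0', mapped_id=0, is_ego=True,
        lidar_range=torch.tensor([-100.0, -40.0, -3.5, 100.0, 40.0, 1.5]),
        memory_len=1,
        lidar_pose=torch.eye(4),
        require_grad=False,
        seq_len=1,
    )
    # per-frame inputs are placed into the local data buffer before the forward run
    cav.data['points'] = torch.rand(1000, 4)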
[docs] def update(self, lidar_pose, is_ego, require_grad): + self.lidar_pose = lidar_pose + self.is_ego = is_ego + self.require_grad = require_grad
+ + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(id={self.id}, ' + repr_str += f'is_ego={self.is_ego}, ' + repr_str += f'data={self.data.keys()})' + return repr_str + +
[docs] def apply_transform(self): + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + DOP.cav_aug_transform(self.data, transform, self.data['augment_params'], + apply_to=self.prepare_data_keys)
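In the non-ego branch above, request['lidar_pose'].inverse() @ self.lidar_pose maps points from the cooperative agent's lidar frame into the ego lidar frame. A tiny numeric sketch with made-up 4x4 homogeneous poses:

    import torch

    T_ego2glob = torch.eye(4); T_ego2glob[0, 3] = 10.0   # ego lidar 10 m along x in the global frame
    T_cav2glob = torch.eye(4); T_cav2glob[0, 3] = 25.0   # cooperative lidar 25 m along x
    T_cav2ego = T_ego2glob.inverse() @ T_cav2glob

    # the cooperative lidar origin lands 15 m in front of the ego lidar
    print(T_cav2ego @ torch.tensor([0.0, 0.0, 0.0, 1.0]))  # tensor([15., 0., 0., 1.])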
+ +
[docs] def prepare_data(self): + pass
+ +
[docs] def transform_data(self): + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys)
+ +
[docs] def has_request(self): + if 'received_request' in self.data and self.data['received_request'] is not None: + return True + else: + return False
+ +
[docs] def get_request_cpm(self): + return {'lidar_pose': self.lidar_pose}
+ +
[docs] def get_response_cpm(self): + cpm = {} + for k in ['points']: + if k in self.data: + cpm[k] = self.data[k] + return cpm
+ +
[docs] def receive_request(self, request): + self.data['received_request'] = request
+ +
[docs] def receive_response(self, response): + self.data['received_response'] = response
+ +
[docs] def forward(self, tasks, training_mode, **kwargs): + self.forward_localization(tasks, training_mode, **kwargs) + self.forward_local(tasks, training_mode, **kwargs) + self.forward_fusion(tasks, training_mode, **kwargs) + self.forward_head(tasks, training_mode, **kwargs) + return tasks
+ +
[docs] def forward_localization(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks
+ +
[docs] def forward_local(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks
+ +
[docs] def forward_fusion(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks
+ +
[docs] def forward_head(self, tasks, training_mode, **kwargs): + """To be overloaded.""" + return tasks
+ +
[docs] def loss(self, tasks, **kwargs): + """To be overloaded.""" + return tasks
+ +
[docs] def reset_data(self, *args, **kwargs): + del self.data + self.data = {}
+ +
[docs] def pre_update_memory(self): + """Update memory before each forward run of a single frame.""" + pass
+ +
[docs] def post_update_memory(self): + """Update memory after each forward run of a single frame.""" + pass
+ + +
[docs]class BaseSeqCAV: + def __init__(self, id, mapped_id, is_ego, lidar_range, memory_len, + lidar_pose=None, require_grad=False, seq_len=1, **kwargs): + self.id = id + self.mapped_id = mapped_id + self.is_ego = is_ego + self.lidar_pose = lidar_pose + self.lidar_range = lidar_range + self.memory_len = memory_len + self.require_grad = require_grad + self.seq_len = seq_len + for k, v in kwargs.items(): + setattr(self, k, v) + self.data = {} # memory FIFO + self.memory = {} + self.prepare_data_keys = ['img', 'points', 'annos_global', 'annos_local'] + +
[docs] def update(self, lidar_pose): + self.lidar_pose = lidar_pose
+ +
[docs] def task_id(self, seq_idx): + return f"{self.id}.{seq_idx}"
+ +
[docs] def get_data(self, keys, seq_idx=None): + if seq_idx is None: + out = {} + for i, d in self.data.items(): + out[i] = {} + for k in keys: + out[i][k] = d[k] + else: + out = {k: self.data[seq_idx][k] for k in keys} + return out
+ + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(id={self.id}, ' + repr_str += f'is_ego={self.is_ego}, ' + repr_str += f'data={self.data.keys()})' + return repr_str + +
[docs] def apply_transform(self, seq_idx): + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + DOP.cav_aug_transform(self.data, transform, self.data['augment_params'], + apply_to=self.prepare_data_keys)
+ +
[docs] def prepare_data(self, seq_idx): + self.apply_transform(seq_idx) + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys)
+ +
[docs] def has_request(self): + has_req = False + for d in self.data.values(): + if 'received_request' in d and d['received_request'] is not None: + has_req = True + break + return has_req
+ +
[docs] def get_request_cpm(self): + return self.get_data(['lidar_poses'])
+ +
[docs] def get_response_cpm(self): + cpm = {} + for k in ['points']: + if k in self.data[0]: + cpm[k] = {i: d[k] for i, d in self.data.items()} + return cpm
+ +
[docs] def receive_request(self, request): + for i, req in request.items(): + if i not in self.data: + continue + self.data[i]['received_request'] = req
+ +
[docs] def receive_response(self, response, seq_idx): + for cav_id, resp in response.items(): + self.data[seq_idx]['received_response'][cav_id] = {k: v[seq_idx] for k, v in resp.items()}
+ +
[docs] def forward(self, tasks, training_mode, seq_idx, with_loss): + self.prepare_data(seq_idx) + self.forward_local(tasks, training_mode, seq_idx, with_loss) + self.forward_fusion(tasks, training_mode, seq_idx, with_loss) + self.forward_head(tasks, training_mode, seq_idx, with_loss) + return tasks
+ +
[docs] def forward_local(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks
+ +
[docs] def forward_fusion(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks
+ +
[docs] def forward_head(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks
+ +
[docs] def loss(self, tasks, training_mode, seq_idx, with_loss): + """To be overloaded.""" + return tasks
+ +
[docs] def reset_data(self, *args, **kwargs): + del self.data + self.data = {}
+ +
[docs] def pre_update_memory(self, seq_idx, **kwargs): + """Update memory before each forward run of a single frame.""" + pass
+ +
[docs] def post_update_memory(self, seq_idx, **kwargs): + """Update memory after each forward run of a single frame.""" + pass
+ + +
[docs]class OPV2VtCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global']
+ + +
[docs]class OPV2VtCAV_v2(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.is_ego: + self.prepare_data_keys = ['points', 'annos_local', 'annos_global', 'annos_global_pred'] + else: + self.prepare_data_keys = ['points', 'annos_local', 'annos_global']
+ + +
[docs]class DairV2XCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_global', 'annos_local']
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/cav_prototype/streamLTS_collection.html b/docs/_build/html/_modules/cosense3d/agents/cav_prototype/streamLTS_collection.html new file mode 100644 index 00000000..a145669e --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/cav_prototype/streamLTS_collection.html @@ -0,0 +1,814 @@ + + + + + + cosense3d.agents.cav_prototype.streamLTS_collection — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.agents.cav_prototype.streamLTS_collection

+import copy
+
+import torch
+import torch_scatter
+from cosense3d.agents.utils.transform import DataOnlineProcessor as DOP
+from cosense3d.agents.cav_prototype.base_cav import BaseCAV
+
+
+
[docs]class StreamLidarCAV(BaseCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dataset = kwargs.get('dataset', None) + self.lidar_range = torch.nn.Parameter(self.lidar_range) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global'] + self.data['memory'] = None + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + self.use_aug = True + +
[docs] def refresh_memory(self, prev_exists): + x = prev_exists.float() + init_pose = torch.eye(4, device=self.lidar_pose.device).unsqueeze(0).unsqueeze(0) + if not x: + self.data['memory'] = { + 'embeddings': x.new_zeros(self.memory_len, self.memory_num_propagated, self.memory_emb_dims), + 'ref_pts': x.new_zeros(self.memory_len, self.memory_num_propagated, self.ref_pts_dim), + 'timestamp': x.new_zeros(self.memory_len, self.memory_num_propagated, 1), + 'pose': x.new_zeros(self.memory_len, self.memory_num_propagated, 4, 4) , + 'pose_no_aug': x.new_zeros(self.memory_len, self.memory_num_propagated, 4, 4) , + 'velo': x.new_zeros(self.memory_len, self.memory_num_propagated, 2), + } + self.data['memory']['pose_no_aug'] = self.data['memory']['pose'] + init_pose + self.aug_transform = None + self.T_aug2g = None + self.T_g2aug = None + self.T_e2g = None + else: + for k, v in self.data['memory'].items(): + self.data['memory'][k] = self.data['memory'][k][:self.memory_len] * x + if not x: + self.data['memory']['pose_no_aug'][0] = init_pose[0].repeat(self.memory_num_propagated, 1, 1) + self.data['memory']['prev_exists'] = x
+ +
[docs] def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e = self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug = T_e2aug @ T_g2e + if self.is_ego: + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + else: + data_keys = [k for k in self.prepare_data_keys if k != 'annos_global'] + DOP.apply_transform(self.data, T_c2aug, apply_to=data_keys) + # global bboxes share the same memory with the ego cav, therefore it is already transformed to the aug coor + # DOP.apply_transform(self.data, T_e2aug, apply_to=['annos_global']) + if self.data['prev_exists']: + self.data['memory']['pose_no_aug'] = T_g2e @ self.data['memory']['pose_no_aug'] + self.data['memory']['ref_pts'] = self.transform_ref_pts( + self.data['memory']['ref_pts'], T_g2aug) + self.data['memory']['pose'] = self.aug_transform @ self.data['memory']['pose_no_aug'] + + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=['points', 'annos_local']) + self.T_aug2g = T_c2aug
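The chain of 4x4 homogeneous transforms above can be hard to follow. The self-contained sketch below (plain torch, illustrative poses only, no project code) reproduces the composition used here and checks its consistency:

    import torch

    def make_pose(yaw, tx, ty):
        # 4x4 homogeneous matrix mapping points from one frame to another
        c, s = torch.cos(yaw), torch.sin(yaw)
        T = torch.eye(4)
        T[0, 0], T[0, 1], T[1, 0], T[1, 1] = c, -s, s, c
        T[:2, 3] = torch.tensor([tx, ty])
        return T

    T_e2g = make_pose(torch.tensor(0.3), 10., 2.)    # ego -> global (assumed pose)
    T_c2g = make_pose(torch.tensor(0.1), 14., 5.)    # coop cav -> global (assumed pose)
    T_g2e = T_e2g.inverse()
    T_c2e = T_g2e @ T_c2g                            # coop cav -> ego
    T_e2aug = make_pose(torch.tensor(0.05), 0., 0.)  # augmentation applied in the ego frame
    T_c2aug = T_e2aug @ T_c2e                        # coop cav -> augmented ego frame
    T_g2aug = T_e2aug @ T_g2e                        # global -> augmented ego frame
    # consistency check: global -> aug via the cav pose yields the same matrix
    assert torch.allclose(T_c2aug, T_g2aug @ T_c2g, atol=1e-5)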
+ +
[docs] def prepare_data(self): + self.prepare_time_scale() + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1)
+ +
[docs] def transform_data(self): + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys)
+ # self.vis_data('transformed', 4) + +
[docs] def prepare_time_scale(self): + # hash time + azi = torch.arctan2(self.data['points'][:, 1], self.data['points'][:, 0]) + azi, inds = (torch.rad2deg(azi) + 180).floor().long().unique(return_inverse=True) + times = torch.zeros_like(azi).float() + torch_scatter.scatter_mean(self.data['points'][:, -1], inds, dim=0, out=times) + if len(times) < 360: + time360 = times.new_zeros(360) + time360[azi] = times + time360[time360 == 0] = times.mean() + else: + time360 = times + self.data['time_scale'] = time360 + self.data['time_scale_reduced'] = time360 - self.timestamp
+ # self.data['points'] = self.data['points'][:, :-1] + +
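A sketch of the azimuth-based time hashing performed by prepare_time_scale, using a toy point cloud (all values assumed): per-point timestamps are averaged into 360 one-degree azimuth bins.

    import torch
    import torch_scatter

    points = torch.randn(1000, 4)                    # toy cloud: x, y, z, per-point timestamp
    points[:, -1] = torch.rand(1000) * 0.1           # fake scan timestamps in [0, 0.1) s
    azi = (torch.rad2deg(torch.arctan2(points[:, 1], points[:, 0])) + 180) % 360
    azi, inds = azi.floor().long().unique(return_inverse=True)
    times = torch.zeros_like(azi).float()
    torch_scatter.scatter_mean(points[:, -1], inds, dim=0, out=times)
    time360 = times.new_zeros(360)
    time360[azi] = times                             # mean timestamp per azimuth degree
    time360[time360 == 0] = times.mean()             # fill degrees that received no points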
[docs] def update_memory_timestamps(self, ref_pts): + # transform ref pts to coop coordinates + transform = self.lidar_pose.inverse() @ self.T_aug2g + pts = self.transform_ref_pts(ref_pts, transform) + timestamp = torch.rad2deg(torch.arctan2(pts[:, 1], pts[:, 0])) + 180 + timestamp = - self.data['time_scale'][(timestamp % 360).floor().long()].unsqueeze(-1) + return timestamp
+ +
[docs] def get_response_cpm(self): + cpm = {} + feat = self.data['temp_fusion_feat'] + scores = self.data['detection_local']['all_cls_scores'][-1][..., + min(self.data['detection_local']['all_cls_scores' + ][-1].shape[-1] - 1, 1):].topk(1, dim=-1).values[..., 0] + mask = scores > self.share_score_thr + cpm['temp_fusion_feat'] = {'ref_pts': feat['ref_pts'][mask], 'outs_dec': feat['outs_dec'][:, mask]} + return cpm
+ +
[docs] def forward_local(self, tasks, training_mode, **kwargs): + if self.is_ego and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:backbone_neck', {})) + tasks[grad_mode].append((self.id, '13:roi_head', {})) + + if self.require_grad and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '14:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '15:det1_head', {}))
+ +
[docs] def forward_fusion(self, tasks, training_mode, **kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego: + tasks[grad_mode].append((self.id, '21:spatial_fusion', {})) + return tasks
+ +
[docs] def forward_head(self, tasks, training_mode, **kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego: + tasks[grad_mode].append((self.id, '23:det2_head', {})) + return tasks
+ +
[docs] def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '31:roi_head', {})) + tasks['loss'].append((self.id, '32:det1_head', {})) + tasks['loss'].append((self.id, '33:det2_head', {})) + elif self.require_grad: + tasks['loss'].append((self.id, '32:det1_head', {})) + return tasks
+ +
[docs] def pre_update_memory(self): + """Update memory before each forward run of a single frame.""" + if self.data['memory'] is not None: + self.data['memory']['timestamp'] += self.timestamp + # pose_inv = self.lidar_pose.inverse() + # self.data['memory']['pose'] = pose_inv @ self.data['memory']['pose'] + # self.data['memory']['ref_pts'] = self.transform_ref_pts( + # self.data['memory']['ref_pts'], pose_inv) + + self.refresh_memory(self.data['prev_exists'])
+ +
[docs] def post_update_memory(self): + """Update memory after each forward run of a single frame.""" + x = self.data['detection_local'] + scores = x['all_cls_scores'][-1][..., + min(x['all_cls_scores'][-1].shape[-1] - 1, 1):].topk(1, dim=-1).values[..., 0] + topk = torch.topk(scores, k=self.memory_num_propagated).indices + + ref_pts = x['all_bbox_preds'][-1][:, :self.ref_pts_dim] + velo = x['all_bbox_preds'][-1][:, -2:] + embeddings = self.data['temp_fusion_feat']['outs_dec'][-1] + + timestamp = self.update_memory_timestamps(ref_pts) + pose_no_aug = torch.eye(4, device=ref_pts.device).unsqueeze(0).repeat( + timestamp.shape[0], 1, 1) + + vars = locals() + for k, v in self.data['memory'].items(): + if k == 'prev_exists' or k == 'pose': + continue + rec_topk = vars[k][topk].unsqueeze(0) + self.data['memory'][k] = torch.cat([rec_topk, v], dim=0) + + # self.vis_ref_pts('post update') + + # ego aug to global + self.data['memory']['ref_pts'] = self.transform_ref_pts( + self.data['memory']['ref_pts'], self.T_aug2g) + self.data['memory']['timestamp'][1:] -= self.timestamp + self.data['memory']['pose_no_aug'] = self.T_e2g[(None,) * 2] @ self.data['memory']['pose_no_aug'] # aug -->global
+ + # if self.require_grad: + # # self.vis_local_detection() + # self.vis_local_pred() + # print('d') + +
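The rolling memory queue maintained by pre_update_memory / post_update_memory, reduced to its essentials (the shapes below are illustrative assumptions, not the configured values): the top-k current-frame queries are prepended and the queue is truncated to the memory length.

    import torch

    memory_len, num_propagated, emb_dims = 4, 256, 256    # assumed sizes
    memory = torch.zeros(memory_len, num_propagated, emb_dims)

    scores = torch.rand(900)                    # per-query confidence of the current frame
    embeddings = torch.randn(900, emb_dims)     # current-frame query embeddings
    topk = torch.topk(scores, k=num_propagated).indices

    new_entry = embeddings[topk].unsqueeze(0)                       # (1, num_propagated, emb_dims)
    memory = torch.cat([new_entry, memory], dim=0)[:memory_len]     # prepend, then truncate (FIFO)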
[docs] def transform_ref_pts(self, reference_points, matrix): + reference_points = torch.cat( + [reference_points, torch.ones_like(reference_points[..., 0:1])], dim=-1) + if reference_points.ndim == 3: + reference_points = matrix.unsqueeze(0) @ reference_points.permute(0, 2, 1) + reference_points = reference_points.permute(0, 2, 1)[..., :3] + elif reference_points.ndim == 2: + reference_points = matrix @ reference_points.T + reference_points = reference_points.T[..., :3] + else: + raise NotImplementedError + return reference_points
+ + @property + def timestamp(self): + if self.dataset == 'opv2vt': + timestamp = float(self.data['frame']) * 0.1 / 2 + elif self.dataset == 'dairv2xt': + timestamp = self.data['global_time'] + else: + raise NotImplementedError + return timestamp + +
[docs] def vis_ref_pts(self, ax=None, label=None, his_len=1, **kwargs): + import matplotlib.pyplot as plt + from cosense3d.utils.vislib import draw_points_boxes_plt + if ax is None: + fig = plt.figure(figsize=(8, 4)) + ax = fig.add_subplot() + pcd = self.data['points'][:, :3].detach().cpu().numpy() + gt_boxes = self.data['local_bboxes_3d'].detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.lidar_range.tolist(), + boxes_gt=gt_boxes[:, :7], + points=pcd, + ax=ax, + return_ax=True, + ) + + ref_pts = self.data['memory']['ref_pts'].detach().cpu().numpy() + markers = ['.r', '.m', '.b', '.c'] + for i in range(his_len): + plt.plot(ref_pts[i, :, 0], ref_pts[i, :, 1], markers[i], markersize=2) + ax.set_title(f"{label}: {self.data['scenario']}, {self.data['frame']}") + plt.show() + plt.close() + + return ax
+ +
[docs] def vis_poses(self, ax=None, label=None, his_len=1, **kwargs): + import matplotlib.pyplot as plt + markers = ['r', 'm', 'b', 'c'] + mem_poses = self.data['memory']['pose'][:, 0].detach().cpu() + p0 = mem_poses[:his_len, :2, -1].numpy() + p1 = mem_poses[:his_len] @ torch.tensor([1., 0., 0., 1.]).view(1, 4, 1).repeat(his_len, 1, 1) + p2 = mem_poses[:his_len] @ torch.tensor([0., 1., 0., 1.]).view(1, 4, 1).repeat(his_len, 1, 1) + p1 = p1.squeeze(-1)[:, :2].numpy() + p2 = p2.squeeze(-1)[:, :2].numpy() + + if ax is None: + fig = plt.figure() + ax = fig.add_subplot() + ax.axis('equal') + for i in range(his_len): + ax.plot([p0[i, 0], p1[i, 0]], [p0[i, 1], p1[i, 1]], markers[i]) + ax.plot([p0[i, 0], p2[i, 0]], [p0[i, 1], p2[i, 1]], markers[i]) + return ax
+ +
[docs] def vis_local_detection(self): + import matplotlib.pyplot as plt + from cosense3d.utils.vislib import draw_points_boxes_plt + points = self.data['points'][:, :3].detach().cpu().numpy() + # pred_boxes = self.data['det_local']['preds']['box'].detach().cpu().numpy() + gt_boxes = self.data['local_bboxes_3d'][:, :7].detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.lidar_range.tolist(), + boxes_gt=gt_boxes[:, :7], + # boxes_pred=pred_boxes, + points=points, + return_ax=True + ) + + ax.set_title('ego' if self.is_ego else 'coop') + plt.savefig("/home/yuan/Pictures/local_det.png") + plt.close()
+ +
[docs] def vis_local_pred(self): + import matplotlib.pyplot as plt + from cosense3d.utils.vislib import draw_points_boxes_plt + points = self.data['points'][:, :3].detach().cpu().numpy() + # pred_boxes = self.data['detection_local']['preds']['box'].detach().cpu().numpy() + ref_pts = self.data['temp_fusion_feat']['ref_pts'].cpu() * (self.lidar_range[3:] - self.lidar_range[:3]) + self.lidar_range[:3] + ref_pts = ref_pts.detach().numpy() + gt_boxes = self.data['global_bboxes_3d'][:, :7].detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.lidar_range.tolist(), + boxes_gt=gt_boxes[:, :7], + # boxes_pred=pred_boxes, + points=points, + return_ax=True + ) + ax.plot(ref_pts[:, 0], ref_pts[:, 1], '.r', markersize=1) + + ax.set_title('ego' if self.is_ego else 'coop') + plt.savefig("/home/yuan/Pictures/local_pred.png") + plt.close()
+ + +
[docs]class slcDenseToSparse(StreamLidarCAV): + +
[docs] def prepare_data(self): + self.prepare_time_scale()
+ +
[docs] def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:roi_head', {})) + tasks[grad_mode].append((self.id, '13:formatting', {})) + tasks[grad_mode].append((self.id, '14:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '15:det1_head', {}))
+ + +slcFcooper = slcDenseToSparse +slcAttnFusion = slcDenseToSparse + + +
[docs]class slcFPVRCNN(StreamLidarCAV): +
[docs] def prepare_data(self): + self.prepare_time_scale()
+ +
[docs] def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:roi_head', {})) + tasks[grad_mode].append((self.id, '13:keypoint_composer', {})) + tasks[grad_mode].append((self.id, '14:formatting', {})) + tasks[grad_mode].append((self.id, '15:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '16:det1_head', {}))
+ + # def forward_fusion(self, tasks, training_mode, **kwargs): + # # if self.is_ego: + # # tasks['with_grad'].append((self.id, '21:spatial_fusion', {})) + # return tasks + # + # def forward_head(self, tasks, training_mode, **kwargs): + # # if self.is_ego: + # # tasks['with_grad'].append((self.id, '23:det2_head', {})) + # return tasks + # + # def pre_update_memory(self): + # pass + # + # def post_update_memory(self): + # pass + # + # def get_response_cpm(self): + # return {} + # + # def loss(self, tasks, **kwargs): + # if self.is_ego: + # tasks['loss'].append((self.id, '31:roi_head', {})) + # return tasks + # + # def apply_transform(self): + # if self.use_aug: + # if self.is_ego: + # T_e2g = self.lidar_pose + # T_g2e = self.lidar_pose.inverse() + # T_c2e = torch.eye(4).to(self.lidar_pose.device) + # else: + # # cav to ego + # T_e2g = self.data['received_request']['lidar_pose'] + # T_g2e = self.data['received_request']['lidar_pose'].inverse() + # T_c2e = T_g2e @ self.lidar_pose + # + # if self.aug_transform is None: + # self.aug_transform = DOP.update_transform_with_aug( + # torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + # T_e2aug = self.aug_transform + # else: + # # adapt aug params to the current ego frame + # T_e2aug = self.T_g2aug @ T_e2g + # + # T_c2aug = T_e2aug @ T_c2e + # T_g2aug = T_e2aug @ T_g2e + # + # DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + # + # self.T_e2g = T_e2g + # self.T_g2aug = T_g2aug + # self.T_aug2g = T_g2aug.inverse() # ego aug to global + # + # else: + # if self.is_ego: + # transform = torch.eye(4).to(self.lidar_pose.device) + # else: + # # cav to ego + # request = self.data['received_request'] + # transform = request['lidar_pose'].inverse() @ self.lidar_pose + # + # T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + # DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + # self.T_aug2g = T_c2aug + + +
[docs]class slcNoBoxTime(StreamLidarCAV): + +
[docs] def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1)
+ +
[docs] def update_memory_timestamps(self, ref_pts): + timestamp = torch.zeros_like(ref_pts[..., :1]) + return timestamp
+ + +
[docs]class slcCIASSD(StreamLidarCAV): +
[docs] def prepare_data(self): + self.prepare_time_scale()
+ +
[docs] def forward_local(self, tasks, training_mode, **kwargs): + if (self.is_ego or self.require_grad) and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:roi_head', {}))
+ +
[docs] def forward_fusion(self, tasks, training_mode, **kwargs): + return tasks
+ +
[docs] def forward_head(self, tasks, training_mode, **kwargs): + return tasks
+ +
[docs] def pre_update_memory(self): + pass
+ +
[docs] def post_update_memory(self): + pass
+ +
[docs] def get_response_cpm(self): + return {}
+ +
[docs] def loss(self, tasks, **kwargs): + if self.is_ego: + tasks['loss'].append((self.id, '21:roi_head', {})) + return tasks
+ +
[docs] def apply_transform(self): + if self.use_aug: + if self.is_ego: + T_e2g = self.lidar_pose + T_g2e = self.lidar_pose.inverse() + T_c2e = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + T_e2g = self.data['received_request']['lidar_pose'] + T_g2e = self.data['received_request']['lidar_pose'].inverse() + T_c2e = T_g2e @ self.lidar_pose + + if self.aug_transform is None: + self.aug_transform = DOP.update_transform_with_aug( + torch.eye(4).to(self.lidar_pose.device), self.data['augment_params']) + T_e2aug = self.aug_transform + else: + # adapt aug params to the current ego frame + T_e2aug = self.T_g2aug @ T_e2g + + T_c2aug = T_e2aug @ T_c2e + T_g2aug = T_e2aug @ T_g2e + + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + + self.T_e2g = T_e2g + self.T_g2aug = T_g2aug + self.T_aug2g = T_g2aug.inverse() # ego aug to global + + else: + if self.is_ego: + transform = torch.eye(4).to(self.lidar_pose.device) + else: + # cav to ego + request = self.data['received_request'] + transform = request['lidar_pose'].inverse() @ self.lidar_pose + + T_c2aug = DOP.update_transform_with_aug(transform, self.data['augment_params']) + DOP.apply_transform(self.data, T_c2aug, apply_to=self.prepare_data_keys) + self.T_aug2g = T_c2aug
+ + +
[docs]class LTSDairV2X(StreamLidarCAV): +
[docs] def forward_local(self, tasks, training_mode, **kwargs): + if self.require_grad and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '11:pts_backbone', {})) + tasks[grad_mode].append((self.id, '12:backbone_neck', {})) + tasks[grad_mode].append((self.id, '13:roi_head', {})) + tasks[grad_mode].append((self.id, '14:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '15:det1_head', {}))
+ +
[docs] def forward_fusion(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '21:spatial_fusion', {})) + return tasks
+ +
[docs] def forward_head(self, tasks, training_mode, **kwargs): + if self.is_ego: + tasks['with_grad'].append((self.id, '23:det2_head', {})) + return tasks
+ +
[docs] def loss(self, tasks, **kwargs): + if self.require_grad: + tasks['loss'].append((self.id, '31:roi_head', {})) + tasks['loss'].append((self.id, '32:det1_head', {})) + if self.is_ego: + tasks['loss'].append((self.id, '33:det2_head', {})) + return tasks
+ + +
[docs]class slcNoBoxTimeDairV2X(LTSDairV2X): + +
[docs] def prepare_data(self): + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1)
+ +
[docs] def update_memory_timestamps(self, ref_pts): + timestamp = torch.zeros_like(ref_pts[..., :1]) + return timestamp
+ + +
[docs]class LTSCAVLocCorr(StreamLidarCAV): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prepare_data_keys = ['points', 'annos_local', 'annos_global'] + self.rl_range = torch.nn.Parameter(torch.Tensor([-50, -50, -3.0, 50, 50, 1.0])) + self.seq_idx = 0 + +
[docs] def apply_transform(self): + super().apply_transform() + self.data['lidar_pose_aug'] = self.T_aug2g
+ +
[docs] def prepare_data(self): + self.prepare_time_scale() + DOP.adaptive_free_space_augmentation(self.data, time_idx=-1) + # DOP.adaptive_free_space_augmentation(self.data, res=0.5, min_h=0) + DOP.generate_sparse_target_roadline_points(self.data, range=75) + self.data['points_rl'] = copy.deepcopy(self.data['points']) + DOP.filter_range(self.data, self.rl_range, apply_to=['points_rl'])
+ + # import matplotlib.pyplot as plt + # points = torch.cat([self.data['points'][:, :3], + # torch.ones_like(self.data['points'][:, :1])], dim=-1) + # points = (self.data['lidar_poses_gt'] @ points.T)[:3].T.detach().cpu().numpy() + # rl = self.data['roadline'].detach().cpu().numpy() + # fig = plt.figure(figsize=(14, 6)) + # ax = fig.add_subplot() + # ax.plot(points[:, 0], points[:, 1], 'g.', markersize=1) + # ax.plot(rl[:, 0], rl[:, 1], 'k.', markersize=1) + # plt.savefig("/home/yys/Downloads/tmp.jpg") + # plt.close() + +
[docs] def transform_data(self): + self.apply_transform() + DOP.filter_range(self.data, self.lidar_range, apply_to=self.prepare_data_keys)
+ +
[docs] def forward_localization(self, tasks, training_mode, **kwargs): + self.seq_idx = kwargs['seq_idx'] + if self.is_ego and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + if kwargs['seq_idx'] == self.memory_len - 1: + # only do localization correction for the last frame, for easier matching during data fusion; + # the relative transformations between subsequent frames in the sequence are assumed to be correct. + tasks[grad_mode].append((self.id, '01:rl_backbone', {})) + tasks[grad_mode].append((self.id, '02:rl_neck', {})) + tasks[grad_mode].append((self.id, '03:rlseg_head', {})) + tasks[grad_mode].append((self.id, '04:localization', {}))
+ +
[docs] def forward_local(self, tasks, training_mode, **kwargs): + if self.is_ego and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '14:pts_backbone', {})) + tasks[grad_mode].append((self.id, '15:backbone_neck', {})) + tasks[grad_mode].append((self.id, '16:roi_head', {})) + + if self.require_grad and training_mode: + grad_mode = 'with_grad' + else: + grad_mode = 'no_grad' + tasks[grad_mode].append((self.id, '17:temporal_fusion', {})) + tasks[grad_mode].append((self.id, '18:det1_head', {}))
+ +
[docs] def forward_fusion(self, tasks, training_mode, **kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego and self.seq_idx == self.memory_len - 1: + tasks[grad_mode].append((self.id, '21:spatial_fusion', {})) + return tasks
+ +
[docs] def forward_head(self, tasks, training_mode, **kwargs): + grad_mode = 'with_grad' if training_mode else 'no_grad' + if self.is_ego and self.seq_idx == self.memory_len - 1: + tasks[grad_mode].append((self.id, '23:det2_head', {})) + return tasks
+ +
[docs] def get_response_cpm(self): + if self.seq_idx < self.memory_len - 1: + return {} + pose_corrected = self.data['lidar_poses_gt'] + pose = self.data['lidar_poses'] + ego_pose = self.data['received_request']['lidar_pose'] + box_ctrs = copy.deepcopy(self.data['detection_local']['preds']['box'][:, :4]) + box_ctrs[:, 3] = 1 + ref_pts = self.data['temp_fusion_feat']['ref_pts'] + lr = self.lidar_range.to(ref_pts.device) + ref_pts = ref_pts * (lr[3:6] - lr[:3]) + lr[:3] + # ref_pts = torch.cat([ref_pts, torch.ones_like(ref_pts[:, :1])], dim=-1) + # transformation matrix from augment-frame to corrected world-frame + transform = pose_corrected @ pose.inverse() @ self.T_aug2g + # transform = pose.inverse() @ ego_pose + box_ctrs = (transform @ box_ctrs.T)[:2].T + # ref_pts = (transform @ ref_pts.T)[:3].T + # transform roadline points to corrected world-frame + roadline = self.data.get('roadline_pred', None) + roadline = torch.cat([roadline, torch.zeros_like(roadline[:, :1]), + torch.ones_like(roadline[:, :1])], dim=-1) + roadline = (pose_corrected @ roadline.T)[:2].T + + # points is only for GL-visualization + # points = torch.cat([self.data['points'][:, :3], + # torch.ones_like(self.data['points'][:, :1])], dim=-1) + # self.data['points'][:, :3] = (transform @ points.T)[:3].T + + # import matplotlib.pyplot as plt + # + # pts = self.data['points_rl'].detach().cpu().numpy() + # rl_vis = self.data.get('roadline_pred', None).detach().cpu().numpy() + # plt.plot(pts[:, 0], pts[:, 1], 'k.', markersize=1) + # plt.plot(rl_vis[:, 0], rl_vis[:, 1], 'r.', markersize=1) + # plt.show() + # plt.close() + + # import matplotlib.pyplot as plt + # fig = plt.figure(figsize=(6, 6)) + # ax = fig.add_subplot() + # rl_gt = self.data['points'].detach().cpu().numpy() + # rl_vis = roadline.detach().cpu().numpy() + # box_ctrs_vis = box_ctrs.detach().cpu().numpy() + # ref_pts_vis = ref_pts.detach().cpu().numpy() + # ax.plot(rl_gt[:, 0], rl_gt[:, 1], 'g.', markersize=1) + # ax.plot(rl_vis[:, 0], rl_vis[:, 1], 'k.', markersize=1) + # ax.plot(box_ctrs_vis[:, 0], box_ctrs_vis[:, 1], 'bo', markersize=3) + # ax.plot(ref_pts_vis[:, 0], ref_pts_vis[:, 1], 'r.', markersize=1) + # plt.savefig("/home/yys/Downloads/tmp.jpg") + # plt.close() + + return { + # 'pose_corrected': self.data['lidar_poses_corrected'], + 'box_ctrs': box_ctrs, + 'roadline': roadline, + 'ref_pts': ref_pts, + 'feat': self.data['temp_fusion_feat']['outs_dec'], + 'Taug2caw': transform, + 'points': self.data['points'], + }
+ + + + + + + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/center_controller.html b/docs/_build/html/_modules/cosense3d/agents/center_controller.html new file mode 100644 index 00000000..b329f2d5 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/center_controller.html @@ -0,0 +1,322 @@ + + + + + + cosense3d.agents.center_controller — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.agents.center_controller

+import matplotlib.pyplot as plt
+import torch
+
+from cosense3d.agents import core
+
+
+
[docs]class CenterController: + def __init__(self, cfg, data_loader, dist=False): + self.mode = data_loader.dataset.mode + self.dist = dist + self.seq_len = data_loader.dataset.seq_len + self.data_info = data_loader.dataset.cfgs['data_info'] + self.num_loss_frame = cfg.get('num_loss_frame', 1) + self.batch_seq = cfg.get('batch_seq', False) + self.setup_core(cfg) + self.global_data = {} + +
[docs] def setup_core(self, cfg): + if self.batch_seq: + cav_manager = core.SeqCAVManager + data_manager = core.SeqDataManager + task_manager = core.SeqTaskManager(self.seq_len) + + else: + cav_manager = core.CAVManager + data_manager = core.DataManager + task_manager = core.TaskManager() + self.cav_manager = cav_manager(**self.update_cfg(cfg['cav_manager'], + self.data_info)) + self.data_manager = data_manager( + self.cav_manager, **self.update_cfg( + cfg['data_manager'][self.mode], self.data_info)) + self.task_manager = task_manager + self.forward_runner = core.ForwardRunner(cfg['shared_modules'], + self.data_manager, + self.dist, **cfg.get('forward_runner', {}))
+ +
[docs] def update_cfg(self, cfg, *args): + for arg in args: + cfg.update(arg) + return cfg
+ + @property + def modules(self): + return self.forward_runner.shared_modules + + @property + def model(self): + return self.forward_runner + + @property + def parameters(self): + return self.forward_runner.parameters() + +
[docs] def train_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + self.data_manager.add_loc_err(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + self.cav_manager.reset() + + if self.batch_seq: + return self.run_seq(seq_data, training_mode=True, **kwargs) + else: + loss = 0 + loss_dict = {} + for i, data in enumerate(seq_data): # some sequences from the dataloader might be shorter than self.seq_len + with_loss = i >= self.seq_len - self.num_loss_frame + kwargs['seq_idx'] = i + frame_loss_dict = self.run_frame(data, with_loss, training_mode=True, **kwargs) + for k, v in frame_loss_dict.items(): + if 'loss' in k: + loss = loss + v + loss_dict[f'f{i}.{k}'] = v + loss_dict['total_loss'] = loss + return loss, loss_dict
+ +
[docs] def test_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + self.data_manager.add_loc_err(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + self.cav_manager.reset() + + # cav_idx = 1 + # import matplotlib.pyplot as plt + # import torch + # fig = plt.figure(figsize=(16, 10)) + # ax = fig.add_subplot() + # + # for i, frame_data in enumerate(seq_data): + # points = frame_data['points'][0][cav_idx] + # lidar_pose = frame_data['lidar_poses'][0][0].inverse() @ frame_data['lidar_poses'][0][cav_idx] + # # lidar_pose = frame_data['lidar_poses'][0][cav_idx] + # points = lidar_pose @ torch.cat([points[:, :3], torch.ones_like(points[:, :1])], dim=-1).T + # points = points.detach().cpu().numpy() + # ax.plot(points[0], points[1], '.', markersize=1) + # + # plt.savefig("/home/yys/Downloads/tmp.png") + # plt.close() + + for i in range(self.seq_len): + kwargs['seq_idx'] = i + self.run_frame(seq_data[i], + with_loss=False, + training_mode=False, + **kwargs)
+ +
[docs] def vis_forward(self, batch_dict, **kwargs): + self.data_manager.generate_augment_params(batch_dict, self.seq_len) + self.data_manager.add_loc_err(batch_dict, self.seq_len) + seq_data = self.data_manager.distribute_to_seq_list(batch_dict, self.seq_len) + frame_data = seq_data[0] + self.cav_manager.update_cav_info(**frame_data) + self.data_manager.distribute_to_cav(**frame_data) + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + # apply data online transform + self.cav_manager.forward(False, False)
+ +
[docs] def run_frame(self, frame_data, with_loss, training_mode, **kwargs): + self.cav_manager.update_cav_info(**frame_data) + self.data_manager.distribute_to_cav(**frame_data) + self.cav_manager.apply_cav_function('pre_update_memory') + + # get pseudo forward tasks + tasks = self.cav_manager.forward(with_loss, training_mode, **kwargs) + batched_tasks = self.task_manager.summarize_tasks(tasks) + + # prepare local data + self.cav_manager.apply_cav_function('prepare_data') + + # correct localization errors + self.forward_runner(batched_tasks[0]['no_grad'], with_grad=False, **kwargs) + self.forward_runner(batched_tasks[0]['with_grad'], with_grad=True, **kwargs) + + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + + # apply data transformation with the corrected localization + self.cav_manager.apply_cav_function('transform_data') + + # preprocess after transformation to ego frame + self.data_manager.apply_preprocess() + # self.data_manager.vis_global_data_plt(['vis_ref_pts', 'vis_poses'], kwargs['seq_idx'] + 1) + + # from cosense3d.utils.vislib import plot_cavs_points + # plot_cavs_points(self.cav_manager.cavs[0]) + + # process local cav data + self.forward_runner(batched_tasks[1]['no_grad'], with_grad=False, **kwargs) + self.forward_runner(batched_tasks[1]['with_grad'], with_grad=training_mode, **kwargs) + + # send coop cav feature-level cpm to ego cav + response = self.cav_manager.send_response() + self.cav_manager.receive_response(response) + + # process ego cav data and fuse data from coop cav with grad if training + self.forward_runner(batched_tasks[2]['with_grad'], with_grad=training_mode, **kwargs) + self.forward_runner(batched_tasks[2]['no_grad'], with_grad=False, **kwargs) + self.cav_manager.apply_cav_function('post_update_memory') + + frame_loss_dict = {} + if with_loss: + frame_loss_dict = self.forward_runner.frame_loss(batched_tasks[3]['loss'], **kwargs) + return frame_loss_dict
+ +
[docs] def run_seq(self, seq_data, training_mode, **kwargs): + cur_len = len(seq_data) + self.cav_manager.update_cav_info(seq_data) + self.data_manager.distribute_to_cav(seq_data) + self.cav_manager.apply_cav_function('init_memory') + + # send and receive request + request = self.cav_manager.send_request() + self.cav_manager.receive_request(request) + # get pseudo forward tasks + tasks = self.cav_manager.forward(training_mode, self.num_loss_frame, cur_len) + batched_tasks = self.task_manager.summarize_tasks(tasks) + # preprocess after transformation to ego frame + self.data_manager.apply_preprocess() + + # process local cav data + if 'no_grad' in batched_tasks[0] and len(batched_tasks[0]['no_grad']) > 0: + self.forward_runner(batched_tasks[0]['no_grad'], with_grad=False, **kwargs) + + self.forward_runner(batched_tasks[0]['with_grad'], with_grad=training_mode, **kwargs) + + # process tasks that needs to be run sequentially + seq_tasks = self.task_manager.parallel_to_sequential(batched_tasks[1]) + for i in range(cur_len): + self.cav_manager.apply_cav_function('pre_update_memory', seq_idx=i) + if 'no_grad' in seq_tasks and len(seq_tasks['no_grad'][i]) > 0: + self.forward_runner(seq_tasks['no_grad'][i], with_grad=False, **kwargs) + self.forward_runner(seq_tasks['with_grad'][i], with_grad=training_mode, **kwargs) + self.cav_manager.apply_cav_function('post_update_memory', seq_idx=i) + + # send coop cav feature-level cpm to ego cav + response = self.cav_manager.send_response() + self.cav_manager.receive_response(response) + + if 2 not in batched_tasks: + print([d['valid_agent_ids'] for d in seq_data]) + # process ego cav data and fuse data from coop cav with grad if training + self.forward_runner(batched_tasks[2]['with_grad'], with_grad=training_mode, **kwargs) + if 'no_grad' in batched_tasks[2]: + self.forward_runner(batched_tasks[2]['no_grad'], with_grad=False, **kwargs) + loss, loss_dict = self.forward_runner.loss(batched_tasks[3]['loss'], with_grad=False, **kwargs) + return loss, loss_dict
+ + + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/base_runner.html b/docs/_build/html/_modules/cosense3d/agents/core/base_runner.html new file mode 100644 index 00000000..d5489949 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/base_runner.html @@ -0,0 +1,175 @@ + + + + + + cosense3d.agents.core.base_runner — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.agents.core.base_runner

+
+
+from cosense3d.utils.train_utils import *
+from cosense3d.agents.core.hooks import Hooks
+
+
+
[docs]class BaseRunner: + def __init__(self, + dataloader, + controller, + gpus=0, + log_every=10, + hooks=None, + **kwargs + ): + self.dataloader = dataloader + self.data_iter = iter(dataloader) + self.total_iter = len(dataloader) + self.iter = 1 + self.epoch = 1 + + self.controller = controller + self.forward_runner = controller.forward_runner + self.hooks = Hooks(hooks) + + self.gpus = gpus + self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') + self.log_every = log_every + + self.init() + +
[docs] def init(self): + if self.forward_runner is not None: + self.forward_runner.to(self.device)
+ +
[docs] def setup_logger(self, *args, **kwargs): + pass
+ +
[docs] def set_logdir(self, logdir): + self.logger.log_path = logdir
+ + @property + def logdir(self): + if hasattr(self, 'logger'): + return self.logger.logdir + else: + return None + +
[docs] def run(self): + raise NotImplementedError
+ +
[docs] def next_batch(self): + if self.iter >= self.total_iter: + self.iter = 1 + self.epoch += 1 + self.data_iter = iter(self.dataloader) + batch = next(self.data_iter) + return batch
+ +
[docs] def vis_data(self, keys=None, **kwargs): + if keys is None: + keys = ['points', 'imgs', 'bboxes2d', 'lidar2img', 'global_labels', 'local_labels'] + else: + keys = list(set(keys)) + return self.controller.data_manager.gather_vis_data(keys=keys)
+ + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/cav_manager.html b/docs/_build/html/_modules/cosense3d/agents/core/cav_manager.html new file mode 100644 index 00000000..4224d42c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/cav_manager.html @@ -0,0 +1,251 @@ + + + + + + cosense3d.agents.core.cav_manager — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.agents.core.cav_manager

+
+import torch
+import numpy as np
+
+from cosense3d.agents.cav_prototype import get_prototype
+from cosense3d.utils.data_statistics import StatsRecorder
+
+
+
[docs]class CAVManager: + def __init__(self, lidar_range, prototype=None, memory_len=1, all_grad=False, + num_grad_cav=1, seq_len=0, cpm_statistic=False, **kwargs): + self.lidar_range = torch.tensor(lidar_range) + self.memory_len = memory_len + self.all_grad = all_grad + self.num_grad_cav = num_grad_cav + self.seq_len = seq_len + self.cpm_statistic = cpm_statistic + self.kwargs = kwargs + self.cavs = [] + self.cav_dict = {} + assert prototype is not None, "CAV prototype should be defined." + self.prototype = get_prototype(prototype) + + if self.cpm_statistic: + self.cpm_size_recorder = StatsRecorder() + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(cavs={self.cav_dict.keys()})' + return repr_str + +
[docs] def reset(self): + self.cavs = [] + self.cav_dict = {}
+ +
[docs] def update_cav_info(self, valid_agent_ids=None, lidar_poses=None, **data): + B = len(valid_agent_ids) # batch_size + cavs = [] + cav_dict = {} + for b in range(B): + batch_cavs = [] + for i, cav_id in enumerate(valid_agent_ids[b]): + is_ego = True if i==0 else False # assume the first car is ego car + require_grad = True if (i < self.num_grad_cav or self.all_grad) else False + # pad id with batch idx to avoid duplicated ids across different batches + cav_id = f'{b}.{cav_id}' + cav = self.get_cav_with_id(cav_id) + if not cav: + cav = self.prototype(cav_id, i, is_ego, + self.lidar_range, + self.memory_len, + lidar_pose=lidar_poses[b][i], + require_grad=require_grad, + **self.kwargs) + else: + cav.update(lidar_poses[b][i], is_ego, require_grad) + batch_cavs.append(cav) + cav_dict[cav_id] = (b, i) + cavs.append(batch_cavs) + self.cavs = cavs + self.cav_dict = cav_dict
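The id bookkeeping above in a nutshell: cav ids are prefixed with the batch index to avoid duplicates across batches, and cav_dict maps each padded id to its (batch, agent) position. A tiny sketch with made-up agent names:

    valid_agent_ids = [['ego', 'cav2'], ['ego']]          # hypothetical two-sample batch
    cav_dict = {f'{b}.{cav_id}': (b, i)
                for b, ids in enumerate(valid_agent_ids)
                for i, cav_id in enumerate(ids)}
    # -> {'0.ego': (0, 0), '0.cav2': (0, 1), '1.ego': (1, 0)}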
+ +
[docs] def has_cav(self, cav_id): + return cav_id in self.cav_dict
+ +
[docs] def get_cav_with_id(self, id): + if id not in self.cav_dict: + return False + item = self.cav_dict[id] + if isinstance(item, tuple): + b, i = item + return self.cavs[b][i] + else: + return item
+ +
[docs] def send_request(self): + request = [] + for b, cavs in enumerate(self.cavs): + req = {} + for cav in cavs: + if cav.is_ego: + req[cav.id] = cav.get_request_cpm() + request.append(req) + return request
+ +
[docs] def receive_request(self, request): + for b, req in enumerate(request): + for ai, req_cpm in req.items(): + for cav in self.cavs[b]: + if ai != cav.id: + cav.receive_request(req_cpm)
+ +
[docs] def send_response(self): + response = [] + for b, cavs in enumerate(self.cavs): + ans = {} + for cav in cavs: + if cav.has_request(): + ans[cav.id] = cav.get_response_cpm() + response.append(ans) + if self.cpm_statistic: + self.update_cpm_statistic(response) + return response
+ +
[docs] def receive_response(self, response): + for cavs, resp in zip(self.cavs, response): + for cav in cavs: + if cav.is_ego: + cav.receive_response(resp)
+ +
[docs] def forward(self, with_loss, training_mode, **kwargs): + tasks = {'with_grad': [], 'no_grad': [], 'loss': []} + for i, cavs in enumerate(self.cavs): + for cav in cavs: + cav.forward(tasks, training_mode, **kwargs) + if with_loss and training_mode: + cav.loss(tasks, **kwargs) + return tasks
+ +
[docs] def apply_cav_function(self, func_name): + for b, cavs in enumerate(self.cavs): + for cav in cavs: + getattr(cav, func_name)()
+ +
[docs] def update_cpm_statistic(self, response): + sizes = [] + for resp in response: + for ai, data_dict in resp.items(): + def count_size(data): + if isinstance(data, dict): + s = 0 + for k, v in data.items(): + s += count_size(v) + return s + elif isinstance(data, torch.Tensor): + return data.numel() + sizes.append(count_size(data_dict)) + if len(sizes) > 0: + self.cpm_size_recorder.update(np.array(sizes).reshape(-1, 1))
+ + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/data_manager.html b/docs/_build/html/_modules/cosense3d/agents/core/data_manager.html new file mode 100644 index 00000000..7a85710d --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/data_manager.html @@ -0,0 +1,520 @@ + + + + + + cosense3d.agents.core.data_manager — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.agents.core.data_manager

+
+
+import os
+import random
+
+import matplotlib.pyplot as plt
+import torch
+import torch_scatter
+
+from cosense3d.ops.utils import points_in_boxes_gpu
+from cosense3d.agents.utils.transform import generate_bev_tgt_pts
+
+
+
[docs]class DataManager: + def __init__(self, + cav_manager, + lidar_range, + voxel_size=None, + aug=None, + pre_process=[], + loc_err=None): + self.cav_manager = cav_manager + self.lidar_range = lidar_range + self.voxel_size = voxel_size + self.aug = aug + self.pre_process = pre_process + self.loc_err = loc_err + +
[docs] def apply_preprocess(self): + if isinstance(self.pre_process, list): + for p in self.pre_process: + getattr(self, p)() + elif isinstance(self.pre_process, dict): + for p, args in self.pre_process.items(): + getattr(self, p)(**args)
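apply_preprocess dispatches on the type of the configured pre_process: a list calls each named method without arguments, a dict passes the values as keyword arguments. A sketch of both accepted forms (method names taken from this class, argument values illustrative):

    # As a plain list: each entry is called without arguments.
    pre_process = ['remove_global_empty_boxes', 'remove_local_empty_boxes']

    # As a dict: keys are method names, values are keyword arguments.
    pre_process = {
        'generate_local_non_empty_mask': {'ego_only': True},
        'sample_global_bev_tgt_pts': {'sam_res': 0.4, 'range': 50},
    }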
+ +
[docs] def remove_global_empty_boxes(self): + for cavs in self.cav_manager.cavs: + if cavs[0].data.get('global_bboxes_3d', None) is None: + continue + assert cavs[0].is_ego + points = torch.cat([cav.data['points'] for cav in cavs], dim=0) + global_boxes = cavs[0].data['global_bboxes_3d'] + box_idx = points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + global_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(global_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cavs[0].data['global_bboxes_3d'] = global_boxes[mask] + cavs[0].data['global_labels_3d'] = cavs[0].data['global_labels_3d'][mask] + if 'bboxes_3d_pred' in cavs[0].data: + cavs[0].data['bboxes_3d_pred'] = cavs[0].data['bboxes_3d_pred'][:, mask]
+ +
[docs] def generate_global_non_empty_mask(self): + for cavs in self.cav_manager.cavs: + if cavs[0].data.get('global_bboxes_3d', None) is None: + continue + assert cavs[0].is_ego + points = torch.cat([cav.data['points'] for cav in cavs], dim=0) + global_boxes = cavs[0].data['global_bboxes_3d'] + box_idx = points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + global_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(global_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cavs[0].data['global_bboxes_mask'] = mask
+ +
[docs] def remove_local_empty_boxes(self, ego_only=False): + for cavs in self.cav_manager.cavs: + for cav in cavs: + if not cav.is_ego and ego_only: + continue + if cav.data.get('local_bboxes_3d', None) is None: + continue + points = cav.data['points'] + local_boxes = cav.data['local_bboxes_3d'] + box_idx = points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + local_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(local_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cav.data['local_bboxes_3d'] = local_boxes[mask] + cav.data['local_labels_3d'] = cav.data['local_labels_3d'][mask]
+ +
[docs] def generate_local_non_empty_mask(self, ego_only=False): + for cavs in self.cav_manager.cavs: + for cav in cavs: + if not cav.is_ego and ego_only: + continue + if cav.data.get('local_bboxes_3d', None) is None: + continue + points = cav.data['points'] + local_boxes = cav.data['local_bboxes_3d'] + box_idx = points_in_boxes_gpu(points.unsqueeze(0)[..., :3], + local_boxes.unsqueeze(0)[..., :7])[0] + box_idx = box_idx[box_idx > -1] + num_pts = torch.zeros_like(local_boxes[:, 0]).long() + torch_scatter.scatter_add(torch.ones_like(box_idx), box_idx, dim=0, out=num_pts) + mask = num_pts > 3 + cav.data['local_bboxes_mask'] = mask
+ +
[docs] def sample_global_bev_tgt_pts(self, sam_res=0.4, map_res=0.2, range=50, max_num_pts=5000, discrete=False): + for cavs in self.cav_manager.cavs: + assert cavs[0].is_ego + points = torch.cat([cav.data['points'] for cav in cavs], dim=0) + transform = cavs[0].T_e2g.inverse() @ cavs[0].T_aug2g + bev_pts = generate_bev_tgt_pts(points, cavs[0].data, transform, + sam_res, map_res, range, max_num_pts, discrete) + cavs[0].data['global_bev_tgt_pts'] = bev_pts
+ + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # lidar = points.cpu().numpy() + # pts = bev_pts.cpu().numpy() + # pos = pts[:, 2] == 1 + # neg = pts[:, 2] == 0 + # + # ax = draw_points_boxes_plt( + # pc_range=50, + # points=pts[pos, :], + # points_c='r', + # return_ax=True + # ) + # ax.plot(pts[neg, 0], pts[neg, 1], '.', c='b', markersize=1) + # ax.plot(lidar[:, 0], lidar[:, 1], '.', c='gray', markersize=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + +
[docs] def distribute_to_seq_list(self, batch_dict, seq_len): + result = [] + for l in range(seq_len): + res = {} + for k, v in batch_dict.items(): + x_list = [x[l] for x in v if l < len(x)] + if len(x_list) == 0: + res = {} + break + res[k] = x_list + if len(res) > 0: + result.append(res) + return result
+ +
[docs] def distribute_to_cav(self, valid_agent_ids=None, **data): + cavs = self.cav_manager.cavs + global_data_list = [] + for b, agent_ids in enumerate(valid_agent_ids): + global_data = {} + for j, ai in enumerate(agent_ids): + assert cavs[b][j].id == f'{b}.{ai}' + for k, v in data.items(): + if isinstance(v[b], list) and len(v[b]) == len(agent_ids): + cavs[b][j].data[k] = v[b][j] + elif k == 'chosen_cams': + cavs[b][j].data[k] = v[b][ai] + elif k == 'augment_params': + cavs[b][j].data[k] = v[b] + global_data[k] = v[b] + # elif cavs[b][j].is_ego: + else: + cavs[b][j].data[k] = v[b] + global_data_list.append(global_data) + return global_data_list
+ +
[docs] def distribute_to_seq_cav(self, data): + for l, d in enumerate(data): + valid_agent_ids = d['valid_agent_ids'] + global_data_list = [] + for b, agent_ids in enumerate(valid_agent_ids): + global_data = {} + for j, ai in enumerate(agent_ids): + new_data = {} + for k, v in d.items(): + if isinstance(v[b], list) and len(v[b]) == len(agent_ids): + new_data[k] = v[b][j] + elif k == 'chosen_cams': + new_data[k] = v[b][ai] + elif k == 'augment_params': + new_data[k] = v[b] + global_data[k] = v[b] + # elif cavs[b][j].is_ego: + else: + new_data[k] = v[b] + self.cav_manager.get_cav_with_id(f'{b}.{ai}').data[l] = new_data + global_data_list.append(global_data) + return global_data_list
+ +
[docs] def generate_augment_params(self, batch_dict, seq_len): + B = len(batch_dict['scenario']) + if self.aug is None: + rand_aug = [[None] * seq_len] * B + else: + rand_aug = [] + def rand_from_range(r): + return torch.rand(1) * (r[1] - r[0]) + r[0] + for i in range(B): + cur_aug = {} + if 'rot_range' in self.aug: + theta = rand_from_range(self.aug['rot_range']) + ct = torch.cos(theta) + st = torch.sin(theta) + transform = torch.eye(4) + transform[0, 0] = ct + transform[0, 1] = -st + transform[1, 0] = st + transform[1, 1] = ct + cur_aug['rot'] = transform + if 'trans_std' in self.aug: + cur_aug['trans'] = torch.randn(len(self.aug['trans_std'])) * torch.tensor(self.aug['trans_std']) + if 'scale_ratio_range' in self.aug: + cur_aug['scale'] = rand_from_range(self.aug['scale_ratio_range']) + if 'flip' in self.aug: + cur_aug['flip'] = {'flip_idx': random.randint(0, 3), 'flip_axis': self.aug['flip']} + rand_aug.append([cur_aug for _ in range(seq_len)]) + batch_dict['augment_params'] = rand_aug
+ +
[docs] def add_loc_err(self, batch_dict, seq_len): + if self.loc_err is None: + return
+ # TODO + +
[docs] def gather(self, cav_list, data_keys): + data_list = [] + for k in data_keys: + data = [] + for cav_id in cav_list: + data.append(self.cav_manager.get_cav_with_id(cav_id).data[k]) + data_list.append(data) + return data_list
+ +
[docs] def scatter(self, cav_list, data_dict): + for k, data_list in data_dict.items(): + for cav_id, data in zip(cav_list, data_list): + self.update(cav_id, k, data)
+ +
[docs] def update(self, cav_id, data_key, data): + self.cav_manager.get_cav_with_id(cav_id).data[data_key] = data
+ +
[docs] def gather_batch(self, batch_idx, key, to_numpy=False): + data = {} + for cav in self.cav_manager.cavs[batch_idx]: + if key not in cav.data: + continue + d = cav.data[key] + if isinstance(d, torch.Tensor) and to_numpy: + d = d.cpu().numpy() + elif isinstance(d, list) and len(d) > 0 and isinstance(d[0], torch.Tensor): + d = [x.cpu().numpy() for x in d] + data[cav.id] = d + return data
+ +
[docs] def gather_ego_data(self, key): + data = {} + for cavs in self.cav_manager.cavs: + assert cavs[0].is_ego + if key not in cavs[0].data: + continue + d = cavs[0].data[key] + data[cavs[0].id] = d + return data
+ +
[docs] def gather_cav_data(self, key): + data = {} + for cavs in self.cav_manager.cavs: + for cav in cavs: + data[cav.id] = cav.data.get(key, {}) + return data
+ +
[docs] def boxes_to_vis_format(self, boxes, labels, id_appendix=0): + boxes_vis = {} + gt_labels = labels.tolist() + for i, box in enumerate(boxes.tolist()): + cur_id = i + 1 + if id_appendix != 0: + cur_id = cur_id * 10 + id_appendix + try: + boxes_vis[cur_id] = [gt_labels[i]] + box[:6] + [0, 0] + [box[6]] + except: + print('d') + return boxes_vis
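A sketch of the output format produced by boxes_to_vis_format (values illustrative): each box becomes id -> [label, x, y, z, l, w, h, 0, 0, yaw], with ids starting at 1.

    import torch
    boxes = torch.tensor([[1.0, 2.0, 0.5, 4.0, 1.8, 1.6, 0.3]])   # x, y, z, l, w, h, yaw
    labels = torch.tensor([0])
    # boxes_to_vis_format(boxes, labels)
    # -> {1: [0, 1.0, 2.0, 0.5, 4.0, 1.8, 1.6, 0, 0, 0.3]}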
+ +
[docs] def get_gt_boxes_as_vis_format(self, batch_idx, coor='global', successors=False): + gt_boxes = self.gather_batch(batch_idx, f'{coor}_bboxes_3d' ) + gt_labels = self.gather_batch(batch_idx, f'{coor}_labels_3d') + if successors and coor=='global': + bboxes_3d_pred = self.gather_batch(batch_idx, 'bboxes_3d_pred') + labels = {} + successor_labels = {} + for k in gt_boxes.keys(): + labels[k] = self.boxes_to_vis_format(gt_boxes[k], gt_labels[k]) + if successors and coor=='global' and k in bboxes_3d_pred: + successor_labels[k] = {} + for i, cur_preds in enumerate(bboxes_3d_pred[k]): + tmp_boxes = gt_boxes[k].detach().clone() + tmp_boxes[:, :3] = cur_preds[:, :3] + tmp_boxes[:, 6] = cur_preds[:, -1] + successor_labels[k].update(self.boxes_to_vis_format(tmp_boxes, gt_labels[k], i)) + return labels, successor_labels
+ + +
[docs] def gather_vis_data(self, batch_idx=0, keys=['points']): + gather_dict = {} + successors = 'global_pred_gt' in keys + for k in keys: + if k in ['global_labels', 'local_labels']: + ref_coor = k.split('_')[0] + gather_dict[f'{ref_coor}_labels'], successor_labels = ( + self.get_gt_boxes_as_vis_format(batch_idx, ref_coor, successors)) + if successors and ref_coor=='global': + gather_dict['global_pred_gt'] = successor_labels + elif k == 'global_pred_gt' or k == 'global_pred': + continue + elif k == 'detection' or k == 'detection_global': + detection = self.gather_ego_data(k) + global_pred = {} + for cav_id, det in detection.items(): + global_pred[cav_id] = {} + if 'preds' in det: + det = det['preds'] # todo: without nms hook, keywork preds is not removed + if 'box' in det and 'lbl' in det: + detection[cav_id]['labels'] = self.boxes_to_vis_format(det['box'], det['lbl']) + if 'pred' in det: + global_pred[cav_id]['labels'] = self.boxes_to_vis_format( + det['pred'].view(-1, 7), det['lbl'].unsqueeze(0).repeat(2, 1).view(-1)) + gather_dict['detection'] = detection + gather_dict['global_pred'] = global_pred + elif k == 'detection_local': + detection = self.gather_cav_data(k) + for cav_id, det in detection.items(): + if len(det) == 0: + continue + if 'preds' in det: + det = det['preds'] + if 'box' in det and 'lbl' in det: + detection[cav_id]['labels'] = self.boxes_to_vis_format(det['box'], det['lbl']) + gather_dict['detection_local'] = detection + else: + gather_dict[k] = self.gather_batch(batch_idx, k, True) + return gather_dict
+ +
[docs] def get_vis_data_input(self, batch_idx=0, keys=None): + """ + + Parameters + ---------- + batch_idx: batch index + keys: additional gt keys that are not standardized in the cosense3d data API + + Returns + ------- + out_dict: dict with point clouds, images, 2D boxes, lidar2img matrices and GT labels for visualization. + """ + pcds = self.gather_batch(batch_idx, 'points', True) + imgs = self.gather_batch(batch_idx, 'img', True) + global_labels = self.get_gt_boxes_as_vis_format(batch_idx, 'global') + local_labels = self.get_gt_boxes_as_vis_format(batch_idx, 'local') + bboxes2d = self.gather_batch(batch_idx, 'bboxes2d', True) + lidar2img = self.gather_batch(batch_idx, 'lidar2img', True) + out_dict = { + 'pcds': pcds, + 'imgs': imgs, + 'bboxes2d': bboxes2d, + 'lidar2img': lidar2img, + 'global_labels': global_labels, + 'local_labels': local_labels + } + if keys is not None: + for k in keys: + out_dict[k] = self.gather_batch(batch_idx, k, True) + return out_dict
+ +
[docs] def get_vis_data_detection(self, batch_idx=0, keys='detection'): + """ + + Parameters + ---------- + batch_idx: batch index + keys: the default key for detection is 'detection'; a customized key can also be used, + depending on which key is used for saving the detection result in the CAV data pool. + + Returns + ------- + detection: result with boxes and labels converted to the visualizing format. + """ + detection = self.gather_batch(batch_idx, keys) + for cav_id, det in detection.items(): + detection[cav_id]['labels'] = self.boxes_to_vis_format(det['box'], det['lbl']) + return detection
+ +
[docs] def get_vis_data_bev(self, batch_idx=0, keys='bev'): + return self.gather_batch(batch_idx, 'bev')
+ +
[docs] def get_vis_data_meta(self, batch_idx=0, keys=None): + return { + 'scenario': self.gather_batch(batch_idx, 'scenario'), + 'frame': self.gather_batch(batch_idx, 'frame') + }
+ +
[docs] def vis_global_data_plt(self, vis_funcs, seq_len=1): + for func in vis_funcs: + ax = None + for cav in self.cav_manager.cavs[0]: + ax = getattr(cav, func)(ax, his_len=seq_len) + plt.savefig(f"{os.environ['HOME']}/Pictures/{func}_{seq_len}.png") + plt.close()
+ + + + + + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/forward_runner.html b/docs/_build/html/_modules/cosense3d/agents/core/forward_runner.html new file mode 100644 index 00000000..b5410364 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/forward_runner.html @@ -0,0 +1,205 @@ + + + + + + cosense3d.agents.core.forward_runner — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for cosense3d.agents.core.forward_runner

+
+import math
+import torch
+from torch import nn
+
+from cosense3d.modules import build_module
+
+
+
[docs]class ForwardRunner(nn.Module): + def __init__(self, shared_modules, data_manager, dist=False, chunk_size=24, **kwargs): + super().__init__() + self.lidar_range = torch.tensor(data_manager.lidar_range) + self.data_manager = data_manager + self.dist = dist + # if the fwd items of a module exceed the GPU capacity, run them in several mini batches + self.chunk_size = chunk_size + + module_dict = {} + self.module_keys = [] + for k, v in shared_modules.items(): + if 'type' not in v: + continue + v['dist'] = dist + module = build_module(v) + if module.freeze: + module.freeze_parameters() + module_dict[k] = module + self.module_keys.append(k) + + self.shared_modules = nn.ModuleDict(module_dict) +
[docs] def to_gpu(self, gpu_id): + for n, m in self.shared_modules.items(): + sync_func = m.to_gpu(gpu_id) + if sync_func is not None: + self.shared_modules[n] = sync_func(m)
+ +
[docs] def gather_cav_ids(self, tasks): + return [t[0] for t in tasks]
+ +
[docs] def forward(self, tasks, with_grad=True, **kwargs): + if with_grad: + self._forward(tasks, **kwargs) + else: + with torch.no_grad(): + self._forward(tasks, **kwargs)
+ + def _forward(self, tasks, **kwargs): + for task_name, task_list in tasks.items(): + module = getattr(self.shared_modules, task_name) + task_ids = self.gather_cav_ids(task_list) + n_task = len(task_ids) + s = self.chunk_size + if n_task > s and 0 < n_task % s < 4: + s = int(math.ceil(n_task / math.ceil(n_task / s))) + chunks = [task_ids[i:i + s] for i in range(0, len(task_ids), s)] + res = {k: [] for k in module.scatter_keys} + for tids in chunks: + data = self.data_manager.gather(tids, module.gather_keys) + cur_res = module(*data, **kwargs) + for k in module.scatter_keys: + res[k].extend(cur_res[k]) + self.data_manager.scatter(task_ids, res) + +
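# Editor's illustrative sketch (not part of the original source): the chunking
# rule used in _forward above. When n_task leaves a tiny remainder (1..3) with
# the default chunk size, the chunk size is re-balanced so the mini batches have
# similar lengths. The task-id list is a made-up example.
import math

def make_chunks(task_ids, chunk_size=24):
    n_task = len(task_ids)
    s = chunk_size
    if n_task > s and 0 < n_task % s < 4:
        s = int(math.ceil(n_task / math.ceil(n_task / s)))
    return [task_ids[i:i + s] for i in range(0, n_task, s)]

print([len(c) for c in make_chunks(list(range(49)))])  # [17, 17, 15] instead of [24, 24, 1]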
[docs] def loss(self, tasks, **kwargs): + loss_dict = {} + loss = 0 + for task_name, task_list in tasks.items(): + module = getattr(self.shared_modules, task_name) + if module.freeze: + continue + cav_ids = self.gather_cav_ids(task_list) + data = self.data_manager.gather(cav_ids, module.scatter_keys + module.gt_keys) + ldict = module.loss(*data, **kwargs) + for k, v in ldict.items(): + prefix = task_name.replace('_head', '') + loss_dict[f'{prefix}.{k}'] = v + loss = loss + v + loss_dict['total_loss'] = loss + return loss, loss_dict
+ +
[docs] def frame_loss(self, tasks, **kwargs): + loss_dict = {} + for task_name, task_list in tasks.items(): + module = getattr(self.shared_modules, task_name) + if module.freeze: + continue + cav_ids = self.gather_cav_ids(task_list) + data = self.data_manager.gather(cav_ids, module.scatter_keys + module.gt_keys) + ldict = module.loss(*data, **kwargs) + for k, v in ldict.items(): + prefix = task_name.replace('_head', '') + loss_dict[f'{prefix}.{k}'] = v + return loss_dict
+ + + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/gui.html b/docs/_build/html/_modules/cosense3d/agents/core/gui.html new file mode 100644 index 00000000..63da7ecc --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/gui.html @@ -0,0 +1,296 @@ + + + + + + cosense3d.agents.core.gui — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.agents.core.gui

+
+
+import functools
+import os
+
+from PyQt5 import QtCore, QtGui, QtWidgets
+
+from cosense3d.agents.viewer.gl_viewer import GLViewer
+from cosense3d.agents.viewer.output_viewer import OutputViewer
+from cosense3d.agents.viewer.img_viewer import ImgViewer
+from cosense3d.agents.viewer.img_anno3d_viewer import ImgAnno3DViewer
+
+
+
[docs]class GUI(QtWidgets.QMainWindow): + def __init__(self, mode, cfg) -> None: + super(GUI, self).__init__() + self.mode = mode + self.header_height = 30 + path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + self.css_dir = os.path.join(path, 'viewer', 'css') + self.data_keys = [ + 'scenario', 'frame', + 'points', 'img', 'bboxes2d', 'lidar2img', + 'global_labels', 'local_labels', 'global_pred_gt', + 'detection', 'detection_local', 'global_pred' + ] + self.setupUI(cfg) + self.setWindowTitle("Cosense3D") + + # Set window size to screen size + screen = QtWidgets.QDesktopWidget().screenGeometry() + width, height = screen.width(), screen.height() + self.setGeometry(0, 0, width, height) + + self.timer = QtCore.QTimer(self) + self.timer.timeout.connect(self.step) + self.data = None + self.colo_mode = 'united' + +
[docs] def setupUI(self, cfg): + self.tabs = QtWidgets.QTabWidget() + + self.glViewer0 = GLViewer('MAINVIEW', self) + self.tabs.addTab(self.glViewer0, 'GLViewer') + + self.img_viewer = ImgViewer(**cfg.get('img_viewer', {})) + self.tabs.addTab(self.img_viewer, 'ImgViewer') + + self.img_anno3d_viewer = ImgAnno3DViewer(**cfg.get('img_anno3d_viewer', {})) + self.tabs.addTab(self.img_anno3d_viewer, 'ImgAnno3DViewer') + + self.output_viewer = OutputViewer(**cfg['output_viewer']) + self.tabs.addTab(self.output_viewer, 'OutputViewer') + self.data_keys.extend(self.output_viewer.gather_data_keys) + + self.setCentralWidget(self.tabs) + self.get_toolbar()
+ +
[docs] def setRunner(self, runner): + self.runner = runner
+ +
[docs] def initGUI(self): + # connect all events + self.connect_events_to_funcs()
+ +
[docs] def get_toolbar(self): + self.toolbar = self.addToolBar("Toolbar") + self.infos = ['scene', 'frame', 'PCDcolor'] + self.tools = ['start', 'stop', 'step'] + self.visible_objects = ['localDet', 'globalDet', 'localGT', 'globalGT', 'globalPred', 'globalPredGT'] + + # add label combo pairs + for name in self.infos: + qlabel = QtWidgets.QLabel(f' {name[0].upper()}{name[1:]}:') + w1 = qlabel.sizeHint().width() + qlabel.setMinimumWidth(w1 + 25) + qlabel.setMaximumWidth(w1 + 50) + qcombo = QtWidgets.QComboBox() + w2 = qcombo.sizeHint().width() + qcombo.setMinimumWidth(w2 + 25) + qcombo.setMaximumWidth(w2 + 50) + if name=='PCDcolor': + qcombo.addItem('united') + qcombo.addItem('height') + qcombo.addItem('cav') + qcombo.addItem('time') + else: + qcombo.addItem('---------') + setattr(self, f'label_{name}', qlabel) + setattr(self, f'combo_{name}', qcombo) + setattr(self, f'cur_{name}', None) + + self.toolbar.addWidget(getattr(self, f'label_{name}')) + self.toolbar.addWidget(getattr(self, f'combo_{name}')) + + for name in self.tools: + bname = f'{name[0].upper()}{name[1:]}' + qbutton = QtWidgets.QToolButton() + qbutton.setText(bname) + # qbutton.setIcon(QtGui.QIcon(f"./interface/ui/icons/{name}.png")) + w = qbutton.sizeHint().width() + 1 + qbutton.setMaximumWidth(w) + setattr(self, f'button_{name}', qbutton) + self.toolbar.addWidget(getattr(self, f'button_{name}')) + + for name in ['glcolor'] + self.visible_objects: + bname = f'{name[0].upper()}{name[1:]}' + qbutton = QtWidgets.QPushButton() + qbutton.setText(bname) + w = qbutton.sizeHint().width() + 1 + qbutton.setMaximumWidth(w) + setattr(self, f'button_{name}', qbutton) + self.toolbar.addWidget(getattr(self, f'button_{name}')) + + for name in self.visible_objects: + setattr(self, f"{name.lower()}_visible", False) + + self.button_glcolor.setStyleSheet("background-color: black; color: white")
+ +
[docs] def change_visible(self, name): + button = getattr(self, f'button_{name}') + current_color = button.palette().button().color() + + if current_color != QtGui.QColor('lightblue'): + button.setStyleSheet("background-color: lightblue") + setattr(self, f"{name.lower()}_visible", True) + else: + button.setStyleSheet("background-color: #efefef") + setattr(self, f"{name.lower()}_visible", False) + self.refresh()
+ +
[docs] def change_glcolor(self): + button = self.button_glcolor + current_color = button.palette().button().color() + if current_color == QtGui.QColor('black'): + button.setStyleSheet("background-color: white; color: black") + self.glViewer0.setBackgroundColor('w') + else: + button.setStyleSheet("background-color: black; color: white") + self.glViewer0.setBackgroundColor('k') + self.refresh()
+ +
[docs] def change_color_mode(self): + self.colo_mode = self.combo_PCDcolor.currentText() + self.refresh()
+ +
[docs] def connect_events_to_funcs(self): + self.combo_PCDcolor.currentIndexChanged.connect(self.change_color_mode) + self.button_step.clicked.connect(self.step) + self.button_start.clicked.connect(self.start) + self.button_stop.clicked.connect(self.stop) + self.tabs.currentChanged.connect(self.refresh) + self.button_glcolor.clicked.connect(self.change_glcolor) + for name in self.visible_objects: + if getattr(self, f"{name.lower()}_visible"): + self.change_visible(name) + getattr(self, f'button_{name}').clicked.connect( + functools.partial(self.change_visible, name=name))
+ +
[docs] def step(self): + self.runner.step() + self.data = self.runner.vis_data(self.data_keys) + self.refresh() + if self.runner.iter == self.runner.total_iter: + self.timer.stop()
+ +
[docs] def refresh(self): + active_widget = self.tabs.currentWidget() + if self.data is not None: + visible_keys = [k for k in self.visible_objects if getattr(self, f"{k.lower()}_visible")] + active_widget.refresh(self.data, visible_keys=visible_keys, color_mode=self.colo_mode) + scene = list(self.data['scenario'].values())[0] + frame = list(self.data['frame'].values())[0] + # todo adapt scenario and frame selection + self.combo_frame.clear() + self.combo_frame.addItem(frame) + self.combo_scene.clear() + self.combo_scene.addItem(scene)
+ +
[docs] def start(self): + self.timer.start(300) # trigger the step method every 300 ms
+ +
[docs] def stop(self): + self.timer.stop()
+ + + + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/hooks.html b/docs/_build/html/_modules/cosense3d/agents/core/hooks.html new file mode 100644 index 00000000..fe64fb08 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/hooks.html @@ -0,0 +1,695 @@ + + + + + + cosense3d.agents.core.hooks — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.agents.core.hooks

+
+
+import os
+import time
+
+import torch
+import torch.nn.functional as F
+from importlib import import_module
+
+from cosense3d.ops.utils import points_in_boxes_gpu
+
+
+
[docs]class Hooks: + def __init__(self, cfg): + self.hooks = [] + if cfg is None: + return + for hook_cfg in cfg: + self.hooks.append( + globals()[hook_cfg['type']](**hook_cfg) + ) + + def __call__(self, runner, hook_stage, **kwargs): + for hook in self.hooks: + getattr(hook, hook_stage)(runner, **kwargs) + +
[docs] def set_logger(self, logger): + for hook in self.hooks: + hook.set_logger(logger)
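# Editor's illustrative sketch (not part of the original source): the hook
# dispatch pattern used by Hooks. Each config entry names a hook class via
# 'type'; the runner then invokes every hook at a given stage name. The hook
# class, config and runner below are made-up stand-ins.
class PrintIterHook:
    def __init__(self, **kwargs):
        pass
    def post_iter(self, runner, **kwargs):
        print(f"finished iteration {runner.iter}")

class MiniHooks:
    def __init__(self, cfg):
        self.hooks = [globals()[c['type']](**c) for c in (cfg or [])]
    def __call__(self, runner, hook_stage, **kwargs):
        for hook in self.hooks:
            getattr(hook, hook_stage)(runner, **kwargs)

class DummyRunner:
    iter = 1

hooks = MiniHooks([{'type': 'PrintIterHook'}])
hooks(DummyRunner(), 'post_iter')  # prints: finished iteration 1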
+ + +
[docs]class BaseHook: + def __init__(self, **kwargs): + pass + +
[docs] def pre_iter(self, runner, **kwargs): + pass
+ +
[docs] def post_iter(self, runner, **kwargs): + pass
+ +
[docs] def pre_epoch(self, runner, **kwargs): + pass
+ +
[docs] def post_epoch(self, runner, **kwargs): + pass
+ +
[docs] def set_logger(self, logger): + self.logger = logger
+ + +
[docs]class MemoryUsageHook(BaseHook): + def __init__(self, device='cuda:0', **kwargs): + super().__init__(**kwargs) + self.device = device + +
[docs] def post_iter(self, runner, **kwargs): + memory = torch.cuda.max_memory_allocated(self.device) / 1024 / 1024 + torch.cuda.empty_cache() + runner.logger.update(memory=memory)
+ + +
[docs]class CPMStatisticHook(BaseHook): + def __init__(self, device='cuda:0', **kwargs): + super().__init__(**kwargs) + self.device = device + +
[docs] def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'detection_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir
+ +
[docs] def post_epoch(self, runner, **kwargs): + cpm_rec = runner.controller.cav_manager.cpm_size_recorder + thr = runner.controller.cav_manager.cavs[0][0].share_score_thr + ss = (f"########## CPM size @ {thr} ###########\n" + f"Mean: {cpm_rec.mean[0] * 4 / 1024:.2f} KB, Std: {cpm_rec.std[0] * 4 / 1024:.2f} KB") + print(ss) + self.logger.log(ss)
+ + +
[docs]class TrainTimerHook(BaseHook): + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.elapsed_time = 0 + self.start_time = None + self.mean_time_per_itr = None + self.observations = 0 + +
[docs] def pre_epoch(self, runner, **kwargs): + if self.start_time is None: + self.start_time = time.time() + self.last_time = time.time()
+ +
[docs] def post_iter(self, runner, **kwargs): + cur_time = time.time() + self.elapsed_time = (cur_time - self.start_time) / 3600 + # total_run_iter = (runner.total_iter * (runner.epoch - runner.start_epoch)) + runner.iter + # time_per_iter = self.elapsed_time / total_run_iter + time_per_iter = (cur_time - self.last_time) / 3600 + m = self.observations + if self.mean_time_per_itr is None: + self.mean_time_per_itr = time_per_iter + else: + self.mean_time_per_itr = m / (m + 1) * self.mean_time_per_itr + 1 / (m + 1) * time_per_iter + iter_remain = runner.total_iter * (runner.total_epochs - runner.epoch + 1) - runner.iter + time_remain = self.mean_time_per_itr * iter_remain + runner.logger.update(t_remain=time_remain, t_used=self.elapsed_time) + self.last_time = cur_time + self.observations += 1
+ + +
[docs]class CheckPointsHook(BaseHook): + def __init__(self, max_ckpt=3, epoch_every=None, iter_every=None, **kwargs): + super().__init__(**kwargs) + self.max_ckpt = max_ckpt + self.epoch_every = epoch_every + self.iter_every = iter_every + +
[docs] def post_epoch(self, runner, **kwargs): + if runner.gpu_id != 0: + return + self.save(runner, f'epoch{runner.epoch}.pth') + if runner.epoch > self.max_ckpt: + if (self.epoch_every is None or not + (runner.epoch - self.max_ckpt) % self.epoch_every == 0): + filename = os.path.join( + runner.logger.logdir, + f'epoch{runner.epoch - self.max_ckpt}.pth') + if os.path.exists(filename): + os.remove(filename)
+ +
[docs] def post_iter(self, runner, **kwargs): + if runner.gpu_id != 0: + return + if self.iter_every is not None and runner.iter % self.iter_every == 0: + self.save(runner, f'latest.pth')
+ +
[docs] @staticmethod + def save(runner, name): + save_path = os.path.join(runner.logger.logdir, name) + print(f'Saving checkpoint to {save_path}.') + torch.save({ + 'epoch': runner.epoch, + 'model': runner.forward_runner.state_dict(), + 'optimizer': runner.optimizer.state_dict(), + 'lr_scheduler': runner.lr_scheduler.state_dict(), + }, save_path)
+ + +
[docs]class DetectionNMSHook(BaseHook): + def __init__(self, nms_thr, pre_max_size, + det_key='detection', + **kwargs): + super().__init__(**kwargs) + self.nms_thr = nms_thr + self.pre_max_size = pre_max_size + self.nms = import_module('cosense3d.ops.iou3d_nms_utils').nms_gpu + self.det_key = det_key + self.defual_pred_keys = ['box', 'scr', 'lbl', 'idx'] + +
[docs] def post_iter(self, runner, **kwargs): + detection_out = runner.controller.data_manager.gather_ego_data(self.det_key) + preds = [] + cav_ids = [] + for cav_id, values in detection_out.items(): + cav_ids.append(cav_id) + + boxes = values['preds']['box'] + scores = values['preds']['scr'] + labels = values['preds']['lbl'] + indices = values['preds']['idx'] # map index for retrieving features + + out = {} + if 'center' in values: + out['ctr'] = values['center'] + if 'conf' in values: + out['conf'] = values['conf'] + + if len(values['preds']['box']) == 0: + out.update({ + 'box': torch.zeros((0, 7), device=boxes.device), + 'scr': torch.zeros((0,), device=scores.device), + 'lbl': torch.zeros((0,), device=labels.device), + 'idx': torch.zeros(indices.shape[0] if isinstance(indices, torch.Tensor) else (0,), + device=indices.device), + }) + if 'pred' in values['preds']: + out['pred'] = torch.zeros((0, 2, 7), device=boxes.device) + else: + keep = self.nms( + boxes[..., :7], + scores, + thresh=self.nms_thr, + pre_maxsize=self.pre_max_size + ) + out.update({ + 'box': boxes[keep], + 'scr': scores[keep], + 'lbl': labels[keep], + 'idx': indices[keep], + }) + if 'pred' in values['preds'] and values['preds']['pred'] is not None: + out['pred'] = values['preds']['pred'][keep] + assert len(out['pred']) != len(out['box']) + preds.append(out) + + # from cosense3d.utils.vislib import draw_points_boxes_plt + # points = out['ctr'].detach().cpu().numpy() + # boxes = out['box'].detach().cpu().numpy() + # draw_points_boxes_plt( + # pc_range=[-140.8, -38.4, -3.0, 140.8, 38.4, 1.0], + # boxes_pred=boxes, + # points=points, + # filename="/home/yuan/Pictures/tmp.png" + # ) + + runner.controller.data_manager.scatter(cav_ids, {self.det_key: preds})
+ + +
[docs]class EvalDetectionBEVHook(BaseHook): + def __init__(self, pc_range, iou_thr=[0.5, 0.7], save_result=False, + det_key='detection', gt_key='global_bboxes_3d', **kwargs): + super().__init__(**kwargs) + self.iou_thr = iou_thr + self.pc_range = pc_range + self.save_result = save_result + self.det_key = det_key + self.gt_key = gt_key + self.result = {iou: {'tp': [], 'fp': [], 'gt': 0, 'scr': []} for iou in iou_thr} + self.eval_funcs = import_module('cosense3d.utils.eval_detection_utils') + +
[docs] def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'detection_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir
+ +
[docs] def post_iter(self, runner, **kwargs): + detection = runner.controller.data_manager.gather_ego_data(self.det_key) + gt_boxes = runner.controller.data_manager.gather_ego_data(self.gt_key) + + for i, (cav_id, preds) in enumerate(detection.items()): + if 'preds' in preds: + preds = preds['preds'] + preds['box'], preds['scr'], preds['lbl'], preds['idx'], preds['time'] = \ + self.filter_box_ranges(preds['box'], preds['scr'], preds['lbl'], preds['idx'], preds.get('time', None)) + cur_gt_boxes = self.filter_box_ranges(gt_boxes[cav_id])[0] + cur_points = runner.controller.data_manager.gather_batch(i, 'points') + + if self.save_result: + ego_key = cav_id + senario = runner.controller.data_manager.gather_ego_data('scenario')[ego_key] + frame = runner.controller.data_manager.gather_ego_data('frame')[ego_key] + filename = f"{senario}.{frame}.{ego_key.split('.')[1]}.pth" + result = {'detection': preds, + 'gt_boxes': cur_gt_boxes, + 'points': cur_points} + torch.save(result, os.path.join(self.logdir, filename)) + + for iou in self.iou_thr: + self.eval_funcs.caluclate_tp_fp( + preds['box'][..., :7], preds['scr'], cur_gt_boxes[..., :7], self.result, iou + )
+ +
[docs] def filter_box_ranges(self, boxes, scores=None, labels=None, indices=None, times=None): + mask = boxes.new_ones((len(boxes),)).bool() + if boxes.ndim == 3: + centers = boxes.mean(dim=1) + else: + centers = boxes[:, :3] + for i in range(3): + mask = mask & (centers[:, i] > self.pc_range[i]) & (centers[:, i] < self.pc_range[i + 3]) + boxes = boxes[mask] + if scores is not None: + scores = scores[mask] + if labels is not None: + labels = labels[mask] + if indices is not None: + try: + indices = indices[mask] + except: + print("Number of boxes doesn't match the number of indices") + if times is not None: + times = times[mask] + return boxes, scores, labels, indices, times
+ +
[docs] def post_epoch(self, runner, **kwargs): + fmt_str = ("################\n" + "DETECTION RESULT\n" + "################\n") + out_dict = self.eval_funcs.eval_final_results( + self.result, + self.iou_thr, + global_sort_detections=True + ) + fmt_str += "OPV2V BEV Global sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + + print(fmt_str) + self.logger.log(fmt_str)
+ +
[docs] def format_final_result(self, out_dict): + fmt_str = "" + for iou in self.iou_thr: + iou_str = f"{int(iou * 100)}" + fmt_str += f"AP@{iou_str}: {out_dict[f'ap_{iou_str}']:.3f}\n" + return fmt_str
+ + +
[docs]class EvalDetectionHook(BaseHook): + def __init__(self, pc_range, iou_thr=[0.5, 0.7], metrics=['CoSense3D'], save_result=False, + det_key='detection', gt_key='global_bboxes_3d', **kwargs): + super().__init__(**kwargs) + self.iou_thr = iou_thr + self.pc_range = pc_range + self.save_result = save_result + self.det_key = det_key + self.gt_key = gt_key + for m in metrics: + assert m in ['OPV2V', 'CoSense3D'] + setattr(self, f'{m.lower()}_result', + {iou: {'tp': [], 'fp': [], 'gt': 0, 'scr': []} for iou in iou_thr}) + self.metrics = metrics + self.eval_funcs = import_module('cosense3d.utils.eval_detection_utils') + +
[docs] def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'detection_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir
+ +
[docs] def post_iter(self, runner, **kwargs): + detection = runner.controller.data_manager.gather_ego_data(self.det_key) + gt_boxes = runner.controller.data_manager.gather_ego_data(self.gt_key) + + for i, (cav_id, preds) in enumerate(detection.items()): + if 'preds' in preds: + preds = preds['preds'] + preds['box'], preds['scr'], preds['lbl'], preds['idx'], preds['time'] = \ + self.filter_box_ranges(preds['box'], preds['scr'], preds['lbl'], preds['idx'], preds.get('time', None)) + cur_gt_boxes = self.filter_box_ranges(gt_boxes[cav_id])[0] + cur_points = runner.controller.data_manager.gather_batch(i, 'points') + + if self.save_result: + ego_key = cav_id + senario = runner.controller.data_manager.gather_ego_data('scenario')[ego_key] + frame = runner.controller.data_manager.gather_ego_data('frame')[ego_key] + filename = f"{senario}.{frame}.{ego_key.split('.')[1]}.pth" + result = {'detection': preds, + 'gt_boxes': cur_gt_boxes, + 'points': cur_points} + torch.save(result, os.path.join(self.logdir, filename)) + + for iou in self.iou_thr: + if 'OPV2V' in self.metrics: + result_dict = getattr(self, f'opv2v_result') + self.eval_funcs.caluclate_tp_fp( + preds['box'][..., :7], preds['scr'], cur_gt_boxes[..., :7], result_dict, iou + ) + if 'CoSense3D' in self.metrics: + result_dict = getattr(self, f'cosense3d_result') + tp = self.eval_funcs.ops_cal_tp( + preds['box'][..., :7].detach(), cur_gt_boxes[..., :7].detach(), IoU_thr=iou + ) + result_dict[iou]['tp'].append(tp.cpu()) + result_dict[iou]['gt'] += len(cur_gt_boxes) + result_dict[iou]['scr'].append(preds['scr'].detach().cpu())
+ +
[docs] def filter_box_ranges(self, boxes, scores=None, labels=None, indices=None, times=None): + mask = boxes.new_ones((len(boxes),)).bool() + if boxes.ndim == 3: + centers = boxes.mean(dim=1) + else: + centers = boxes[:, :3] + for i in range(3): + mask = mask & (centers[:, i] > self.pc_range[i]) & (centers[:, i] < self.pc_range[i + 3]) + boxes = boxes[mask] + if scores is not None: + scores = scores[mask] + if labels is not None: + labels = labels[mask] + if indices is not None: + try: + indices = indices[mask] + except: + print("Number of boxes doesn't match the number of indices") + if times is not None: + times = times[mask] + return boxes, scores, labels, indices, times
+ +
[docs] def post_epoch(self, runner, **kwargs): + fmt_str = ("################\n" + "DETECTION RESULT\n" + "################\n") + if 'OPV2V' in self.metrics: + result_dict = getattr(self, f'opv2v_result') + out_dict = self.eval_funcs.eval_final_results( + result_dict, + self.iou_thr, + global_sort_detections=True + ) + fmt_str += "OPV2V BEV Global sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + + out_dict = self.eval_funcs.eval_final_results( + result_dict, + self.iou_thr, + global_sort_detections=False + ) + fmt_str += "OPV2V BEV Local sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + if 'CoSense3D' in self.metrics: + out_dict = self.eval_cosense3d_final() + fmt_str += "CoSense3D Global sorted:\n" + fmt_str += self.format_final_result(out_dict) + fmt_str += "----------------\n" + print(fmt_str) + self.logger.log(fmt_str)
+ +
[docs] def format_final_result(self, out_dict): + fmt_str = "" + for iou in self.iou_thr: + iou_str = f"{int(iou * 100)}" + fmt_str += f"AP@{iou_str}: {out_dict[f'ap_{iou_str}']:.3f}\n" + # fmt_str += f"Precision@{iou_str}: {out_dict[f'mpre_{iou_str}']:.3f}\n" + # fmt_str += f"Recall@{iou_str}: {out_dict[f'mrec_{iou_str}']:.3f}\n" + return fmt_str
+ +
[docs] def eval_cosense3d_final(self): + out_dict = {} + result_dict = getattr(self, f'cosense3d_result') + for iou in self.iou_thr: + scores = torch.cat(result_dict[iou]['scr'], dim=0) + tps = torch.cat(result_dict[iou]['tp'], dim=0) + n_pred = len(scores) + n_gt = result_dict[iou]['gt'] + + ap, mpre, mrec, _ = self.eval_funcs.cal_ap_all_point(scores, tps, n_pred, n_gt) + iou_str = f"{int(iou * 100)}" + out_dict.update({f'ap_{iou_str}': ap, + f'mpre_{iou_str}': mpre, + f'mrec_{iou_str}': mrec}) + return out_dict
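# Editor's illustrative sketch (not part of the original source): how average
# precision can be derived from per-detection scores and true-positive flags, in
# the spirit of cal_ap_all_point used above. This is a generic all-point AP, not
# the project's exact implementation; the inputs are made-up.
import torch

def ap_all_point(scores, tps, n_gt):
    order = torch.argsort(scores, descending=True)
    tp = tps[order].float()
    tp_cum = torch.cumsum(tp, dim=0)
    fp_cum = torch.cumsum(1 - tp, dim=0)
    recall = tp_cum / max(n_gt, 1)
    precision = tp_cum / (tp_cum + fp_cum)
    # integrate precision over recall (precision envelope omitted for brevity)
    return torch.trapz(precision, recall).item()

scores = torch.tensor([0.9, 0.8, 0.7, 0.6])
tps = torch.tensor([1, 0, 1, 1])
print(f"AP: {ap_all_point(scores, tps, n_gt=4):.3f}")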
+ + +
[docs]class EvalBEVSemsegHook(BaseHook): + def __init__(self, + test_range, + test_res=0.4, + save_result=False, + eval_static=True, + bev_semseg_key='bev_semseg', + gt_bev_key='bevmap', + gt_boxes_key='global_bboxes_3d', + **kwargs): + super().__init__(**kwargs) + self.test_range = test_range + self.test_res = test_res + self.save_result = save_result + self.eval_static = eval_static + self.bev_semseg_key = bev_semseg_key + self.gt_bev_key = gt_bev_key + self.gt_boxes_key = gt_boxes_key + self.thrs = torch.arange(0.1, 1.1, 0.1) + self.sx = int(round((self.test_range[3] - self.test_range[0]) / self.test_res)) + self.sy = int(round((self.test_range[4] - self.test_range[1]) / self.test_res)) + + self.res_dict = { + 'iou_dynamic_all': [], + 'iou_dynamic_obs': [], + 'iou_static_all': [], + 'iou_static_obs': [], + } + +
[docs] def set_logger(self, logger): + super().set_logger(logger) + logdir = os.path.join(logger.logdir, 'bev_semseg_eval') + os.makedirs(logdir, exist_ok=True) + self.logdir = logdir
+ +
[docs] def post_iter(self, runner, **kwargs): + scene_tokens = runner.controller.data_manager.gather_ego_data('scene_tokens') + frame = runner.controller.data_manager.gather_ego_data('frame') + semseg = runner.controller.data_manager.gather_ego_data(self.bev_semseg_key) + gt_bevmaps = runner.controller.data_manager.gather_ego_data(self.gt_bev_key) + gt_boxes = runner.controller.data_manager.gather_ego_data(self.gt_boxes_key) + for i, (cav_id, preds) in enumerate(semseg.items()): + token = f'{scene_tokens[cav_id]}.{frame[cav_id]}' + gt_dynamic_map = self.gt_dynamic_map(gt_boxes[cav_id]) + self.cal_ious(preds, gt_dynamic_map, 'dynamic', token) + if self.eval_static: + gt_static_map = self.gt_static_map(gt_bevmaps[cav_id]) + self.cal_ious(preds, gt_static_map, 'static', token)
+ +
[docs] def cal_ious(self, preds, gt_map, tag, token=None): + conf = self.crop_map(preds[f'conf_map_{tag}']) + unc = self.crop_map(preds[f'unc_map_{tag}']) + obs_mask = self.crop_map(preds[f'obs_mask_{tag}']) + self.res_dict[f'iou_{tag}_all'].append(self.iou(conf, unc, gt_map)) + self.res_dict[f'iou_{tag}_obs'].append(self.iou(conf, unc, gt_map, obs_mask)) + + if self.save_result: + img = torch.cat([gt_map, unc, conf[..., 1]], dim=0).detach().cpu().numpy() + import matplotlib.pyplot as plt + plt.imshow(img.T) + plt.savefig(os.path.join(self.logdir, f'{token}.{tag}.jpg')) + plt.close()
+ +
[docs] def iou(self, conf, unc, gt, obs_mask=None): + ious = [] + for thr in self.thrs: + if obs_mask is None: + pos_mask = torch.argmax(conf, dim=-1).bool() + pos_mask = torch.logical_and(pos_mask, unc <= thr) + gt_ = gt + else: + pos_mask = torch.argmax(conf[obs_mask], dim=-1).bool() + pos_mask = torch.logical_and(pos_mask, unc[obs_mask] <= thr) + gt_ = gt[obs_mask] + mi = torch.logical_and(pos_mask, gt_).sum() + mu = torch.logical_or(pos_mask, gt_).sum() + ious.append(mi / mu) + return torch.stack(ious, dim=0)
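# Editor's illustrative sketch (not part of the original source): the
# thresholded BEV IoU computed by EvalBEVSemsegHook.iou above. A cell counts as
# positive when argmax(conf) hits the foreground class and its uncertainty is
# below the threshold. All maps below are random stand-ins.
import torch

def bev_iou(conf, unc, gt, thr):
    pos = torch.argmax(conf, dim=-1).bool() & (unc <= thr)
    inter = torch.logical_and(pos, gt).sum()
    union = torch.logical_or(pos, gt).sum()
    return (inter / union.clamp(min=1)).item()

conf = torch.rand(100, 100, 2)        # per-cell class confidences
unc = torch.rand(100, 100)            # per-cell uncertainty
gt = torch.rand(100, 100) > 0.5       # ground-truth occupancy
for thr in (0.3, 0.5, 1.0):
    print(f"IoU@unc<={thr}: {bev_iou(conf, unc, gt, thr):.3f}")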
+ +
[docs] def gt_dynamic_map(self, boxes): + # filter box range + mask = boxes.new_ones((len(boxes),)).bool() + dynamic_map = torch.ones((self.sx, self.sy), device=boxes.device) + centers = boxes[:, :3] + for i in range(3): + mask = mask & (centers[:, i] > self.test_range[i]) & (centers[:, i] < self.test_range[i + 3]) + boxes = boxes[mask] + if len(boxes) > 0: + indices = torch.stack(torch.where(dynamic_map), dim=1) + xy = indices.float() + xy = (xy + 0.5) * self.test_res + xy[:, 0] += self.test_range[0] + xy[:, 1] += self.test_range[1] + xyz = F.pad(xy, (1, 1), 'constant', 0.0) + boxes = F.pad(boxes, (1, 0), 'constant', 0.0) + boxes[:, 3] = 0 + boxes_decomposed, box_idx_of_pts = points_in_boxes_gpu( + xyz, boxes, batch_size=1 + ) + inds = indices[box_idx_of_pts >= 0].T + dynamic_map[inds[0], inds[1]] = 0 + dynamic_map = torch.logical_not(dynamic_map) + return dynamic_map
+ +
[docs] def gt_static_map(self, bevmap): + # map has higher resolution, downsample 2x + # bevmap = torch.flip(bevmap, [0]) + return bevmap[::2, ::2]
+ +
[docs] def crop_map(self, bevmap): + sx, sy = bevmap.shape[:2] + sx_crop = (sx - self.sx) // 2 + sy_crop = (sy - self.sy) // 2 + return bevmap[sx_crop:-sx_crop, sy_crop:-sy_crop]
+ +
[docs] def post_epoch(self, runner, **kwargs): + fmt_str = ("#################\n" + "BEV SEMSEG RESULT\n" + "#################\n") + fmt_str += f"{'thr':18s} | " + " ".join([f"{v:4.1f} " for v in self.thrs]) + "\n" + fmt_str += "-" * (23 + 70) + "\n" + for k, vs in self.res_dict.items(): + vs = torch.stack(vs, dim=0).mean(dim=0) * 100 + if isinstance(vs, int): + continue + s1 = f"{k:18s} | " + if isinstance(vs, float): + s2 = f"{vs:4.1f} \n" + else: + s2 = " ".join([f"{v:4.1f} " for v in vs]) + "\n" + fmt_str += s1 + s2 + print(fmt_str) + self.logger.log(fmt_str)
+ + + + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/task_manager.html b/docs/_build/html/_modules/cosense3d/agents/core/task_manager.html new file mode 100644 index 00000000..e631a5f9 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/task_manager.html @@ -0,0 +1,165 @@ + + + + + + cosense3d.agents.core.task_manager — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for cosense3d.agents.core.task_manager

+
+
+from collections import OrderedDict
+
+
+
[docs]class TaskManager: + def __init__(self): + pass + +
[docs] def summarize_tasks(self, tasks): + tasks_out = {0: {'no_grad': [], 'with_grad': []}, + 1: {'no_grad': [], 'with_grad': []}, + 2: {'no_grad': [], 'with_grad': []}, + 3: {'loss': []}} + no_grad0, no_grad1, no_grad2, _ = self.reformat_tasks(tasks['no_grad']) + with_grad0, with_grad1, with_grad2, _ = self.reformat_tasks(tasks['with_grad']) + tasks_out[0]['no_grad'] = no_grad0 + tasks_out[0]['with_grad'] = with_grad0 + tasks_out[1]['no_grad'] = no_grad1 + tasks_out[1]['with_grad'] = with_grad1 + tasks_out[2]['no_grad'] = no_grad2 + tasks_out[2]['with_grad'] = with_grad2 + tasks_out[3]['loss'] = self.reformat_tasks(tasks['loss'])[3] + return tasks_out
+ +
[docs] def summarize_loss_tasks(self, tasks): + return self.reformat_tasks(tasks)
+ +
[docs] def reformat_tasks(self, task_list): + task_out = ({}, {}, {}, {}) # one dict per stage (four stages) + if len(task_list) == 0: + return task_out + for task in task_list: + cav_id, task_label, args = task + stage_order, task_name = task_label.split(':') + stage = int(stage_order[0]) + order = int(stage_order[1:]) + task_name = task_name.strip() + if order not in task_out[stage]: + task_out[stage][order] = {} + if task_name not in task_out[stage][order]: + task_out[stage][order][task_name] = [] + task_out[stage][order][task_name].append((cav_id, args)) + + task_out = [self.task_to_ordered_dict(tasks) for tasks in task_out] + return task_out
+ +
[docs] def task_to_ordered_dict(self, tasks): + orders = sorted(tasks) + ordered_task = OrderedDict() + for i in orders: + for k, v in tasks[i].items(): + ordered_task[k] = v + return ordered_task
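# Editor's illustrative sketch (not part of the original source): the task-label
# convention parsed by reformat_tasks/task_to_ordered_dict above. A label such
# as '21:det_head' means stage 2, execution order 1, module 'det_head'. The task
# tuples below are made-up examples.
tasks = [
    ('cav.0', '11:backbone', {}),
    ('cav.1', '11:backbone', {}),
    ('cav.0', '22:det_head', {}),
]

stages = ({}, {}, {}, {})
for cav_id, label, args in tasks:
    stage_order, name = label.split(':')
    stage, order = int(stage_order[0]), int(stage_order[1:])
    stages[stage].setdefault(order, {}).setdefault(name.strip(), []).append((cav_id, args))

print(stages[1])  # {1: {'backbone': [('cav.0', {}), ('cav.1', {})]}}
print(stages[2])  # {2: {'det_head': [('cav.0', {})]}}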
+ + + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/test_runner.html b/docs/_build/html/_modules/cosense3d/agents/core/test_runner.html new file mode 100644 index 00000000..371b295b --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/test_runner.html @@ -0,0 +1,182 @@ + + + + + + cosense3d.agents.core.test_runner — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for cosense3d.agents.core.test_runner

+
+
+import os, glob, logging
+from tqdm import tqdm
+
+from cosense3d.utils.train_utils import *
+from cosense3d.utils.logger import TestLogger
+from cosense3d.agents.core.base_runner import BaseRunner
+
+
+
[docs]class TestRunner(BaseRunner): + def __init__(self, + load_from=None, + logdir=None, + **kwargs + ): + super().__init__(**kwargs) + ckpt = self.load(load_from) + self.progress_bar = tqdm(total=self.total_iter) + self.setup_logger(ckpt, logdir) + self.forward_runner.eval() + +
[docs] def setup_logger(self, ckpt, logdir): + if logdir is None: + logdir = ckpt[:-4] + else: + logdir = os.path.join(logdir, f'test_{os.path.basename(ckpt)[:-4]}') + self.logger = TestLogger(logdir) + self.hooks.set_logger(self.logger)
+ +
[docs] def load(self, load_from): + assert load_from is not None, "load path not given." + assert os.path.exists(load_from), f'resume path does not exist: {load_from}.' + if os.path.isfile(load_from): + ckpt = load_from + else: + ckpts = glob.glob(os.path.join(load_from, 'epoch*.pth')) + if len(ckpts) > 0: + epochs = [int(os.path.basename(ckpt)[5:-4]) for ckpt in ckpts] + max_idx = epochs.index(max(epochs)) + ckpt = ckpts[max_idx] + elif os.path.exists(os.path.join(load_from, 'last.pth')): + ckpt = os.path.join(load_from, 'last.pth') + else: + raise IOError('No checkpoint found.') + logging.info(f"Resuming the model from checkpoint: {ckpt}") + ckpt_dict = torch.load(ckpt) + load_model_dict(self.forward_runner, ckpt_dict['model']) + return ckpt
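# Editor's illustrative sketch (not part of the original source): resolving the
# newest 'epoch*.pth' checkpoint from a log directory, mirroring the selection
# logic in load() above. The directory path in the comment is a made-up example.
import glob, os

def latest_checkpoint(logdir):
    ckpts = glob.glob(os.path.join(logdir, 'epoch*.pth'))
    if len(ckpts) > 0:
        epochs = [int(os.path.basename(c)[5:-4]) for c in ckpts]
        return ckpts[epochs.index(max(epochs))]
    last = os.path.join(logdir, 'last.pth')
    if os.path.exists(last):
        return last
    raise IOError('No checkpoint found.')

# latest_checkpoint('work_dir/my_run')  ->  e.g. 'work_dir/my_run/epoch50.pth'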
+ +
[docs] def run(self): + self.hooks(self, 'pre_epoch') + for data in self.dataloader: + self.run_itr(data) + self.progress_bar.close() + self.hooks(self, 'post_epoch')
+ +
[docs] def step(self): + data = self.next_batch() + self.run_itr(data) + if self.iter == self.total_iter: + self.hooks(self, 'post_epoch')
+ +
[docs] def run_itr(self, data): + self.hooks(self, 'pre_iter') + load_tensors_to_gpu(data) + self.controller.test_forward(data) + self.hooks(self, 'post_iter') + self.iter += 1 + self.progress_bar.update(1)
+ + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/train_runner.html b/docs/_build/html/_modules/cosense3d/agents/core/train_runner.html new file mode 100644 index 00000000..51fdeebb --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/train_runner.html @@ -0,0 +1,251 @@ + + + + + + cosense3d.agents.core.train_runner — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for cosense3d.agents.core.train_runner

+
+
+import os, glob, logging, warnings
+from datetime import datetime
+
+from torch.nn.parallel import DistributedDataParallel as DDP
+
+from cosense3d.utils.train_utils import *
+from cosense3d.utils.lr_scheduler import build_lr_scheduler
+from cosense3d.utils.logger import LogMeter
+from cosense3d.utils.misc import ensure_dir
+from cosense3d.agents.core.base_runner import BaseRunner
+from cosense3d.agents.utils.deco import save_ckpt_on_error
+
+
+
[docs]class TrainRunner(BaseRunner): + def __init__(self, + max_epoch, + optimizer, + lr_scheduler, + gpus=0, + resume_from=None, + load_from=None, + run_name='default', + log_dir='work_dir', + use_wandb=False, + debug=False, + **kwargs + ): + super().__init__(**kwargs) + self.gpus = gpus + self.gpu_id = 0 + self.dist = False + self.debug = debug + if gpus > 0: + self.dist = True + self.gpu_id = int(os.environ.get("LOCAL_RANK", 0)) + self.forward_runner.to_gpu(self.gpu_id) + self.forward_runner = DDP(self.forward_runner, device_ids=[self.gpu_id]) + self.optimizer = build_optimizer(self.forward_runner, optimizer) + self.lr_scheduler = build_lr_scheduler(self.optimizer, lr_scheduler, + len(self.dataloader)) + self.total_epochs = max_epoch + self.start_epoch = 1 + + self.resume(resume_from, load_from) + self.setup_logger(resume_from, run_name, log_dir, use_wandb) + +
[docs] def setup_logger(self, resume_from, run_name, log_dir, use_wandb): + if resume_from is not None: + if os.path.isfile(resume_from): + log_path = os.path.dirname(resume_from) + else: + log_path = resume_from + else: + now = datetime.now().strftime('%m-%d-%H-%M-%S') + run_name = run_name + '_' + now + log_path = os.path.join(log_dir, run_name) + ensure_dir(log_path) + wandb_project_name = run_name if use_wandb else None + self.logger = LogMeter(self.total_iter, log_path, log_every=self.log_every, + wandb_project=wandb_project_name)
+ +
[docs] def resume(self, resume_from, load_from): + if resume_from is not None or load_from is not None: + load_path = resume_from if resume_from is not None else load_from + assert os.path.exists(load_path), f'resume/load path does not exist: {resume_from}.' + if os.path.isdir(load_path): + ckpts = glob.glob(os.path.join(load_path, 'epoch*.pth')) + if len(ckpts) > 0: + epochs = [int(os.path.basename(ckpt)[5:-4]) for ckpt in ckpts] + max_idx = epochs.index(max(epochs)) + ckpt = ckpts[max_idx] + elif os.path.exists(os.path.join(load_path, 'last.pth')): + ckpt = os.path.join(load_path, 'last.pth') + else: + raise IOError(f'No checkpoint found in directory {load_path}.') + elif os.path.isfile(load_path): + ckpt = load_path + else: + raise IOError(f'Failed to load checkpoint from {load_path}.') + logging.info(f"Resuming the model from checkpoint: {ckpt}") + ckpt = torch.load(ckpt) + load_model_dict(self.forward_runner, ckpt['model']) + if resume_from is not None: + self.start_epoch = ckpt['epoch'] + 1 + self.epoch = ckpt['epoch'] + 1 + if 'lr_scheduler' in ckpt: + self.lr_scheduler.load_state_dict(ckpt['lr_scheduler']) + try: + if 'optimizer' in ckpt: + self.optimizer.load_state_dict(ckpt['optimizer']) + except: + warnings.warn("Cannot load optimizer state_dict, " + "there might be training parameter changes, " + "please consider using 'load-from'.")
+ +
[docs] def run(self): + with torch.autograd.set_detect_anomaly(True): + for i in range(self.start_epoch, self.total_epochs + 1): + self.hooks(self, 'pre_epoch') + self.run_epoch() + self.hooks(self, 'post_epoch') + self.lr_scheduler.step_epoch(i) + self.epoch += 1 + self.iter = 1
+ +
[docs] def step(self): + data = self.next_batch() + self.run_itr(data)
+ +
[docs] def run_epoch(self): + if self.dist: + self.dataloader.sampler.set_epoch(self.epoch) + for data in self.dataloader: + # print(f'{self.gpu_id}: run_itr{self.iter}: 0') + self.hooks(self, 'pre_iter') + self.run_itr(data) + self.hooks(self, 'post_iter')
+ + @save_ckpt_on_error + def run_itr(self, data): + load_tensors_to_gpu(data, self.gpu_id) + self.optimizer.zero_grad() + total_loss, loss_dict = self.controller.train_forward( + data, epoch=self.epoch, itr=self.iter, gpu_id=self.gpu_id) + total_loss.backward() + + grad_norm = clip_grads(self.controller.parameters) + loss_dict['grad_norm'] = grad_norm + # Updating parameters + self.optimizer.step() + + self.lr_scheduler.step_itr(self.iter + self.epoch * self.total_iter) + + if self.logger is not None and self.gpu_id == 0: + # rec_lr = self.lr_scheduler.optimizer.param_groups[0]['lr'] + rec_lr = self.lr_scheduler.get_last_lr()[0] + self.logger.log(self.epoch, self.iter, rec_lr, **loss_dict) + + del data + self.iter += 1
+ + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/core/vis_runner.html b/docs/_build/html/_modules/cosense3d/agents/core/vis_runner.html new file mode 100644 index 00000000..0375b7f0 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/core/vis_runner.html @@ -0,0 +1,169 @@ + + + + + + cosense3d.agents.core.vis_runner — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for cosense3d.agents.core.vis_runner

+
+
+import os, glob, logging
+from tqdm import tqdm
+from datetime import datetime
+
+from cosense3d.utils.train_utils import *
+from cosense3d.utils.logger import TestLogger
+from cosense3d.utils.misc import ensure_dir, setup_logger
+from cosense3d.agents.core.base_runner import BaseRunner
+
+
+
[docs]class VisRunner(BaseRunner): + def __init__(self, + **kwargs + ): + super().__init__(**kwargs) + self.progress_bar = tqdm(total=self.total_iter) + +
[docs] def load(self, load_from): + assert load_from is not None, "load path not given." + assert os.path.exists(load_from), f'resume path does not exist: {load_from}.' + if os.path.isfile(load_from): + ckpt = load_from + else: + ckpts = glob.glob(os.path.join(load_from, 'epoch*.pth')) + if len(ckpts) > 0: + epochs = [int(os.path.basename(ckpt)[5:-4]) for ckpt in ckpts] + max_idx = epochs.index(max(epochs)) + ckpt = ckpts[max_idx] + elif os.path.exists(os.path.join(load_from, 'last.pth')): + ckpt = os.path.join(load_from, 'last.pth') + else: + raise IOError('No checkpoint found.') + logging.info(f"Resuming the model from checkpoint: {ckpt}") + ckpt_dict = torch.load(ckpt) + load_model_dict(self.forward_runner, ckpt_dict['model']) + return ckpt
+ +
[docs] def run(self): + for data in self.dataloader: + self.run_itr(data) + self.progress_bar.close()
+ +
[docs] def step(self): + data = self.next_batch() + self.run_itr(data)
+ +
[docs] def run_itr(self, data): + self.hooks(self, 'pre_iter') + load_tensors_to_gpu(data) + self.controller.vis_forward(data) + + self.hooks(self, 'post_iter') + self.iter += 1 + self.progress_bar.update(1)
+ + + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/utils/deco.html b/docs/_build/html/_modules/cosense3d/agents/utils/deco.html new file mode 100644 index 00000000..9b50411a --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/utils/deco.html @@ -0,0 +1,123 @@ + + + + + + cosense3d.agents.utils.deco — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.agents.utils.deco

+
+
+from cosense3d.agents.core.hooks import CheckPointsHook
+
+
+
[docs]def save_ckpt_on_error(func): + def wrapper(*args, **kwargs): + try: + result = func(*args, **kwargs) + return result + except Exception as e: + CheckPointsHook.save(args[0], f'debug_ep{args[0].epoch}.pth') + print(f"Exception caught in {func.__name__}: {e}") + raise e + return wrapper
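# Editor's illustrative sketch (not part of the original source): the decorator
# pattern used by save_ckpt_on_error above, i.e. persist state before the
# exception is re-raised. The toy runner and the save callback are made-up
# stand-ins for the real CheckPointsHook.save.
def save_on_error(save_fn):
    def deco(func):
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception as e:
                save_fn(self)  # dump a debug checkpoint before propagating
                print(f"Exception caught in {func.__name__}: {e}")
                raise
        return wrapper
    return deco

class ToyRunner:
    epoch = 7

    @save_on_error(lambda self: print(f"saving debug_ep{self.epoch}.pth"))
    def run_itr(self, data):
        raise RuntimeError("NaN loss")

# ToyRunner().run_itr(None)  # prints the save message, then re-raises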
+ + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/utils/transform.html b/docs/_build/html/_modules/cosense3d/agents/utils/transform.html new file mode 100644 index 00000000..1d9b7911 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/utils/transform.html @@ -0,0 +1,473 @@ + + + + + + cosense3d.agents.utils.transform — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for cosense3d.agents.utils.transform

+
+
+import torch
+import numpy as np
+from scipy.spatial.transform.rotation import Rotation as R
+from torch_scatter import scatter_mean
+
+from cosense3d.utils import pclib, box_utils
+from cosense3d.modules.utils.common import limit_period
+
+
+
[docs]def add_rotate(tf, rot): + if isinstance(rot, list) and len(rot) == 3: + # param: [roll, pitch, yaw] in radian + rot = pclib.rotation_matrix(rot, degrees=False) + rot = torch.from_numpy(rot).to(tf.device) + tf[:3, :3] = rot @ tf[:3, :3] + elif isinstance(rot, torch.Tensor): + assert rot.shape[0] == 4 + tf = rot.to(tf.device) @ tf + else: + raise NotImplementedError + return tf
+ + +
[docs]def add_flip(tf, flip_idx, flip_axis='xy'): + # flip_idx =1 : flip x + # flip_idx =2 : flip y + # flip_idx =3 : flip x & y + rot = torch.eye(4).to(tf.device) + # flip x + if 'x' in flip_axis and (flip_idx == 1 or flip_idx == 3): + rot[0, 0] *= -1 + # flip y + if 'y' in flip_axis and (flip_idx == 2 or flip_idx == 3): + rot[1, 1] *= -1 + tf = rot @ tf + return tf
+ + +
[docs]def add_scale(tf, scale_ratio): + scale = torch.eye(4).to(tf.device) + scale[[0, 1, 2], [0, 1, 2]] = scale_ratio + tf = scale @ tf + return tf
+ + +
[docs]def apply_transform(data, transform, key): + if (transform.cpu() == torch.eye(4)).all(): + return + if key == 'points': + C = data['points'].shape[-1] + points = data['points'][:, :3] + points = torch.cat([points, torch.ones_like(points[:, :1])], dim=-1).T + points = (transform @ points).T + + if C > 3: + data['points'] = torch.cat([points[:, :3], + data['points'][:, 3:]], dim=-1) + else: + data['points'] = points + elif 'annos_global' == key or 'annos_local' == key: + box_key = f"{key.split('_')[1]}_bboxes_3d" + if box_key not in data or data[box_key] is None: + return + boxes = data[box_key] + data[box_key][:, :7] = box_utils.transform_boxes_3d(boxes[:, :7], transform, mode=7) + elif key == 'annos_global_pred': + preds = data['bboxes_3d_pred'] + boxes = data['global_bboxes_3d'][..., :7].detach().clone() + boxes = boxes.unsqueeze(0).repeat(2, 1, 1) + boxes[..., [0, 1, 2, 6]] = data['bboxes_3d_pred'] + boxes = box_utils.transform_boxes_3d(boxes.view(-1, 7), transform, mode=7) + data['bboxes_3d_pred'] = boxes[..., [0, 1, 2, 6]].reshape(*preds.shape) + elif key == 'img': + for i in range(len(data['img'])): + data['extrinsics'][i] = data['extrinsics'][i] @ transform.inverse() + data['lidar2img'][i] = data['intrinsics'][i] @ data['extrinsics'][i] + elif key == 'bev_tgt_pts' and key in data: + if key not in data or data[key] is None: + return + points = data['bev_tgt_pts'].clone() + points[:, 2] = 0 + points = torch.cat([points, torch.ones_like(points[:, :1])], dim=-1).T + points = (transform @ points).T + data['bev_tgt_pts'][:, :2] = points[:, :2] + elif 'roadline' in key and key in data: + if key not in data or data[key] is None: + return + points = data[key][:, :2].clone() + points = torch.cat([points, torch.ones_like(points)], dim=-1).T + points[2] = 0 + points = (transform @ points).T + data[key][:, :2] = points[:, :2]
+ + +
[docs]def filter_range(data, lidar_range, key): + if 'points' in key: + mask = filter_range_mask(data[key], lidar_range) + points = data[key][mask] + if len(points) == 0: + # pad empty point cloud with random points to ensure batch norm validity + points = data[key].new_zeros((8, points.shape[-1])) + points[:, :2] = torch.rand_like(points[:, :2]) * 2 - 1 + points[:, 3] = -1 + points[:, -1] = data[key][:, -1].min() + data[key] = points + elif 'annos_global' == key or 'annos_local' == key: + coor = key.split('_')[1] + if f'{coor}_bboxes_3d' not in data or data[f'{coor}_bboxes_3d'] is None: + return + mask = filter_range_mask(data[f'{coor}_bboxes_3d'][:, :3], lidar_range) + data[f'{coor}_bboxes_3d'] = data[f'{coor}_bboxes_3d'][mask] + data[f'{coor}_labels_3d'] = data[f'{coor}_labels_3d'][mask] + data[f'{coor}_bboxes_id'] = data[f'{coor}_bboxes_id'][mask] + data[f'{coor}_names'] = [data[f'{coor}_names'][i] for i, m in enumerate(mask) if m] + if coor == 'global' and 'bboxes_3d_pred' in data: + data['bboxes_3d_pred'] = data['bboxes_3d_pred'][:, mask]
+ + +
[docs]def filter_range_mask(points, lidar_range, eps=1e-4): + lr = lidar_range.to(points.device) + mask = (points[:, :3] > lr[:3].view(1, 3) + eps) & (points[:, :3] < lr[3:].view(1, 3) - eps) + return mask.all(dim=-1)
+ + +
[docs]def generate_bev_tgt_pts(points, data, transform=None, sam_res=0.4, map_res=0.2, range=50, + max_num_pts=5000, discrete=False): + if 'bevmap' not in data or data['bevmap'] is None: + return None + bevmap = data['bevmap'] + bevmap_coor = data['bevmap_coor'] + sx, sy = bevmap.shape[:2] + points2d = points[:, :2] + points2d = points2d[(points2d.abs() <= range).all(1)] + device = points2d.device + + # sample random points + offsets = torch.randn((len(points2d), 10, 2), device=device) * 3 + points2d = (points2d.reshape(-1, 1, 2) + offsets).reshape(-1, 2) + points2d = torch.unique(torch.floor(points2d / sam_res).int(), dim=0) * sam_res + if not discrete: + points2d = points2d + torch.randn_like(points2d) + + # transform points to global coordinates + if transform is not None: + points = torch.cat([points2d, + torch.zeros_like(points2d[:, :1]), + torch.ones_like(points2d[:, :1])], + dim=-1) + points = transform @ points.T + else: + points = points2d.T + + xs = torch.floor((points[0] - bevmap_coor[0]) / map_res).int() + ys = torch.floor((points[1] - bevmap_coor[1]) / map_res).int() + xs = torch.clamp(xs, 0, sx - 1).long() + ys = torch.clamp(ys, 0, sy - 1).long() + road_mask = bevmap[xs, ys] + + bev_pts = torch.cat([points2d, road_mask.unsqueeze(1)], dim=1) + return bev_pts[torch.randperm(len(bev_pts))[:max_num_pts]]
+ + +
[docs]class DataOnlineProcessor: +
[docs] @staticmethod + def update_transform_with_aug(transform, aug_params): + if 'rot' in aug_params: + transform = add_rotate(transform, aug_params['rot']) + if 'flip' in aug_params: + transform = add_flip(transform, **aug_params['flip']) + if 'scale' in aug_params: + transform = add_scale(transform, aug_params['scale']) + return transform
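# Editor's illustrative sketch (not part of the original source): composing an
# augmentation transform from rotation, flip and scale as 4x4 homogeneous
# matrices, in the same order as update_transform_with_aug above (rotate, then
# flip, then scale). The yaw angle and scale factor are made-up values.
import math
import torch

tf = torch.eye(4)

yaw = math.radians(10.0)
rot = torch.eye(4)
rot[0, 0], rot[0, 1] = math.cos(yaw), -math.sin(yaw)
rot[1, 0], rot[1, 1] = math.sin(yaw), math.cos(yaw)
tf = rot @ tf                       # rotation around the z axis

flip = torch.eye(4)
flip[1, 1] = -1                     # mirror the y axis
tf = flip @ tf

scale = torch.eye(4)
scale[[0, 1, 2], [0, 1, 2]] = 1.05  # uniform scaling
tf = scale @ tf

point = torch.tensor([10.0, 0.0, 0.0, 1.0])
print(tf @ point)                   # augmented homogeneous point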
+ +
[docs] @staticmethod + def apply_transform(data, transform, apply_to=['points']): + for k in apply_to: + apply_transform(data, transform, k)
+ +
[docs] @staticmethod + def cav_aug_transform(data, transform, aug_params, + apply_to=['points', 'imgs', 'annos_global']): + # augmentation + if aug_params is not None: + transform = DataOnlineProcessor.update_transform_with_aug(transform, aug_params) + + DataOnlineProcessor.apply_transform(data, transform, apply_to)
+ +
[docs] @staticmethod + def filter_range(data, lidar_range, apply_to: list): + for k in apply_to: + filter_range(data, lidar_range, k)
+ +
[docs] @staticmethod + @torch.no_grad() + def free_space_augmentation(data, d: float=10.0, h: float=-1.5, step: float=1.5, res=0.25): + lidar = data['points'] + # get point lower than z_min=1.5m + m = lidar[:, 2] < h + points = lidar[m][:, :3] + + # generate free space points based on points + dists = torch.norm(points[:, :2], dim=1).reshape(-1, 1) + delta_d = torch.arange(1, d, step, + device=lidar.device).reshape(1, -1) + steps = delta_d.shape[1] + tmp = (dists - delta_d) / dists # Nxsteps + xyz_new = points[:, None, :] * tmp[:, :, None] # Nxstepsx3 + + # 1.remove free space points with negative distances to lidar center + # 2.remove free space points higher than z_min + # 3.remove duplicated points with resolution 1m + xyz_new = xyz_new[tmp > 0] + xyz_new = xyz_new[(xyz_new[..., 2] < h)] + xyz_new = xyz_new[torch.randperm(len(xyz_new))] + selected = torch.unique(torch.floor(xyz_new * res).long(), return_inverse=True, dim=0)[1] + xyz_new = scatter_mean(src=xyz_new, index=selected, dim=0) + + # pad free space point intensity as -1 + xyz_new = torch.cat([xyz_new, - torch.ones_like(xyz_new[:, :1])], dim=-1) + data['points'] = torch.cat([lidar, xyz_new], dim=0)
+ +
[docs] @staticmethod + @torch.no_grad() + def adaptive_free_space_augmentation(data: dict, min_h: float=-1.5, steps: int=20, + alpha: float=0.05, res: float=0.5, time_idx: int=None): + r""" + Add free space points according to the distance of points to the origin. + + .. raw:: html + + <pre> + lidar origin -> * + * * + * * h + * ele * + ************ + d + + </pre> + + Assume the :math:`\theta = \frac{\\pi}{2} - \text{ele}` (elevation angle), + :math:`\alpha` = average angle between two lidar rings, + :math:`d_k` is the ground distance of the :math:`n_{th}` lidar ring to lidar origin, :math:`k=1,...,n`, + :math:`\delta_d` is the distance between two neighboring lidar rings, + then + + .. math:: + d &= h \tan(\theta) \\ + \delta_d &= d_n - d_{n-1} = d_n - h\tan(\arctan(\frac{h}{d_n}) - \alpha) + + we sample free space points in the ground distance of :math:`\delta_d` relative to each ring + with the given 'step' distance. + + :param data: input data dict containing 'points'. + :param min_h: minimum sample height relative to lidar origin. Default is -1.5. + :param steps: number of points to be sampled for each lidar ray. Default is 20. + :param alpha: average angle offset between two neighboring lidar casting rays. Default is 0.05. + :param res: resolution for down-sampling the free space points. Default is 0.5. + :param time_idx: if provided, time will be copied from the original points to free space points. + :return: + updated data. + """ + + lidar = data['points'] + # get point lower than z_min=1.5m + m = lidar[:, 2] < min_h + points = lidar[m] + + # generate free space points based on points + dn = torch.norm(points[:, :2], dim=1).view(-1, 1) + dn1 = - points[:, 2:3] * torch.tan(torch.atan2(dn, -points[:, 2:3]) - alpha) + delta_d = dn - dn1 + steps_arr = torch.linspace(0, 1, steps + 1)[:-1].view(1, steps).to(delta_d.device) + tmp = (dn - steps_arr * delta_d) / dn # Nxsteps + xyz_new = points[:, None, :3] * tmp[:, :, None] # Nxstepsx3 + if time_idx is not None: + times = points[:, time_idx].view(-1, 1, 1).repeat(1, steps, 1) + xyz_new = torch.cat([xyz_new, times], dim=-1) + + # 1.remove free space points with negative distances to lidar center + # 2.remove free space points higher than z_min + # 3.remove duplicated points with resolution 1m + xyz_new = xyz_new[tmp > 0] + # xyz_new = xyz_new[(xyz_new[..., 2] < min_h)] + xyz_new = xyz_new[torch.randperm(len(xyz_new))] + uniq, selected = torch.unique(torch.floor(xyz_new[..., :3] * res).long(), return_inverse=True, dim=0) + # xyz = torch.zeros_like(xyz_new[:len(uniq)]) + tmin = xyz_new[:, -1].min() + xyz_new[:, -1] -= tmin + xyz_new = scatter_mean(src=xyz_new, index=selected, dim=0) + xyz_new[:, -1] += tmin + + # pad free space point intensity as -1 + xyz_new = torch.cat([xyz_new[:, :3], - torch.ones_like(xyz_new[:, :1]), xyz_new[:, 3:]], dim=-1) + pad_dim = lidar.shape[-1] - xyz_new.shape[-1] + if pad_dim > 0: + xyz_new = torch.cat([xyz_new, torch.zeros_like(xyz_new[:, :1]).repeat(1, pad_dim)], dim=-1) + data['points'] = torch.cat([lidar, xyz_new], dim=0)
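# Editor's illustrative sketch (not part of the original source): the ring
# spacing used by adaptive_free_space_augmentation above. For a ground return at
# distance d_n and lidar height h, the previous ring falls at
# d_{n-1} = h * tan(atan2(d_n, h) - alpha), so delta_d = d_n - d_{n-1}.
# The height and angle values below are made-up examples.
import math

h = 1.9       # lidar height above ground (m)
alpha = 0.05  # average angular gap between neighboring rings (rad)

for dn in (5.0, 15.0, 40.0):
    dn1 = h * math.tan(math.atan2(dn, h) - alpha)
    print(f"d_n = {dn:5.1f} m  ->  delta_d = {dn - dn1:5.2f} m")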
+ +
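The ring-spacing relation used in the code above can be checked with concrete numbers: for a point a height h below the sensor at ground distance d_n, the code computes the neighbouring (closer) ring as d_{n-1} = h * tan(atan2(d_n, h) - alpha) and fills the gap delta_d = d_n - d_{n-1} with `steps` evenly spaced samples along the same ray. A small numeric sketch (values are illustrative only):

import torch

h = torch.tensor(2.0)       # height of the sensor above the sampled ground point
d_n = torch.tensor(20.0)    # ground distance of the point (its lidar ring)
alpha = 0.05                # average angular spacing between beams (the method's default)

theta = torch.atan2(d_n, h)            # ray angle measured from the vertical axis
d_prev = h * torch.tan(theta - alpha)  # ground distance of the neighbouring, closer ring
delta_d = d_n - d_prev                 # gap to be filled with free-space samples

# The closer ring lands roughly 13.3 m from the sensor, leaving a gap of roughly 6.7 m.
print(d_prev.item(), delta_d.item())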
[docs] @staticmethod + @torch.no_grad() + def generate_sparse_target_bev_points(data: dict, + transform=None, + sam_res=0.4, + map_res=0.2, + range=50, + max_num_pts=3000, + discrete=False): + data['bev_tgt_pts'] = generate_bev_tgt_pts( + data['points'], data, + transform, sam_res, map_res, range, max_num_pts, discrete + )
+ + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # lidar = data['points'].cpu().numpy() + # pts = data['bev_tgt_pts'].cpu().numpy() + # pos = pts[:, 2] == 1 + # neg = pts[:, 2] == 0 + # + # ax = draw_points_boxes_plt( + # pc_range=50, + # points=pts[pos, :], + # points_c='r', + # return_ax=True + # ) + # ax.plot(pts[neg, 0], pts[neg, 1], '.', c='b', markersize=1) + # ax.plot(lidar[:, 0], lidar[:, 1], '.', c='gray', markersize=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + +
[docs] @staticmethod + @torch.no_grad() + def generate_sparse_target_roadline_points(data: dict, + transform=None, + map_res=0.2, + range=50, + kernel=3, + max_num_pts=3000): + if 'bevmap' not in data or data['bevmap'] is None: + return + bevmap = data['bevmap'].clone().float() + bevmap[bevmap==0] = -1 + bevmap_coor = data['bevmap_coor'] + sx, sy = bevmap.shape[:2] + filters = torch.ones(1, 1, kernel, kernel, device=bevmap.device) / (kernel ** 2 * 2) + road = torch.conv2d(bevmap[None, None], filters).squeeze() + mask = (road < 0.5) & (road > -0.5) + inds = torch.where(mask) + scores = 1 - road[mask].abs() + coords = torch.stack(inds).T * map_res + 2.5 * map_res - range + + data['roadline_tgts'] = torch.cat([coords, scores.unsqueeze(1)], dim=1)
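The roadline targets above are the boundary cells of the BEV road mask: after remapping 0 to -1, an averaging kernel scaled by 1 / (2 * k^2) maps pure road windows to +0.5 and pure background to -0.5, so any response strictly in between straddles a road edge, and 1 - |response| serves as its score. A toy sketch of that edge test (map content and sizes are illustrative; the real method additionally converts cell indices to metric coordinates using map_res and range):

import torch

bevmap = torch.zeros(8, 8)
bevmap[2:6, 2:6] = 1.0          # 1 = road, 0 = background
bevmap[bevmap == 0] = -1        # remap background to -1, as in the method

kernel = 3
filters = torch.ones(1, 1, kernel, kernel) / (kernel ** 2 * 2)
response = torch.conv2d(bevmap[None, None], filters).squeeze()

boundary = (response < 0.5) & (response > -0.5)   # mixed road/background windows
scores = 1 - response[boundary].abs()             # closer to the edge -> higher score
print(boundary.sum().item(), scores.max().item())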
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/viewer/gl_viewer.html b/docs/_build/html/_modules/cosense3d/agents/viewer/gl_viewer.html new file mode 100644 index 00000000..f97769e2 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/viewer/gl_viewer.html @@ -0,0 +1,642 @@ + + + + + + cosense3d.agents.viewer.gl_viewer — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.viewer.gl_viewer

+
+
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+Color4f = Tuple[float, float, float, float]  # type alias for type hinting
+
+import logging
+import queue
+
+import numpy as np
+from PyQt5.QtCore import Qt, QEvent, QPointF, QRectF
+from PyQt5 import QtWidgets, QtGui, QtCore
+import pyqtgraph as pg
+from PyQt5.QtGui import QPen, QBrush, QColor
+import pyqtgraph.opengl as gl
+from matplotlib import colormaps
+from OpenGL.GL import *
+from OpenGL import GLU
+from cosense3d.agents.viewer.utils import depth_min
+from cosense3d.agents.viewer.items.graph_items import LineBoxItem
+
+SIZE_OF_FLOAT = ctypes.sizeof(ctypes.c_float)
+TRANSLATION_FACTOR = 0.03
+jet = colormaps['jet']
+cav_colors = np.array([
+    [0.745, 0.039, 1.000, 1.000],
+    [0.039, 0.937, 1.000, 1.000],
+    [0.078, 0.490, 0.961, 1.000],
+    [0.039, 1.000, 0.600, 1.000],
+    [1.000, 0.529, 0.000, 1.000],
+    [0.345, 0.039, 1.000, 1.000],
+    [0.631, 1.000, 0.039, 1.000],
+    [1.000, 0.827, 0.000, 1.000],
+])
+
+
+# Main widget for presenting the point cloud and bounding boxes
+
[docs]class GLViewer(gl.GLViewWidget): + + def __init__(self, name: str, parent=None) -> None: + super(GLViewer, self).__init__(parent) + self.setObjectName(name) + self.controller = None + + self.setCameraPosition(distance=300, elevation=30, azimuth=-90) + self.pan(0, 0, 0) + self.draw_axes() + + self.tasks = queue.Queue() + + # point cloud data + self.pcd = None + self.boxes = [] + self.local_boxes = {} + self.pcd_items = {} + self.visibility = {} + + # drag window control + self.dragging = False + self.start_pos = None + self.end_pos = None + + # box control + self.rectangle = None # (pos1, pos2) + self.center = None # evt pose + self.highlight_mode = False + self.highlighted_item = None + self.activate_item = None + +
[docs] def initializeGL(self): + glEnable(GL_DEPTH_TEST) # for visualization of depth + glDepthFunc(GL_LESS) # drawn if depth is less than the existing depth + glEnable(GL_BLEND) # enable transparency + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) + super().initializeGL() + + depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + print('viewer init:', depth_enabled)
+ +
[docs] def paintGL(self, region=None, viewport=None, useItemNames=False): + super().paintGL(region, viewport, useItemNames) + # self.draw_depth_buffer() + self.addBox() + self.paintRect()
+ # depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + # print("paintGL", depth_enabled) + +
[docs] def draw_axes(self): + axis = gl.GLAxisItem(size=QtGui.QVector3D(5, 5, 5)) + self.addItem(axis)
+ +
[docs] def updatePCDs(self, pcds, color_mode='united', **kwargs): + self.pcds = pcds + if color_mode == 'height': + points_all = np.concatenate([pcd for pcd in pcds.values()], axis=0) + global_min = points_all[:, 2].min() + global_max = points_all[:, 2].max() + elif color_mode == 'time': + points_all = np.concatenate([pcd for pcd in pcds.values()], axis=0) + global_min = points_all[:, -1].min() + global_max = points_all[:, -1].max() + else: + global_min = None + global_max = None + + for i, (lidar_id, pcd)in enumerate(pcds.items()): + if color_mode == 'united': + colors = [1.0, 1.0, 1.0, 1.0] + elif color_mode == 'height': + height_norm = (pcd[:, 2] - global_min) / (global_max - global_min) + colors = jet(height_norm) + elif color_mode == 'cav': + colors = cav_colors[i] + colors[-1] = 0.5 + colors = colors.reshape(1, 4).repeat(len(pcd), 0) + elif color_mode == 'time': + time_norm = (pcd[:, -1] - global_min) / (global_max - global_min) + colors = jet(time_norm) + else: + raise NotImplementedError + item = gl.GLScatterPlotItem( + pos=pcd[:, :3], size=2, glOptions='opaque', color=colors + ) + if lidar_id in self.visibility: + item.setVisible(self.visibility[lidar_id]) + else: + self.visibility[lidar_id] = True + self.pcd_items[lidar_id] = item + self.addItem(item)
+ +
[docs] def updateLabel(self, local_labels, global_labels, local_det, global_det, + successor=None, successor_gt=None, predecessor=None): + self.boxes = [] + if local_labels is not None: + for agent_id, labels in local_labels.items(): + self.local_boxes[agent_id] = [] + for id, label in labels.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='local_gt', line_width=2) + item.setVisible(self.visibility.get(f'{agent_id}.0', True)) + self.local_boxes[agent_id].append(item) + self.addItem(item) + if global_labels is not None: + for id, label in global_labels.items(): + prev_label = None if predecessor is None else predecessor[id] + item = LineBoxItem(box=[id, ] + label, last_pose=prev_label, + status='global_gt', line_width=2) + self.boxes.append(item) + self.addItem(item) + if local_det is not None: + for agent_id, labels in local_det.items(): + self.local_boxes[agent_id] = [] + for id, label in labels.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='det', line_width=2) + item.setVisible(self.visibility.get(f'{agent_id}.0', True)) + self.local_boxes[agent_id].append(item) + self.addItem(item) + if global_det is not None: + for id, label in global_det.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='det', line_width=2) + self.boxes.append(item) + self.addItem(item) + if successor is not None: + for id, label in successor.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='successor', line_width=2) + self.boxes.append(item) + self.addItem(item) + if successor_gt is not None: + for id, label in successor_gt.items(): + item = LineBoxItem(box=[id, ] + label, last_pose=None, + status='successor_gt', line_width=2) + self.boxes.append(item) + self.addItem(item)
+ +
[docs] def updateFrameData(self, pcds, + local_label=None, + global_label=None, + local_det=None, + global_det=None, + predecessor=None, + successor=None, + successor_gt=None, + pcd_color='united'): + self.clear() + self.draw_axes() + self.updatePCDs(pcds, color_mode=pcd_color) + self.updateLabel(local_label, + global_label, + local_det, + global_det, + successor, + successor_gt, + predecessor,) + self.update()
+ +
[docs] def refresh(self, data_dict, visible_keys=['globalGT'], color_mode='united', **kwargs): + pcds = data_dict.get('points', {}) + ego_id = list(data_dict['scenario'].keys())[0] + local_labels, global_labels, local_det, global_det = None, None, None, None + global_pred_gt, global_pred = None, None + if 'globalGT' in visible_keys: + global_labels = data_dict.get('global_labels', {}) + global_labels = global_labels[ego_id] + if 'localGT' in visible_keys: + local_labels = data_dict.get('local_labels', {}) + if pcds is None and global_labels is {} and local_labels is None: + return + + if 'localDet' in visible_keys: + if 'detection_local' in data_dict: + local_det = {k: v.get('labels', {}) for k, v in data_dict['detection_local'].items()} + if 'globalDet' in visible_keys: + if 'detection' in data_dict: + global_det = data_dict.get('detection', {}) + else: + global_det = data_dict.get('detection_global', {}) + global_det = global_det.get(ego_id, {'labels': {}})['labels'] + if 'globalPredGT' in visible_keys: + global_pred_gt = data_dict.get('global_pred_gt', {}) + global_pred_gt = global_pred_gt.get(ego_id, {}) + if 'globalPred' in visible_keys: + global_pred = data_dict.get('global_pred', {}) + global_pred = global_pred.get(ego_id, {'labels': {}})['labels'] + + self.updateFrameData(pcds, + local_label=local_labels, + global_label=global_labels, + local_det=local_det, + global_det=global_det, + successor=global_pred, + successor_gt=global_pred_gt, + pcd_color=color_mode)
+ +
[docs] def addBox(self): + if self.rectangle is not None: + world_pos = self.evt_pos_to_world(*self.rectangle) + self.rectangle = None + if world_pos is not None: + box = LineBoxItem([self.controller.curr_box_type] + [0, 0, 0] + [4, 2, 1.7] + [0, 0, 0]) + azi = self.opts['azimuth'] + box.rotate(azi, 0, 0, 1) + box.translate(*world_pos, False) + self.boxes.append(box) + self.addItem(box) + self.controller.save_frame_labels(self.boxes) + logging.info("Add box: %s", box.id) + if self.center is not None: + world_pos = self.evt_pos_to_world(self.center) + self.center = None + if world_pos is not None: + self.controller.track_singleton(world_pos)
+ +
[docs] def highlightBox(self, pos): + w = 30 + h = 30 + x = pos.x() - w / 2 + y = pos.y() - h / 2 + self.removeHeilight() + items = self.itemsAt((x, y, w, h)) + for item in items: + if isinstance(item, LineBoxItem): + item.highlight() + self.highlighted_item = item + self.update() + return
+ +
[docs] def removeHeilight(self): + if self.highlighted_item is not None: + self.highlighted_item.deactivate() + self.highlighted_item = None + self.update()
+ +
[docs] def selectHeilight(self): + # remove previous activate item if exists + self.removeActivate() + self.highlighted_item.activate() + self.activate_item = self.highlighted_item + self.highlighted_item = None + self.controller.show_obj_info(self.activate_item) + self.update()
+ +
[docs] def removeActivate(self): + if self.activate_item is not None: + self.activate_item.deactivate() + self.controller.hide_obj_info() + self.update()
+ +
[docs] def mousePressEvent(self, evt: QtGui.QMouseEvent) -> None: + depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + print('mousePressEvent:', depth_enabled) + self.mousePos = evt.pos() + if evt.button() == Qt.LeftButton and \ + evt.modifiers() == Qt.ShiftModifier: + logging.debug("mousePress+Shift: drag box") + self.start_pos = evt.pos() + self.end_pos = evt.pos() + self.dragging = True + elif evt.button() == Qt.LeftButton and \ + self.highlighted_item is not None: + logging.debug("Select Highlighted box") + self.selectHeilight() + elif evt.button() == Qt.LeftButton and not self.highlight_mode: + self.removeActivate() + else: + super().mousePressEvent(evt)
+ +
[docs] def mouseDoubleClickEvent(self, evt: QtGui.QMouseEvent) -> None: + if evt.button() == Qt.LeftButton: + self.center = evt.pos() + logging.debug('Double click left mouse button.') + self.update()
+ +
[docs] def mouseMoveEvent(self, evt: QtGui.QMouseEvent) -> None: + if evt.buttons() == Qt.LeftButton and \ + evt.modifiers() == Qt.ShiftModifier: + logging.debug("mousePress+Shift+mouseMove") + if self.dragging: + self.end_pos = evt.pos() + self.update() + elif self.highlight_mode: + logging.debug("Highlight box") + self.highlightBox(evt.pos()) + else: + super().mouseMoveEvent(evt) + logging.debug("mouseMove-super")
+ +
[docs] def mouseReleaseEvent(self, evt: QtGui.QMouseEvent): + if evt.button() == Qt.LeftButton and self.dragging: + self.dragging = False + self.rectangle = (self.start_pos, self.end_pos) + self.start_pos = None + self.end_pos = None + self.update() + else: + super().mouseReleaseEvent(evt)
+ +
[docs] def keyPressEvent(self, evt: QEvent) -> None: + if evt.isAutoRepeat(): + return + if evt.key() == Qt.Key_Shift: + logging.debug("keyShiftPressed") + self.key_shift = True + elif evt.key() == Qt.Key_C: + logging.debug("keyCressed: highlight mode") + self.highlight_mode = True + self.setMouseTracking(True) + elif evt.key() == Qt.Key_3: + evt.accept() + self.controller.last_frame() + elif evt.key() == Qt.Key_4: + evt.accept() + self.controller.next_frame() + elif evt.key() == Qt.Key_T: + evt.accept() + self.controller.track() + elif evt.key() == Qt.Key_2: + evt.accept() + self.controller.next_frame() + self.controller.track() + else: + super().keyPressEvent(evt)
+ +
[docs] def keyReleaseEvent(self, event: QEvent) -> None: + if event.isAutoRepeat(): + return + if event.key() == Qt.Key_C: + logging.debug("key C released: deactivate highlighted box") + self.highlight_mode = False + self.setMouseTracking(False) + self.removeHeilight()
+ +
[docs] def model_pose_to_world(self, x, y, z): + modelview = glGetDoublev(GL_MODELVIEW_MATRIX) + projection = glGetDoublev(GL_PROJECTION_MATRIX) + viewport = self.getViewport() + world_pos = GLU.gluUnProject( + x, y, z, modelview, projection, viewport + ) + return world_pos
+ +
[docs] def evt_pos_to_world(self, pos1, pos2=None): + """ + Args: + pos1: center pos if pos2 is None else start post of a region + pos2: end pos of a region + """ + if pos2 is None: + pos1 = QtCore.QPoint(pos1.x() - 20, pos1.y() - 20) + pos2 = QtCore.QPoint(pos1.x() + 20, pos1.y() + 20) + depths = self.get_region_depth(pos1, pos2) + valid = depths < 1 + if valid.sum() == 0: + logging.info("No point found, skip drawing box") + return None + else: + z = depths[valid].mean() + y = (pos1.y() + pos2.y()) / 2 + x = (pos1.x() + pos2.x()) / 2 + real_y = self.height() - y + world_pos = self.model_pose_to_world(x, real_y, z) + return world_pos
+ +
[docs] def get_point_depth(self, x, y): + buffer_size = 201 + center = buffer_size // 2 + 1 + depths = glReadPixels( + x - center + 1, + y - center + 1, + buffer_size, + buffer_size, + GL_DEPTH_COMPONENT, + GL_FLOAT, + ) + z = depths[center][center] # Read selected pixel from depth buffer + + if z == 1: + z = depth_min(depths, center) + return z
+ +
[docs] def get_region_depth(self, p1: QtCore.QPoint, p2: QtCore.QPoint) -> np.ndarray: + """ + Args: + p1: start point of region. + p2: end point of region + """ + buffer_size_x = abs(p2.x() - p1.x()) + buffer_size_y = abs(p2.y() - p1.y()) + x = min(p1.x(), p2.x()) + y = self.height() - max(p1.y(), p2.y()) + + # Create a buffer to hold the depth values + depth_buffer = np.zeros((buffer_size_y, buffer_size_x), dtype=np.float32) + + glReadPixels( + x, y, + buffer_size_x, + buffer_size_y, + GL_DEPTH_COMPONENT, + GL_FLOAT, + depth_buffer + ) + depth_buffer = depth_buffer[::-1, :] + + return depth_buffer
+ +
[docs] def draw_depth_buffer(self): + """!!!! + Remember the depth buffer is only available under paintGL loop. + Only in this loop the gl context is active. + """ + # Get the OpenGL extensions string + depth_enabled = glGetBooleanv(GL_DEPTH_TEST) + print(depth_enabled) + # Retrieve the dimensions of the framebuffer + viewport = glGetIntegerv(GL_VIEWPORT) + width, height = viewport[2], viewport[3] + + # Create a buffer to hold the depth values + depth_buffer = np.zeros((height, width), dtype=np.float32) + + # Read the depth buffer into the buffer + glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT, depth_buffer) + depth_buffer = depth_buffer[::-1, :] + + # Convert the depth buffer to an image + print("min depth value:", depth_buffer.min()) + depth_image = ((1 - depth_buffer) * 500) * 255 + depth_image = np.repeat(depth_image[:, :, np.newaxis], 3, axis=2).astype(np.uint8) + + # Save the image to a file + import imageio + imageio.imwrite('/media/hdd/tmp/depth_image.png', depth_image)
+ +
[docs] def box(self): + p1 = self.box_start_pos + p2 = self.box_end_pos + new_lines = np.array([ + [p1.x(), p1.y(), p1.z()], + [p2.x(), p2.y(), p2.z()], + ]) + + # create a GLLinePlotItem for the axes + line_item = gl.GLLinePlotItem(pos=new_lines, color=QtGui.QColor(255, 0, 0, 255), + width=3) + + # add the axes to the view + self.addItem(line_item)
+ +
[docs] def drawRectangle(self): + if self.rectItem is None: + self.rectItem = pg.QtGui.QGraphicsRectItem() + self.scene.addItem(self.rectItem) + x1, y1 = self.startPoint.x(), self.startPoint.y() + x2, y2 = self.endPoint.x(), self.endPoint.y() + rect = QRectF(QPointF(x1, y1), QPointF(x2, y2)) + pen = QPen(QColor(255, 0, 0)) + brush = QBrush(QColor(0, 0, 0, 0)) + self.rectItem.setPen(pen) + self.rectItem.setBrush(brush) + self.rectItem.setRect(rect)
+ +
[docs] def removeRectangle(self): + if self.rectItem is not None: + self.scene.removeItem(self.rectItem) + self.rectItem = None + self.update()
+ +
[docs] def paintRect(self): + if self.dragging: + painter = QtGui.QPainter(self) + painter.setRenderHint(QtGui.QPainter.Antialiasing) + glDisable(GL_DEPTH_TEST) + glDisable(GL_BLEND) + # draw the rectangle + painter.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0))) + painter.setBrush(QtGui.QBrush(QtGui.QColor(255, 255, 0, 80))) + painter.drawRect(self.start_pos.x(), + self.start_pos.y(), + self.end_pos.x() - self.start_pos.x(), + self.end_pos.y() - self.start_pos.y()) + + glEnable(GL_DEPTH_TEST)
+ +
[docs] def change_visibility(self, key, visible): + ai, li = key.split('.') + self.visibility[key] = visible + self.pcd_items[key].setVisible(visible) + for item in self.local_boxes[ai]: + item.setVisible(visible)
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/viewer/img_anno3d_viewer.html b/docs/_build/html/_modules/cosense3d/agents/viewer/img_anno3d_viewer.html new file mode 100644 index 00000000..e25cf3b5 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/viewer/img_anno3d_viewer.html @@ -0,0 +1,145 @@ + + + + + + cosense3d.agents.viewer.img_anno3d_viewer — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.viewer.img_anno3d_viewer

+
+
+import matplotlib
+import numpy as np
+from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
+from matplotlib.figure import Figure
+
+from cosense3d.utils import vislib
+
+matplotlib.use('Qt5Agg')
+
+
[docs]class ImgAnno3DViewer(FigureCanvasQTAgg): + + def __init__(self, dpi=50): + self.fig = Figure(dpi=dpi) + super(ImgAnno3DViewer, self).__init__(self.fig) + +
[docs] def refresh(self, data, **kwargs): + if len(data['img']) == 0: + return + self.fig.clear() + n_cavs = len(data['img']) + n_imgs = len(list(data['img'].values())[0]) + cav_ids = sorted(list(data['img'].keys())) + for i, cav_id in enumerate(cav_ids): + if cav_id in data['local_labels']: + bboxes3d = np.array(list(data['local_labels'][cav_id].values()) + )[:, [1, 2, 3, 4, 5, 6, 9]] + elif cav_id in data['global_labels']: + bboxes3d = np.array(list(data['global_labels'][cav_id].values()) + )[:, [1, 2, 3, 4, 5, 6, 9]] + else: + return + for j in range(n_imgs): + ax = self.fig.add_subplot(n_cavs, n_imgs, i * n_imgs + j + 1) + img = data['img'][cav_id][j].astype(np.uint8) + lidar2img = data['lidar2img'][cav_id][j] + vislib.draw_3d_points_boxes_on_img(ax, img, lidar2img, boxes=bboxes3d) + self.draw()
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/viewer/img_viewer.html b/docs/_build/html/_modules/cosense3d/agents/viewer/img_viewer.html new file mode 100644 index 00000000..4c5a7f50 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/viewer/img_viewer.html @@ -0,0 +1,146 @@ + + + + + + cosense3d.agents.viewer.img_viewer — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.viewer.img_viewer

+
+
+import numpy as np
+import matplotlib
+from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
+from matplotlib.figure import Figure
+
+from cosense3d.utils import vislib
+
+matplotlib.use('Qt5Agg')
+
+
+
[docs]class ImgViewer(FigureCanvasQTAgg): + + def __init__(self, dpi=100, mean=None, std=None): + self.fig = Figure(dpi=dpi) + super(ImgViewer, self).__init__(self.fig) + self.mean = np.array(mean) if mean is not None else None + self.std = np.array(std) if std is not None else None + +
[docs] def refresh(self, data, **kwargs): + if len(data['img']) == 0: + return + self.fig.clear() + n_cavs = len(data['img']) + n_imgs = len(list(data['img'].values())[0]) + cav_ids = sorted(list(data['img'].keys())) + for i, cav_id in enumerate(cav_ids): + for j in range(n_imgs): + ax = self.fig.add_subplot(n_cavs, n_imgs, i * n_imgs + j + 1) + img = data['img'][cav_id][j] + if self.std is not None and self.mean is not None: + img = img * self.std + self.mean + img = img.astype(np.uint8) + if len(data['bboxes2d']) == 0: + bboxes2d = None + else: + bboxes2d = data['bboxes2d'][cav_id][j].reshape(-1, 2, 2) + vislib.draw_2d_bboxes_on_img(img, bboxes2d, ax) + self.draw()
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/viewer/items/graph_items.html b/docs/_build/html/_modules/cosense3d/agents/viewer/items/graph_items.html new file mode 100644 index 00000000..8bb9bbfe --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/viewer/items/graph_items.html @@ -0,0 +1,366 @@ + + + + + + cosense3d.agents.viewer.items.graph_items — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.viewer.items.graph_items

+
+
+import pyqtgraph.opengl as gl
+import pyqtgraph as pg
+from PyQt5.QtWidgets import QGraphicsRectItem, QGraphicsLineItem
+from PyQt5 import QtCore
+
+from cosense3d.utils.box_utils import *
+from cosense3d.dataset.toolkit.cosense import csColors
+from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs
+
+CSCOLORS = (np.array([csColors[k] for k in cs.OBJ_LIST]) / 255.).tolist()
+
+BOX_COLORs = {
+    'inactive': CSCOLORS,
+    'highlight': (0., 1, 1, 1),
+    'active': (0.9, 0, 1, 1),
+    'local_gt': (1, 1, 0, 1),
+    'global_gt': (0, 1, 0, 1),
+    'gt': (0, 1, 0, 1),
+    'det': (1, 0, 0, 1),
+    'pred': (1, 0, 1, 1),
+    'successor': (0, 0.5, 1, 1),
+    'successor_gt': (0, 1, 1, 1)
+}
+
+pens = {
+    'yellow_dashed': pg.mkPen('y', width=1, style=QtCore.Qt.DashLine),
+    'yellow_solid': pg.mkPen('y', width=1, style=QtCore.Qt.SolidLine),
+    'virtual': pg.mkPen(color=(0, 0, 0, 0), width=1),
+}
+
+
+
[docs]class MeshBoxItem(gl.GLMeshItem): + def __init__(self, size=(1, 1, 1), color=(0.0, 1.0, 0.0, 0.25)): + l, w, h = size + verts = [ + [0, 0, 0], + [l, 0, 0], + [l, 0, h], + [0, 0, h], + [0, w, 0], + [l, w, 0], + [l, w, h], + [0, w, h] + ] + verts = np.array(verts) + + faces = [ + [0, 1, 2], + [0, 2, 3], + [1, 5, 6], + [1, 6, 2], + [5, 4, 7], + [5, 7, 6], + [4, 0, 3], + [4, 3, 7], + [3, 2, 6], + [3, 6, 7], + [0, 4, 5], + [0, 5, 1] + ] + faces = np.array(faces) + + normals = np.array([ + [0, -1, 0], + [0, 1, 0], + [1, 0, 0], + [-1, 0, 0], + [0, 0, -1], + [0, 0, 1] + ]) + + colors = [color] * len(faces) + + meshdata = gl.MeshData(vertexes=verts, faces=faces, faceColors=colors) + super().__init__(meshdata=meshdata, shader='balloon', glOptions='translucent')
+ + +
[docs]class LineBoxItem(gl.GLLinePlotItem): + ids = set() # TODO: need to be initialized by labeled data in the current scenario + id_ptr = 0 + def __init__(self, + box, + status='inactive', + show_direction=False, + last_pose=None, + line_width=1.): + """ + :param box: ([id], type_id, x, y, z, l, w, h, roll, pitch, yaw) + :param color: + + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + """ + id = None + box_score = None + if len(box) == 11: + id = int(box[0]) + type_id = int(box[1]) + box = box[2:] + elif len(box) == 10: + type_id = int(box[0]) + box = box[1:] + elif len(box) == 12: + id = int(box[0]) + type_id = int(box[1]) + box_score = box[-1] + box = box[2:-1] + else: + raise NotImplementedError + vertices = np.zeros((12, 3)) + vertices[:8] = boxes_to_corners_3d(np.array([box]))[0] + if show_direction: + # ----- + # | |---- direction on top + # ----- + top_center = np.mean(vertices[4:], axis=0) + top_front = np.mean(vertices[[4, 5]], axis=0) + top_ff = top_front * 2 - top_center + vertices[8] = top_front + vertices[9] = top_ff + if last_pose is not None: + # ----- + # last pose on bottom of the boxe o----| | + # ----- + assert len(last_pose) == 3 + bottom_center = np.mean(vertices[:4], axis=0) + last_pose[2] = bottom_center[2] # set last pose z to ground + vertices[10] = np.array(last_pose) + vertices[11] = np.array(bottom_center) + + self.vertices = vertices + + # Define the edges of the box + edges = [ + [0, 1], # front-bottom + [1, 5], # front-right + [5, 4], # front-top + [4, 0], # front-left + [0, 3], # left-bottom + [1, 2], # right-bottom + [5, 6], # right-top + [4, 7], # left-top + [3, 2], # back-bottom + [2, 6], # back-right + [6, 7], # back-top + [7, 3], # back-left + ] + if show_direction: + edges.append([8, 9]) + if last_pose is not None: + edges.append([10, 11]) + self.edges = np.array(edges) + + vertices_pairs = self.vertices[self.edges.flatten()] + + while id is None: + if LineBoxItem.id_ptr not in LineBoxItem.ids: + id = LineBoxItem.id_ptr + else: + LineBoxItem.id_ptr += 1 + self.id = id + self.typeid = type_id + LineBoxItem.ids.add(id) + + super().__init__(pos=vertices_pairs, + color=self.color(status), + width=line_width, + mode='lines', + glOptions='opaque') + +
[docs] def to_center(self): + """Convert box to center format""" + transform = self.transform().matrix() + corners = (transform[:3, :3] @ self.vertices[:8].T) + transform[:3, 3:] + box_center = corners_to_boxes_3d(corners.T[None, :]) + return box_center[0]
+ +
[docs] def activate(self): + self.setData(color=BOX_COLORs['active'], width=2.0)
+ +
[docs] def deactivate(self): + self.setData(color=BOX_COLORs['inactive'][self.typeid] + [0.5])
+ +
[docs] def highlight(self): + self.setData(color=BOX_COLORs['highlight'], width=2.0)
+ + @property + def isActive(self): + return self.color == BOX_COLORs['active'] + +
[docs] def color(self, status): + if status in ['inactive']: + return BOX_COLORs[status][self.typeid] + [0.5] + else: + return BOX_COLORs[status]
+ + +
[docs]class LineItem(QGraphicsLineItem): + def __init__(self, line, parent=None): + super().__init__(parent) + self.inactive_pen = pens['yellow_dashed'] + self.active_pen = pens['yellow_solid'] + self.setLine(*line) + self.setPen(self.inactive_pen) + self.setZValue(5) + self.active = False + +
[docs] def hoverEvent(self, event): + if event.isExit(): + self.setPen(self.inactive_pen) + self.active = False + else: + self.setPen(self.active_pen) + self.active = True
+ + +
[docs]class RectangleItem(QGraphicsRectItem): + def __init__(self, rect): + super().__init__(*rect) + self.setPen(pens['virtual']) + self.setZValue(0) + self.active = False + +
[docs] def hoverEvent(self, event): + if event.isExit(): + self.setPen(pens['virtual']) + self.active = False + else: + pos = event.pos() + if abs(pos.x()) < 0.3 and abs(pos.y()) < 0.3: + self.setPen(pens['yellow_solid']) + self.active = True
+ + + +if __name__ == "__main__": + from PyQt5 import QtWidgets + + app = QtWidgets.QApplication([]) + w = gl.GLViewWidget() + w.opts['distance'] = 20 + w.show() + + boxItem = LineBoxItem( + box=[-5, 8, -1, 4, 3, 2, 0, 0, 0], + show_direction=True + ) + w.addItem(boxItem) + + app.exec_() + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/viewer/output_viewer.html b/docs/_build/html/_modules/cosense3d/agents/viewer/output_viewer.html new file mode 100644 index 00000000..4e070b8f --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/viewer/output_viewer.html @@ -0,0 +1,354 @@ + + + + + + cosense3d.agents.viewer.output_viewer — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.viewer.output_viewer

+
+
+import matplotlib
+import numpy as np
+from PyQt5 import QtWidgets
+from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
+from matplotlib.figure import Figure
+
+from cosense3d.utils.vislib import draw_points_boxes_plt
+
+matplotlib.use('Qt5Agg')
+
+
+
[docs]class MplCanvas(FigureCanvasQTAgg): + + def __init__(self, data_keys, width=5, height=4, dpi=100, title='plot', nrows=1, ncols=1): + fig = Figure(figsize=(width, height), dpi=dpi) + fig.suptitle(title, fontsize=16) + self.axes = fig.subplots(nrows, ncols) + self.data_keys = data_keys + super(MplCanvas, self).__init__(fig) + +
[docs] def update_title(self, scenario, frame, cav_id): + self.axes.set_title(f"{scenario[cav_id]}.{frame[cav_id]}")
+ + +
[docs]class BEVSparseCanvas(MplCanvas): + def __init__(self, lidar_range=None, s=4, **kwargs): + super().__init__(**kwargs) + assert len(self.data_keys) >=1, ('1st key should be pred bev map, ' + '2nd key (optional) should be gt bev map.') + self.lidar_range = lidar_range + self.s = s + self.pred_key = self.data_keys[0] + self.gt_key = None + if len(self.data_keys) > 1: + self.gt_key = self.data_keys[1] + +
[docs] def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, data_dict in data[self.pred_key].items(): + if 'ctr' in data_dict: + centers = data_dict['ctr'].cpu().numpy() + elif 'ref_pts' in data_dict: + centers = data_dict['ref_pts'].cpu().numpy() + else: + raise NotImplementedError(f'only ctr or ref_pts are supported.') + conf = data_dict['conf'][:, 1:].detach().max(dim=-1).values.cpu().numpy() + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.scatter = self.axes.scatter(centers[:, 0], centers[:, 1], + cmap='jet', c=conf, s=self.s, vmin=0, vmax=1) + # self.scatter.set_array(conf) + # self.scatter.set_offsets(centers) + if self.gt_key is not None: + gt_boxes = list(data[self.gt_key][cav_id].values()) + gt_boxes = np.array(gt_boxes)[:, [1, 2, 3, 4, 5, 6, 9]] + self.axes = draw_points_boxes_plt( + self.lidar_range, + boxes_gt=gt_boxes, + ax=self.axes, + return_ax=True + ) + self.draw() + break
+ + +
[docs]class DetectionScoreMap(MplCanvas): + def __init__(self, lidar_range=None, s=4, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.s = s + self.pred_key = self.data_keys[0] + # self.gt_key = self.data_keys[1] + +
[docs] def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, det_dict in data[self.pred_key].items(): + assert 'ctr' in det_dict and 'scr' in det_dict + centers = det_dict['ctr'].cpu().numpy() + conf = det_dict['scr'].cpu().numpy() + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.scatter = self.axes.scatter(centers[:, 0], centers[:, 1], + cmap='jet', c=conf, s=self.s, vmin=0, vmax=1) + # self.scatter.set_array(conf) + # self.scatter.set_offsets(centers) + self.draw() + break
+ + +
[docs]class BEVDenseCanvas(MplCanvas): + def __init__(self, lidar_range=None, **kwargs): + super().__init__(**kwargs) + assert len(self.data_keys) == 2, '1st key should be pred bev map, 2nd key should be gt bev map.' + self.lidar_range = lidar_range + self.pred_key = self.data_keys[0] + self.gt_key = self.data_keys[1] + +
[docs] def refresh(self, data, **kwargs): + if self.pred_key not in data and self.gt_key not in data: + return + gt_bev = data.get(self.gt_key, False) + for cav_id, pred_bev in data[self.pred_key].items(): + self.axes[0].clear() + self.axes[1].clear() + self.axes[0].set_title(f"Pred: {data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.axes[1].set_title(f"GT: {data['scenario'][cav_id]}.{data['frame'][cav_id]}") + self.axes[0].imshow(pred_bev[..., 1]) + if gt_bev: + self.axes[1].imshow(gt_bev[cav_id]) + self.draw() + break
+ + +
[docs]class SparseDetectionCanvas(MplCanvas): + def __init__(self, lidar_range=None, topk_ctr=0, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.topk_ctr = topk_ctr + self.pred_key = self.data_keys[0] + self.gt_key = self.data_keys[1] + +
[docs] def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, det_dict in data[self.pred_key].items(): + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + # plot points + for points in data['points'].values(): + draw_points_boxes_plt( + pc_range=self.lidar_range, + points=points, + ax=self.axes, + # return_ax=True + ) + # plot centers + if 'ctr' in det_dict: + centers = det_dict['ctr'].detach().cpu().numpy() + if self.topk_ctr > 0: + topk_inds = det_dict['scr'].topk(self.topk_ctr).indices + conf = det_dict['scr'][topk_inds] + centers = centers[topk_inds] + elif 'conf' in det_dict: + conf = det_dict['conf'][:, 0, 1].detach().cpu().numpy() + mask = conf > 0.5 + centers = centers[mask] + conf = conf[mask] + + self.axes.scatter(centers[:, 0], centers[:, 1], + cmap='jet', c=conf, s=1, vmin=0, vmax=1) + # plot pcds and boxes + gt_boxes = list(data[self.gt_key][cav_id].values()) + gt_boxes = np.array(gt_boxes)[:, [1, 2, 3, 4, 5, 6, 9]] + pred_boxes = det_dict['box'].detach().cpu().numpy() + draw_points_boxes_plt( + pc_range=self.lidar_range, + boxes_pred=pred_boxes, + boxes_gt=gt_boxes, + ax=self.axes, + # return_ax=True + ) + self.draw() + break
+ + +
[docs]class DetectionCanvas(MplCanvas): + def __init__(self, lidar_range=None, topk_ctr=0, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.topk_ctr = topk_ctr + self.pred_key = self.data_keys[0] + self.gt_key = self.data_keys[1] + +
[docs] def refresh(self, data, **kwargs): + if self.pred_key not in data: + return + for cav_id, det_dict in data[self.pred_key].items(): + self.axes.clear() + self.axes.set_title(f"{data['scenario'][cav_id]}.{data['frame'][cav_id]}") + # plot points + for points in data['points'].values(): + draw_points_boxes_plt( + pc_range=self.lidar_range, + points=points, + ax=self.axes, + # return_ax=True + ) + + # plot centers + if 'ctr' in det_dict: + if self.topk_ctr > 0: + topk_inds = det_dict['scr'].topk(self.topk_ctr).indices + scr = det_dict['scr'][topk_inds].detach().cpu().numpy() + centers = det_dict['ctr'][topk_inds].detach().cpu().numpy() + else: + centers = det_dict['ctr'].detach().cpu().numpy() + if 'scr' in det_dict: + scr = det_dict['scr'].detach().cpu().numpy() + elif 'conf' in det_dict: + scr = det_dict['conf'][:, 0, 1].detach().cpu().numpy() + else: + break + mask = scr > 0.5 + centers = centers[mask] + scr = scr[mask] + self.axes.scatter(centers[:, 0], centers[:, 1], + cmap='jet', c=scr, s=.1, vmin=0, vmax=1) + # plot pcds and boxes + gt_boxes = list(data[self.gt_key][cav_id].values()) + gt_boxes = np.array(gt_boxes)[:, [1, 2, 3, 4, 5, 6, 9]] + if 'preds' in det_dict: + det_dict = det_dict['preds'] + pred_boxes = det_dict['box'].detach().cpu().numpy() + draw_points_boxes_plt( + pc_range=self.lidar_range, + boxes_pred=pred_boxes, + boxes_gt=gt_boxes, + ax=self.axes, + # return_ax=True + ) + self.draw() + break
+ + +
[docs]class OutputViewer(QtWidgets.QWidget): + def __init__(self, plots, parent=None): + super(OutputViewer, self).__init__(parent) + layout = QtWidgets.QVBoxLayout(self) + self.gather_data_keys = [] + self.plots = [] + for p in plots: + plot = globals()[p['title']](**p) + layout.addWidget(plot) + self.plots.append(plot) + self.gather_data_keys = self.gather_data_keys + plot.data_keys + self.gather_data_keys = list(set(self.gather_data_keys)) + +
[docs] def refresh(self, data, **kwargs): + for plot in self.plots: + plot.refresh(data)
+ + + + +
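OutputViewer is configured with a list of plot specs: each dict's 'title' names one of the canvas classes above (it is looked up via globals() and also reused as the figure title), and the remaining keys are passed straight to that canvas's constructor. A hedged sketch; the data_keys values and lidar range below are illustrative placeholders and must match whatever keys the runtime data dict actually carries:

from PyQt5 import QtWidgets
from cosense3d.agents.viewer.output_viewer import OutputViewer

app = QtWidgets.QApplication([])

plots = [
    {'title': 'DetectionCanvas', 'data_keys': ['detection', 'global_labels'],
     'lidar_range': [-100, -40, -3.0, 100, 40, 1.0], 'topk_ctr': 0},
    {'title': 'BEVSparseCanvas', 'data_keys': ['bev_pred', 'global_labels'],
     'lidar_range': [-100, -40, -3.0, 100, 40, 1.0], 's': 4},
]
viewer = OutputViewer(plots)
viewer.show()   # viewer.refresh(data) is then called with the shared output dict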
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/agents/viewer/utils.html b/docs/_build/html/_modules/cosense3d/agents/viewer/utils.html new file mode 100644 index 00000000..18b0dc7f --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/agents/viewer/utils.html @@ -0,0 +1,129 @@ + + + + + + cosense3d.agents.viewer.utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.agents.viewer.utils

+
+
+import numpy as np
+
+
+# Returns the minimum (closest) depth for a specified radius around the center
+
[docs]def depth_min(depths, center, r=10) -> float: + selected_depths = depths[circular_mask(len(depths), center, r)] + filtered_depths = selected_depths[(0 < selected_depths) & (selected_depths < 1)] + if 0 in depths: # Check if cursor is at widget border + return 1 + if len(filtered_depths) > 0: + return np.min(filtered_depths) + else: + return 1
+ + +# Creates a circular mask with radius around center +
[docs]def circular_mask(arr_length, center, radius): + dx = np.arange(arr_length) + dx2 = (dx[np.newaxis, :] - center) ** 2 + \ + (dx[:, np.newaxis] - center) ** 2 + return dx2 < radius ** 2
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset.html b/docs/_build/html/_modules/cosense3d/dataset.html new file mode 100644 index 00000000..763ad2c1 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset.html @@ -0,0 +1,139 @@ + + + + + + cosense3d.dataset — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.dataset

+import logging
+import torch
+import importlib
+
+from torch.utils.data.distributed import DistributedSampler
+
+
+
[docs]def get_dataloader(cfgs, mode='train', distributed=False): + name = cfgs['dataset'] + module = importlib.import_module(f'cosense3d.dataset.{name.lower()}_dataset') + dataset_full_name = ''.join([n[:1].upper() + n[1:] for n in name.split('_')]) + 'Dataset' + assert hasattr(module, dataset_full_name), "Invalid dataset." + module_class = getattr(module, dataset_full_name) + dataset = module_class(cfgs, mode) + shuffle = cfgs.get('shuffle', True) if mode=='train' else False + if distributed: + shuffle = False + sampler = DistributedSampler(dataset) + else: + sampler = None + dataloader = torch.utils.data.DataLoader(dataset, + batch_size=cfgs[f'batch_size_{mode}'], + sampler=sampler, + num_workers=cfgs['n_workers'], + shuffle=shuffle, + collate_fn=dataset.collate_batch, + pin_memory=True, + drop_last=True) + return dataloader
+ + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/cosense_dataset.html b/docs/_build/html/_modules/cosense3d/dataset/cosense_dataset.html new file mode 100644 index 00000000..61a38636 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/cosense_dataset.html @@ -0,0 +1,351 @@ + + + + + + cosense3d.dataset.cosense_dataset — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.dataset.cosense_dataset

+import copy
+import glob
+import os
+import logging
+import time
+import random
+from typing import List, Optional, Union
+
+import open3d as o3d
+import cv2
+from PIL import Image
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch.utils.data import Dataset
+
+from cosense3d.dataset.pipeline import Pipeline
+from cosense3d.utils.misc import load_json
+from cosense3d.dataset.const import CoSenseBenchmarks as csb
+from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs
+
+
+
[docs]class CosenseDataset(Dataset): + LABEL_COLORS = {} + VALID_CLS = [] + + def __init__(self, cfgs, mode): + self.cfgs = cfgs + self.mode = mode + self.COM_RANGE = self.cfgs.get('com_range', 70) + self.latency = cfgs.get('latency', 0) + self.loc_err = np.array(cfgs.get('loc_err', [0, 0, 0])) + if cfgs.get('enable_split_sub_folder', True): + self.data_path = os.path.join(self.cfgs['data_path'], self.mode) + else: + self.data_path = self.cfgs['data_path'] + + self.max_num_cavs = cfgs['max_num_cavs'] + + self.init_dataset() + + self.pipeline = Pipeline(cfgs[f'{mode}_pipeline']) + # for frames that do not need loss calculation, omit gt-loading to save time + if 'inf_pipeline' in cfgs: + self.inf_pipeline = Pipeline(cfgs['inf_pipeline']) + else: + self.inf_pipeline = self.pipeline + + def __len__(self): + return len(self.samples) + + def __getitem__(self, item): + return self.load_frame_data(item) + +
[docs] def load_frame_data(self, + item: int, + prev_agents: Optional[List] = None, + prev_item: Optional[int] = None, + omit_gt: Optional[bool] = False, + loc_err: Union[list, None] = None) -> dict: + """ + Load all data and annotations from one frame to standard CoSense format. + + :param item: sample index. + :param prev_agents: only load data for the given previous agents; this is used for temporal data loading. + :param prev_item: the index of the previously loaded sample. + :param omit_gt: whether to omit loading the ground truth annotations. + :param loc_err: localization error. + :return: data_dict + """ + sample_info = self.load_sample_info(item, prev_agents, prev_item) + sample_info['loc_err'] = loc_err + if omit_gt: + data_dict = self.inf_pipeline(sample_info) + else: + data_dict = self.pipeline(sample_info) + data_dict.pop('sample_info') + data_dict.pop('data_path') + return data_dict
+ +
[docs] def init_dataset(self): + """Load all necessary meta information""" + self.load_meta() + self.parse_samples()
+ +
[docs] def parse_samples(self): + """List all frame-wise instances""" + # list all frames, each frame as a sample + self.samples = [] + drop_scenarios = self.cfgs.get('drop_scenarios', []) + for scenario, scontent in self.meta_dict.items(): + if scenario in drop_scenarios: + continue + self.samples.extend(sorted([[scenario, frame] for frame in scontent.keys()])) + self.samples = sorted(self.samples) + + print(f"{self.mode} : {len(self.samples)} samples.")
+ +
[docs] def load_meta(self): + """Load meta data from CoSense json files""" + self.meta_dict = {} + meta_dir = self.cfgs['meta_path'] + if meta_dir == '': + return + if 'split' in self.cfgs: + scenarios = self.cfgs['split'][self.mode] + elif os.path.exists(os.path.join(self.cfgs['meta_path'], f"{self.mode}.txt")): + with open(os.path.join(self.cfgs['meta_path'], f"{self.mode}.txt"), 'r') as fh: + scenarios = [l.strip() for l in fh.readlines() if len(l.strip()) > 0] + else: + scenarios = [d[:-5] for d in os.listdir(meta_dir) if 'json' in d] + + for scenario in scenarios: + meta_file = os.path.join(meta_dir, f"{scenario}.json") + scenario_dict = load_json(meta_file) + # scenario_dict = {s: scenario_dict[s] for s in list(scenario_dict.keys())[:1]} + self.meta_dict[scenario] = scenario_dict
+ +
[docs] def load_sample_info(self, item: int, prev_agents: Optional[List] = None, prev_item: Optional[int] = None) -> dict: + """ + Load meta info of the ```item```'th sample. + + :param item: sample index. + :param prev_agents: only load data the previous agents if given, this is used for temporal data loading. + :param prev_item: the index of the previous loaded sample. + :return: batch_dict: dict(scenario: str, frame: str, sample_info: dict) + """ + # load meta info + scenario, frame = self.samples[item] + sample_info = copy.deepcopy(self.meta_dict[scenario][frame]) + + if prev_item is None: + prev_item = max(item - 1, 0) + prev_scenario, prev_frame = self.samples[prev_item] + prev_idx = f'{prev_scenario}.{prev_frame}' + next_item = min(item + 1, self.__len__() - 1) + next_scenario, next_frame = self.samples[next_item] + next_idx = f'{next_scenario}.{next_frame}' + + if prev_scenario != scenario: + prev_agents = None + valid_agent_ids = self.get_valid_agents(sample_info, prev_agents) + + # previous agents might not in current frame when load sequential data + scenario_tokens = [f'{scenario}.{ai}' for ai in valid_agent_ids if ai in sample_info['agents']] + + # if latency > 0, set the sample info of coop. cavs to previous frame at -latency + if self.latency != 0: + # get random latency if latency flag is -1 + latency = np.random.randint(3) if self.latency == -1 else self.latency + latent_item = max(item - latency, 0) + latent_scenario, latent_frame = self.samples[latent_item] + if latent_scenario != scenario: + # make sure the scenario is the same as the current frame + latent_scenario = scenario + latent_frame = frame + latent_info = copy.deepcopy(self.meta_dict[latent_scenario][latent_frame]) + # update coop agent info to latent frame + for cav_id in valid_agent_ids: + if cav_id == sample_info['meta']['ego_id']: + continue + if cav_id in latent_info['agents']: + sample_info['agents'][cav_id] = latent_info['agents'][cav_id] + + return { + 'scenario': scenario, + 'frame': frame, + 'data_path': self.data_path, + 'sample_info': sample_info, + 'valid_agent_ids': valid_agent_ids, + 'scene_tokens': scenario_tokens, + }
+ +
[docs] def get_valid_agents(self, sample_info: dict, prev_agents: Optional[List] = None) -> List: + """ + Return prev_agents if given, otherwise select the configured number of agents in the communication range + which always includes the ego agent. + + Parameters + ---------- + sample_info: meta info of one sample. + prev_agents: list of the agent ids loaded last time. + + Returns + ------- + agents_ids: list of valid agents for the current sample + """ + if prev_agents is not None: + return prev_agents + else: + agents = sample_info['agents'] + ego_id = str(sample_info['meta']['ego_id']) + agents_ids = [ego_id] + # filter cavs in communication range + ego_pose_vec = agents[ego_id]['pose'] + in_range_cavs = [] + for ai, adict in agents.items(): + if ai == ego_id: + continue + if ((adict['pose'][0] - ego_pose_vec[0])**2 + (adict['pose'][1] - ego_pose_vec[1])**2 + < self.COM_RANGE**2): + in_range_cavs.append(ai) + if self.max_num_cavs > 1: + agents_ids += random.sample(in_range_cavs, k=min(self.max_num_cavs - 1, len(in_range_cavs))) + return agents_ids
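The selection above always keeps the ego agent and then draws at most max_num_cavs - 1 additional agents whose planar distance to the ego pose is below COM_RANGE. The distance test itself, with illustrative poses reduced to [x, y]:

ego_pose = [0.0, 0.0]
com_range = 70                                       # metres, the default 'com_range'
candidates = {'1': [30.0, 40.0], '2': [60.0, 50.0]}  # agent id -> [x, y]

in_range = [ai for ai, (x, y) in candidates.items()
            if (x - ego_pose[0]) ** 2 + (y - ego_pose[1]) ** 2 < com_range ** 2]
print(in_range)   # ['1']: 50 m is inside the range, ~78 m is not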
+ +
[docs] @staticmethod + def collate_batch(batch_list): + keys = batch_list[0].keys() + batch_dict = {k:[] for k in keys} + + def list_np_to_tensor(ls): + ls_tensor = [] + for i, l in enumerate(ls): + if isinstance(l, list): + l_tensor = list_np_to_tensor(l) + ls_tensor.append(l_tensor) + elif isinstance(l, np.ndarray): + tensor = torch.from_numpy(l) + if l.dtype == np.float64: + tensor = tensor.float() + ls_tensor.append(tensor) + else: + ls_tensor.append(l) + return ls_tensor + + for k in keys: + if isinstance(batch_list[0][k], np.ndarray): + batch_dict[k] = [torch.from_numpy(batch[k]) for batch in batch_list] + elif isinstance(batch_list[0][k], list): + batch_dict[k] = [list_np_to_tensor(batch[k]) for batch in batch_list] + else: + batch_dict[k] = [batch[k] for batch in batch_list] + return batch_dict
+ + +if __name__=="__main__": + from cosense3d.utils.misc import load_yaml + from torch.utils.data import DataLoader + cfgs = load_yaml("/mars/projects20/CoSense3D/cosense3d/config/petr.yaml") + cosense_dataset = CosenseDataset(cfgs['DATASET'], 'train') + cosense_dataloader = DataLoader(dataset=cosense_dataset, collate_fn=cosense_dataset.collate_batch) + for data in cosense_dataloader: + print(data.keys()) +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/pipeline.html b/docs/_build/html/_modules/cosense3d/dataset/pipeline.html new file mode 100644 index 00000000..8f9fed37 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/pipeline.html @@ -0,0 +1,139 @@ + + + + + + cosense3d.dataset.pipeline — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.dataset.pipeline

+from cosense3d.dataset.pipeline.loading import *
+from cosense3d.dataset.pipeline.transform import *
+
+
+
[docs]class Pipeline(object): + """Composes several processing modules together. + Take care that these functions modify the input data directly. + """ + + def __init__(self, cfgs): + self.processes = [] + if isinstance(cfgs, list): + for cfg in cfgs: + for k, v in cfg.items(): + self.build_process(k, v) + elif isinstance(cfgs, OrderedDict): + for k, v in cfgs.items(): + self.build_process(k, v) + else: + raise NotImplementedError + +
[docs] def build_process(self, k, v): + cls = globals().get(k, None) + assert cls is not None, f"Pipeline process node {k} not found." + self.processes.append(cls(**v))
+ + def __call__(self, data_dict): + for p in self.processes: + p(data_dict) + return data_dict
+ + +
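A hedged sketch of how a Pipeline is typically assembled: each config entry maps a process class name (resolved through globals() in this module, so it must be one of the classes imported from the loading/transform modules, e.g. LoadLidarPoints or LoadAnnotations shown below) to that class's constructor kwargs; calling the pipeline then runs the stages in order on a shared data_dict, which they modify in place:

from cosense3d.dataset.pipeline import Pipeline

cfgs = [
    {'LoadLidarPoints': {'coop_mode': True, 'load_attributes': ['xyz', 'intensity']}},
    {'LoadAnnotations': {'load3d_local': True, 'min_num_pts': 0}},
]
pipeline = Pipeline(cfgs)

# The input dict would normally come from CosenseDataset.load_sample_info; sketched here only.
# data_dict = pipeline({'sample_info': ..., 'valid_agent_ids': [...], 'data_path': ..., 'loc_err': None})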
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/pipeline/loading.html b/docs/_build/html/_modules/cosense3d/dataset/pipeline/loading.html new file mode 100644 index 00000000..e2a667e4 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/pipeline/loading.html @@ -0,0 +1,787 @@ + + + + + + cosense3d.dataset.pipeline.loading — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.dataset.pipeline.loading

+import os, random, copy
+import glob
+from collections import OrderedDict
+
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+from plyfile import PlyData
+import cv2
+
+from cosense3d.utils.pclib import pose_to_transformation
+from cosense3d.utils.pcdio import point_cloud_from_path
+from cosense3d.utils.misc import load_json
+
+
+
[docs]class LoadLidarPoints: + + def __init__(self, + coop_mode=True, + load_attributes=['xyz', 'intensity'], + time_offset=0): + self.coop_mode = coop_mode + self.load_attributes = load_attributes + self.time_offset = time_offset + +
[docs] def read_pcd(self, pts_filename): + pcd = point_cloud_from_path(pts_filename) + points = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1) + lidar_dict = {'xyz': points} + if 'intensity' in pcd.fields: + lidar_dict['intensity'] = pcd.pc_data['intensity'] + if 'timestamp' in pcd.fields: + lidar_dict['time'] = pcd.pc_data['timestamp'] + return lidar_dict
+ + def _load_points(self, pts_filename): + """ + Load point clouds data form file. + + Parameters + ---------- + pcd_file : str + The pcd file that contains the point cloud. + return_o3d: bool + Default returns numpy array, set True to return pcd as o3d PointCloud object + + Returns + ------- + lidar_dict: + xyz: pcd_np | pcd : np.ndarray | o3d.geometry.PointCloud + The lidar xyz coordinates in numpy format, shape:(n, 3); + intensity: (optional) np.ndarray, (n,); + label: (optional) np.ndarray, (n,); + time: (optional) np.ndarray, (n,); + ray: (optional) np.ndarray, (n,); + """ + lidar_dict = {} + ext = os.path.splitext(pts_filename)[-1] + if ext == '.pcd': + # we do not use to avoid conflict with PyQt5 + lidar_dict = self.read_pcd(pts_filename) + + # pcd = o3d.io.read_point_cloud(pts_filename) + # xyz = np.asarray(pcd.points, dtype=np.float32) + # lidar_dict['xyz'] = xyz + # # we save the intensity in the first channel + # intensity = np.expand_dims(np.asarray(pcd.colors)[:, 0], -1) + # if len(intensity) == len(xyz): + # lidar_dict['intensity'] = intensity + + elif ext == '.bin': + pcd_np = np.fromfile(pts_filename, dtype=np.float32).reshape(-1, 4) + lidar_dict['xyz'] = pcd_np[:, :3] + # check attribute of last column, + # num of unique labels for the datasets in this projects is less than 50, + # unique intensities is normally larger then 50 + if len(np.unique(pcd_np[:, -1])) < 50: + lidar_dict['label'] = pcd_np[:, -1] + elif pcd_np[:, -1].max() > 1: + lidar_dict['intensity'] = pcd_np[:, -1] / 255 + else: + lidar_dict['intensity'] = pcd_np[:, -1] + + elif ext == '.ply': + ply = PlyData.read(pts_filename) + data = ply['vertex'] + properties = [prop.name for prop in data.properties] + data = {name: np.array(data[name]) for name in properties} + xyz = np.stack([data.pop(x) for x in 'xyz'], axis=1) + lidar_dict['xyz'] = xyz + lidar_dict.update(data) + else: + raise NotImplementedError + # reshape for cat + for k, v in lidar_dict.items(): + if v.ndim == 1: + lidar_dict[k] = v.reshape(-1, 1) + return lidar_dict + + def _load_single(self, pts_filename, timestamp=0): + lidar_dict = self._load_points(pts_filename) + if 'intensity' in self.load_attributes and 'intensity' not in lidar_dict: + lidar_dict['intensity'] = np.ones_like(lidar_dict['xyz'][:, :1]) + if 'time' in self.load_attributes: + if 'time' in lidar_dict: + lidar_dict['time'] -= self.time_offset + else: + lidar_dict['time'] = np.zeros_like(lidar_dict['xyz'][:, :1]) + (timestamp - self.time_offset) + if 'distance' in self.load_attributes: + lidar_dict['distance'] = np.linalg.norm(lidar_dict['xyz'][:, :2], axis=1, keepdims=True) + if 'cosine' in self.load_attributes: + lidar_dict['cosine'] = np.cos(np.arctan2(lidar_dict['xyz'][:, 1:2], lidar_dict['xyz'][:, 0:1])) + if 'sine' in self.load_attributes: + lidar_dict['sine'] = np.sin(np.arctan2(lidar_dict['xyz'][:, 1:2], lidar_dict['xyz'][:, 0:1])) + + points = np.concatenate( + [lidar_dict[attri] for attri in self.load_attributes], axis=-1) + + return points + + def __call__(self, data_dict): + if self.coop_mode: + points = [] + for ai in data_dict['valid_agent_ids']: + adict = data_dict['sample_info']['agents'][ai] + filename = os.path.join(data_dict['data_path'], adict['lidar']['0']['filename']) + points.append(self._load_single(filename, adict['lidar']['0']['time'])) + else: + ego_id = data_dict['sample_info']['meta']['ego_id'] + ego_dict = data_dict['sample_info']['agents'][ego_id] + filename = os.path.join(data_dict['data_path'], ego_dict['lidar']['0']['filename']) + points = 
self._load_single(filename) + + data_dict['points'] = points + data_dict['points_attributes'] = self.load_attributes + + return data_dict
+ + +
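The load_attributes list above controls which columns end up in the per-agent point matrix: each requested attribute is appended after xyz in the given order, so ['xyz', 'intensity', 'time'] yields an (N, 5) array. A hedged sketch using _load_single directly on a hypothetical .bin point file with x, y, z and one extra channel per point (the path is a placeholder):

from cosense3d.dataset.pipeline.loading import LoadLidarPoints

loader = LoadLidarPoints(coop_mode=False,
                         load_attributes=['xyz', 'intensity', 'time'],
                         time_offset=0)

points = loader._load_single('path/to/000000.bin', timestamp=0.0)
print(points.shape)   # (N, 5): x, y, z, intensity, time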
[docs]class LoadMultiViewImg: + def __init__(self, bgr2rgb=False, to_float32=False, max_num_img=None, img_filter_keys=None): + self.bgr2rgb = bgr2rgb + self.to_float32 = to_float32 + self.max_num_img = max_num_img + self.img_filter_keys = img_filter_keys + + def __call__(self, data_dict): + agents = data_dict['sample_info']['agents'] + chosen_cams = OrderedDict() + + img = [] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + chosen_cams[ai] = [] + # get image info + num_cam = 0 + if self.max_num_img is not None and self.max_num_img < len(adict['camera']): + selected = random.sample(list(adict['camera'].keys()), k=self.max_num_img) + cam_dicts = {ci: adict['camera'][ci] for ci in selected} + else: + cam_dicts = copy.copy(adict['camera']) + for ci, cdict in cam_dicts.items(): + # One lidar frame might have several images, only take the 1st one + filename = cdict['filenames'][0] + if self.img_filter_keys is not None and \ + len([1 for k in self.img_filter_keys if k in filename]) == 0: + continue + num_cam += 1 + chosen_cams[ai].append(ci) + img_file = os.path.join(data_dict['data_path'], filename) + I = cv2.imread(img_file) + if self.bgr2rgb: + I = cv2.cvtColor(I, cv2.COLOR_BGR2RGB) + img.append(I) + # img is of shape (h, w, c, num_views) + img = np.stack(img, axis=0) + if self.to_float32: + img = img.astype(np.float32) + + data_dict['img'] = img + data_dict['chosen_cams'] = chosen_cams + return data_dict
+ + +
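LoadMultiViewImg collects the chosen camera images of all valid agents, optionally subsamples them to max_num_img per agent, and stacks them along the first axis while recording which camera ids were kept. A hedged usage sketch with a made-up minimal sample (the real keys and file names come from the CoSense meta files, and the image files are assumed to exist on disk):

import numpy as np

data_dict = {
    'data_path': '/path/to/dataset',                      # hypothetical
    'valid_agent_ids': ['0'],
    'sample_info': {'agents': {'0': {'camera': {
        'cam0': {'filenames': ['0/000000_cam0.png']},     # hypothetical files
        'cam1': {'filenames': ['0/000000_cam1.png']},
    }}}},
}
loader = LoadMultiViewImg(bgr2rgb=True, max_num_img=1)    # randomly keeps one of the two views
data_dict = loader(data_dict)
# data_dict['img'] has shape (num_views, H, W, 3); data_dict['chosen_cams'] lists the kept camera ids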
[docs]class LoadAnnotations: + def __init__(self, + load2d=False, load_cam_param=False, + load3d_local=False, load3d_global=False, + load_global_time=False, load3d_pred=False, + min_num_pts=0, with_velocity=False, + class_agnostic_3d=True, time_offset=0, + loc_err=None): + self.load2d = load2d + self.load_cam_param = load_cam_param + self.load3d_local = load3d_local + self.load3d_global = load3d_global + self.load3d_pred = load3d_pred + self.load_global_time = load_global_time + self.min_num_pts = min_num_pts + self.with_velocity = with_velocity + self.class_agnostic_3d = class_agnostic_3d + self.time_offset = time_offset + self.loc_err = np.array(loc_err) if loc_err is not None else None # x, y, r + + def __call__(self, data_dict): + self._load_essential(data_dict) + if self.load2d: + data_dict = self._load_anno2d(data_dict) + elif self.load_cam_param: + data_dict = self._load_cam_param(data_dict) + + if self.load3d_local: + data_dict = self._load_anno3d_local(data_dict) + if self.load3d_global: + data_dict = self._load_anno3d_global(data_dict) + if self.load_global_time: + data_dict = self._load_global_time(data_dict) + if self.load3d_pred: + data_dict = self._load_anno3d_pred(data_dict) + + return data_dict + + def _add_loc_err(self, pose, loc_err): + pose_ = copy.deepcopy(pose) + if self.loc_err is not None: + loc_err = np.random.randn(3) * self.loc_err + + pose_[0] = pose[0] + loc_err[0] + pose_[1] = pose[1] + loc_err[1] + pose_[5] = pose[5] + loc_err[2] + return pose_ + + def _load_essential(self, data_dict): + lidar_poses = [] + lidar_poses_gt = [] + vehicle_poses = [] + timestampes = [] + agents = data_dict['sample_info']['agents'] + loc_err = data_dict['loc_err'] + ego_pose = agents[data_dict['sample_info']['meta']['ego_id']]['lidar']['0']['pose'] + ego_pose = self._add_loc_err(ego_pose, loc_err[0]) + ego_pose = pose_to_transformation(ego_pose) + for i, ai in enumerate(data_dict['valid_agent_ids']): + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + lidar_pose = self._add_loc_err(adict['lidar']['0']['pose'], loc_err=loc_err[i]) + lidar_pose = pose_to_transformation(lidar_pose) + lidar_poses.append(lidar_pose) + if self.loc_err is not None or loc_err is not None: + lidar_poses_gt.append(pose_to_transformation(adict['lidar']['0']['pose'])) + vehicle_poses.append(adict['pose']) + if adict['lidar']['0']['time'] is not None: + # dairv2x + timestampes.append(adict['lidar']['0']['time'] - self.time_offset) + else: + # opv2v + # TODO update opv2v meta files with lidar timestamps + timestampes.append(int(data_dict['frame']) * 0.1 - self.time_offset) + + data_dict.update({ + 'lidar_poses': lidar_poses, + 'ego_poses': ego_pose, + 'timestamp': timestampes, + 'vehicle_poses': vehicle_poses + }) + + if len(lidar_poses) == len(lidar_poses_gt): + data_dict['lidar_poses_gt'] = lidar_poses_gt + + return data_dict + + def _load_cam_param(self, data_dict): + intrinsics = [] + extrinsics = [] + lidar2img = [] + + agents = data_dict['sample_info']['agents'] + chosen_cams = data_dict['chosen_cams'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + cam_ids = chosen_cams[ai] + for ci in cam_ids: + cdict = adict['camera'][ci] + I4x4 = np.eye(4) + I4x4[:3, :3] = np.array(cdict['intrinsic']) + intrinsics.append(I4x4.astype(np.float32)) + extrinsics.append(np.array(cdict['lidar2cam']).astype(np.float32)) + 
lidar2img.append(self.get_lidar2img_transform( + cdict['lidar2cam'], cdict['intrinsic']).astype(np.float32)) + + data_dict.update({ + 'intrinsics': intrinsics, + 'extrinsics': extrinsics, + 'lidar2img': lidar2img, + }) + return data_dict + + def _load_anno2d(self, data_dict): + intrinsics = [] + extrinsics = [] + lidar2img = [] + bboxes2d = [] + centers2d = [] + depths = [] + labels = [] + + agents = data_dict['sample_info']['agents'] + chosen_cams = data_dict['chosen_cams'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + cam_ids = chosen_cams[ai] + for ci in cam_ids: + cdict = adict['camera'][ci] + I4x4 = np.eye(4) + I4x4[:3, :3] = np.array(cdict['intrinsic']) + intrinsics.append(I4x4.astype(np.float32)) + extrinsics.append(np.array(cdict['lidar2cam']).astype(np.float32)) + lidar2img.append(self.get_lidar2img_transform( + cdict['lidar2cam'], cdict['intrinsic']).astype(np.float32)) + cam_info = adict['camera'][ci] + # num_lidar_pts = np.ones(len(gt_names)).astype(int) + # valid_flag = np.ones(len(gt_names)).astype(bool) + mask = np.array(cam_info['num_pts']) > self.min_num_pts + bboxes2d.append(np.array(cam_info['bboxes2d']).astype(np.float32)[mask]) + centers2d.append(np.array(cam_info['centers2d']).astype(np.float32)[mask]) + depths.append(np.array(cam_info['depths']).astype(np.float32)[mask]) + labels.append(np.zeros(mask.sum(), dtype=int)) + + data_dict.update({ + 'intrinsics': intrinsics, + 'extrinsics': extrinsics, + 'lidar2img': lidar2img, + 'bboxes2d': bboxes2d, + 'centers2d': centers2d, + 'depths2d': depths, + 'labels2d': labels + }) + return data_dict + + def _load_anno3d_local(self, data_dict): + local_bboxes_3d = [] + local_labels_3d = [] + local_boxes_3d_id = [] + local_names = [] + agents = data_dict['sample_info']['agents'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not in current frame when load sequential data + continue + adict = agents[ai] + boxes = np.array(adict['gt_boxes']).reshape(-1, 11) + if 'num_pts' not in adict: + mask = np.ones_like(boxes[:, 0]).astype(bool) + else: + mask = np.array(adict['num_pts']) > self.min_num_pts + if len(boxes) != len(mask): + # TODO: update num pts in meta + mask = np.ones_like(boxes[..., 0]).astype(bool) + boxes = boxes[mask] + local_boxes = boxes[:, [2, 3, 4, 5, 6, 7, 10]].astype(np.float32) + local_boxes_id = boxes[:, 0].astype(int) + if self.class_agnostic_3d: + local_labels = np.zeros(len(boxes), dtype=int) + else: + local_labels = boxes[:, 1].astype(int) + if self.with_velocity: + if 'velos' in adict: + velos = np.array(adict['velos']).reshape(-1, 2).astype(np.float32) / 3.6 + local_boxes = np.concatenate([local_boxes, velos[mask]], axis=-1) + else: + velos = np.zeros_like(local_boxes[:, :2]) + local_boxes = np.concatenate([local_boxes, velos], axis=-1) + local_bboxes_3d.append(local_boxes) + local_labels_3d.append(local_labels) + local_boxes_3d_id.append(local_boxes_id) + assert np.all(local_labels == 0), "Num. cls > 1 not implemented." 
+ local_names.append(['car' for _ in local_labels]) + + data_dict.update({ + 'local_bboxes_3d': local_bboxes_3d, + 'local_labels_3d': local_labels_3d, + 'local_bboxes_id': local_boxes_3d_id, + 'local_names': local_names, + }) + + return data_dict + + def _load_anno3d_global(self, data_dict): + frame_meta = data_dict['sample_info']['meta'] + boxes = np.array(frame_meta['bbx_center_global']) + global_bboxes_3d = boxes[:, [2, 3, 4, 5, 6, 7, 10]].astype(np.float32) + global_bboxes_id = boxes[:, 0].astype(int) + if self.class_agnostic_3d: + global_labels_3d = np.zeros(len(boxes), dtype=int) + else: + global_labels_3d = boxes[:, 1].astype(int) + + if self.with_velocity: + if 'bbx_velo_global' in frame_meta: + global_velocity = np.array(frame_meta['bbx_velo_global']).astype(np.float32) / 3.6 + else: + global_velocity = np.zeros_like(global_bboxes_3d[:, :2]) + global_bboxes_3d = np.concatenate([global_bboxes_3d, global_velocity], axis=-1) + + if 'num_pts' in frame_meta and self.min_num_pts > 0: + global_box_num_pts = np.array(frame_meta['num_pts']) + mask = global_box_num_pts > self.min_num_pts + global_bboxes_3d = global_bboxes_3d[mask] + global_labels_3d = global_labels_3d[mask] + global_bboxes_id = global_bboxes_id[mask] + + # TODO: currently only support car + global_names = ['car' for _ in global_labels_3d] + data_dict.update({ + 'global_bboxes_3d': global_bboxes_3d, + 'global_labels_3d': global_labels_3d, + 'global_bboxes_id': global_bboxes_id, + 'global_names': global_names, + }) + return data_dict + + def _load_global_time(self, data_dict): + frame_meta = data_dict['sample_info']['meta'] + if 'global_bbox_time' in frame_meta: + data_dict['global_time'] = frame_meta['global_bbox_time'][0] + else: + data_dict['global_time'] = data_dict['points'][0][:, -1].max() + return data_dict + + def _load_anno3d_pred(self, data_dict): + frames = sorted(list(data_dict['sample_info']['meta']['boxes_pred'].keys())) + boxes_preds = [data_dict['sample_info']['meta']['boxes_pred'][f] for f in frames] + data_dict['bboxes_3d_pred'] = np.array(boxes_preds) + return data_dict + +
[docs] def get_lidar2img_transform(self, lidar2cam, intrinsic): + # Make sure both calibration inputs are numpy arrays before composing them. + lidar2cam = np.array(lidar2cam) + intrinsic = np.array(intrinsic) + # K (3x3) @ [R|t] (3x4) gives the 3x4 projection; keep the last row of lidar2cam so the result stays 4x4. + P = intrinsic @ lidar2cam[:3] + lidar2img = np.concatenate([P, lidar2cam[3:]], axis=0) + return lidar2img
+ + +
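get_lidar2img_transform composes the camera intrinsic K with the lidar-to-camera extrinsic into a single 4x4 matrix; assuming the usual pinhole convention, applying it to a homogeneous lidar point and dividing by the depth gives pixel coordinates. A small worked sketch (the matrices and the point are made up for illustration):

import numpy as np

K = np.array([[1000., 0., 640.], [0., 1000., 360.], [0., 0., 1.]])
lidar2cam = np.eye(4)                       # hypothetical extrinsic: lidar and camera frames coincide
lidar2img = np.concatenate([K @ lidar2cam[:3], lidar2cam[3:]], axis=0)   # 4 x 4, as in the method above

pt_lidar = np.array([0.5, 0.2, 2.0, 1.0])   # homogeneous lidar point in front of the camera (z > 0)
uvw = lidar2img @ pt_lidar
u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]     # divide by depth: u = 890.0, v = 460.0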
[docs]class LoadOPV2VBevMaps: + def __init__(self, keys=None, use_global_map=True, ego_only=True, range=75): + self.keys = keys + self.use_global_map = use_global_map + self.ego_only = ego_only + self.range = range + self.map_res = 0.2 + self.map_size = int(self.range * 2 / self.map_res) + pad = int(range / self.map_res) + if self.use_global_map: + self.keys = ['bevmap', 'bevmap_coor'] + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps/png" + map_files = glob.glob(os.path.join(map_path, '*.png')) + self.scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + self.map_bounds = load_json(os.path.join(assets_path, 'map_bounds.json')) + self.bevmaps = {} + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + bevmap = cv2.imread(mf) / 255. + # bevmap = np.pad(bevmap, ((pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0) + self.bevmaps[town] = bevmap + + # grid coor template + grid = np.ones((self.map_size, self.map_size)) + inds = np.stack(np.where(grid)) + xy = inds * 0.2 - self.range + 0.1 + self.xy_pad = np.concatenate([xy, np.zeros_like(xy[:1]), np.ones_like(xy[:1])], axis=0) + # carla has different coor system as cosense3d, T_corr: carla -> cosense3d + self.T_corr = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]]) + else: + assert keys is not None and len(keys) > 0 + + def __call__(self, data_dict): + path = os.path.join(data_dict['data_path'], data_dict['scenario']) + ego_id = data_dict['sample_info']['meta']['ego_id'] + load_dict = {} + + agents = data_dict['valid_agent_ids'] + for ai in agents: + if self.ego_only and ego_id != ai: + for k in load_dict.keys(): + load_dict[k].append(None) + continue + else: + out = self.load_single(path, ai, data_dict) + for k in out.keys(): + if k not in load_dict: + load_dict[k] = [] + load_dict[k].append(out[k]) + + data_dict.update(load_dict) + return data_dict + +
[docs] def load_single(self, path, ai, data_dict): + # map1 = self.crop_map_for_pose(data_dict, ai)[0] + # map2 = cv2.imread(os.path.join(path, ai, f"{data_dict['frame']}_bev.png"))[..., 0] + # map2 = np.array(map2, dtype=float) / 255. + # map2[map2 > 0] = 1 + # map2 = np.flip(map2, 0).copy() + # bevmap = np.zeros((500, 500, 3)) + # bevmap[..., 0] = map1 + # bevmap[..., 1] = map2 + # import matplotlib.pyplot as plt + # plt.imshow(bevmap) + # plt.show() + # plt.close() + + out = {} + if self.use_global_map: + out['bevmap'], out['bevmap_coor'] = self.crop_map_for_pose(data_dict, ai) + else: + frame = data_dict['frame'] + for k in self.keys: + filename = os.path.join(path, ai, f"{frame}_{k}.png") + bev_map = cv2.imread(filename)[..., 0] + # bev_map = cv2.cvtColor(bev_map, cv2.COLOR_BGR2GRAY) + bev_map = np.array(bev_map, dtype=float) / 255. + bev_map[bev_map > 0] = 1 + out[f'{k}map'] = np.flip(bev_map, 0).copy() + out[f'{k}map_coor'] = [-self.range, - self.range] + return out
+ +
[docs] def crop_map_for_pose(self, data_dict, ai): + scenario = data_dict['scenario'] + town = self.scene_maps[scenario] + # lidar_pose = pose_to_transformation(data_dict['sample_info']['agents'][ai]['lidar']['0']['pose']) + lidar_pose = data_dict['lidar_poses'][data_dict['valid_agent_ids'].index(ai)] + cur_map = self.bevmaps[town] + sx, sy = cur_map.shape[:2] + bound = self.map_bounds[town] + + # transform template bev points to world coordinates + transform = self.T_corr @ lidar_pose + xy_tf = transform @ self.xy_pad + # calculate map indices of bev points + xy_tf[0] -= bound[0] + 1.0 + xy_tf[1] -= bound[1] + 1.0 + map_inds = np.floor(xy_tf[:2] / 0.2) + xs = np.clip(map_inds[0], 0, sx - 1).astype(int) + ys = np.clip(map_inds[1], 0, sy - 1).astype(int) + # retrieve cropped bev map from global map + bevmap = cur_map[xs, ys].reshape(self.map_size, self.map_size, 3) # [::-1, ::-1] + + # # bound[0] -= self.range + # # bound[1] -= self.range + # offset_x = int((lidar_pose[0] - self.range - bound[0]) / self.map_res) + # offset_y = int((lidar_pose[1] - self.range - bound[1]) / self.map_res) + # + # xmin = max(offset_x, 0) + # xmax = min(offset_x + size, bevmap.shape[0] - 1) + # ymin = max(offset_y, 0) + # ymax = min(offset_y + size, bevmap.shape[1] - 1) + # bevmap_crop = bevmap[xmin:xmax, ymin:ymax] + # bevmap_coor = [bound[0] + xmin * self.map_res, bound[1] + ymin * self.map_res] + + if data_dict['sample_info']['agents'][ai]['pose'][2] > 2: + bevmap = bevmap[..., 1].astype(bool) + else: + bevmap = bevmap[..., :2].any(-1) + + # import matplotlib.pyplot as plt + # points = data_dict['points'][0] + # mask = bevmap.reshape(-1).astype(bool) + # plt.plot(self.xy_pad[0, mask], self.xy_pad[1, mask], 'g.', markersize=1) + # plt.plot(points[:, 0], points[:, 1], 'b.', markersize=1) + # plt.show() + # plt.close() + + return bevmap, [-self.range, - self.range]
+ + +
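crop_map_for_pose gathers a local patch from the global BEV image by transforming a fixed grid of ego-centred cell centres into world coordinates and converting them to pixel indices. The index arithmetic as a standalone sketch (the carla-to-cosense correction T_corr and the extra 1 m offset from the code are omitted; pose, map bounds and the global map below are made up):

import numpy as np

res, rng = 0.2, 75.0
size = int(2 * rng / res)                                 # 750 cells per side
ii, jj = np.meshgrid(np.arange(size), np.arange(size), indexing='ij')
xy = np.stack([ii, jj]).reshape(2, -1) * res - rng + res / 2          # ego-frame cell centres
grid = np.concatenate([xy, np.zeros((1, xy.shape[1])), np.ones((1, xy.shape[1]))], axis=0)

lidar_pose = np.eye(4)                                    # hypothetical ego pose in world coordinates
bound = [-200.0, -200.0]                                  # hypothetical map lower bound in metres
global_map = np.random.rand(2000, 2000, 3).astype(np.float32)         # hypothetical map at 0.2 m/px

xy_world = (lidar_pose @ grid)[:2]
inds = np.floor((xy_world - np.array(bound)[:, None]) / res).astype(int)
xs = np.clip(inds[0], 0, global_map.shape[0] - 1)
ys = np.clip(inds[1], 0, global_map.shape[1] - 1)
patch = global_map[xs, ys].reshape(size, size, 3)         # local BEV crop around the ego pose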
[docs]class LoadCarlaRoadlineMaps: + def __init__(self, ego_only=True, range=75): + self.ego_only = ego_only + self.range = range + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps/roadline" + map_files = glob.glob(os.path.join(map_path, '*.bin')) + self.scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + self.maps = {} + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + rlmap = np.fromfile(mf, dtype=float).reshape(-1, 2) + # bevmap = np.pad(bevmap, ((pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0) + self.maps[town] = rlmap + + def __call__(self, data_dict): + path = os.path.join(data_dict['data_path'], data_dict['scenario']) + ego_id = data_dict['sample_info']['meta']['ego_id'] + roadline = [] + + agents = data_dict['valid_agent_ids'] + for ai in agents: + if self.ego_only and ego_id != ai: + out = None + else: + out = self.load_single(path, ai, data_dict) + roadline.append(out) + + data_dict['roadline'] = roadline + return data_dict + +
[docs] def load_single(self, path, ai, data_dict): + scenario = data_dict['scenario'] + town = self.scene_maps[scenario] + # lidar_pose = pose_to_transformation(data_dict['sample_info']['agents'][ai]['lidar']['0']['pose']) + lidar_pose = data_dict['lidar_poses'][data_dict['valid_agent_ids'].index(ai)] + cur_map = self.maps[town] + + mask = (cur_map[:, 0] > (lidar_pose[0, 3] - self.range)) & \ + (cur_map[:, 0] < (lidar_pose[0, 3] + self.range)) & \ + (cur_map[:, 1] > (lidar_pose[1, 3] - self.range)) & \ + (cur_map[:, 1] < (lidar_pose[1, 3] + self.range)) + + roadline = cur_map[mask] + + # # visualize + # lidar_file = data_dict['sample_info']['agents'][ai]['lidar']['0']['filename'] + # lidar_file = os.path.join(os.path.dirname(path), lidar_file) + # ply = PlyData.read(lidar_file) + # data = ply['vertex'] + # properties = [prop.name for prop in data.properties] + # data = {name: np.array(data[name]) for name in properties} + # pcd = np.stack([data.pop(x) for x in 'xyz'], axis=1) + # pcd = lidar_pose @ np.concatenate([pcd, np.ones_like(pcd[:, :1])], axis=-1).T + # + # import matplotlib.pyplot as plt + # plt.plot(roadline[:, 0], roadline[:, 1], 'g.', markersize=1) + # plt.plot(pcd[0], pcd[1], 'r.', markersize=1) + # plt.show() + # plt.close() + + return roadline
+ + +
[docs]class LoadSparseBevTargetPoints: + def __init__(self, num_points=3000, ego_only=False): + self.num_points = num_points + self.ego_only = ego_only + + def __call__(self, data_dict): + bev_pts = [] + agents = data_dict['sample_info']['agents'] + ego_id = data_dict['sample_info']['meta']['ego_id'] + for ai in data_dict['valid_agent_ids']: + if ai not in agents: + # previous agents might not be in the current frame when loading sequential data + continue + if self.ego_only and ai != ego_id: + bev_pts.append(np.empty((0, 3))) + else: + # TODO: generation of sparse BEV target points (generate_sparse_bev_pts) is not implemented yet + pass + # return the dict unchanged so this placeholder transform does not break the pipeline + return data_dict
[docs] def generate_sparse_bev_pts(self, pcd): + # Placeholder: sampling of sparse BEV target points from a point cloud is not implemented yet. + pass
+ + + + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/pipeline/transform.html b/docs/_build/html/_modules/cosense3d/dataset/pipeline/transform.html new file mode 100644 index 00000000..11f04531 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/pipeline/transform.html @@ -0,0 +1,322 @@ + + + + + + cosense3d.dataset.pipeline.transform — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.dataset.pipeline.transform

+import numpy as np
+from PIL import Image
+import torch
+
+
+
[docs]class ResizeCropFlipRotImage: + """ + Augment images with random resize, crop, flip and rotation. Modified from StreamPETR. + """ + def __init__(self, data_aug_conf=None, with_2d=True, filter_invisible=True, training=True): + self.data_aug_conf = data_aug_conf + self.training = training + self.min_size = 2.0 + self.with_2d = with_2d + self.filter_invisible = filter_invisible + + def __call__(self, data_dict): + imgs = data_dict['img'] + N = len(imgs) + new_imgs = [] + new_gt_bboxes = [] + new_centers2d = [] + new_gt_labels = [] + new_depths = [] + assert self.data_aug_conf['rot_lim'] == [0.0, 0.0], "Rotation is not currently supported" + + resize, resize_dims, crop, flip, rotate = self._sample_augmentation() + + for i in range(N): + img = Image.fromarray(np.uint8(imgs[i])) + img, ida_mat = self._img_transform( + img, + resize=resize, + resize_dims=resize_dims, + crop=crop, + flip=flip, + rotate=rotate, + ) + if self.with_2d: # sync_2d bbox labels + gt_bboxes = data_dict['bboxes2d'][i] + centers2d = data_dict['centers2d'][i] + gt_labels = data_dict['labels2d'][i] + depths = data_dict['depths2d'][i] + if len(gt_bboxes) != 0: + gt_bboxes, centers2d, gt_labels, depths = self._bboxes_transform( + gt_bboxes, + centers2d, + gt_labels, + depths, + resize=resize, + crop=crop, + flip=flip, + ) + if len(gt_bboxes) != 0 and self.filter_invisible: + gt_bboxes, centers2d, gt_labels, depths = self._filter_invisible(gt_bboxes, centers2d, gt_labels, depths) + + new_gt_bboxes.append(gt_bboxes) + new_centers2d.append(centers2d) + new_gt_labels.append(gt_labels) + new_depths.append(depths) + + new_imgs.append(np.array(img).astype(np.float32)) + data_dict['intrinsics'][i][:3, :3] = ida_mat @ data_dict['intrinsics'][i][:3, :3] + data_dict['bboxes2d'] = new_gt_bboxes + data_dict['centers2d'] = new_centers2d + data_dict['labels2d'] = new_gt_labels + data_dict['depths2d'] = new_depths + data_dict['img'] = new_imgs + data_dict['lidar2img'] = [data_dict['intrinsics'][i] @ data_dict['extrinsics'][i] + for i in range(len(data_dict['extrinsics']))] + + return data_dict + + def _bboxes_transform(self, bboxes, centers2d, gt_labels, depths,resize, crop, flip): + assert len(bboxes) == len(centers2d) == len(gt_labels) == len(depths) + fH, fW = self.data_aug_conf["final_dim"] + bboxes = bboxes * resize + bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop[0] + bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop[1] + bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]], 0, fW) + bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]], 0, fH) + keep = ((bboxes[:, 2] - bboxes[:, 0]) >= self.min_size) & ((bboxes[:, 3] - bboxes[:, 1]) >= self.min_size) + + if flip: + x0 = bboxes[:, 0].copy() + x1 = bboxes[:, 2].copy() + bboxes[:, 2] = fW - x0 + bboxes[:, 0] = fW - x1 + bboxes = bboxes[keep] + + centers2d = centers2d * resize + centers2d[:, 0] = centers2d[:, 0] - crop[0] + centers2d[:, 1] = centers2d[:, 1] - crop[1] + centers2d[:, 0] = np.clip(centers2d[:, 0], 0, fW) + centers2d[:, 1] = np.clip(centers2d[:, 1], 0, fH) + if flip: + centers2d[:, 0] = fW - centers2d[:, 0] + + centers2d = centers2d[keep] + gt_labels = gt_labels[keep] + depths = depths[keep] + + return bboxes, centers2d, gt_labels, depths + + def _filter_invisible(self, bboxes, centers2d, gt_labels, depths): + # filter invisible 2d bboxes + assert len(bboxes) == len(centers2d) == len(gt_labels) == len(depths) + fH, fW = self.data_aug_conf["final_dim"] + indices_maps = np.zeros((fH,fW)) + tmp_bboxes = np.zeros_like(bboxes) + tmp_bboxes[:, :2] = np.ceil(bboxes[:, :2]) + tmp_bboxes[:, 2:] = 
np.floor(bboxes[:, 2:]) + tmp_bboxes = tmp_bboxes.astype(np.int64) + sort_idx = np.argsort(-depths, axis=0, kind='stable') + tmp_bboxes = tmp_bboxes[sort_idx] + bboxes = bboxes[sort_idx] + depths = depths[sort_idx] + centers2d = centers2d[sort_idx] + gt_labels = gt_labels[sort_idx] + for i in range(bboxes.shape[0]): + u1, v1, u2, v2 = tmp_bboxes[i] + indices_maps[v1:v2, u1:u2] = i + indices_res = np.unique(indices_maps).astype(np.int64) + bboxes = bboxes[indices_res] + depths = depths[indices_res] + centers2d = centers2d[indices_res] + gt_labels = gt_labels[indices_res] + + return bboxes, centers2d, gt_labels, depths + + def _get_rot(self, h): + return torch.Tensor( + [ + [np.cos(h), np.sin(h)], + [-np.sin(h), np.cos(h)], + ] + ) + + def _img_transform(self, img, resize, resize_dims, crop, flip, rotate): + ida_rot = torch.eye(2) + ida_tran = torch.zeros(2) + # adjust image + img = img.resize(resize_dims) + img = img.crop(crop) + if flip: + img = img.transpose(method=Image.FLIP_LEFT_RIGHT) + img = img.rotate(rotate) + + # post-homography transformation + ida_rot *= resize + ida_tran -= torch.Tensor(crop[:2]) + if flip: + A = torch.Tensor([[-1, 0], [0, 1]]) + b = torch.Tensor([crop[2] - crop[0], 0]) + ida_rot = A.matmul(ida_rot) + ida_tran = A.matmul(ida_tran) + b + A = self._get_rot(rotate / 180 * np.pi) + b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2 + b = A.matmul(-b) + b + ida_rot = A.matmul(ida_rot) + ida_tran = A.matmul(ida_tran) + b + ida_mat = torch.eye(3) + ida_mat[:2, :2] = ida_rot + ida_mat[:2, 2] = ida_tran + return img, ida_mat + + def _sample_augmentation(self): + H, W = self.data_aug_conf["H"], self.data_aug_conf["W"] + fH, fW = self.data_aug_conf["final_dim"] + if self.training: + resize = np.random.uniform(*self.data_aug_conf["resize_lim"]) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int((1 - np.random.uniform(*self.data_aug_conf["bot_pct_lim"])) * newH) - fH + crop_w = int(np.random.uniform(0, max(0, newW - fW))) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + if self.data_aug_conf["rand_flip"] and np.random.choice([0, 1]): + flip = True + rotate = np.random.uniform(*self.data_aug_conf["rot_lim"]) + else: + resize = max(fH / H, fW / W) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int((1 - np.mean(self.data_aug_conf["bot_pct_lim"])) * newH) - fH + crop_w = int(max(0, newW - fW) / 2) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + rotate = 0 + return resize, resize_dims, crop, flip, rotate
+ + +
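Besides the augmented image, _img_transform returns a 3x3 matrix ida_mat that encodes the resize/crop/flip in pixel space; multiplying it into the camera intrinsic keeps the 3D-to-2D projection consistent with the augmented image. A compact sketch of the resize-plus-crop part (flip and rotation follow the same pattern; the numbers are illustrative):

import numpy as np

resize = 0.5
crop = (10, 20, 10 + 704, 20 + 256)            # (x0, y0, x1, y1) in the resized image
K = np.array([[1000., 0., 640.], [0., 1000., 360.], [0., 0., 1.]])

ida_mat = np.eye(3)
ida_mat[:2, :2] *= resize                      # scaling applied by the resize
ida_mat[:2, 2] -= np.array(crop[:2])           # translation applied by the crop

K_aug = ida_mat @ K                            # intrinsic valid for the augmented image
# a pixel (u, v) of the original image maps to ida_mat @ [u, v, 1] in the augmented one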
[docs]class ResizeImage: + """ + Resize images. + """ + def __init__(self, img_size): + self.img_size = img_size + + def __call__(self, data_dict): + imgs = data_dict['img'] + imgs_out = [] + for i, img in enumerate(imgs): + img = Image.fromarray(np.uint8(img)) + W, H = img.size + img = img.resize(self.img_size) + imgs_out.append(np.array(img).astype(np.float32)) + + data_dict['intrinsics'][i][0, 0] = self.img_size[0] / W * data_dict['intrinsics'][i][0, 0] + data_dict['intrinsics'][i][1, 1] = self.img_size[1] / H * data_dict['intrinsics'][i][1, 1] + + # todo convert 2d annotations + data_dict['img'] = imgs_out + return data_dict
+
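ResizeImage above rescales only the focal lengths fx and fy. If the principal point should follow the resize as well (an assumption about intent, not something the original class does), the intrinsic update would look like this sketch (the helper name is hypothetical):

import numpy as np

def rescale_intrinsic(K, orig_wh, new_wh):
    # Scale a 3x3 intrinsic matrix for an image resized from orig_wh to new_wh.
    sx = new_wh[0] / orig_wh[0]
    sy = new_wh[1] / orig_wh[1]
    K = K.copy()
    K[0, 0] *= sx   # fx
    K[1, 1] *= sy   # fy
    K[0, 2] *= sx   # cx -- not rescaled by the class above
    K[1, 2] *= sy   # cy -- not rescaled by the class above
    return K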
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/temporal_cosense_dataset.html b/docs/_build/html/_modules/cosense3d/dataset/temporal_cosense_dataset.html new file mode 100644 index 00000000..5c40cbce --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/temporal_cosense_dataset.html @@ -0,0 +1,175 @@ + + + + + + cosense3d.dataset.temporal_cosense_dataset — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.dataset.temporal_cosense_dataset

+import random
+import numpy as np
+from cosense3d.dataset.cosense_dataset import CosenseDataset
+
+
+
[docs]class TemporalCosenseDataset(CosenseDataset): + """Sequential Cosense data loader.""" + def __init__(self, cfgs, mode): + super().__init__(cfgs, mode) + self.seq_len = cfgs['seq_len'] + self.n_loss_frame = cfgs.get('n_loss_frame', 1) + self.rand_len = cfgs.get('rand_len', 0) + self.seq_mode = cfgs.get('seq_mode', False) + self.clean_seq = cfgs.get('clean_seq', False) + + def __getitem__(self, index): + queue = [] + index_list = list(range(index - self.seq_len - self.rand_len + 1, index)) + random.shuffle(index_list) + index_list = sorted(index_list[self.rand_len:]) + index_list.append(index) + prev_scene_token = None + prev_agents = None + prev_i = None + num_cav = None + omit_gt = [True] * (len(index_list) - self.n_loss_frame) + [False] * self.n_loss_frame + loc_err = np.random.randn(self.max_num_cavs, 3) * self.loc_err.reshape(-1, 3) + + for i, idx in enumerate(index_list): + idx = max(0, idx) + input_dict = self.load_frame_data( + idx, prev_agents, prev_i, omit_gt=omit_gt[i], loc_err=loc_err) + prev_i = idx + + if not self.seq_mode: # for sliding window only + prev_exists = [] + prev_agents = [] + for tk in input_dict['scene_tokens']: + prev_agents.append(tk.split('.')[-1]) + if prev_scene_token is not None and tk in prev_scene_token: + prev_exists.append(np.array([True])) + else: + prev_exists.append(np.array([False])) + input_dict.update(dict(prev_exists=prev_exists)) + prev_scene_token = input_dict['scene_tokens'] + + queue.append(input_dict) + + # remove frames not belong to the current sequence + # and ensure all frames have the same ego id + valid_idx_start = 0 + if self.clean_seq: + ego_id = queue[-1]['valid_agent_ids'][0] + for idx in range(len(queue)): + if queue[idx]['valid_agent_ids'][0] != ego_id: + valid_idx_start = idx + 1 + queue = {k: [q[k] if k in q else None for q in queue[valid_idx_start:]] for k in queue[-1].keys()} + return queue
+ + +if __name__=="__main__": + from cosense3d.utils.misc import load_yaml + from torch.utils.data import DataLoader + cfgs = load_yaml("/mars/projects20/CoSense3D/cosense3d/config/petr.yaml") + cosense_dataset = TemporalCosenseDataset(cfgs['DATASET'], 'train') + cosense_dataloader = DataLoader(dataset=cosense_dataset, collate_fn=cosense_dataset.collate_batch) + for data in cosense_dataloader: + print(data.keys()) +
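The index window built in __getitem__ takes the seq_len + rand_len - 1 preceding indices, randomly drops rand_len of them and always keeps the current index, so each training sample is a slightly jittered sliding window of frames. A worked sketch of the selection (values are illustrative):

import random

index, seq_len, rand_len = 100, 4, 2
candidates = list(range(index - seq_len - rand_len + 1, index))   # [95, 96, 97, 98, 99]
random.shuffle(candidates)
window = sorted(candidates[rand_len:]) + [index]                  # e.g. [95, 97, 99, 100]
# always seq_len indices, ending at the current frame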
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/toolkit.html b/docs/_build/html/_modules/cosense3d/dataset/toolkit.html new file mode 100644 index 00000000..eb92f8f5 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/toolkit.html @@ -0,0 +1,220 @@ + + + + + + cosense3d.dataset.toolkit — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.dataset.toolkit

+import open3d as o3d
+import copy
+import numpy as np
+
+
+
[docs]def register_pcds(source_cloud, target_cloud, initial_transf, thr=0.2, visualize=False, title="PCD"): + # Load point clouds + if isinstance(source_cloud, str): + source_cloud = o3d.io.read_point_cloud(source_cloud) + if isinstance(target_cloud, str): + target_cloud = o3d.io.read_point_cloud(target_cloud) + + # source_cloud.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=2, max_nn=50)) + # target_cloud.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=2, max_nn=50)) + + # Perform ICP registration + icp_result = initial_transf + if not isinstance(thr, list): + thr = [thr] + + icp_result = o3d.pipelines.registration.registration_icp( + source_cloud, target_cloud, thr[0], initial_transf, + o3d.pipelines.registration.TransformationEstimationPointToPoint()) + + if len(thr) > 1: + for x in thr[1:]: + icp_result = o3d.pipelines.registration.registration_icp( + source_cloud, target_cloud, x, icp_result.transformation, + o3d.pipelines.registration.TransformationEstimationPointToPoint()) + + # Obtain the final transformation matrix + # transformation_matrix = initial_transf + transformation_matrix = icp_result.transformation + + if visualize: + # Apply the final transformation to the source point cloud + source_aligned0 = copy.deepcopy(source_cloud).transform(initial_transf) + source_aligned = copy.deepcopy(source_cloud).transform(transformation_matrix) + # + # src_pts = np.array(source_cloud.points) + # src_pts_aligned = np.array(source_aligned.points) + # tgt_pts = np.array(target_cloud.points) + # src_angles = (np.arctan2(src_pts[:, 1], src_pts[:, 0]) + np.pi * 3 - np.deg2rad(100)) % ( 2 * np.pi) + # tgt_angles = (np.arctan2(tgt_pts[:, 1], tgt_pts[:, 0]) + np.pi * 3 - np.deg2rad(255)) % ( 2 * np.pi) + # steps = 10 + # res = 1 / steps + # pcds = [] + # for i in range(steps): + # mask_src = (src_angles >= np.pi * 2 * i * res) & (src_angles < np.pi * 2 * (i + 1) * res) + # mask_tgt = (tgt_angles >= np.pi * 2 * i * res) & (tgt_angles < np.pi * 2 * (i + 1) * res) + # + # cur_src_cloud = o3d.geometry.PointCloud() + # cur_tgt_cloud = o3d.geometry.PointCloud() + # cur_src_cloud.points = o3d.utility.Vector3dVector(src_pts[mask_src]) + # cur_tgt_cloud.points = o3d.utility.Vector3dVector(tgt_pts[mask_tgt]) + # cur_src_cloud.paint_uniform_color([0, 0.0 + i / steps * 1.0, 0]) + # cur_tgt_cloud.paint_uniform_color([0, 0, 0.2 + i / steps * 0.8]) + # pcds += [cur_src_cloud] + # o3d.visualization.draw_geometries(pcds) + + # Visualize the aligned point clouds + source_aligned0.paint_uniform_color([1, 0, 0]) + source_aligned.paint_uniform_color([1, 0.706, 0]) + target_cloud.paint_uniform_color([0, 0.651, 0.929]) + o3d.visualization.draw_geometries([source_aligned0, target_cloud], window_name=title) + o3d.visualization.draw_geometries([source_aligned, target_cloud], window_name=title) + + return copy.deepcopy(transformation_matrix)
+ + +
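register_pcds refines an initial guess with point-to-point ICP, optionally in several passes with decreasing correspondence thresholds (coarse-to-fine). A hedged usage sketch with made-up file names and an identity initial transform:

import numpy as np

T_init = np.eye(4)                      # rough initial alignment (hypothetical)
T_refined = register_pcds(
    "scan_source.pcd", "scan_target.pcd", T_init,
    thr=[1.0, 0.5, 0.2],                # coarse-to-fine ICP correspondence distances in metres
    visualize=False)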
[docs]def callback_registrations(source, target, source_points, target_points): + """ + Callback function for point picking. Registers two point clouds using selected corresponding points. + """ + print("Point picking callback called!") + + # Corresponding points + correspondences = np.asarray([source_points, target_points]) + + # Create Open3D point cloud from numpy arrays + source_pc = o3d.geometry.PointCloud() + source_pc.points = o3d.utility.Vector3dVector(source.points[source_points]) + target_pc = o3d.geometry.PointCloud() + target_pc.points = o3d.utility.Vector3dVector(target.points[target_points]) + + # Perform registration + transformation = o3d.pipelines.registration.registration_ransac_based_on_feature_matching( + source_pc, target_pc, correspondences, + o3d.pipelines.registration.TransformationEstimationPointToPoint(), + o3d.pipelines.registration.RANSACConvergenceCriteria(4000000, 500) + ) + + # Apply the transformation to the source point cloud + source.transform(transformation.transformation) + + # Visualize the result + o3d.visualization.draw_geometries([source, target]) + return transformation
+ + +
[docs]def click_register(source, target): + # Visualize the two point clouds + o3d.visualization.draw_geometries([source, target]) + + # Register point clouds by picking corresponding points + print("Pick corresponding points in both point clouds. Press 'Q' to finish picking.") + source_points = o3d.visualization.PointCloudPickPoints() + target_points = o3d.visualization.PointCloudPickPoints() + transformation = o3d.visualization.draw_geometries_with_editing( + [source, target, source_points, target_points], + callback=callback_registrations, + window_name="Pick corresponding points") + return transformation
+
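The two helpers above sketch manual, click-based registration, but they rely on point-picking calls that may not exist in current Open3D releases. A hedged alternative (not part of the original module) using Open3D's VisualizerWithEditing (shift+click to pick points, then press q to close) and a point-to-point estimate from the picked correspondences:

import numpy as np
import open3d as o3d

def pick_points(pcd, window_name="Pick points (shift+click), then press q"):
    vis = o3d.visualization.VisualizerWithEditing()
    vis.create_window(window_name=window_name)
    vis.add_geometry(pcd)
    vis.run()                           # user picks points interactively
    vis.destroy_window()
    return vis.get_picked_points()      # indices into pcd.points

def register_by_clicks(source, target):
    src_idx = pick_points(source)
    tgt_idx = pick_points(target)
    n = min(len(src_idx), len(tgt_idx))
    corres = o3d.utility.Vector2iVector(np.stack([src_idx[:n], tgt_idx[:n]], axis=1))
    est = o3d.pipelines.registration.TransformationEstimationPointToPoint()
    return est.compute_transformation(source, target, corres)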
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/toolkit/cosense.html b/docs/_build/html/_modules/cosense3d/dataset/toolkit/cosense.html new file mode 100644 index 00000000..587e0aa7 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/toolkit/cosense.html @@ -0,0 +1,724 @@ + + + + + + cosense3d.dataset.toolkit.cosense — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.dataset.toolkit.cosense

+import copy
+import glob
+import os
+import pickle
+import random
+
+import torch
+import tqdm
+import yaml
+
+import numpy as np
+from cosense3d.utils.misc import load_json, save_json
+from cosense3d.utils import pclib
+from cosense3d.ops.utils import points_in_boxes_cpu
+from cosense3d.dataset.toolkit import register_pcds
+
+
+type_sustech2cosense = {
+    'Car': 'vehicle.car',
+    'Van': 'vehicle.van',
+    'Truck': 'vehicle.truck',
+    'Bus': 'vehicle.bus',
+    'Tram': 'vehicle.tram',
+    'Unknown': 'unknown',
+    'BicycleRider': 'vehicle.cyclist',
+    'Bicyclerider': 'vehicle.cyclist',
+    'MotorcyleRider': 'vehicle.motorcycle',
+    'Pedestrian': 'human.pedestrian',
+    'HumanSitting': 'human.sitting',
+    'Scooterrider': 'vehicle.scooter'
+}
+type_cosense2sustech = {
+    v: k for k, v in type_sustech2cosense.items()
+}
+
+csColors = {
+    'vehicle.car': [0, 215, 255], #0
+    'vehicle.van': [246, 250, 112], #1
+    'vehicle.truck': [255, 132, 0], #2
+    'vehicle.bus': [0, 223, 162], #3
+    'vehicle.tram': [0, 121, 255], #4
+    'vehicle.motorcycle': [255, 0, 96], #5
+    'vehicle.cyclist': [244, 35, 232], #6
+    'vehicle.scooter': [227, 132, 255], #7
+    'vehicle.other': [180, 254, 152], #8
+    'human.pedestrian': [220, 20, 60], #9
+    'human.wheelchair': [134, 93, 255], #10
+    'human.sitting': [56, 229, 77], #11
+    'static.trafficcone': [255, 0, 0], #12
+    'static.barrowlist': [255, 50, 0], #13
+    'vehicle.tricyclist': [255, 50, 50], #14
+    'unknown': [255, 255, 255],
+}
+
+
+
[docs]class CoSenseDataConverter: + OBJ_LIST = [ + 'vehicle.car', #0 + 'vehicle.van', #1 + 'vehicle.truck', #2 + 'vehicle.bus', #3 + 'vehicle.tram', #4 + 'vehicle.motorcycle', #5 + 'vehicle.cyclist', #6 + 'vehicle.scooter', #7 + 'vehicle.other', #8 + 'human.pedestrian', #9 + 'human.wheelchair', #10 + 'human.sitting', #11 + 'static.trafficcone', #12 + 'static.barrowlist', #13 + 'vehicle.tricyclist', #13 + 'unknown', #14 + ] + OBJ_ID2NAME = {i: n for i, n in enumerate(OBJ_LIST)} + OBJ_NAME2ID = {n: i for i, n in enumerate(OBJ_LIST)} + + def __init__(self, data_path, meta_path, mode='all'): + self.data_path = data_path + self.meta_path = meta_path + self.meta = self.load_meta(meta_path, mode) + +
[docs] def update_from_sustech(self, sustech_path): + for scenario, sdict in self.meta.items(): + for frame, fdict in sdict.items(): + new_label_file = os.path.join( + sustech_path, + scenario, 'label', + frame + '.json' + ) + objects = self.obj_from_sustech(new_label_file) + # TODO the transformation from local to global + self.meta[scenario][frame]['meta']['bbx_center_global'] = objects + + save_json(sdict, os.path.join(self.meta_path, f"{scenario}.json"))
+ +
[docs] def to_sustech(self, out_dir=None): + # make out dirs + out_dir = os.path.join(self.data_path, '..', 'sustech_fmt') \ + if out_dir is None else out_dir + for s, sdict in self.meta.items(): + scenario_dir = os.path.join(out_dir, s) + os.makedirs(os.path.join(scenario_dir, 'lidar'), exist_ok=True) + os.makedirs(os.path.join(scenario_dir, 'label'), exist_ok=True) + for f, fdict in tqdm.tqdm(sdict.items()): + bbx_global_center = np.array(fdict['meta']['bbx_center_global']) + # bbx_global_corner = boxes_to_corners_3d(bbx_global_center[:, 2:]) + lidars = [] + for a, adict in fdict['agents'].items(): + for l, ldict in adict['lidar'].items(): + lidar_pose = ldict['pose'] + filename = ldict['filename'].replace('\\', '/') + # TODO rotate points and bbxs + pcd = pclib.load_pcd(os.path.join(self.data_path, filename)) + points = np.concatenate([pcd['xyz'], pcd['intensity'].reshape(-1, 1)], axis=-1) + lidars.append(points.astype(np.float32)) + lidars = np.concatenate(lidars, axis=0) + lidars.tofile(os.path.join(out_dir, scenario_dir, 'lidar', f"{f}.bin")) + # write label file + self.obj_to_sustech( + bbx_global_center, + os.path.join(out_dir, scenario_dir, 'label', f"{f}.json") + )
+ +
[docs] def to_opv2v(self, out_dir=None): + # make out dirs + out_dir = os.path.join(self.data_path, '..', 'opv2v_fmt') \ + if out_dir is None else out_dir + os.makedirs(out_dir, exist_ok=True) + for s, sdict in self.meta.items(): + scenario_dir = os.path.join(out_dir, s) + os.makedirs(scenario_dir, exist_ok=True) + for f, fdict in tqdm.tqdm(sdict.items()): + bbx_global_center = np.array(fdict['meta']['bbx_center_global']) + # bbx_global_corner = boxes_to_corners_3d(bbx_global_center[:, 2:]) + for a, adict in fdict['agents'].items(): + agent_dir = os.path.join(scenario_dir, a) + if not os.path.exists(agent_dir): + os.makedirs(agent_dir) + for l, ldict in adict['lidar'].items(): + lidar_pose = ldict['pose'] + filename = ldict['filename'].replace('\\', '/') + # TODO rotate points and bbxs + pclib.lidar_bin2bin( + os.path.join(self.data_path, filename), + os.path.join(agent_dir, f + '.bin') + ) + # write label file + self.obj_to_opv2v(bbx_global_center, lidar_pose, + os.path.join(agent_dir, f + '.yaml'))
+ +
[docs] def to_kitti(self, out_dir=None): + from cosense3d.dataset.toolkit.kitti import type_cosense2kitti + split = { + # 'train': ['measurement4_0'], + # 'val': ['measurement4_1'], + 'test': sorted(self.meta.keys()), + } + # make out dirs + out_dir = os.path.join(self.data_path, '..', 'kitti_test') \ + if out_dir is None else out_dir + os.makedirs(os.path.join(out_dir, 'ImageSets'), exist_ok=True) + for dir_name in ['velodyne', 'image_2', 'label_2', 'calib']: + os.makedirs(os.path.join(out_dir, 'training', dir_name), exist_ok=True) + os.makedirs(os.path.join(out_dir, 'validating', dir_name), exist_ok=True) + os.makedirs(os.path.join(out_dir, 'testing', dir_name), exist_ok=True) + # create split files + for sp, seqs in split.items(): + with open(os.path.join(out_dir, 'ImageSets', f"{sp}.txt"), 'w') as fh: + frames = [] + for seq in seqs: + cur_frames = sorted(self.meta[seq].keys()) + cur_frames = [seq.split('_')[0][-1] + f[1:] for f in cur_frames] + frames.extend(cur_frames) + fh.write("\n".join(sorted(frames))) + for s, sdict in self.meta.items(): + if s not in split[sp] or int(s.split('_')[1]) < 10: + continue + print(sp, s) + scenario_dir = os.path.join(out_dir, s) + cur_split = {'train': 'training', 'val': 'validating', 'test': 'testing'}[sp] + # os.makedirs(scenario_dir, exist_ok=True) + # sdict = {k: sdict[k] for k in sorted(list(sdict.keys()))[:10]} + for f, fdict in tqdm.tqdm(sdict.items()): + ##### save lidar ###### + points = [] + for ai, adict in fdict['agents'].items(): + for li, ldict in adict['lidar'].items(): + lidar_file = os.path.join(self.data_path, ldict['filename']) + points.append( + np.fromfile(lidar_file, np.float32).reshape(-1, 4) + ) + points = np.concatenate(points, axis=0) + lidar_out_file = os.path.join( + out_dir, cur_split, 'velodyne', f"{s.split('_')[0][-1] + f[1:]}.bin" + ) + points.tofile(lidar_out_file) + ######## save label ####### + label = fdict['meta']['bbx_center_global'] + label_out_file = os.path.join( + out_dir, cur_split, 'label_2', f"{s.split('_')[0][-1] + f[1:]}.txt" + ) + with open(label_out_file, 'w') as fh: + for l in label: + # kitti label format + cosense_type = self.OBJ_ID2NAME[l[1]] + type = [type_cosense2kitti[cosense_type]] + + trancated = ['0'] + occluded = ['0'] + alpha = [f"{np.arctan2(l[3], l[2]):.2f}"] + bbox = ['0'] * 4 + dimensions = [f"{l[x]:.2f}" for x in [7, 6, 5]] # hwl + l[4] -= l[7] / 2 + location = [f"{l[x]:.2f}" for x in [2, 3, 4]] # in cam coor + rotation_y = [f"{-l[10] - np.pi/2:.2f}"] + ls = type + trancated + occluded + alpha + bbox + dimensions +\ + location + rotation_y + line = " ".join(ls) + fh.write(line) + fh.write('\n')
+ +
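to_kitti writes one object per line in the standard KITTI label layout: type, truncated, occluded, alpha, four 2D-bbox values (written as 0 by this converter), dimensions as h w l, location x y z in camera coordinates, and rotation_y. For illustration, a line produced by the loop above could look like (values made up):

Car 0 0 1.57 0 0 0 0 1.60 1.80 4.20 2.10 1.20 15.30 -1.65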
[docs] def obj_from_sustech(self, label_file): + if not os.path.exists(label_file): + return [] + objs = load_json(label_file) + bboxes = [] + for obj_dict in objs: + obj_id = obj_dict['obj_id'] + obj_type = obj_dict['obj_type'] + position = obj_dict['psr']['position'] + rotation = obj_dict['psr']['rotation'] + scale = obj_dict['psr']['scale'] + + cosense_type_name = type_sustech2cosense[obj_type] + obj_type_id = self.OBJ_NAME2ID[cosense_type_name] + bbx_center = [ + float(obj_id), + float(obj_type_id), + position['x'], + position['y'], + position['z'], + scale['x'], + scale['y'], + scale['z'], + rotation['x'], + rotation['y'], + rotation['z'], + ] + bboxes.append(bbx_center) + return bboxes
+ +
[docs] def obj_to_sustech(self, cosense_objs, sustech_file): + sustech_objs = [] + if len(cosense_objs.shape) == 0: + save_json(sustech_objs, sustech_file) + return + for obj in cosense_objs: + obj_type = type_cosense2sustech[ + self.OBJ_ID2NAME[int(obj[1])] + ] + sustech_objs.append( + { + 'obj_id': obj[0], + 'obj_type': obj_type, + 'psr': { + 'position': { + 'x': obj[2], + 'y': obj[3], + 'z': obj[4] + }, + 'scale': { + 'x': obj[5], + 'y': obj[6], + 'z': obj[7] + }, + 'rotation': { + 'x': obj[8], + 'y': obj[9], + 'z': obj[10] + } + } + } + ) + save_json(sustech_objs, sustech_file)
+ +
[docs] def obj_to_opv2v(self, bbxs, pose, out_file, timestamp=None): + vehicles = {} + # only keep car, van, bus, truck + bbxs = bbxs[bbxs[:, 1] < 4] + for bbx in bbxs: + obj_id = int(bbx[0]) + obj_type = int(bbx[1]) + # process the information to the opv2v format + location = bbx[2:5] + angle = bbx[[8, 10, 9]] / np.pi * 180 + angle[[0, 2]] *= -1 + extent = bbx[5:8] / 2 + + vehicles[int(obj_id)] = { + 'angle': angle.tolist(), + 'center': [0.0] * 3, + 'extent': extent.tolist(), + 'location': location.tolist(), + 'speed': 0, + 'type': obj_type + } + if isinstance(pose, np.ndarray): + pose = pose.tolist() + yaml_dict = { + 'lidar_pose': pose, + 'true_ego_pos': pose, + 'ego_speed': 0, + 'vehicles': vehicles + } + if timestamp is not None: + # timestamp for ouster is corrected by subtracting a systematic time offset (0.35s) + yaml_dict['timestamp'] = float(timestamp) + with open(out_file, 'w') as fh: + yaml.dump(yaml_dict, fh, default_flow_style=False)
+ + +
[docs] @staticmethod + def load_meta(meta_path, mode): + if mode == 'all': + scenario_meta_files = sorted(glob.glob(meta_path + "/*.json")) + else: + scenario_meta_files = [] + with open(os.path.join(meta_path, f'{mode}.txt'), 'r') as fh: + for line in fh.readlines(): + scenario_meta_files.append(os.path.join(meta_path, f'{line.strip()}.json')) + + meta_dict = {} + + for f in scenario_meta_files: + scenario = os.path.basename(f)[:-5] + meta_dict[scenario] = load_json(f) + + return meta_dict
+ +
[docs] @staticmethod + def cal_vbbx_mean_dim(meta): + """Calculate mean dimensions of four-wheel vehicles""" + dimensions = [] + for s, sdict in meta.items(): + for f, fdict in sdict.items(): + bbx = np.array(fdict['meta']['bbx_center_global']) + dimensions.append(bbx[bbx[:, 5] > 2, 5:8]) + print(np.concatenate(dimensions, axis=0).mean(axis=0))
+ +
[docs] @staticmethod + def fdict_template(): + return { + 'agents': { + '0': { + 'type': None, + 'pose': [0.0] * 6, + 'time': None, # timestamp for the current vehicle pose + 'lidar': { + '0': { + 'pose': [0.0] * 6, + 'time': None, # timestamp for the current lidar triggering round + 'filename': None + } + }, + 'camera': {}, # TODO API for cameras + } + }, + # no cooperation needed, take lidar as global for each frame + 'meta': {'bbx_center_global': []} + }
+ +
[docs] @staticmethod + def add_cam_to_fdict(fdict, agent_id, cam_id, filenames, intrinsic, extrinsic, **kwargs): + if agent_id not in fdict['agents']: + adict = CoSenseDataConverter.fdict_template()['agents'][0] + fdict['agents'][agent_id] = adict + kwargs.update({ + 'filenames': filenames, + 'intrinsic': intrinsic, + 'extrinsic': extrinsic + }) + fdict['agents'][agent_id]['camera'][cam_id] = kwargs
+ +
[docs] @staticmethod + def update_frame_bbx(fdict, bbx): + fdict['meta']['bbx_center_global'] = bbx
+ +
[docs] @staticmethod + def update_agent(fdict, + agent_id, + agent_type=None, + agent_pose=None, + agent_time=None, + **kwargs): + if agent_id not in fdict['agents']: + fdict['agents'][agent_id] = CoSenseDataConverter.fdict_template()['agents']['0'] + if agent_type is not None: + fdict['agents'][agent_id]['type'] = agent_type + if agent_pose is not None: + fdict['agents'][agent_id]['pose'] = agent_pose + if agent_time is not None: + fdict['agents'][agent_id]['time'] = agent_time + for k, v in kwargs.items(): + fdict['agents'][agent_id][k] = v
+ +
[docs] @staticmethod + def update_agent_lidar(fdict, + agent_id, + lidar_id, + lidar_pose=None, + lidar_time=None, + lidar_file=None): + if agent_id not in fdict['agents']: + fdict['agents'][agent_id] = CoSenseDataConverter.fdict_template()['agents']['0'] + if lidar_pose is not None: + fdict['agents'][agent_id]['lidar'][lidar_id]['pose'] = lidar_pose + if lidar_time is not None: + fdict['agents'][agent_id]['lidar'][lidar_id]['time'] = lidar_time + if lidar_file is not None: + fdict['agents'][agent_id]['lidar'][lidar_id]['filename'] = lidar_file
+ +
[docs] @staticmethod + def update_agent_gt_boxes(fdict, + agent_id, + gt_boxes): + if agent_id not in fdict['agents']: + fdict['agents'][agent_id] = CoSenseDataConverter.fdict_template()['agents']['0'] + fdict['agents'][agent_id]['gt_boxes'] = gt_boxes
+ +
[docs] @staticmethod + def remove_lidar_info(fdict, agent_id): + fdict['agents'][agent_id]['lidar'] = {}
+ +
[docs] @staticmethod + def supervison_full_to_sparse(meta_dict, out_path, lidar_range=None, det_r=None, + num_box_per_frame=None, num_box_total=None, label_ratio=None): + def select_box(bboxes, cls_idx, num): + bboxes = np.array(bboxes) + bboxes_car = bboxes[bboxes[:, 1] == cls_idx] + if lidar_range is not None: + mask = (bboxes_car[:, 2] > lidar_range[0]) & (bboxes_car[:, 2] < lidar_range[3]) & \ + (bboxes_car[:, 3] > lidar_range[1]) & (bboxes_car[:, 3] < lidar_range[4]) & \ + (bboxes_car[:, 4] > lidar_range[2]) & (bboxes_car[:, 4] < lidar_range[5]) + else: + mask = np.linalg.norm(bboxes_car[:, 2:4], axis=1) < det_r + bboxes_car = bboxes_car[mask] + if len(bboxes_car) == 0: + return None + choice = np.random.choice(np.array(len(bboxes_car)), num) + bboxes_car = bboxes_car[choice].reshape(num, 11).tolist() + return bboxes_car + + if num_box_per_frame is not None: + for s, sdict in meta_dict.items(): + sdict_out = copy.deepcopy(sdict) + for f, fdict in sdict.items(): + bboxes = fdict['meta']['bbx_center_global'] + choice = select_box(bboxes, 0, 1) + if choice is None: + sdict_out.pop(f) + else: + sdict_out[f]['meta']['bbx_center_global'] = choice + save_json(sdict_out, os.path.join(out_path, f'{s}.json')) + elif num_box_total is not None: + samples = [] + # find frames with car labels + for s, sdict in meta_dict.items(): + for f, fdict in sdict.items(): + bboxes = fdict['meta']['bbx_center_global'] + classes = [int(b[1]) for b in bboxes] + if 0 in classes: + samples.append((s, f)) + # select given number of frames + samples = random.choices(samples, k=num_box_total) + sdict_out = {} + for sample in samples: + fdict = copy.deepcopy(meta_dict[sample[0]][sample[1]]) + bboxes = fdict['meta']['bbx_center_global'] + fdict['meta']['bbx_center_global'] = select_box(bboxes, 0, 1) + sdict_out[sample[1]] = fdict + save_json(sdict_out, os.path.join(out_path, 'train.json')) + with open(os.path.join(out_path, 'train.txt'), 'w') as fh: + fh.write('train')
+ +
[docs] @staticmethod + def global_boxes_to_local(meta_dict, data_path, meta_path): + samples = {i: {'box': [], 'points': []} for i in CoSenseDataConverter.OBJ_ID2NAME.keys()} + for s, sdict in meta_dict.items(): + for f, fdict in tqdm.tqdm(meta_dict[s].items()): + global_boxes = fdict['meta']['bbx_center_global'] + global_boxes = np.array(global_boxes) + for a, adict in fdict['agents'].items(): + for l, ldict in adict['lidar'].items(): + lidar = pclib.load_pcd(os.path.join(data_path, ldict['filename'])) + box_cls = global_boxes[:, 1] + res = points_in_boxes_cpu(lidar['xyz'], global_boxes[:, [2, 3, 4, 5, 6, 7, 10]]) + box_n_pts = res.sum(axis=1) + valid = box_n_pts > 10 + boxes = global_boxes[valid] + box_cls = box_cls[valid] + pts_idx_of_boxes = res[valid] + CoSenseDataConverter.update_agent_gt_boxes(fdict, a, boxes.tolist()) + + for i, box in enumerate(boxes): + cls = box[1] + points = lidar['xyz'][pts_idx_of_boxes[i].astype(bool)] + intensity = lidar['intensity'][pts_idx_of_boxes[i].astype(bool)] + # transform box and points to box coodiante + points = points - box[2:5].reshape(1, 3) + # points will be modified during transformation, so make a copy here + new_points = np.copy(points) + st = np.sin(-box[-1]) + ct = np.cos(-box[-1]) + points[:, 0] = new_points[:, 0] * ct - new_points[:, 1] * st + points[:, 1] = new_points[:, 0] * st + new_points[:, 1] * ct + points = np.concatenate([points, intensity[:, None]], axis=1) + samples[cls]['box'].append(box[5:8]) + samples[cls]['points'].append(points) + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # box_vis = np.array([[0]*3 + box[5:8].tolist() + [0]]) + # ax = plt.figure(figsize=(10, 10)).add_subplot(1, 1, 1) + # draw_points_boxes_plt( + # ax=ax, + # pc_range=5, + # points=points, + # boxes_gt=box_vis, + # filename='/home/yuan/Downloads/tmp.png' + # ) + + + save_json(sdict, os.path.join(meta_path, f'{s}.json')) + for sample_id, content in samples.items(): + if len(content['box']) == 0: + continue + sample_name = CoSenseDataConverter.OBJ_ID2NAME[sample_id] + with open(os.path.join(meta_path, f'{sample_name}.pickle'), 'wb') as file: + pickle.dump(content, file)
+ +
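global_boxes_to_local moves each object's points into the box's own coordinate frame: translate by the box centre, then rotate by the negative yaw around z. The same transform as a compact sketch (the box and points below are made up):

import numpy as np

box = np.array([10.0, 5.0, 0.5, 4.2, 1.8, 1.6, 0.3])    # x, y, z, l, w, h, yaw
pts = np.random.rand(100, 3) * 2.0 + box[:3]             # hypothetical points near the box centre

local = pts - box[:3]                                    # translate to the box centre
c, s = np.cos(-box[6]), np.sin(-box[6])                  # rotate by -yaw around z
x, y = local[:, 0].copy(), local[:, 1].copy()
local[:, 0] = c * x - s * y
local[:, 1] = s * x + c * y
# local is now expressed in the box frame, bounded (up to label noise) by the extents l, w, h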
[docs] @staticmethod + def parse_global_bbox_velo(meta_dict, data_path, meta_path): + for s, sdict in meta_dict.items(): + for f, fdict in sdict.items(): + cur_global_boxes = fdict['meta']['bbx_center_global'] + # cur_global_boxes = {box[0]: box[1:] for box in cur_global_boxes} + velos = [] + next_frame = f'{int(f) + 1:06d}' + last_frame = f'{int(f) - 1:06d}' + next_global_boxes = {} + prev_global_boxes = {} + if next_frame in sdict: + next_global_boxes = sdict[next_frame]['meta']['bbx_center_global'] + next_global_boxes = {box[0]: box[1:] for box in next_global_boxes} + if last_frame in sdict: + prev_global_boxes = sdict[last_frame]['meta']['bbx_center_global'] + prev_global_boxes = {box[0]: box[1:] for box in prev_global_boxes} + + for box_ in cur_global_boxes: + box_id = box_[0] + box = box_[1:] + if box_id in next_global_boxes: + velo = [(next_global_boxes[box_id][1] - box[1]) * 10, # m/s + (next_global_boxes[box_id][2] - box[2]) * 10,] + elif box_id in prev_global_boxes: + velo = [(box[1] - prev_global_boxes[box_id][1]) * 10, + (box[2] - prev_global_boxes[box_id][2]) * 10] + else: + velo = [0., 0.] + + velos.append(velo) + fdict['meta']['bbx_velo_global'] = velos + + save_json(sdict, os.path.join(meta_path, f'{s}.json'))
+ + +
[docs] @staticmethod + def draw_sample_distributions(meta_path): + """ + Draw distribution of the number of observation points for each sample category. + + :param meta_path: path contains pickle files of object samples + :return: + """ + import matplotlib.pyplot as plt + files = glob.glob(os.path.join(meta_path, '*.pickle')) + for f in files: + with open(f, 'rb') as file: + samples = pickle.load(file) + n_points = np.array([min(len(points), 500) for points in samples['points']]) + plt.hist(n_points, bins=10, density=True, alpha=0.6, label=os.path.basename(f)[:-7]) + plt.title(os.path.basename(f)[:-7]) + # plt.legend() + plt.savefig(os.path.join(meta_path, f'{os.path.basename(f)[:-7]}.png')) + plt.close()
+ + + +if __name__=="__main__": + cosense3d = CoSenseDataConverter( + "/koko/LUMPI/lumpi_selected/data", + "/koko/LUMPI/lumpi_selected/meta", + 'all' + ) + # cosense3d.to_kitti("/koko/LUMPI/kitti_test") + # cosense3d.to_sustech("/koko/LUMPI/lumpi_selected_sustech") + # cosense3d.to_opv2v("/media/hdd/yuan/koko/data/LUMPI/opv2v_fmt") + # cosense3d.update_from_sustech("/koko/LUMPI/sustech_fmt") + # cosense.supervison_full_to_sparse(cosense.meta, + # '/koko/cosense3d/kitti-sparse-num534', + # lidar_range=[-100, -40, -3.5, 100, 40, 3], + # num_box_total=534) + # cosense.global_boxes_to_local(cosense.meta, cosense.data_path, cosense.meta_path) + # cosense.update_from_sustech('/koko/LUMPI/sustech_fmt') + # cosense.parse_global_bbox_velo(cosense.meta, cosense.data_path, cosense.meta_path) + # cosense.draw_sample_distributions(cosense.meta_path) +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/toolkit/dairv2x.html b/docs/_build/html/_modules/cosense3d/dataset/toolkit/dairv2x.html new file mode 100644 index 00000000..d481731a --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/toolkit/dairv2x.html @@ -0,0 +1,900 @@ + + + + + + cosense3d.dataset.toolkit.dairv2x — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.dataset.toolkit.dairv2x

+import copy
+import glob
+import math
+import os
+
+import matplotlib.pyplot as plt
+import tqdm
+import numpy as np
+import open3d as o3d
+from scipy.optimize import linear_sum_assignment
+
+from cosense3d.utils import pclib, vislib, box_utils
+from cosense3d.utils.misc import load_json, save_json
+from cosense3d.utils.box_utils import corners_to_boxes_3d, transform_boxes_3d
+from cosense3d.dataset.toolkit import register_pcds
+from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs
+from cosense3d.ops.utils import points_in_boxes_cpu
+from cosense3d.utils.pcdio import point_cloud_from_path
+from cosense3d.utils.vislib import o3d_draw_frame_data, \
+    o3d_draw_agent_data, o3d_draw_pcds_bbxs
+
+global_time_offset = 1.62616 * 1e9
+
+
+
[docs]def calib_to_tf_matrix(calib_file): + calib = load_json(calib_file) + if 'transform' in calib: + tf = calib['transform'] + else: + tf = calib + tf_matrix = np.eye(4) + tf_matrix[:3, :3] = np.array(tf['rotation']) + tf_matrix[:3, 3:] = np.array(tf['translation']) + if 'relative_error' in calib: + tf_matrix[0, 3] += calib['relative_error']['delta_x'] + tf_matrix[1, 3] += calib['relative_error']['delta_y'] + return tf_matrix
+ + +
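calib_to_tf_matrix turns a DAIR-V2X calib json (rotation matrix plus translation, with an optional relative_error offset) into a homogeneous 4x4 transform; chaining such matrices yields the lidar-to-world pose used by the converters below. A minimal sketch with identity calibrations (real values come from the calib files):

import numpy as np

def make_tf(rotation, translation):
    T = np.eye(4)
    T[:3, :3] = np.array(rotation)
    T[:3, 3] = np.array(translation).reshape(3)
    return T

tf_lidar2novatel = make_tf(np.eye(3), [0.0, 0.0, 1.5])    # hypothetical calibrations
tf_novatel2world = make_tf(np.eye(3), [100.0, 50.0, 0.0])
tf_lidar2world = tf_novatel2world @ tf_lidar2novatel      # same chaining as in convert_v2x_c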
[docs]def load_label(label_file): + labels = load_json(label_file) + bbxs_center = [] + bbxs_corner = [] + for l in labels: + obj_type = { + 'car': 'vehicle.car', + 'van': 'vehicle.van', + 'truck': 'vehicle.truck', + 'bus': 'vehicle.bus', + 'pedestrian': 'human.pedestrian', + 'trafficcone': 'static.trafficcone', + 'motorcyclist': 'vehicle.motorcycle', + 'cyclist': 'vehicle.cyclist', + 'tricyclist': 'vehicle.tricyclist', + 'barrowlist': 'static.barrowlist', + }[l.get('type', "car").lower()] + track_id = l.get('track_id', -1) + bbx = [ + int(track_id), + cs.OBJ_NAME2ID[obj_type], + l['3d_location']['x'], + l['3d_location']['y'], + l['3d_location']['z'], + l['3d_dimensions']['l'], + l['3d_dimensions']['w'], + l['3d_dimensions']['h'], + 0, + 0, + l['rotation'] + ] + bbxs_center.append([float(x) for x in bbx]) + if 'world_8_points' in l: + bbx_corner = np.array(l['world_8_points']) + bbx_corner = [bbx.tolist() for bbx in bbx_corner] + bbxs_corner.append(bbx_corner) + return bbxs_center, bbxs_corner
+ + +
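load_label returns boxes in the 11-value CoSense layout used throughout these converters: [track_id, class_id, x, y, z, l, w, h, roll, pitch, yaw], where roll and pitch are written as 0 by this DAIR-V2X loader. For example, a parsed car label (class_id 0) could look like (values made up):

[12.0, 0.0, 31.4, -2.7, -0.9, 4.5, 1.9, 1.6, 0.0, 0.0, 1.57]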
[docs]def load_info_to_dict(info_file): + infos = load_json(info_file) + info_dict = {} + for info in infos: + frame = os.path.basename(info['pointcloud_path'][:-4]) + info_dict[frame] = info + return info_dict
+ + +
[docs]def convert_v2x_c(root_dir, meta_out_dir): + cvi_path = "cooperative-vehicle-infrastructure" + infra_path = "infrastructure-side" + cav_path = "vehicle-side" + coop_path = "cooperative" + info_file = "data_info.json" + inf_lidar_path = "cooperative-vehicle-infrastructure-infrastructure-side-velodyne" + cav_lidar_path = "cooperative-vehicle-infrastructure-vehicle-side-velodyne" + new_label_path = "DAIR-V2X-C_Complemented_Anno" + inf_info_file = os.path.join(root_dir, cvi_path, infra_path, info_file) + inf_info = load_info_to_dict(inf_info_file) + veh_info_file = os.path.join(root_dir, cvi_path, cav_path, info_file) + veh_info = load_info_to_dict(veh_info_file) + frame_pairs = load_json(os.path.join(root_dir, cvi_path, coop_path, info_file)) + + meta_dict = {} + veh_frames = [] + inf_frames = [] + offsets = [] + for pair in frame_pairs: + veh_frame = os.path.basename(pair['vehicle_pointcloud_path'][:-4]) + inf_frame = os.path.basename(pair['infrastructure_pointcloud_path'][:-4]) + label_frame = os.path.basename(pair['cooperative_label_path'][:-5]) + assert veh_frame == label_frame + veh_frames.append(veh_frame) + inf_frames.append(inf_frame) + offsets.append(pair['system_error_offset']) + + # load all re-annotated samples + train = load_json(os.path.join(root_dir, new_label_path, 'train.json')) + val = load_json(os.path.join(root_dir, new_label_path, 'val.json')) + split = { + 'train': train, + # 'test': val + } + + for sp, frames in split.items(): + for frame in tqdm.tqdm(frames): + cur_veh_info = veh_info[frame] + scenario = cur_veh_info['batch_id'] + # processing vehicle meta + tf_novatel2world = calib_to_tf_matrix( + os.path.join(root_dir, cvi_path, cav_path, + cur_veh_info['calib_novatel_to_world_path']) + ) + tf_lidar2novatel = calib_to_tf_matrix( + os.path.join(root_dir, cvi_path, cav_path, + cur_veh_info['calib_lidar_to_novatel_path']) + ) + tf_lidar2world = tf_novatel2world @ tf_lidar2novatel + veh_lidar_pose = pclib.tf2pose(tf_lidar2world) + veh_pose = pclib.tf2pose(tf_novatel2world) + veh_lidar_time = float(cur_veh_info['pointcloud_timestamp']) * 1e-6 + veh_lidar_file = os.path.join(cav_lidar_path, frame + '.pcd') + veh_bbxs_center, _ = load_label( + os.path.join(root_dir, + f"{new_label_path}/new_labels/vehicle-side_label/lidar", + frame + '.json' + ) + ) + + # process infra info + cur_inf_frame = inf_frames[veh_frames.index(frame)] + cur_inf_info = inf_info[cur_inf_frame] + tf_virtuallidar2world = calib_to_tf_matrix( + os.path.join(root_dir, cvi_path, infra_path, + cur_inf_info['calib_virtuallidar_to_world_path']) + ) + + inf_lidar_time = float(cur_inf_info['pointcloud_timestamp']) * 1e-6 + inf_lidar_file = os.path.join(inf_lidar_path, cur_inf_frame + ".pcd") + + # inf_lidar_pose = pclib.tf2pose(tf_infra2ego) + inf_lidar_pose = pclib.tf2pose(tf_virtuallidar2world) + inf_label_path = os.path.join(root_dir, + f"{cvi_path}/infrastructure-side/label/virtuallidar", + cur_inf_frame + '.json') + inf_bbxs_center, _ = load_label(inf_label_path) + + # process global meta + coop_label_path = os.path.join(root_dir, + f"{new_label_path}/new_labels/cooperative_label/label_world", + frame + '.json' + ) + world_bbxs_center, world_bbxs_corner = load_label(coop_label_path) + coop_bbxs_corner = pclib.rotate_box_corners_with_tf_np( + np.array(world_bbxs_corner), np.linalg.inv(tf_lidar2world) + ) + coop_bbxs_center = np.concatenate( + [np.array(world_bbxs_center)[:, :2], + corners_to_boxes_3d(coop_bbxs_corner)], + axis=1 + ).tolist() + + # if not os.path.exists(inf_label_path): + # 
print('infra label not found.') + # inf_bbxs_center = pclib.rotate_box_corners_with_tf_np( + # np.array(world_bbxs_corner), np.linalg.inv(tf_virtuallidar2world) + # ) + # inf_bbxs_center = np.concatenate( + # [np.array(world_bbxs_center)[:, :2], + # corners_to_boxes_3d(inf_bbxs_center)], + # axis=1 + # ).tolist() + + + + # pcd = point_cloud_from_path(os.path.join(root_dir, veh_lidar_file)) + # points = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1) + # o3d_draw_pcds_bbxs([points], [np.array(veh_bbxs_center)]) + + # construct meta dict + fdict = cs.fdict_template() + # add cav lidar meta + cs.update_agent(fdict, + agent_id='0', + agent_type='cav', + agent_pose=veh_pose, + gt_boxes=veh_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='0', + lidar_id='0', + lidar_pose=veh_lidar_pose, + lidar_time=veh_lidar_time, + lidar_file=veh_lidar_file) + # add infra lidar meta + cs.update_agent(fdict, + agent_id='1', + agent_type='infra', + agent_pose=inf_lidar_pose, + gt_boxes=inf_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='1', + lidar_id='0', + lidar_pose=inf_lidar_pose, + lidar_time=inf_lidar_time, + lidar_file=inf_lidar_file) + cs.update_frame_bbx(fdict, + coop_bbxs_center + )# in global coords + fdict['meta']['ego_id'] = '0' + fdict['meta']['ego_lidar_pose'] = veh_lidar_pose + if scenario not in meta_dict: + meta_dict[scenario] = {} + meta_dict[scenario][frame] = fdict + # save meta + os.makedirs(meta_out_dir, exist_ok=True) + for scenario, meta in meta_dict.items(): + meta_file = os.path.join(meta_out_dir, f'{scenario}.json') + save_json(meta, meta_file) + with open(os.path.join(meta_out_dir, f'{sp}.txt'), 'w') as fh: + fh.write('\n'.join(list(meta_dict.keys())))
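A hedged usage sketch for the converter above; the paths are placeholders and mirror the commented examples in the __main__ block at the end of this module:

# Convert DAIR-V2X-C with the complemented annotations into CoSense3D meta files.
# root_dir must contain "cooperative-vehicle-infrastructure" and
# "DAIR-V2X-C_Complemented_Anno"; meta_out_dir receives one <scenario>.json per scene
# plus a <split>.txt listing the scenario names.
convert_v2x_c("/path/to/DAIR-V2X", "/path/to/cosense3d/dairv2x")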
+ + +
[docs]def convert_v2x_seq(root_dir, meta_out_dir): + split = "test" + inf_info_file = os.path.join(root_dir, "infrastructure-side/data_info.json") + inf_info = load_info_to_dict(inf_info_file) + veh_info_file = os.path.join(root_dir, "vehicle-side/data_info.json") + veh_info = load_info_to_dict(veh_info_file) + frame_pairs = load_json(os.path.join(root_dir, "cooperative/data_info.json")) + + meta_dict = {} + for pdict in frame_pairs: + scenario = pdict['vehicle_sequence'] + ############################################################# + # processing vehicle meta + cur_veh_info = veh_info[pdict['vehicle_frame']] + tf_novatel2world = calib_to_tf_matrix( + os.path.join(root_dir, "vehicle-side", cur_veh_info['calib_novatel_to_world_path']) + ) + tf_lidar2novatel = calib_to_tf_matrix( + os.path.join(root_dir, "vehicle-side", cur_veh_info['calib_lidar_to_novatel_path']) + ) + tf_lidar2world = tf_novatel2world @ tf_lidar2novatel + veh_lidar_pose = pclib.tf2pose(tf_lidar2world) + veh_pose = pclib.tf2pose(tf_novatel2world) + + veh_lidar_time = float(cur_veh_info['pointcloud_timestamp']) * 1e-6 + veh_lidar_file = os.path.join("vehicle-side", cur_veh_info['pointcloud_path']) + veh_bbxs_center, _ = load_label( + os.path.join(root_dir, "vehicle-side", cur_veh_info['label_lidar_std_path']) + ) + + ############################################################### + # process infra info + cur_inf_info = inf_info[pdict['infrastructure_frame']] + tf_virtuallidar2world = calib_to_tf_matrix( + os.path.join(root_dir, "infrastructure-side", cur_inf_info['calib_virtuallidar_to_world_path']) + ) + inf_lidar_pose = pclib.tf2pose(tf_virtuallidar2world) + inf_lidar_time = float(cur_inf_info['pointcloud_timestamp']) * 1e-6 + inf_lidar_file = os.path.join("infrastructure-side", cur_inf_info['pointcloud_path']) + inf_bbxs_center, _ = load_label( + os.path.join(root_dir, "infrastructure-side", cur_inf_info['label_lidar_std_path']) + ) + inf_bbxs_center = [] + + ############################################################### + # process global meta + coop_bbxs_center, _ = load_label( + os.path.join(root_dir, "cooperative", "label", f"{pdict['vehicle_frame']}.json") + ) + + ############################################################### + # construct meta dict + fdict = cs.fdict_template() + # add cav lidar meta + cs.update_agent(fdict, + agent_id='0', + agent_type='cav', + agent_pose=veh_pose, + gt_boxes=veh_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='0', + lidar_id='0', + lidar_pose=veh_lidar_pose, + lidar_time=veh_lidar_time, + lidar_file=veh_lidar_file) + # add infra lidar meta + cs.update_agent(fdict, + agent_id='1', + agent_type='infra', + agent_pose=inf_lidar_pose, + gt_boxes=inf_bbxs_center) + cs.update_agent_lidar(fdict, + agent_id='1', + lidar_id='0', + lidar_pose=inf_lidar_pose, + lidar_time=inf_lidar_time, + lidar_file=inf_lidar_file) + cs.update_frame_bbx(fdict, + coop_bbxs_center + )# in global coords + fdict['meta']['ego_id'] = '0' + fdict['meta']['ego_lidar_pose'] = veh_lidar_pose + if scenario not in meta_dict: + meta_dict[scenario] = {} + meta_dict[scenario][pdict['vehicle_frame']] = fdict + # save meta + os.makedirs(meta_out_dir, exist_ok=True) + for scenario, meta in meta_dict.items(): + meta_file = os.path.join(meta_out_dir, f'{scenario}.json') + save_json(meta, meta_file) + with open(os.path.join(meta_out_dir, f'{split}.txt'), 'w') as fh: + fh.write('\n'.join(list(meta_dict.keys())))
+ + +
[docs]def parse_static_pcd(adict, root_dir): + pose = pclib.pose_to_transformation(adict['lidar']['0']['pose']) + pcd = o3d.io.read_point_cloud(os.path.join(root_dir, adict['lidar']['0']['filename'])) + points = np.array(pcd.points) + boxes = np.array(adict['gt_boxes'])[:, [2, 3, 4, 5, 6, 7, 10]] + in_box_mask = points_in_boxes_cpu(points, boxes).any(axis=0) + pcd.points = o3d.utility.Vector3dVector(points[np.logical_not(in_box_mask)]) + return pcd, pose
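parse_static_pcd removes every point that falls inside a labelled (potentially dynamic) object box so that only static structure is kept for registration. A hedged usage sketch with placeholder inputs:

adict = sdict[frame]['agents']['0']          # placeholder: one agent dict from a loaded meta file
pcd, pose = parse_static_pcd(adict, root_dir="/path/to/DAIR-V2X")
pcd_world = copy.copy(pcd).transform(pose)   # pose is the 4x4 lidar-to-world matrix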
+ + +
[docs]def register_sequence(sdict, frames, root_dir, ignore_ids=[], vis=False): + agents_reg = {} + for f in tqdm.tqdm(frames): + # print(f) + fdict = sdict[f] + for ai, adict in fdict['agents'].items(): + if ai in ignore_ids: + continue + pcd, pose = parse_static_pcd(adict, root_dir) + if ai not in agents_reg: + agents_reg[ai] = { + 'init_pose': pose, + 'last_pose_old': pose, + 'last_pose_new': pose, + 'last_pcd': pcd, + 'pcd_merged': copy.copy(pcd).transform(pose), + 'last_frame': f, + 'sequence_info': {f: {'lidar_pose': pose}}} + else: + source_pcd = pcd + target_pcd = agents_reg[ai]['last_pcd'] + tf_init = np.linalg.inv(agents_reg[ai]['last_pose_old']) @ pose + tf_out = register_pcds(source_pcd, target_pcd, tf_init, [0.2], visualize=vis) + pose_new = agents_reg[ai]['last_pose_new'] @ tf_out + pcd_merged = agents_reg[ai]['pcd_merged'] + pcd_transformed = copy.copy(source_pcd).transform(pose_new) + # if vis: + # pcd_transformed.paint_uniform_color([1, 0.706, 0]) + # pcd_merged.paint_uniform_color([0, 0.651, 0.929]) + # o3d.visualization.draw_geometries([pcd_merged, pcd_transformed]) + pcd_merged = pcd_merged + pcd_transformed + pcd_merged = pcd_merged.voxel_down_sample(voxel_size=0.1) + + agents_reg[ai]['last_pose_old'] = pose + agents_reg[ai]['last_pose_new'] = pose_new + agents_reg[ai]['last_pcd'] = pcd + agents_reg[ai]['pcd_merged'] = pcd_merged + agents_reg[ai]['sequence_info'][f] = {'lidar_pose': pose} + + return agents_reg
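The pose update in register_sequence chains the GNSS prior with the registration correction: the initial guess is the relative motion implied by the recorded poses, register_pcds refines it against the previous scan, and the refined motion is appended to the previously corrected pose. A compact restatement of that bookkeeping (same variable names as above, assuming register_pcds returns the refined source-to-target transform):

# tf_init  = inv(last_pose_old) @ pose                               # motion predicted by recorded poses
# tf_out   = register_pcds(source_pcd, target_pcd, tf_init, [0.2])   # refined current -> previous transform
# pose_new = last_pose_new @ tf_out                                  # corrected lidar-to-world pose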
+ + +
[docs]def register_pcds_to_blocks(seq, sdict, root_dir, idx=0): + frames = sorted(sdict.keys()) + sub_seq = frames[:1] + cnt = 0 + for i, f in enumerate(frames[1:]): + if (i == len(frames) - 2 or int(f) - int(sub_seq[-1]) > 2): + if i == len(frames) - 2: + sub_seq.append(f) + if len(sub_seq) >= 8: + vis = False + agents_reg = register_sequence(sdict, sub_seq, root_dir, ['1'], vis) + pcd_merged = agents_reg['0']['pcd_merged'] + o3d.visualization.draw_geometries([pcd_merged]) + o3d.io.write_point_cloud(f"{root_dir}/agent0_seq{seq}_{cnt}.pcd", pcd_merged) + info_file = f"{root_dir}/agent0_seq{seq}_{cnt}.npy" + np.save(info_file, {k: v for k, v in agents_reg['0'].items() if 'pcd' not in k}, allow_pickle=True) + cnt += 1 + if not i == len(frames) - 2: + sub_seq = [f] + else: + sub_seq.append(f)
+ + +
[docs]def optimize_trajectory(seq, sdict, root_dir, out_meta_dir, ego_agent_id, idx, sub_idx): + """ + This function iterates over scenarios, for each scenario it does the following steps: + 1. register point clouds sequentially for each agent to get accurate trajectory of agents. + Before registration, the points belonging to the labeled objets with high dynamics are removed. + After registration of each sequence pair, the merged point cloud is down-sampled to save space. + 2. match the registered point clouds of different agents to get optimized relative poses. + 3. recover the relative pose to the world pose. + + Parameters + ---------- + meta_path: directory of meta files + root_dir: root dir of data + + Returns + ------- + meta: meta information with updated poses of agents + """ + info_file = f"{root_dir}/agent0_seq{seq}_{sub_idx}.npy" + ego_info = np.load(info_file, allow_pickle=True).item() + pcd_merged = o3d.io.read_point_cloud(f"{root_dir}/agent0_seq{seq}_{sub_idx}.pcd") + frames = sorted(ego_info['sequence_info'].keys()) + sub_seq_dict = {} + + infra_info = sdict[frames[0]]['agents']['1'] + pcd, pose = parse_static_pcd(infra_info, root_dir) + tf_init = pose + # o3d.visualization.draw_geometries([pcd_merged]) + tf_out = register_pcds(pcd, pcd_merged, tf_init, [1, 0.2], visualize=True) + pose = pclib.tf2pose(tf_out) + + for f in tqdm.tqdm(frames): + fdict = sdict[f] + fdict['agents']['1']['lidar']['0']['pose'] = pose + fdict['agents']['1']['pose'] = pose + + lidar_pose_new = ego_info['sequence_info'][f]['lidar_pose'] + lidar_pose_old = pclib.pose_to_transformation(fdict['agents'][ego_agent_id]['lidar']['0']['pose']) + # lidar_old2new = np.linalg.inv(lidar_pose_new) @ lidar_pose_old + vpose_to_lpose = np.linalg.inv(lidar_pose_old) @ pclib.pose_to_transformation(fdict['agents'][ego_agent_id]['pose']) + vpose_new = lidar_pose_new @ vpose_to_lpose + fdict['agents'][ego_agent_id]['pose'] = pclib.tf2pose(vpose_new) + fdict['agents'][ego_agent_id]['lidar']['0']['pose'] = pclib.tf2pose(lidar_pose_new) + sub_seq_dict[f] = fdict + if int(f) > 1002: + vis_pcd, vis_pose = parse_static_pcd(fdict['agents'][ego_agent_id], root_dir) + vis_pcd2, vis_pose2 = parse_static_pcd(fdict['agents']['1'], root_dir) + vis_pcd = vis_pcd.transform(lidar_pose_new) + vis_pcd2 = vis_pcd2.transform(vis_pose2) + + # o3d.visualization.draw_geometries([pcd_merged]) + corr = register_pcds(vis_pcd2, vis_pcd, np.eye(4), [1, 0.2], visualize=True) + vis_pose2 = corr @ vis_pcd2 + vis_pose2 = pclib.tf2pose(vis_pose2) + fdict['agents']['1']['lidar']['0']['pose'] = vis_pose2 + fdict['agents']['1']['pose'] = vis_pose2 + + # vis_pcd.paint_uniform_color([1, 0.706, 0]) + # vis_pcd2.paint_uniform_color([0, 0.651, 0.929]) + # o3d.visualization.draw_geometries([vis_pcd, vis_pcd2.transform(corr)]) + + save_json(sub_seq_dict, os.path.join(out_meta_dir, f"{seq}_{sub_idx}.json"))
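A hedged sketch of the intended call order for a single scenario (paths and the number of blocks are placeholders); optimize_poses below loops over meta files in the same way:

sdict = load_json("/path/to/meta/45.json")                 # one scenario meta file
seq = "45"
# step 1: build merged static point-cloud blocks and per-frame poses for the ego agent
register_pcds_to_blocks(seq, sdict, "/path/to/DAIR-V2X")
# steps 2-3: align the infrastructure scan to each block and rewrite the agent poses
n_blocks = 2                                               # number of agent0_seq45_*.npy files written above
for sub_idx in range(n_blocks):
    optimize_trajectory(seq, sdict, "/path/to/DAIR-V2X",
                        "/path/to/DAIR-V2X/meta", ego_agent_id='0',
                        idx=0, sub_idx=sub_idx)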
+ + +
[docs]def optimize_poses(meta_path): + mfiles = glob.glob(os.path.join(meta_path, '*.json'))[3:] + mfiles = ["/koko/cosense3d/dairv2x/45.json"] + for idx, mf in enumerate(mfiles): + sdict = load_json(mf) + seq = os.path.basename(mf)[:-5] + print('###########################', seq, len(sdict)) + + # register_pcds_to_blocks( + # seq, + # sdict, + # "/home/data/DAIR-V2X", + # idx + # ) + files = glob.glob(f"/home/data/DAIR-V2X/agent0_seq{seq}_*.npy") + for sub_idx in range(len(files)): + optimize_trajectory(seq, sdict, + "/home/data/DAIR-V2X", + "/home/data/DAIR-V2X/meta", + '0', + idx, + sub_idx=sub_idx + )
+ + +
[docs]def register_step_one(mf): + """Find vehicle that is most close to infra""" + sdict = load_json(mf) + seq = os.path.basename(mf)[:-5] + frames = sorted(sdict.keys()) + min_dist = 1000 + min_dist_frame = frames[0] + for f in frames: + fdict = sdict[f] + veh_pose = fdict['agents']['0']['lidar']['0']['pose'] + inf_pose = fdict['agents']['1']['lidar']['0']['pose'] + dist = np.sqrt((veh_pose[0] - inf_pose[0]) ** 2 + (inf_pose[1] - veh_pose[1]) ** 2) + if dist < min_dist: + min_dist = dist + min_dist_frame = f + print(f"Step1: registration starts from frame {min_dist_frame}") + return min_dist_frame, min_dist
+ + +
[docs]def register_step_two(start_frame, mf, meta_out_dir): + """Register point clouds""" + sdict = load_json(mf) + seq = os.path.basename(mf)[:-5] + frames = sorted(sdict.keys()) + total_frames = len(frames) + start_idx = frames.index(start_frame) + ref_pcd, ref_tf = parse_static_pcd(sdict[start_frame]['agents']['1'], root_dir) + ref_pose = pclib.tf2pose(ref_tf) + ref_pcd = ref_pcd.transform(ref_tf) + idx_l = start_idx + idx_r = start_idx + 1 + vis = False + cnt = 0 + while True: + if idx_l < 0 and idx_r >= len(frames): + break + if idx_l >= 0: + cur_frame = frames[idx_l] + pcd, tf = parse_static_pcd(sdict[cur_frame]['agents']['0'], root_dir) + if cnt == -1: + # tf = registration.manual_registration(pcd.transform(tf), ref_pcd) + + tf_corr = np.array([ [ 9.98532892e-01, 5.34621722e-02, 8.59413959e-03, -1.22072297e+02], + [-5.34946946e-02, 9.98561645e-01, 3.59984429e-03, 2.15912680e+02], + [-8.38932267e-03, -4.05430380e-03, 9.99956590e-01, 4.32884527e+01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + tf = tf_corr @ tf + + else: + tf = register_pcds(pcd, ref_pcd, tf, [1.6, 0.5], vis, cur_frame) + pose = pclib.tf2pose(tf) + sdict[cur_frame]['agents']['0']['lidar']['0']['pose'] = pose + sdict[cur_frame]['agents']['0']['pose'] = pose + sdict[cur_frame]['agents']['1']['lidar']['0']['pose'] = ref_pose + sdict[cur_frame]['agents']['1']['pose'] = ref_pose + ref_pcd = ref_pcd + pcd.transform(tf) + idx_l -= 1 + cnt += 1 + if idx_r < len(frames): + cur_frame = frames[idx_r] + pcd, tf = parse_static_pcd(sdict[cur_frame]['agents']['0'], root_dir) + tf = register_pcds(pcd, ref_pcd, tf, [1.6, 0.5], vis, cur_frame) + pose = pclib.tf2pose(tf) + sdict[cur_frame]['agents']['0']['lidar']['0']['pose'] = pose + sdict[cur_frame]['agents']['0']['pose'] = pose + sdict[cur_frame]['agents']['1']['lidar']['0']['pose'] = ref_pose + sdict[cur_frame]['agents']['1']['pose'] = ref_pose + ref_pcd = ref_pcd + pcd.transform(tf) + idx_r += 1 + cnt += 1 + + ref_pcd = ref_pcd.voxel_down_sample(voxel_size=0.1) + print(f"\rStep2: registered [{cnt}/{total_frames}] frames",end='',flush=True) + + save_json(sdict, os.path.join(meta_out_dir, f"{seq}.json")) + print('\n')
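register_step_two grows the registered map outwards from the start frame, alternating between earlier (idx_l) and later (idx_r) frames so that every scan is matched against an already well-registered neighbourhood. The traversal order in isolation (a minimal sketch, indices only):

frames = list(range(10))
start_idx = 6
idx_l, idx_r = start_idx, start_idx + 1
order = []
while idx_l >= 0 or idx_r < len(frames):
    if idx_l >= 0:
        order.append(frames[idx_l]); idx_l -= 1
    if idx_r < len(frames):
        order.append(frames[idx_r]); idx_r += 1
# order == [6, 7, 5, 8, 4, 9, 3, 2, 1, 0]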
+ + +
[docs]def select_sub_scenes(meta_in, root_dir, meta_out, split): + with open(os.path.join(meta_in, f"{split}.txt"), 'r') as f: + scenes = sorted(f.read().splitlines()) + + sub_scenes = [] + for s in tqdm.tqdm(scenes): + sdict = load_json(os.path.join(meta_in, f"{s}.json")) + frames = sorted(sdict.keys()) + sub_seq = frames[:1] + cnt = 0 + for i, f in enumerate(frames[1:]): + if (i == len(frames) - 2 or int(f) - int(sub_seq[-1]) > 1): + if i == len(frames) - 2: + # reach the end + sub_seq.append(f) + if len(sub_seq) >= 6: + # find one valid sub sequence + new_sdict = parse_global_bboxes(sdict, sub_seq, root_dir) + save_json(new_sdict, os.path.join(meta_out, f"{s}_{cnt}.json")) + sub_scenes.append(f"{s}_{cnt}") + cnt += 1 + if not i == len(frames) - 2: + # sequence breaks, add the current frame to the new seq + sub_seq = [f] + else: + sub_seq.append(f) + + with open(os.path.join(meta_out, f"{split}.txt"), 'w') as f: + f.writelines('\n'.join(sub_scenes))
+ + +
[docs]def parse_timestamped_boxes(adict, root_dir, four_wheel_only=True): + lf = os.path.join(root_dir, adict['lidar']['0']['filename']) + pcd = point_cloud_from_path(lf) + boxes = np.array(adict['gt_boxes']) + if four_wheel_only: + boxes = boxes[boxes[:, 1] < 4] + if 'timestamp' in pcd.fields: + points = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1) + points_inds = points_in_boxes_cpu(points, boxes[:, [2, 3, 4, 5, 6, 7, 10]]).astype(bool) + times = pcd.pc_data['timestamp'] + timestamps = [] + for i, inds in enumerate(points_inds): + if inds.sum() == 0: + nearst_angle_idx = np.abs(np.arctan2(boxes[i, 3], boxes[i, 2]) - + np.arctan2(points[:, 1], points[:, 0])).argmin() + timestamps.append(times[nearst_angle_idx]) + else: + ts = times[inds] + timestamps.append(ts.mean()) + timestamps = np.array(timestamps) + else: + timestamps = np.zeros_like(boxes[:, 0]) + adict['lidar']['0']['time'] + + return timestamps, boxes
+ + +
[docs]def parse_global_bboxes(sdict, frames, root_dir): + """Step three""" + new_sdict = {} + tracklets = {} + id_counter = 1 + last_track_ids = set() + for fi, f in enumerate(frames): + fdict = sdict[f] + new_fdict = copy.deepcopy(fdict) + matched_track_ids = set() + matched_inds = [] + for ai, adict in fdict['agents'].items(): + timestamps, boxes = parse_timestamped_boxes(adict, root_dir) + tf = pclib.pose_to_transformation(adict['lidar']['0']['pose']) + boxes_global = transform_boxes_3d(boxes, tf, mode=11) + if len(tracklets) == 0: + for i, (t, box) in enumerate(zip(timestamps, boxes_global)): + tracklets[id_counter] = [[t] + box[1:].tolist()] + boxes[i, 0] = id_counter + id_counter += 1 + else: + tracked_boxes = [] + tracked_ids = [] + for k, v in tracklets.items(): + tracked_ids.append(k) + tracked_boxes.append(v[-1]) + tracked_boxes = np.array(tracked_boxes) + tracked_ids = np.array(tracked_ids) + dist_cost = np.linalg.norm(tracked_boxes[:, [2, 3]][:, None] - boxes_global[:, [2, 3]][None], axis=-1) + thr = 3 + min_dist = dist_cost.min(axis=0) + min_idx = dist_cost.argmin(axis=0) + match_inds = [] + for i, box in enumerate(boxes_global): + cur_box = [timestamps[i]] + box[1:].tolist() + if min_dist[i] < thr: + tracklets[tracked_ids[min_idx[i]]].append(cur_box) + match_inds.append([tracked_ids[min_idx[i]], i]) + boxes[i, 0] = tracked_ids[min_idx[i]] + else: + tracklets[id_counter] = [cur_box] + boxes[i, 0] = id_counter + id_counter += 1 + matched_inds.extend(match_inds) + + new_fdict['agents'][ai]['gt_boxes'] = boxes.tolist() + new_sdict[f] = new_fdict + + object_size_type = {} + for ti, tracklet in tracklets.items(): + tracklets[ti] = np.array(sorted(tracklet)) + object_size_type[ti] = { + 'size': np.median(tracklets[ti][:, 5:8], axis=0), + 'type': np.median(tracklets[ti][:, 1], axis=0), + } + + # remove last two frames + new_sdict.pop(frames[-1]) + new_sdict.pop(frames[-2]) + for f, fdict in new_sdict.items(): + object_ids = [] + for ai, adict in fdict['agents'].items(): + object_ids.extend([int(box[0]) for box in adict['gt_boxes']]) + object_ids = set(object_ids) + aligned_time = math.ceil(fdict['agents']['0']['lidar']['0']['time'] * 10) / 10 + aligned_boxes = [[], [], []] + for object_id in object_ids: + if object_id in tracklets: + tracklet = tracklets[object_id] + if len(tracklet) == 0: + continue + for i in range(3): + cur_time = aligned_time + 0.1 * i + time_diff = tracklet[:, 0] - cur_time + try: + prev_idx = np.where(time_diff < 0)[0].max() + next_idx = np.where(time_diff > 0)[0].min() + prev_t = tracklet[prev_idx][0] + next_t = tracklet[next_idx][0] + dxyz = tracklet[next_idx][[2, 3, 4]] - tracklet[prev_idx][[2, 3, 4]] + xyz = tracklet[prev_idx][[2, 3, 4]] + dxyz * (cur_time - prev_t) / (next_t - prev_t) + prev_rot = tracklet[next_idx][10] + object_param = [object_id , object_size_type[object_id]['type']] + xyz.tolist() + \ + object_size_type[object_id]['size'].tolist() + [0, 0, prev_rot] + aligned_boxes[i].append(object_param) + except: + aligned_boxes[i].append([0] * 11) + else: + print('d') + aligned_boxes = np.array(aligned_boxes) + tf = pclib.pose_to_transformation(fdict['agents']['0']['lidar']['0']['pose']) + aligned_boxes = box_utils.transform_boxes_3d( + aligned_boxes.reshape(-1, 11), np.linalg.inv(tf), mode=11).reshape(aligned_boxes.shape) + fdict['meta']['bbx_center_global'] = aligned_boxes[0].tolist() + fdict['meta']['boxes_pred'] = {f"{int(f) + i + 1:06d}": x[:, [2, 3, 4, 10]].tolist() \ + for i, x in enumerate(aligned_boxes[1:])} + + return new_sdict
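Box-to-tracklet association in parse_global_bboxes is a plain nearest-centre match with a 3 m gate; unmatched detections open new tracklets. A stripped-down sketch of the same idea on hypothetical arrays:

import numpy as np

tracked_xy = np.array([[10.0,  5.0], [30.0, -2.0]])   # last known centres of two tracklets
new_xy     = np.array([[10.5,  5.2], [80.0,  0.0]])   # detected box centres in the current frame
dist = np.linalg.norm(tracked_xy[:, None] - new_xy[None], axis=-1)   # (n_tracks, n_detections)
nearest = dist.argmin(axis=0)
matched = dist.min(axis=0) < 3.0
# detection 0 -> tracklet 0 (~0.54 m), detection 1 is unmatched and would start a new tracklet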
+ + +
[docs]def remove_ego_boxes(meta_in): + mfs = glob.glob(os.path.join(meta_in, '*.json')) + for mf in mfs: + sdict = load_json(mf) + for f, fdict in sdict.items(): + gt_boxes = np.array(fdict['agents']['0']['gt_boxes']) + depth = np.linalg.norm(gt_boxes[:, 2:4], axis=-1) + gt_boxes = gt_boxes[depth > 2] + fdict['agents']['0']['gt_boxes'] = gt_boxes.tolist() + + global_boxes = np.array(fdict['meta']['bbx_center_global']) + mask = np.linalg.norm(global_boxes[:, 2:4], axis=-1) > 2 + fdict['meta']['bbx_center_global'] = global_boxes[mask].tolist() + boxes_pred = fdict['meta']['boxes_pred'] + fdict['meta']['boxes_pred'] = {k: np.array(v)[mask].tolist() for k, v in boxes_pred.items()} + + save_json(sdict, mf)
+ + +if __name__=="__main__": + root_dir = "/home/data/DAIR-V2X" + meta_out_dir = "/home/data/DAIR-V2X/meta-sub-scenes" + meta_path = "/home/data/cosense3d/dairv2x" + # root_dir = "/home/data/DAIR-V2X-Seq/SPD-Example" + # meta_out_dir = "/home/data/cosense3d/dairv2x_seq" + # convert_v2x_c(root_dir, meta_path) + # meta_dict = load_meta(os.path.join(meta_out_dir, 'dairv2x')) + # o3d_play_sequence(meta_dict, root_dir) + # optimize_poses(meta_path) + + # with open("/home/data/DAIR-V2X/meta/test.txt", 'w') as fh: + # files = glob.glob("/home/data/DAIR-V2X/meta/*.json") + # for f in files: + # fh.writelines(os.path.basename(f)[:-5] + '\n') + + # mfs = sorted(glob.glob("/home/yuan/data/DAIR-V2X/meta-loc-correct/*.json"))[:1] + # # mf = "/home/data/cosense3d/dairv2x/11.json" + # for mf in mfs: + # if int(os.path.basename(mf)[:-5]) <= 10: + # continue + # min_dist_frame, min_dist = register_step_one(mf) + # sdict = register_step_two(min_dist_frame, mf, meta_out_dir) + # parse_global_bboxes(mf, meta_out_dir, root_dir) + + # select_sub_scenes( + # "/home/yuan/data/DAIR-V2X/meta-loc-correct", + # "/home/yuan/data/DAIR-V2X", + # "/home/yuan/data/DAIR-V2X/meta-sub-scenes", + # "test" + # ) + + remove_ego_boxes("/home/yuan/data/DAIR-V2X/meta_with_pred") + + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/toolkit/opv2v.html b/docs/_build/html/_modules/cosense3d/dataset/toolkit/opv2v.html new file mode 100644 index 00000000..cea8dfec --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/toolkit/opv2v.html @@ -0,0 +1,811 @@ + + + + + + cosense3d.dataset.toolkit.opv2v — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.dataset.toolkit.opv2v

+import copy
+import json
+import math
+import os
+from glob import glob
+
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+import tqdm
+import open3d as o3d
+import os.path as osp
+
+import cv2
+from collections import OrderedDict
+from torch.utils.data import Dataset
+
+
+from scipy.spatial.transform import Rotation as R
+from cosense3d.utils.misc import load_yaml, save_json, load_json
+from cosense3d.dataset.toolkit import register_pcds
+from cosense3d.dataset.toolkit.cosense import CoSenseDataConverter as cs
+from cosense3d.utils.box_utils import boxes_to_corners_3d
+from cosense3d.utils.pclib import load_pcd
+from cosense3d.utils.vislib import draw_points_boxes_plt, draw_2d_bboxes_on_img
+from cosense3d.ops.utils import points_in_boxes_cpu
+
+
+
[docs]def x_to_world(pose: list) -> np.ndarray: + """ + The transformation matrix from x-coordinate system to carla world system + Parameters + + :param pose: [x, y, z, roll, yaw, pitch] + :return: The transformation matrix. + """ + x, y, z, roll, yaw, pitch = pose[:] + + # used for rotation matrix + c_y = np.cos(np.radians(yaw)) + s_y = np.sin(np.radians(yaw)) + c_r = np.cos(np.radians(roll)) + s_r = np.sin(np.radians(roll)) + c_p = np.cos(np.radians(pitch)) + s_p = np.sin(np.radians(pitch)) + + matrix = np.identity(4) + # translation matrix + matrix[0, 3] = x + matrix[1, 3] = y + matrix[2, 3] = z + + # rotation matrix + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + + return matrix
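A quick sanity check of the convention above (not part of the toolkit): the translation is copied verbatim and, with zero roll and pitch, the matrix reduces to a pure yaw rotation.

import numpy as np

pose = [10.0, -5.0, 1.5, 0.0, 90.0, 0.0]    # x, y, z, roll, yaw, pitch in degrees (CARLA order)
T = x_to_world(pose)
assert np.allclose(T[:3, 3], pose[:3])                                      # translation copied verbatim
assert np.allclose(T[:3, :3] @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])  # 90 deg yaw maps +x to +y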
+ + +
[docs]def x1_to_x2(x1, x2): + """ + Transformation matrix from x1 to x2. + + Parameters + ---------- + x1 : list or np.ndarray + The pose of x1 under world coordinates or + transformation matrix x1->world + x2 : list or np.ndarray + The pose of x2 under world coordinates or + transformation matrix x2->world + + Returns + ------- + transformation_matrix : np.ndarray + The transformation matrix. + + """ + if isinstance(x1, list) and isinstance(x2, list): + x1_to_world = x_to_world(x1) + x2_to_world = x_to_world(x2) + world_to_x2 = np.linalg.inv(x2_to_world) + transformation_matrix = np.dot(world_to_x2, x1_to_world) + + # object pose is list while lidar pose is transformation matrix + elif isinstance(x1, list) and not isinstance(x2, list): + x1_to_world = x_to_world(x1) + world_to_x2 = x2 + transformation_matrix = np.dot(world_to_x2, x1_to_world) + # both are numpy matrix + else: + world_to_x2 = np.linalg.inv(x2) + transformation_matrix = np.dot(world_to_x2, x1) + + return transformation_matrix
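A hedged consistency check for the list/list branch: mapping a point from x1 to x2 and then to world must equal mapping it from x1 to world directly.

x1 = [2.0, 3.0, 0.0, 0.0, 30.0, 0.0]
x2 = [5.0, -1.0, 0.0, 0.0, -45.0, 0.0]
T_12 = x1_to_x2(x1, x2)                       # x1 -> x2
p = np.array([1.0, 2.0, 0.0, 1.0])            # homogeneous point in x1 coordinates
assert np.allclose(x_to_world(x2) @ T_12 @ p, x_to_world(x1) @ p)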
+ + +
[docs]def create_bbx(extent): + """ + Create bounding box with 8 corners under obstacle vehicle reference. + + Parameters + ---------- + extent : list + Width, height, length of the bbx. + + Returns + ------- + bbx : np.array + The bounding box with 8 corners, shape: (8, 3) + """ + + bbx = np.array([[extent[0], -extent[1], -extent[2]], + [extent[0], extent[1], -extent[2]], + [-extent[0], extent[1], -extent[2]], + [-extent[0], -extent[1], -extent[2]], + [extent[0], -extent[1], extent[2]], + [extent[0], extent[1], extent[2]], + [-extent[0], extent[1], extent[2]], + [-extent[0], -extent[1], extent[2]]]) + + return bbx
+ + +
[docs]def corner_to_center(corner3d, order='lwh'): + """ + Convert 8 corners to x, y, z, dx, dy, dz, yaw. + + Parameters + ---------- + corner3d : np.ndarray + (N, 8, 3) + + order : str + 'lwh' or 'hwl' + + Returns + ------- + box3d : np.ndarray + (N, 7) + """ + assert corner3d.ndim == 3 + batch_size = corner3d.shape[0] + + xyz = np.mean(corner3d[:, [0, 3, 5, 6], :], axis=1) + h = abs(np.mean(corner3d[:, 4:, 2] - corner3d[:, :4, 2], axis=1, + keepdims=True)) + l = (np.sqrt(np.sum((corner3d[:, 0, [0, 1]] - corner3d[:, 3, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 2, [0, 1]] - corner3d[:, 1, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 4, [0, 1]] - corner3d[:, 7, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 5, [0, 1]] - corner3d[:, 6, [0, 1]]) ** 2, + axis=1, keepdims=True))) / 4 + + w = (np.sqrt( + np.sum((corner3d[:, 0, [0, 1]] - corner3d[:, 1, [0, 1]]) ** 2, axis=1, + keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 2, [0, 1]] - corner3d[:, 3, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 4, [0, 1]] - corner3d[:, 5, [0, 1]]) ** 2, + axis=1, keepdims=True)) + + np.sqrt(np.sum((corner3d[:, 6, [0, 1]] - corner3d[:, 7, [0, 1]]) ** 2, + axis=1, keepdims=True))) / 4 + + theta = (np.arctan2(corner3d[:, 1, 1] - corner3d[:, 2, 1], + corner3d[:, 1, 0] - corner3d[:, 2, 0]) + + np.arctan2(corner3d[:, 0, 1] - corner3d[:, 3, 1], + corner3d[:, 0, 0] - corner3d[:, 3, 0]) + + np.arctan2(corner3d[:, 5, 1] - corner3d[:, 6, 1], + corner3d[:, 5, 0] - corner3d[:, 6, 0]) + + np.arctan2(corner3d[:, 4, 1] - corner3d[:, 7, 1], + corner3d[:, 4, 0] - corner3d[:, 7, 0]))[:, + np.newaxis] / 4 + + if order == 'lwh': + return np.concatenate([xyz, l, w, h, theta], axis=1).reshape( + batch_size, 7) + elif order == 'hwl': + return np.concatenate([xyz, h, w, l, theta], axis=1).reshape( + batch_size, 7) + else: + raise NotImplementedError
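corner_to_center acts as the (approximate) inverse of create_bbx above: averaging opposite edges recovers the dimensions and averaging four edge directions recovers the yaw. A minimal round-trip check:

extent = [2.3, 1.0, 0.8]                       # half length, half width, half height
corners = create_bbx(extent)[None]             # (1, 8, 3), axis-aligned box at the origin
box = corner_to_center(corners, order='lwh')[0]
# box ≈ [0, 0, 0, 4.6, 2.0, 1.6, 0]            # centre, full l/w/h, zero yaw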
+ + +
[docs]def project_world_objects(object_dict, + output_dict, + lidar_pose, + order): + """ + Project the objects under world coordinates into another coordinate + based on the provided extrinsic. + + Parameters + ---------- + object_dict : dict + The dictionary contains all objects surrounding a certain cav. + + output_dict : dict + key: object id, value: object bbx (xyzlwhyaw). + + lidar_pose : list + (6, ), lidar pose under world coordinate, [x, y, z, roll, yaw, pitch]. + + order : str + 'lwh' or 'hwl' + """ + for object_id, object_content in object_dict.items(): + location = object_content['location'] + rotation = object_content['angle'] + center = object_content['center'] + extent = object_content['extent'] + + if 'ass_id' not in object_content or object_content['ass_id'] == -1: + ass_id = object_id + else: + ass_id = object_content['ass_id'] + if 'obj_type' not in object_content: + obj_type = 'Car' + else: + obj_type = object_content['obj_type'] + + # todo: pedestrain is not consdered yet + # todo: only single class now + if obj_type == 'Pedestrian': + continue + + object_pose = [location[0] + center[0], + location[1] + center[1], + location[2] + center[2], + rotation[0], rotation[1], rotation[2]] + object2lidar = x1_to_x2(object_pose, lidar_pose) + + # shape (3, 8) + bbx = create_bbx(extent).T + # bounding box under ego coordinate shape (4, 8) + bbx = np.r_[bbx, [np.ones(bbx.shape[1])]] + + # project the 8 corners to lidar coordinate + bbx_lidar = np.dot(object2lidar, bbx).T + bbx_lidar = np.expand_dims(bbx_lidar[:, :3], 0) + bbx_lidar = corner_to_center(bbx_lidar, order=order) + + # get velocity + if 'speed' in object_content: + speed = object_content['speed'] + theta = bbx_lidar[0, -1] + velo = np.array([speed * np.cos(theta), speed * np.sin(theta)]) + else: + velo = None + + if bbx_lidar.shape[0] > 0: + output_dict.update({object_id: {'coord': bbx_lidar, + 'ass_id': ass_id, + 'velo': velo}})
+ + +
[docs]def update_local_boxes3d(fdict, objects_dict, ref_pose, order, data_dir, cav_id): + output_dict = {} + # add ground truth boxes at cav local coordinate + project_world_objects(objects_dict, + output_dict, + ref_pose, + order) + boxes_local = [] + velos = [] + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id = object_content['ass_id'] + else: + object_id = object_id + object_bbx = object_content['coord'] + if order == 'hwl': + object_bbx = object_bbx[:, [0, 1, 2, 5, 4, 3, 6]] + boxes_local.append( + [object_id, 0, ] + + object_bbx[0, :6].tolist() + + [0, 0, object_bbx[0, 6]] + ) + if 'velo' in object_content and object_content['velo'] is not None: + velos.append(object_content['velo'].tolist()) + + cs.update_agent(fdict, cav_id, gt_boxes=boxes_local) + if len(velos) == len(boxes_local): + cs.update_agent(fdict, cav_id, velos=velos) + + # get visibility of local boxes + lidar = load_pcd(os.path.join(data_dir, fdict['agents'][cav_id]['lidar']['0']['filename']))['xyz'] + if len(boxes_local) > 0: + boxes = np.array(boxes_local)[:, [2, 3, 4, 5, 6, 7, 10]] + res = points_in_boxes_cpu(lidar, boxes) + num_pts = res.sum(axis=1) + cs.update_agent(fdict, cav_id, num_pts=num_pts.tolist()) + else: + cs.update_agent(fdict, cav_id, num_pts=[])
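For reference, the 11-element CoSense box rows written above (and consumed throughout the toolkits) appear to hold id, type, centre, size and rotation; the slice [2, 3, 4, 5, 6, 7, 10] used for point-in-box tests picks out the geometric part. This layout is inferred from how the fields are written here, so treat it as an assumption:

#         0    1   2  3  4  5  6  7    8      9    10
# box = [id, type, x, y, z, l, w, h, rot_x, rot_y, yaw]
box = [12, 0, 1.5, -3.2, 0.9, 4.6, 2.0, 1.6, 0.0, 0.0, 0.3]   # hypothetical values
geom = np.array(box)[[2, 3, 4, 5, 6, 7, 10]]                  # x, y, z, l, w, h, yaw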
+ + +
[docs]def opv2v_pose_to_cosense(pose): + if len(pose) == 6: + transformation = x_to_world(pose) + else: + transformation = pose + rot = R.from_matrix(transformation[:3, :3]).as_euler('xyz', degrees=False) + tl = transformation[:3, 3] + pose = tl.tolist() + rot.tolist() + return pose
+ + +
[docs]def update_cam_params(opv2v_params, cosense_fdict, agent_id, scenario, frame): + for k, v in opv2v_params.items(): + if 'camera' in k: + cam_id = int(k[-1:]) + cs.add_cam_to_fdict( + cosense_fdict, + agent_id, + cam_id, + [os.path.join(scenario, agent_id, f'{frame}_{k}.png')], + v['intrinsic'], + v['extrinsic'], + pose=v['cords'], + )
+ + +
[docs]def project_points(points, lidar2cam, I): + """Project 3d points to image planes""" + points_homo = np.concatenate([points[:, :3], np.ones_like(points[:, :1])], axis=1).T + points_homo = lidar2cam @ points_homo + pixels = I @ points_homo[:3] + pixels[:2] = pixels[:2] / pixels[2:] + depths = points_homo[2] + return pixels, depths
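A toy projection through project_points (made-up intrinsics, camera colocated with the lidar): a point on the optical axis lands at the principal point and the returned depth is its z value in camera coordinates.

I = np.array([[800.,   0., 400.],
              [  0., 800., 300.],
              [  0.,   0.,   1.]])
lidar2cam = np.eye(4)
pts = np.array([[0.0, 0.0, 10.0], [1.0, 0.5, 10.0]])
pixels, depths = project_points(pts, lidar2cam, I)
# pixels[:2, 0] == (400, 300), pixels[:2, 1] == (480, 340), depths == [10, 10]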
+ + +
[docs]def boxes_3d_to_2d(boxes3d, num_pts, lidar2cam, I, img_size): + n_box = len(boxes3d) + box_center = boxes3d.mean(axis=1) + box_points = boxes3d.reshape(-1, 3) + + box_pixels, _ = project_points(box_points, lidar2cam, I) + center_pixels, depths = project_points(box_center, lidar2cam, I) + + box_pixels = box_pixels.T.reshape(n_box, 8, 3) + mask = (box_pixels[:, :, 2] > 0).all(axis=1) + box_pixels = box_pixels[mask] + center_pixels = center_pixels[:2].T[mask] + depths = depths[mask] + num_pts = num_pts[mask] + x_min = np.clip(box_pixels[..., 0].min(axis=1), a_min=0, a_max=img_size[1]) + y_min = np.clip(box_pixels[..., 1].min(axis=1), a_min=0, a_max=img_size[0]) + x_max = np.clip(box_pixels[..., 0].max(axis=1), a_min=0, a_max=img_size[1]) + y_max = np.clip(box_pixels[..., 1].max(axis=1), a_min=0, a_max=img_size[0]) + mask = (x_min < img_size[1]) & (x_max > 0) & (y_min < img_size[0]) & (y_max > 0) + bbox_2d = np.stack([x_min[mask], y_min[mask], x_max[mask], y_max[mask]], axis=-1) + return bbox_2d, center_pixels[mask], depths[mask], num_pts[mask]
+ + +
[docs]def update_2d_bboxes(fdict, cav_id, lidar_pose, data_dir): + local_boxes = np.array(fdict['agents'][cav_id]['gt_boxes']) + if len(local_boxes) > 0: + local_boxes = local_boxes[:, 2:] + num_pts = np.array(fdict['agents'][cav_id]['num_pts']) + boxes_corners = boxes_to_corners_3d(local_boxes) + # lidar = load_pcd(os.path.join(data_dir, fdict['agents'][cav_id]['lidar'][0]['filename'])) + # lidar = np.concatenate([lidar['xyz'], np.ones_like(lidar['intensity'])], axis=1) + # draw_points_boxes_plt(pc_range=100, points=lidar, filename="/home/yuan/Downloads/tmp.png") + cam_UE2pinhole = np.array([[0, 1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]) + for cam_id, cam_params in fdict['agents'][cav_id]['camera'].items(): + img = cv2.imread(os.path.join(data_dir, cam_params['filenames'][0]))[..., ::-1] + lidar2cam_UE = x1_to_x2(lidar_pose, cam_params['pose']) + lidar2cam_pinhole = cam_UE2pinhole @ lidar2cam_UE + I = np.array(cam_params['intrinsic']) + # draw_3d_points_boxes_on_img(img, lidar2cam_pinhole, I, lidar, boxes_corners) + bboxes2d, centers2d, depths, num_pts_2d = boxes_3d_to_2d( + boxes_corners, num_pts, lidar2cam_pinhole, I, img_size=img.shape) + # draw_2d_bboxes_on_img(img, bboxes2d) + cam_params['bboxes2d'] = bboxes2d.tolist() + cam_params['centers2d'] = centers2d.tolist() + cam_params['depths'] = depths.tolist() + cam_params['num_pts'] = num_pts_2d.tolist() + cam_params['lidar2cam'] = lidar2cam_pinhole.tolist() + else: + cam_UE2pinhole = np.array([[0, 1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]) + for cam_id, cam_params in fdict['agents'][cav_id]['camera'].items(): + lidar2cam_UE = x1_to_x2(lidar_pose, cam_params['pose']) + lidar2cam_pinhole = cam_UE2pinhole @ lidar2cam_UE + cam_params['lidar2cam'] = lidar2cam_pinhole.tolist() + cam_params['bboxes2d'] = [] + cam_params['centers2d'] = [] + cam_params['depths'] = [] + cam_params['num_pts'] = []
+ + +
[docs]def opv2v_to_cosense(path_in, path_out, isSim=True, correct_transf=False, pcd_ext='pcd'): + if isSim: + order = 'lwh' + else: + order = 'hwl' + flag = False + for split in ['train', 'test']: + scenarios = sorted(os.listdir(os.path.join(path_in, split))) + with open(os.path.join(path_out, f'{split}.txt'), 'w') as fh: + fh.write('\n'.join(scenarios)) + for s in scenarios: + print(s) + # if s == "2021_08_22_09_43_53": + # flag = True + # if not flag: + # continue + visualize = False + sdict = {} + spath = os.path.join(path_in, split, s) + cavs = sorted([x for x in os.listdir(spath) if os.path.isdir(os.path.join(spath, x))]) + ego_id = cavs[0] + frames = sorted([x[:-5] + for x in os.listdir(os.path.join(spath, ego_id)) if + x.endswith('.yaml') and 'sparse_gt' not in x]) + for f in tqdm.tqdm(frames): + fdict = cs.fdict_template() + ego_lidar_pose = None + object_id_stack = [] + object_velo_stack = [] + object_stack = [] + for i, cav_id in enumerate(cavs): + yaml_file = os.path.join(spath, cav_id, f'{f}.yaml') + params = load_yaml(yaml_file) + cs.update_agent(fdict, cav_id, agent_type='cav', + agent_pose=opv2v_pose_to_cosense(params['true_ego_pos'])) + update_cam_params(params, fdict, cav_id, s, f) + + if cav_id == ego_id: + ego_lidar_pose = params['lidar_pose'] + + # get transformation from ego to cav, correct transformation if necessary + transformation = x1_to_x2(params['lidar_pose'], ego_lidar_pose) + if not isSim and correct_transf and cav_id != ego_id: + ego_lidar_file = os.path.join(path_in, split, s, ego_id, f'{f}.pcd') + cav_lidar_file = os.path.join(path_in, split, s, cav_id, f'{f}.pcd') + transformation = register_pcds(cav_lidar_file, ego_lidar_file, transformation, visualize) + visualize = False + # cav_lidar_pose2ego = opv2v_pose_to_cosense(transformation) + + # get cav lidar pose in cosense format + cs.update_agent(fdict, cav_id, 'cav') + cs.update_agent_lidar(fdict, cav_id, '0', + lidar_pose=opv2v_pose_to_cosense(params['lidar_pose']), + lidar_file=os.path.join(s, cav_id, f'{f}.{pcd_ext}')) + + objects_dict = params['vehicles'] + output_dict = {} + if isSim: + glob_ref_pose = ego_lidar_pose + local_ref_pose = params['lidar_pose'] + else: + glob_ref_pose = transformation + local_ref_pose = [0,] * 6 + + data_dir = os.path.join(path_in, split) + update_local_boxes3d(fdict, objects_dict, local_ref_pose, order, data_dir, cav_id) + if isSim: + # v2vreal has no camera data + update_2d_bboxes(fdict, cav_id, params['lidar_pose'], data_dir) + + # add gt boxes in ego coordinates as global boxes of cosense3d format + project_world_objects(objects_dict, + output_dict, + glob_ref_pose, + order) + + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id_stack.append(object_content['ass_id']) + else: + object_id_stack.append(object_id + 100 * int(cav_id)) + if object_content['velo'] is not None: + object_velo_stack.append(object_content['velo']) + object_stack.append(object_content['coord']) + + # exclude all repetitive objects + unique_indices = \ + [object_id_stack.index(x) for x in set(object_id_stack)] + object_stack = np.vstack(object_stack) + object_stack = object_stack[unique_indices] + if len(object_velo_stack) == len(object_stack): + object_velo_stack = np.vstack(object_velo_stack) + object_velo_stack = object_velo_stack[unique_indices] + if order == 'hwl': + object_stack = object_stack[:, [0, 1, 2, 5, 4, 3, 6]] + + cosense_bbx_center = np.zeros((len(object_stack), 11)) + cosense_bbx_center[:, 0] = 
np.array(object_id_stack)[unique_indices] + cosense_bbx_center[:, 2:8] = object_stack[:, :6] + cosense_bbx_center[:, 10] = object_stack[:, 6] + cs.update_frame_bbx(fdict, cosense_bbx_center.tolist()) + if '0' not in cavs: + fdict['agents'].pop('0') # remove template agent + + fdict['meta']['ego_id'] = ego_id + fdict['meta']['ego_lidar_pose'] = opv2v_pose_to_cosense(ego_lidar_pose) + if len(object_velo_stack) == len(object_stack): + fdict['meta']['bbx_velo_global'] = object_velo_stack.tolist() + + boxes_num_pts = {int(i): 0 for i in cosense_bbx_center[:, 0]} + for adict in fdict['agents'].values(): + for box, num_pts in zip(adict['gt_boxes'], adict['num_pts']): + boxes_num_pts[int(box[0])] += num_pts + fdict['meta']['num_pts'] = [boxes_num_pts[int(i)] for i in cosense_bbx_center[:, 0]] + + sdict[f] = fdict + + # plot + # ego_pose = pose_to_transformation(fdict['meta']['ego_lidar_pose']) + # ax = None + # for ai, adict in fdict['agents'].items(): + # cav_pose = pose_to_transformation(adict['lidar'][0]['pose']) + # T_cav2ego = np.linalg.inv(ego_pose) @ cav_pose + # lidar_file = os.path.join(path_in, split, adict['lidar'][0]['filename']) + # points = load_pcd(lidar_file)['xyz'] + # points = np.concatenate([points, np.ones_like(points[:, :1])], axis=-1) + # points = (T_cav2ego @ points.T).T + # color = 'g' if ai == ego_id else 'r' + # ax = draw_points_boxes_plt( + # pc_range=100, + # points=points[:, :3], + # points_c=color, + # ax=ax, + # return_ax=True + # ) + # plt.show() + # plt.close() + # pass + save_json(sdict, os.path.join(path_out, f'{s}.json'))
+ + +
[docs]def pose_to_transformation(pose): + """ + + Args: + pose: list, [x, y, z, roll, pitch, yaw] + + Returns: + transformation: np.ndarray, (4, 4) + """ + transformation = np.eye(4) + r = R.from_euler('xyz', pose[3:]).as_matrix() + transformation[:3, :3] = r + transformation[:3, 3] = np.array(pose[:3]) + return transformation
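Note that pose_to_transformation expects the CoSense convention of radians ('xyz' Euler), whereas the raw OPV2V yaml poses are in degrees and are converted by opv2v_pose_to_cosense first. A hedged round-trip check:

pose = [1.0, 2.0, 0.5, 0.1, -0.05, 1.2]       # x, y, z, roll, pitch, yaw in radians
T = pose_to_transformation(pose)
assert np.allclose(opv2v_pose_to_cosense(T), pose)   # Euler decomposition recovers the pose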
+ + +
[docs]def update_global_bboxes_num_pts(data_dir, meta_path): + json_files = glob(meta_path + '/*.json') + for jf in tqdm.tqdm(json_files): + # tmp = os.path.join(data_dir, 'train', os.path.basename(jf)[:-5]) + # data_dir_split = os.path.join(data_dir, 'train') if os.path.exists(tmp) else os.path.join(data_dir, 'test') + with open(jf, 'r') as fh: + meta = json.load(fh) + for f, fdict in meta.items(): + # lidar_files = [ldict['filename'] for adict in fdict['agents'].values() for ldict in adict['lidar'].values()] + # lidar_files = [os.path.join(data_dir_split, lf) for lf in lidar_files] + # pcds = [load_pcd(lf)['xyz'] for lf in lidar_files] + # pcds = np.concatenate(pcds, axis=0) + boxes = np.array(fdict['meta']['bbx_center_global']) + boxes_num_pts = {int(i): 0 for i in boxes[:, 0]} + for adict in fdict['agents'].values(): + for box, num_pts in zip(adict['gt_boxes'], adict['num_pts']): + boxes_num_pts[int(box[0])] += num_pts + fdict['meta']['num_pts'] = [boxes_num_pts[int(i)] for i in boxes[:, 0]] + + save_json(meta, jf.replace('opv2v', 'opv2v_full_'))
+ + +
[docs]def generate_bevmaps(data_dir, meta_path): + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps" + map_files = glob(os.path.join(map_path, '*.png')) + scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + map_bounds = load_json(os.path.join(assets_path, 'map_bounds.json')) + bevmaps = {} + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + bevmap = cv2.imread(mf) + # bevmap = np.pad(bevmap, ((pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0) + bevmaps[town] = bevmap + + T_corr = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]]) + + json_files = glob(meta_path + '/*.json') + grid = np.ones((500, 500)) + inds = np.stack(np.where(grid)) + xy = inds * 0.2 - 50 + 0.1 + xy_pad = np.concatenate([xy, np.zeros_like(xy[:1]), np.ones_like(xy[:1])], axis=0) + for jf in tqdm.tqdm(json_files): + scene = os.path.basename(jf).split('.')[0] + town = scene_maps[scene] + cur_map = bevmaps[town] + sx, sy = cur_map.shape[:2] + meta = load_json(jf) + for f, fdict in meta.items(): + for ai, adict in fdict['agents'].items(): + lidar_pose = adict['lidar']['0']['pose'] + transform = T_corr @ pose_to_transformation(lidar_pose) + xy_tf = transform @ xy_pad + # xy_tf = xy_pad + # xy_tf[0] = xy_tf[0] - lidar_pose[0] + # xy_tf[1] = xy_tf[1] - lidar_pose[1] + xy_tf[0] -= map_bounds[town][0] + xy_tf[1] -= map_bounds[town][1] + map_inds = np.floor(xy_tf[:2] / 0.2) + xs = np.clip(map_inds[0], 0, sx - 1).astype(int) + ys = np.clip(map_inds[1], 0, sy - 1).astype(int) + bevmap = cur_map[xs, ys].reshape(500, 500, 3)[::-1, ::-1] + + filename = os.path.join(data_dir, 'train', scene, ai, f'{f}_bev.png') + if not os.path.exists(filename): + filename = os.path.join(data_dir, 'test', scene, ai, f'{f}_bev.png') + gt_bev = cv2.imread(filename) + + img = np.zeros((500, 1050, 3)) + img[:, :500] = bevmap[:, ::-1] + img[:, 550:] = gt_bev + cv2.imwrite('/home/yuan/Downloads/tmp.png', img) + print(filename)
+ + +
[docs]def generate_roadline(map_dir, map_bounds_file): + """ + Convert global BEV semantic maps to 2d road line points. + + :param map_dir: directory for images of BEV semantic maps + :param map_bounds_file: json file that describe the world coordinates of the BEV map origin (image[0, 0]) + :return: Nx2 array, 2d world coordinates of road line points in meters. + """ + bounds = load_json(map_bounds_file) + map_files = glob(map_dir) + for mf in map_files: + roadmap = cv2.imread(mf)
+ # TODO: extract road-line pixel coordinates and convert them to 2d world coordinates + + +
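The body of generate_roadline is still a TODO; below is a hedged sketch of the missing part, assuming (as in the other map utilities in this file) a 0.2 m/pixel map resolution, per-town origin offsets from map_bounds.json, and that road-line pixels can be isolated from one image channel. The channel choice and output file name are placeholders.

def generate_roadline_sketch(map_dir, map_bounds_file):
    bounds = load_json(map_bounds_file)
    for mf in glob(os.path.join(map_dir, '*.png')):
        town = os.path.basename(mf).split('.')[0]
        roadmap = cv2.imread(mf)
        # assumption: road-line pixels are the non-zero entries of channel 0
        rows, cols = np.where(roadmap[..., 0] > 0)
        pts = np.stack([rows, cols], axis=1).astype(np.float32) * 0.2   # pixel index -> metres
        pts[:, 0] += bounds[town][0]
        pts[:, 1] += bounds[town][1]
        np.save(mf.replace('.png', '_roadline.npy'), pts)               # Nx2 world coordinates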
[docs]def convert_bev_semantic_map_to_road_height_map(map_dir, map_bounds_file, scenario_town_map_file, meta_dir): + import torch + bounds = load_json(map_bounds_file) + scenario_town_map = load_json(scenario_town_map_file) + map_files = os.listdir(map_dir) + bevmaps = {mf.split('.')[0]: cv2.imread(os.path.join(map_dir, mf))[..., :2] for mf in map_files} + trajectory = {mf.split('.')[0]: [] for mf in map_files} + meta_files = glob(os.path.join(meta_dir, "*.json")) + for mf in meta_files: + scenario = os.path.basename(mf).split('.')[0] + sdict = load_json(mf) + ego_poses = [] + for f, fdict in sdict.items(): + # gt_boxes = {f"{int(x[0]):d}": x[1:] for x in ego_dict['gt_boxes']} + # ego_box = gt_boxes[fdict['meta']['ego_id']] + ego_poses.append(fdict['agents'][fdict['meta']['ego_id']]['pose'][:3]) + trajectory[scenario_town_map[scenario]].extend(ego_poses) + + for town, bevmap in bevmaps.items(): + inds = np.where(bevmap[..., 1]) + coords = np.stack(inds, axis=1) * 0.2 + coords = torch.from_numpy(coords).cuda() + bound = bounds[town] + coords[:, 0] += bound[0] + coords[:, 1] += bound[1] + traj_pts = torch.tensor(trajectory[town]).cuda() + + for i in range(0, len(coords), 10000): + i1 = i*10000 + i2 = (i+1)*10000 + dists = torch.norm(coords[i1:i2, None, :2] - traj_pts[None, :, :2], dim=-1) + min_dist, min_idx = dists.min(dim=-1) + heights = traj_pts[min_idx][:, -1]
+ # TODO + + +if __name__=="__main__": + # opv2v_to_cosense( + # "/home/data/v2vreal", + # "/home/data/v2vreal/meta", + # isSim=False, + # pcd_ext='pcd' + # ) + + # generate_bevmaps( + # "/home/yuan/data/OPV2Va", + # "/home/yuan/data/OPV2Va/meta", + # ) + + convert_bev_semantic_map_to_road_height_map( + "/code/CoSense3d/cosense3d/carla/assets/maps", + "/code/CoSense3d/cosense3d/carla/assets/map_bounds.json", + "/code/CoSense3d/cosense3d/carla/assets/scenario_town_map.json", + "/home/data/OPV2Va/meta" + ) + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/dataset/toolkit/opv2v_t.html b/docs/_build/html/_modules/cosense3d/dataset/toolkit/opv2v_t.html new file mode 100644 index 00000000..3db1da43 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/dataset/toolkit/opv2v_t.html @@ -0,0 +1,830 @@ + + + + + + cosense3d.dataset.toolkit.opv2v_t — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.dataset.toolkit.opv2v_t

+import glob
+import os.path
+import random
+
+import numpy as np
+import torch
+from plyfile import PlyData
+from matplotlib import colormaps
+from multiprocessing import Pool
+import torch_scatter
+from functools import partial
+
+from cosense3d.dataset.toolkit.opv2v import *
+from cosense3d.utils.vislib import o3d_draw_pcds_bbxs
+from cosense3d.utils.pclib import save_cosense_ply, pose_to_transformation
+from cosense3d.utils.box_utils import transform_boxes_3d
+from cosense3d.utils.misc import load_json, update_dict
+from cosense3d.ops.utils import points_in_boxes_gpu
+from cosense3d.modules.utils.common import cat_coor_with_idx
+
+
+# jet = cm.get_cmap('jet')
+jet = colormaps['jet']
+
+
+
[docs]def read_ply(filename, properties=None): + ply = PlyData.read(filename) + data = ply['vertex'] + properties_from_file = [p.name for p in ply.elements[0].properties] + if properties is None: + properties = properties_from_file + else: + for p in properties: + assert p in properties_from_file, f"Property '{p}' not found." + data_dict = {} + for p in properties: + data_dict[p] = np.array(data[p]) + + return data_dict
+ + +
[docs]def get_local_boxes3d(objects_dict, ref_pose, order): + output_dict = {} + # add ground truth boxes at cav local coordinate + project_world_objects(objects_dict, + output_dict, + ref_pose, + order) + boxes_local = [] + velos = [] + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id = object_content['ass_id'] + else: + object_id = object_id + object_bbx = object_content['coord'] + if order == 'hwl': + object_bbx = object_bbx[:, [0, 1, 2, 5, 4, 3, 6]] + boxes_local.append( + [object_id, 0, ] + + object_bbx[0, :6].tolist() + + [0, 0, object_bbx[0, 6]] + ) + if 'velo' in object_content and object_content['velo'] is not None: + velos.append(object_content['velo'].tolist()) + # TODO adapt velos + else: + velos.append([0., 0.]) + + return boxes_local, velos
+ + +
[docs]def read_ply_to_dict(f): + data = read_ply(f) + timestamp = os.path.basename(f).split('.')[:-1] + timestamp = int(timestamp[0]) * 0.05 + int(timestamp[1]) * 0.01 + timestamp = np.ones_like(data['x']) * timestamp + data['time'] = timestamp.astype(np.float32) + return data
+ + +
[docs]def read_sub_frame(f): + pcd_dict = read_ply_to_dict(f + '.ply') + params = load_yaml(f + '_objects.yaml', cloader=True) + # params = load_yaml(f + '.yaml') + # update_dict(params, params_) + gt_boxes, velos = get_local_boxes3d(params['vehicles'], + params['lidar_pose'], 'lwh') + gt_boxes = np.array(gt_boxes) + # velos = np.array(velos) + points = np.stack([pcd_dict[x] for x in 'xyz'], axis=-1) + points = points[pcd_dict['ObjTag'] == 10] + return gt_boxes, pcd_dict, points
+ + +
[docs]def get_box_velo(box, speeds, frame): + box_id = str(int(box[0])) + try: + speed = speeds[box_id][frame] + except: + if box_id not in speeds: + speed = 0.0 + elif frame not in speeds[box_id]: + frames = list(speeds[box_id].keys()) + nearst_frame_idx = (np.array(frames).astype(int) - int(frame)).argmax() + speed = speeds[box_id][frames[nearst_frame_idx]] + else: + raise NotImplementedError + return speed
+ + +
[docs]def get_velos(boxes, speeds, frame): + with Pool(16) as pool: + out_speeds = pool.map( + partial(get_box_velo, speeds=speeds, frame=frame), + boxes + ) + out_speeds = np.array(out_speeds) + + theta = boxes[:, -1] + velos = np.stack([out_speeds * np.cos(theta), + out_speeds * np.sin(theta)], axis=-1) + return velos
+ + +
[docs]def pad_box_result(res, out_len): + if len(res[0]) == out_len: + return res + box = np.zeros((out_len,) + res[0].shape[1:], dtype=res[0].dtype) + box[:res[0].shape[0]] = res[0] + # set id index to -1 to indicate it is padded + box[res[0].shape[0]:, 0] = -1 + box[res[0].shape[0]:, 4] = 100 + return box, res[1], res[2]
+ + +
[docs]def parse_sub_frame(f): + pcd_dict = read_ply_to_dict(f + '.ply') + params = load_yaml(f + '.yaml') + gt_boxes, velos = get_local_boxes3d(params['vehicles'], + params['lidar_pose'], 'lwh') + gt_boxes = np.array(gt_boxes) + velos = np.array(velos) + points = np.stack([pcd_dict[x] for x in 'xyz'], axis=-1) + pts_mask = points_in_boxes_cpu(torch.from_numpy(points), + torch.from_numpy(gt_boxes)[:, [2, 3, 4, 5, 6, 7, 10]]) + num_pts = pts_mask.sum(dim=-1).numpy() + box_mask = num_pts > 0 + gt_boxes = gt_boxes[box_mask].tolist() + velos = velos[box_mask].tolist() + num_pts = num_pts[box_mask].tolist() + + # update boxes dict + # for i, box in enumerate(gt_boxes): + # id = int(box[0]) + # if id not in boxes: + # boxes[id] = { + # 'box': box, + # 'velo': velos[i], + # 'num_pts': num_pts[i] + # } + # else: + # if boxes[id]['num_pts'] < num_pts[i]: + # boxes[id] = { + # 'box': box, + # 'velo': velos[i], + # 'num_pts': num_pts[i] + boxes[id]['num_pts'] + # } + # else: + # boxes[id]['num_pts'] += num_pts[i] + + return (gt_boxes, velos, num_pts, pcd_dict)
+ + +
[docs]def read_frame_plys_boxes(path, frame, prev_frame=None, time_offset=0, parse_boxes=True): + data_list = [] + files = [] + if prev_frame is not None: + files_prev_frame = [f'{prev_frame}.{i}' for i in range(10 - time_offset, 10)] + files.extend(files_prev_frame) + files_cur_frame = [f'{frame}.{i}' for i in range(0, 10 - time_offset)] + files.extend(files_cur_frame) + files = [os.path.join(path, f) for f in files] + boxes = {} + + with Pool(10) as pool: + res = pool.map(read_sub_frame, files) + max_len = max([len(x[0]) for x in res]) + res = pool.starmap(pad_box_result, zip(res, [max_len] * len(res))) + + pcd_dict = {k: np.concatenate([d[1][k] for d in res], axis=0) for k in res[0][1]} + boxes_tensor = cat_coor_with_idx([torch.from_numpy(x[0]) for x in res]).float() + points_tensor = cat_coor_with_idx([torch.from_numpy(x[2]) for x in res]).float() + + _, pts_idx_of_box = points_in_boxes_gpu(points_tensor.cuda(), + boxes_tensor[:, [0, 3, 4, 5, 6, 7, 8, 11]].cuda(), + batch_size=len(res)) + + pts_idx_of_box = pts_idx_of_box[pts_idx_of_box >= 0] + cnt = torch.ones_like(pts_idx_of_box) + num_pts_in_box = cnt.new_zeros(len(boxes_tensor)) + torch_scatter.scatter_add(cnt, pts_idx_of_box, out=num_pts_in_box, dim=0) + num_pts_in_box = num_pts_in_box.reshape(10, -1).cpu() + num_pts = num_pts_in_box.sum(dim=0) + boxes_tensor = boxes_tensor.view(10, -1, boxes_tensor.shape[-1])[..., 1:] + max_inds = num_pts_in_box.max(dim=0).indices + boxes_selected = boxes_tensor[max_inds, torch.arange(len(max_inds))].numpy() + boxes_selected = boxes_selected[boxes_selected[:, 0] >= 0] + + # o3d_draw_pcds_bbxs([points_tensor[:, 1:].numpy()], [boxes_selected]) + + return pcd_dict, boxes_selected, num_pts
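Each OPV2V-T frame is stored as ten lidar slices (suffixes .0 to .9); with a per-CAV time offset, the merged sweep mixes the tail of the previous frame with the head of the current one. An illustration of the file selection above (frame ids and offset are made up):

prev_frame, frame, time_offset = '000068', '000070', 3
files = [f'{prev_frame}.{i}' for i in range(10 - time_offset, 10)] \
      + [f'{frame}.{i}' for i in range(0, 10 - time_offset)]
# ['000068.7', '000068.8', '000068.9', '000070.0', ..., '000070.6']  -> always 10 slices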
+ + +
[docs]def load_frame_data(scene_dir, cavs, frame): + ego_id = cavs[0] + yaml_file = os.path.join(scene_dir, ego_id, f'{frame}.5.yaml') + meta = load_yaml(yaml_file) + gt_boxes, velos = get_local_boxes3d(meta['vehicles'], + meta['lidar_pose'], 'lwh') + ego_pose = meta['lidar_pose'] + + points_list = [] + time_list = [] + for cav in cavs: + cav_dir = os.path.join(scene_dir, cav) + data = read_frame_plys_boxes(cav_dir, frame, parse_boxes=False)[0] + points = np.stack([data[k] for k in 'xyz'], axis=-1) + times = (data['time'] - data['time'].min()) * 10 + lidar_pose = load_yaml( + os.path.join(scene_dir, cav, f'{frame}.5.yaml'))['lidar_pose'] + transform = x1_to_x2(lidar_pose, ego_pose) + points = (transform[:3, :3] @ points.T + transform[:3, 3].reshape(3, 1)).T + points_list.append(points) + time_list.append(times) + points = np.concatenate(points_list, axis=0) + times = np.concatenate(time_list, axis=0) + return points, times, gt_boxes, velos
+ + +
[docs]def opv2vt_to_cosense(data_dir, split, data_out_dir, meta_out_dir): + order = 'lwh' + time_offsets = load_json(os.path.join(data_out_dir, 'time_offsets.json')) + split_dir = os.path.join(data_dir, split) + scenes = sorted(os.listdir(split_dir))[:2] + with open(os.path.join(meta_out_dir, f'{split}.txt'), 'w') as fh: + fh.write('\n'.join(scenes)) + for s in scenes: + print(s) + scene_dir = os.path.join(split_dir, s) + sdict = {} + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + if os.path.exists(os.path.join(scene_dir, 'speeds.json')): + speeds = load_json(os.path.join(scene_dir, 'speeds.json')) + else: + speeds = parse_speed_from_yamls(scene_dir) + ego_id = cavs[0] + frames = sorted([x.split(".")[0] for x in os.listdir( + os.path.join(scene_dir, cavs[0])) if '.0.ply' in x]) + for i, f in tqdm.tqdm(enumerate(frames[1:-1])): + frame_mid_time = int(f) * 0.05 + 0.05 + fdict = cs.fdict_template() + ego_lidar_pose = None + object_id_stack = [] + object_velo_stack = [] + object_stack = [] + for j, cav_id in enumerate(cavs): + cur_data_out_dir = os.path.join(data_out_dir, split, s, cav_id) + os.makedirs(cur_data_out_dir, exist_ok=True) + yaml_file = os.path.join(scene_dir, cav_id, f'{f}.5.yaml') + params = load_yaml(yaml_file, cloader=True) + cs.update_agent(fdict, cav_id, agent_type='cav', agent_pose=params['true_ego_pos']) + # update_cam_params(params, fdict, cav_id, s, f) + + if cav_id == ego_id: + ego_lidar_pose = params['lidar_pose'] + + # get cav lidar pose in cosense format + cs.update_agent(fdict, cav_id, 'cav') + cs.update_agent_lidar(fdict, cav_id, 0, + lidar_pose=opv2v_pose_to_cosense(params['lidar_pose']), + lidar_file=os.path.join(s, cav_id, f'{f}.ply')) + # save lidar files + data, local_boxes, num_pts = read_frame_plys_boxes(os.path.join(scene_dir, cav_id), f, + prev_frame=frames[i], time_offset=time_offsets[s][cav_id]) + velos = get_velos(local_boxes, speeds, f) + # save_cosense_ply(data, os.path.join(cur_data_out_dir, f'{f}.ply')) + + objects_dict = params.get('vehicles', {}) + output_dict = {} + glob_ref_pose = ego_lidar_pose + local_ref_pose = params['lidar_pose'] + + # update_local_boxes + cs.update_agent(fdict, cav_id, gt_boxes=local_boxes.tolist()) + cs.update_agent(fdict, cav_id, velos=velos.tolist()) + cs.update_agent(fdict, cav_id, num_pts=num_pts.tolist()) + # update_2d_bboxes(fdict, cav_id, params['lidar_pose'], data_dir) + + # add gt boxes in ego coordinates as global boxes of cosense3d format + project_world_objects(objects_dict, + output_dict, + glob_ref_pose, + order) + + for object_id, object_content in output_dict.items(): + if object_content['ass_id'] != -1: + object_id_stack.append(object_content['ass_id']) + else: + object_id_stack.append(object_id + 100 * int(cav_id)) + if object_content['velo'] is not None: + object_velo_stack.append(object_content['velo']) + object_stack.append(object_content['coord']) + + # exclude all repetitive objects + unique_indices = \ + [object_id_stack.index(x) for x in set(object_id_stack)] + object_stack = np.vstack(object_stack) + object_stack = object_stack[unique_indices] + if len(object_velo_stack) > 0: + object_velo_stack = np.vstack(object_velo_stack) + object_velo_stack = object_velo_stack[unique_indices] + if order == 'hwl': + object_stack = object_stack[:, [0, 1, 2, 5, 4, 3, 6]] + + cosense_bbx_center = np.zeros((len(object_stack), 11)) + cosense_bbx_center[:, 0] = np.array(object_id_stack)[unique_indices] + cosense_bbx_center[:, 2:8] = object_stack[:, :6] + 
cosense_bbx_center[:, 10] = object_stack[:, 6] + cs.update_frame_bbx(fdict, cosense_bbx_center.tolist()) + fdict['agents'].pop(0) # remove template agent + + fdict['meta']['ego_id'] = ego_id + fdict['meta']['ego_lidar_pose'] = opv2v_pose_to_cosense(ego_lidar_pose) + fdict['meta']['global_bbox_time'] = np.full(len(cosense_bbx_center), frame_mid_time).tolist() + fdict['meta']['bbx_velo_global'] = get_velos(cosense_bbx_center, speeds, f).tolist() + + sdict[f] = fdict + + save_json(sdict, os.path.join(meta_out_dir, f'{s}.json')) + del sdict
+ + +
[docs]def vis_frame_data(): + scene_dir = "/koko/OPV2V/temporal_dump/test/2021_08_18_19_48_05" + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + frames = sorted([x.split(".")[0] for x in os.listdir( + os.path.join(scene_dir, cavs[0])) if '.0.ply' in x]) + for f in frames[::10]: + points, times, local_boxes, velos = load_frame_data(scene_dir, cavs, f) + pcd = o3d.geometry.PointCloud() + # color_inds = np.round(times).astype(int) + colors = jet(times)[:, :3] + o3d_draw_pcds_bbxs([points], [np.array(local_boxes)], + pcds_colors=[colors])
+ + +
[docs]def parse_speed_from_yamls(scene_dir): + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + vehicle_dict = {} + for cav in cavs: + cav_dir = os.path.join(scene_dir, cav) + yamls = sorted(glob(os.path.join(cav_dir, '*5_objects.yaml'))) + for yaml in tqdm.tqdm(yamls): + frame = int(os.path.basename(yaml).split('.')[0]) + params = load_yaml(yaml, cloader=True) + for k, v in params['vehicles'].items(): + if k not in vehicle_dict: + vehicle_dict[k] = {'frames': [], 'locations': []} + if frame not in vehicle_dict[k]['frames']: + vehicle_dict[k]['frames'].append(frame) + vehicle_dict[k]['locations'].append(v['location']) + + # vehicle_dict = load_json(os.path.join(scene_dir, 'vehicles.json')) + velo_dict = {} + for veh_id, veh_info in vehicle_dict.items(): + times = np.array(veh_info['frames']) * 0.05 + sort_inds = np.argsort(times) + times = times[sort_inds] + locations = np.array(veh_info['locations']) + locations = locations[sort_inds] + time_offsets = times[1:] - times[:-1] + interp_inds = np.where(time_offsets > 0.15)[0] + loc_offsets = np.linalg.norm(locations[1:] - locations[:-1], axis=-1) + speeds = loc_offsets / time_offsets + + # interpolate missed frames + speeds_interp = [] + times_interp = [] + last_idx = 0 + for idx in interp_inds: + speeds_interp.extend(speeds[last_idx:idx]) + times_interp.extend(times[last_idx:idx]) + steps = int(round(time_offsets[idx] * 10)) + if idx == 0: + interp_s = [speeds[0]] * (steps - 1) + interp_t = [times[0]] * (steps - 1) + else: + interp_s = np.linspace(speeds[idx-1], speeds[idx], steps + 1)[1:-1].tolist() + interp_t = np.linspace(times[idx-1], times[idx], steps + 1)[1:-1].tolist() + speeds_interp.extend(interp_s) + times_interp.extend(interp_t) + last_idx = idx + speeds_interp.extend(speeds[last_idx:]) + times_interp.extend(times[last_idx:]) + + velo_dict[veh_id] = {f'{round(t*20):06d}': speed for t, speed in zip(times_interp, speeds_interp)} + save_json(velo_dict, os.path.join(scene_dir, 'speeds.json')) + return velo_dict
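A quick worked example of the speed bookkeeping above (illustrative numbers, not taken from the dataset): frame indices are converted to seconds at the 20 Hz simulation rate (times = frames * 0.05), so two consecutive object yamls that are 2 frames apart lie 0.1 s apart; if a vehicle's location moved 0.8 m between them, its estimated speed is 0.8 / 0.1 = 8 m/s. Gaps larger than 0.15 s are treated as missing frames and bridged by linear interpolation in 0.1 s steps, and round(t * 20) maps each (possibly interpolated) time back to the zero-padded frame key written to speeds.json.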
+ + +
[docs]def update_velo(scenario_meta_file): + meta = load_json(scenario_meta_file) + frames = sorted(list(meta.keys())) + objects = {} + + # find all global objects + for f in frames: + fdict = meta[f] + boxes = fdict['meta']['bbx_center_global'] + for box in boxes: + box_id = int(box[0]) + if box_id not in objects: + objects[box_id] = {'frames': [], 'boxes': []} + objects[box_id]['frames'].append(int(f)) + objects[box_id]['boxes'].append(box) + + def cal_velos(cur_gt_boxes, next_gt_boxes, cur_pose, next_pose, meta_last): + cur_gt_boxes_dict = {int(box[0]): box for box in cur_gt_boxes} + next_gt_boxes_np = np.array(next_gt_boxes) + cur_pose = pose_to_transformation(cur_pose) + next_pose = pose_to_transformation(next_pose) + transf_next_to_cur = np.linalg.inv(cur_pose) @ next_pose + next_gt_boxes_np = transform_boxes_3d(next_gt_boxes_np, transf_next_to_cur) + next_gt_boxes_dict = {int(box[0]): box.tolist() for box in next_gt_boxes_np} + velos = {} + for k, v in cur_gt_boxes_dict.items(): + if k not in next_gt_boxes_dict: + if k in meta_last: + velos[k] = meta_last[k] + else: + velos[k] = [0, 0] + continue + velo = [(next_gt_boxes_dict[k][2] - v[2]) * 10, (next_gt_boxes_dict[k][3] - v[3]) * 10] # m/s + velos[k] = velo + velos = [velos[int(box[0])] for box in cur_gt_boxes] + return velos + + for i, f in enumerate(frames[:-1]): + fdict = meta[f] + global_ids = sorted([int(box[0]) for box in fdict['meta']['bbx_center_global']]) + global_ids = set(global_ids) + local_ids = [] + for a, adict in fdict['agents'].items(): + local_ids.extend([int(box[0]) for box in adict['gt_boxes']]) + local_ids = set(local_ids) + next_fdict = meta[frames[i + 1]] + last_fdict = meta[frames[max(i-1, 0)]] + + if i == 0: + meta_last = {} + else: + meta_last = {int(box[0]): last_fdict['meta']['bbx_velo_global'][i] \ + for i, box in enumerate(last_fdict['meta']['bbx_center_global'])} + meta[f]['meta']['bbx_velo_global'] = cal_velos( + fdict['meta']['bbx_center_global'], + next_fdict['meta']['bbx_center_global'], + fdict['meta']['ego_lidar_pose'], + next_fdict['meta']['ego_lidar_pose'], + meta_last + ) + for a, adict in fdict['agents'].items(): + if i == 0: + meta_last = {} + else: + meta_last = {int(box[0]): last_fdict['agents'][a]['velos'][i] \ + for i, box in enumerate(last_fdict['agents'][a]['gt_boxes'])} + velos = cal_velos( + adict['gt_boxes'], next_fdict['agents'][a]['gt_boxes'], + adict['lidar']['0']['pose'], next_fdict['agents'][a]['lidar']['0']['pose'], + meta_last + ) + meta[f]['agents'][a]['velos'] = velos + save_json(meta, scenario_meta_file)
+ + +
[docs]def vis_cosense_scenario(scenario_meta_file, data_dir): + meta = load_json(scenario_meta_file) + for f, fdict in meta.items(): + global_boxes = np.array(fdict['meta']['bbx_center_global']) + for a, adict in fdict['agents'].items(): + lidar_file = os.path.join(data_dir, adict['lidar']['0']['filename']) + pcd_dict = read_ply(lidar_file) + points = np.stack([pcd_dict[x] for x in 'xyz'], axis=-1) + boxes = np.array(adict['gt_boxes']) + + o3d_draw_pcds_bbxs([points], [boxes, global_boxes], + bbxs_colors=[[0, 255, 0], [255, 0, 0]])
+ + +
[docs]def gen_time_offsets(data_dir): + out_dict = {} + for split in ['train', 'test']: + split_dir = os.path.join(data_dir, split) + scenes = os.listdir(split_dir) + for s in scenes: + out_dict[s] = {} + scene_dir = os.path.join(split_dir, s) + cavs = sorted([x for x in os.listdir(scene_dir) + if os.path.isdir(os.path.join(scene_dir, x))]) + for i, cav in enumerate(cavs): + if i == 0: + out_dict[s][cav] = 0 + else: + out_dict[s][cav] = random.randint(0, 5) + save_json(out_dict, os.path.join(data_dir, f'time_offsets.json'))
+ + +
[docs]def load_vehicles_gframe(params): + """Load vehicles in global coordinate system.""" + object_dict = params['vehicles'] + object_out = {} + for object_id, object_content in object_dict.items(): + location = object_content['location'] + rotation = object_content['angle'] + center = object_content['center'] + extent = object_content['extent'] + + object_pose = [location[0] + center[0], + location[1] + center[1], + location[2] + center[2], + rotation[0], rotation[1], rotation[2]] + + object_out[object_id] = [0,] + object_pose[:3] + extent + object_pose[3:] + return object_out
+ + +
[docs]def transform_boxes_global_to_ref(boxes, ref_pose): + pass
+ + +
[docs]def update_global_boxes(root_dir, meta_in, meta_out, split): + split_dir = os.path.join(root_dir, split) + scenes = os.listdir(split_dir) + for s in scenes: + scene_dir = os.path.join(split_dir, s) + sdict = load_json(os.path.join(meta_in, f"{s}.json")) + cavs = sorted([x for x in os.listdir(scene_dir) if os.path.isdir(os.path.join(scene_dir, x))]) + + ego_files = sorted(glob(os.path.join(scene_dir, cavs[0], '*.0_objects.yaml'))) + sim_frames = [os.path.basename(x)[:6] for x in ego_files] + global_objects = {x: {} for x in sim_frames} + ego_poses = {} + + for cav in cavs[1:]: + yaml_files = sorted(glob(os.path.join(scene_dir, cav, '*.0_objects.yaml'))) + for yf in yaml_files: + frame = os.path.basename(yf)[:6] + objects = load_yaml(yf)['vehicles'] + global_objects[frame].update(objects) + for yf in ego_files: + frame = os.path.basename(yf)[:6] + params = load_yaml(yf) + ego_poses[frame] = params['lidar_pose'] + global_objects[frame].update(params['vehicles']) + + frames = sorted(list(sdict.keys())) + for f in frames[:-1]: + lidar_pose = ego_poses[f] + sdict[f]['meta']['boxes_pred'] = {} + box_ids = [int(box[0]) for box in sdict[f]['meta']['bbx_center_global']] + for i in range(1, 3): + cur_frame = f"{int(f) + i * 2:06d}" + boxes_global = global_objects[cur_frame] + boxes_ref = {} + project_world_objects(boxes_global, boxes_ref, lidar_pose, 'lwh') + boxes_pred = [] + for box_id in box_ids: + if box_id in boxes_global: + pred = boxes_ref[box_id]['coord'].reshape(7)[[0, 1, 2, 6]].tolist() + else: + pred = [0,] * 4 + boxes_pred.append(pred) + sdict[f]['meta']['boxes_pred'][cur_frame] = boxes_pred + sdict.pop(frames[-1]) + save_json(sdict, os.path.join(meta_out, f"{s}.json"))
+ + +
[docs]def update_bev_map(root_dir, meta_in, meta_out, split): + from cosense3d.dataset.const import OPV2V_TOWN_DICTIONARY + resolution = 0.2 + pixels_per_meter = 1 / resolution + radius = 100 + map_bounds = load_json(f'../../carla/assets/map_bounds.json') + split_dir = os.path.join(root_dir, split) + scenes = os.listdir(split_dir)[3:] + x = np.linspace(- radius + 0.5 * resolution, radius, + int(radius * 2 / resolution) - 1) + bev_points = np.stack(np.meshgrid(x, x), axis=0) + bev_points = np.r_[bev_points, [np.zeros(bev_points.shape[1:]), + np.ones(bev_points.shape[1:])]].reshape(4, -1) + + for s in scenes: + town = OPV2V_TOWN_DICTIONARY[s] + bev_map = cv2.imread(f'../../carla/assets/maps/{town}.png') + sx, sy, _ = bev_map.shape + map_bound = map_bounds[town] + scene_dir = os.path.join(split_dir, s) + sdict = load_json(os.path.join(meta_in, f"{s}.json")) + for f, fdict in sdict.items(): + adict = fdict['agents'][fdict['meta']['ego_id']] + lidar_pose = adict['lidar']['0']['pose'] + lidar_file = os.path.join(split_dir, adict['lidar']['0']['filename']) + pcd = load_pcd(lidar_file)['xyz'] + transform = pose_to_transformation(lidar_pose) + cords = np.dot(transform, bev_points).T + xs = np.floor((cords[:, 0] - map_bound[0]) * pixels_per_meter).astype(int) + ys = np.floor((cords[:, 1] - map_bound[1]) * pixels_per_meter).astype(int) + xs = np.maximum(np.minimum(xs, sx - 1), 0) + ys = np.maximum(np.minimum(ys, sy - 1), 0) + road_mask = bev_map[xs, ys] / 255. + mask = road_mask[:, :2].any(axis=1) + + import matplotlib.pyplot as plt + plt.plot(bev_points[0][mask], bev_points[1][mask], '.g') + plt.plot(pcd[:, 0], pcd[:, 1], '.r', markersize=1) + plt.show() + plt.close() + break
+ + +
[docs]def generate_roadline_reference_points(root_dir, meta_file): + assets_path = f"{os.path.dirname(__file__)}/../../carla/assets" + map_path = f"{assets_path}/maps/png" + roadline_path = f"{assets_path}/maps/roadline" + map_files = glob(os.path.join(map_path, '*.png')) + map_bounds = load_json(os.path.join(assets_path, 'map_bounds.json')) + + kernel = 3 + map_res = 0.2 + + for mf in map_files: + town = os.path.basename(mf).split('.')[0] + bound = map_bounds[town] + bevmap = cv2.imread(mf) / 255. + bevmap = torch.from_numpy(bevmap[..., :2]).any(dim=-1).float() + bevmap[bevmap == 0] = -1 + filters = torch.ones(1, 1, kernel, kernel, device=bevmap.device) / (kernel ** 2 * 2) + road = torch.conv2d(bevmap[None, None], filters).squeeze() + mask = (road < 0.5) & (road > -0.5) + inds = torch.where(mask) + # scores = 1 - road[mask].abs() + coords = torch.stack(inds).T * map_res + 0.3 + coords[:, 0] = coords[:, 0] + bound[0] + coords[:, 1] = coords[:, 1] + bound[1] + coords = coords.numpy().astype(float) + coords.tofile(os.path.join(roadline_path, f'{town}.bin'))
+ + + # sdict = load_json(meta_file) + # scene_maps = load_json(os.path.join(assets_path, 'scenario_town_map.json')) + # scenario = os.path.basename(meta_file).split('.')[0] + # town = scene_maps[scenario] + # for fi, fdict in sdict.items(): + # if int(fi) % 10 != 1: + # continue + # for ai, adict in fdict['agents'].items(): + # lidar_pose = adict['lidar']['0']['pose'] + # lidar_file = os.path.join(root_dir, 'test', adict['lidar']['0']['filename']) + # pcd = load_pcd(lidar_file)['xyz'] + # transform = pose_to_transformation(lidar_pose) + # pcd = (transform @ np.concatenate([pcd, np.ones_like(pcd[:, :1])], axis=1).T).T + # + # fig = plt.figure(figsize=(16, 12)) + # ax = fig.add_subplot() + # ax.plot(coords[:, 0], coords[:, 1], '.g', markersize=1) + # ax.scatter(pcd[:, 0], pcd[:, 1], s=1, c=np.clip(pcd[:, 2], a_min=-3, a_max=1), cmap='jet') + # plt.savefig("/home/yys/Downloads/tmp.jpg") + # plt.close() + # continue + + +if __name__=="__main__": + generate_roadline_reference_points( + "/home/data/OPV2Va", + "/home/data/OPV2Va/meta/2021_08_23_17_22_47.json" + ) + + # gen_time_offsets("/media/yuan/luna/data/OPV2Vt") + # parse_speed_from_yamls("/home/data/OPV2V/temporal_dump/train/2021_08_16_22_26_54") + # opv2vt_to_cosense( + # "/media/yuan/luna/data/OPV2Vt/temporal_dump", + # "train", + # "/koko/OPV2V/temporal", + # "/koko/cosense3d/opv2v_temporal" + # ) + # opv2vt_to_cosense( + # "/home/data/OPV2V/temporal_dump", + # "test", + # "/home/data/OPV2V/temporal", + # "/home/data/cosense3d/opv2v_temporal" + # ) + # vis_frame_data() + # vis_cosense_scenario( + # "/home/data/cosense3d/opv2v_temporal/2021_08_16_22_26_54.json", + # "/home/data/OPV2V/temporal/train" + # ) + # update_velo( + # "/media/yuan/luna/data/OPV2Vt/meta/2021_08_16_22_26_54.json", + # ) + # update_bev_map( + # "/koko/OPV2V/temporal", + # "/koko/cosense3d/opv2vt", + # "/koko/cosense3d/opv2vt_bev", + # "train" + # ) + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules.html b/docs/_build/html/_modules/cosense3d/modules.html new file mode 100644 index 00000000..34100fdb --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules.html @@ -0,0 +1,278 @@ + + + + + + cosense3d.modules — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules

+import torch
+from torch import nn
+from typing import List, Dict, Optional
+import importlib
+
+from cosense3d.modules.utils.common import cat_coor_with_idx
+from cosense3d.modules.utils.me_utils import ME
+
+
+
[docs]def build_module(module_cfg): + module_full_path=module_cfg['type'] + package, module_name = module_full_path.rsplit('.', 1) + module = importlib.import_module(f'cosense3d.modules.{package}') + cls_obj = getattr(module, module_name, None) + assert cls_obj is not None, f'Class \'{module_name}\' not found.' + try: + inst = cls_obj(**module_cfg) + except Exception as e: + raise Exception(f"{module_name}:{e.__repr__()}") + return inst
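build_module resolves the dotted 'type' entry to a class under cosense3d.modules and instantiates it with the remaining config keys. A minimal usage sketch (the values below are illustrative, not a config shipped with the repo):

cfg = dict(
    type='backbone2d.resnet_encoder.ResnetEncoder',  # module path relative to cosense3d.modules
    num_layers=34, feat_indices=(2,), out_index=2, img_size=(256, 800),
    gather_keys=['img'], scatter_keys=['img_feat'],  # assumed runner keys
)
encoder = build_module(cfg)  # 'type' is forwarded too and silently absorbed by BaseModule's **kwargs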
+ + +
[docs]class BaseModule(nn.Module): + def __init__(self, gather_keys, scatter_keys, gt_keys=[], freeze=False, **kwargs): + super(BaseModule, self).__init__() + self.gather_keys = gather_keys + self.scatter_keys = scatter_keys + self.gt_keys = gt_keys + self.freeze = freeze + +
[docs] def to_gpu(self, gpu_id): + self.to(gpu_id) + # Subclasses may instead return a batchnorm conversion function (e.g. nn.SyncBatchNorm.convert_sync_batchnorm) to be applied before DDP wrapping; the base module needs none. + return None
+ +
[docs] def freeze_parameters(self): + for param in self.parameters(): + param.requires_grad = False
+ +
[docs] def forward(self, *args, **kwargs): + raise NotImplementedError
+ +
[docs] def loss(self, *args, **kwargs): + """This must be implemented in head module.""" + # TODO: Create Head base module. + pass
+ +
[docs] def prepare_vis_data(self): + pass
+ +
[docs] def format_input(self, input: List): + pass
+ +
[docs] def format_output(self, output, B): + pass
+ +
[docs] def cat_data_from_list(self, input, key=None, pad_idx=False): + if key is not None: + data = [x[key] for x in input] + else: + data = input + if isinstance(data[0], torch.Tensor): + if pad_idx: + return cat_coor_with_idx(data) + else: + return torch.cat(data, dim=0) + else: + return data
+ +
[docs] def stack_data_from_list(self, input, key=None): + if key is not None: + data = [x[key] for x in input] + else: + data = input + if isinstance(data[0], torch.Tensor): + return torch.stack(data, dim=0) + else: + return data
+ + +
[docs] def cat_list(self, x_list, recursive=False): + """Concatenate sub_lists to one list""" + if len(x_list) > 0 and isinstance(x_list[0], list): + out = [] + for x in x_list: + out.extend(self.cat_list(x) if recursive else x) + return out + else: + return x_list
+ +
[docs] def cat_dict_list(self, d_list: List[Dict]): + out_dict = {k:[] for k in d_list[0].keys()} + for k in d_list[0].keys(): + for d in d_list: + out_dict[k].extend(d[k]) + return out_dict
+ +
[docs] def stack_dict_list(self, d_list: List[Dict]): + out_dict = {k:[] for k in d_list[0].keys()} + for k in d_list[0].keys(): + for d in d_list: + out_dict[k].append(d[k]) + out_dict[k] = torch.stack(out_dict[k], dim=0) + return out_dict
+ +
[docs] def compose_imgs(self, img_list): + imgs = [img for x in img_list for img in x] + return torch.stack(imgs, dim=0)
+ +
[docs] def compose_stensor(self, stensor_list, stride): + coor = [stensor[f'p{stride}']['coor'] for stensor in stensor_list] + coor = cat_coor_with_idx(coor) + feat = [stensor[f'p{stride}']['feat'] for stensor in stensor_list] + feat = torch.cat(feat, dim=0) + if 'ctr' in stensor_list[0][f'p{stride}']: + ctr = [stensor[f'p{stride}']['ctr'] for stensor in stensor_list] + ctr = torch.cat(ctr, dim=0) + else: + ctr = None + return coor, feat, ctr
+ +
[docs] def decompose_stensor(self, res, N): + # decompose batch + for k, v in res.items(): + if isinstance(v, ME.SparseTensor): + coor, feat = v.decomposed_coordinates_and_features + ctr = None + elif isinstance(v, dict): + coor, feat, ctr = [], [], [] + for i in range(N): + mask = v['coor'][:, 0] == i + coor.append(v['coor'][mask, 1:]) + feat.append(v['feat'][mask]) + ctr.append(v['ctr'][mask]) + else: + raise NotImplementedError + res[k] = {'coor': coor, 'feat': feat, 'ctr': ctr} + + # compose result list + res_list = self.compose_result_list(res, N) + return res_list
+ +
[docs] def compose_result_list(self, res, N): + """ + + :param res: dict(k:list) + :param N: + :return: + """ + keys = res.keys() + res_list = [] + for i in range(N): + cur_res = {} + for k, v in res.items(): + if isinstance(v, dict): + cur_res[k] = { + 'coor': v['coor'][i], + 'feat': v['feat'][i], + 'ctr': v['ctr'][i] + } + elif isinstance(v, list) or isinstance(v, torch.Tensor): + cur_res[k] = v[i] + else: + raise NotImplementedError + res_list.append(cur_res) + return res_list
+ + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(gather_keys={self.gather_keys}, ' + repr_str += f'scatter_keys={self.scatter_keys})' + return repr_str
+
+ +
+
+
+ +
+ +
+
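To make the gather/scatter contract of BaseModule concrete, here is a minimal hypothetical subclass (a sketch, not a module from this repo) that consumes one gathered input list and writes one output key:

class ToyHead(BaseModule):
    def __init__(self, in_dim, n_cls, **kwargs):
        super().__init__(**kwargs)
        self.cls = nn.Linear(in_dim, n_cls)

    def forward(self, feats, **kwargs):
        # 'feats' is a list with one tensor per sample, gathered via self.gather_keys
        x = self.cat_data_from_list(feats)           # concatenate along the batch dimension
        return {self.scatter_keys[0]: self.cls(x)}   # scatter result under the first output key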

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/backbone2d/resnet_encoder.html b/docs/_build/html/_modules/cosense3d/modules/backbone2d/resnet_encoder.html new file mode 100644 index 00000000..25c68cd0 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/backbone2d/resnet_encoder.html @@ -0,0 +1,198 @@ + + + + + + cosense3d.modules.backbone2d.resnet_encoder — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.backbone2d.resnet_encoder

+import torch
+import torch.nn as nn
+import torchvision.models as models
+
+from einops import rearrange
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.plugin import build_plugin_module
+from cosense3d.modules.utils.positional_encoding import img_locations
+
+
+
[docs]class ResnetEncoder(BaseModule): + """Resnet family to encode image.""" + def __init__(self, num_layers, feat_indices, out_index, img_size, + neck=None, **kwargs): + super(ResnetEncoder, self).__init__(**kwargs) + + self.num_layers = num_layers + self.feat_indices = sorted(feat_indices) + self.out_index = out_index + self.img_size = img_size + indices = (out_index, ) if isinstance(out_index, int) else out_index + self.strides = [2 ** (idx + 1) for idx in indices] + self.feat_sizes = [(img_size[0] // stride, img_size[1] // stride) + for stride in self.strides] + if 'img_coor' in self.scatter_keys: + self.img_locations = [nn.Parameter( + img_locations(img_size, feat_size), requires_grad=False) + for feat_size in self.feat_sizes] + self.img_locations = nn.ParameterList(self.img_locations) + + resnet = getattr(models, f'resnet{self.num_layers}', None) + + if resnet is None: + raise ValueError(f"{self.num_layers} is not a valid number of resnet ""layers") + + resnet_weights = getattr(models, f"ResNet{self.num_layers}_Weights") + self.encoder = resnet(weights=resnet_weights.DEFAULT) + self.neck = build_plugin_module(neck) if neck is not None else None + +
[docs] def forward(self, input_images, **kwargs): + num_imgs = [len(x) for x in input_images] + imgs = self.compose_imgs(input_images) + b, h, w, c = imgs.shape + + # b, h, w, c -> b, c, h, w + imgs = imgs.permute(0, 3, 1, 2).contiguous() + + x = self.encoder.conv1(imgs) + x = self.encoder.bn1(x) + x = self.encoder.relu(x) + x = self.encoder.maxpool(x) + + out = [] + for i in range(1, 5): + x = getattr(self.encoder, f'layer{i}')(x) + if i in self.feat_indices: + out.append(x) + + if self.neck is not None: + out = self.neck(out) + if isinstance(self.out_index, tuple): + out = [out[self.feat_indices.index(i)] for i in self.out_index] + else: + out = out[self.feat_indices.index(self.out_index)] + return self.format_output(out, num_imgs)
+ +
[docs] def format_output(self, output, num_imgs): + ptr = 0 + output_list = [] + coor_list = [] + for n in num_imgs: + if isinstance(output, (tuple, list)): + output_list.append(tuple(out[ptr:ptr+n] for out in output)) + else: + output_list.append(output[ptr:ptr + n]) + if 'img_coor' in self.scatter_keys: + assert hasattr(self, 'img_locations') + img_locs = [locs.unsqueeze(0).repeat(n, 1, 1, 1) + for locs in self.img_locations] + if isinstance(self.out_index, int): + img_locs = img_locs[0] + coor_list.append(img_locs) + ptr += n + out_dict = {} + if 'img_feat' in self.scatter_keys: + out_dict['img_feat'] = output_list + if 'img_coor' in self.scatter_keys: + out_dict['img_coor'] = coor_list + + return out_dict
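A quick check of the stride bookkeeping in this encoder (illustrative numbers): ResNet layer i has stride 2**(i + 1) with respect to the input image, so out_index=2 gives a feature stride of 8, and img_size=(256, 800) yields a feature map of (256 // 8, 800 // 8) = (32, 100), which is also the grid at which img_locations is sampled when 'img_coor' is among the scatter keys.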
+
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/backbone3d/mink_unet.html b/docs/_build/html/_modules/cosense3d/modules/backbone3d/mink_unet.html new file mode 100644 index 00000000..f82e969e --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/backbone3d/mink_unet.html @@ -0,0 +1,313 @@ + + + + + + cosense3d.modules.backbone3d.mink_unet — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.backbone3d.mink_unet

+import torch
+from torch import nn
+from cosense3d.modules import BaseModule
+from cosense3d.modules.utils.common import *
+from cosense3d.modules.utils.me_utils import *
+
+
+
[docs]class MinkUnet(BaseModule): + QMODE = ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE + def __init__(self, + data_info, + stride, + in_dim, + d=3, + kernel_size_layer1=3, + enc_dim=32, + cache_strides=None, + floor_height=0, + height_compression=None, + compression_kernel_size_xy=1, + to_dense=False, + dist=False, + **kwargs): + super(MinkUnet, self).__init__(**kwargs) + update_me_essentials(self, data_info) + self.stride = stride + self.in_dim = in_dim + self.enc_dim = enc_dim + self.floor_height = floor_height + self.to_dense = to_dense + self.height_compression = height_compression + self.compression_kernel_size_xy = compression_kernel_size_xy + self.d = d + self.lidar_range_tensor = nn.Parameter(torch.Tensor(self.lidar_range), requires_grad=False) + # For determine batchnorm type: if the model is trained on multiple GPUs with ME.MinkowskiBatchNorm, + # the BN would perform differently in eval mode because the running_mean and running_var would be + # different to training mode, this is caused by different number of tracked batches, therefore if + # ditributed training is used for this model, either ME.MinkowskiSyncBatchNorm should be used, or + # the running mean and var should be adapted. + # TODO: adapt running mean and var in inference mode if model is trained with DDP + self.dist = dist + if cache_strides is None: + self.cache_strides = [stride] + self.max_resolution = stride + else: + self.max_resolution = min(cache_strides) + self.cache_strides = cache_strides + self._init_unet_layers(kernel_size_layer1) + if height_compression is not None: + self._init_height_compression_layers(height_compression) + self.init_weights() + + def _init_unet_layers(self, kernel_size_layer1=3): + self.enc_mlp = linear_layers([self.in_dim * 2, 16, self.enc_dim], norm='LN') + kernel_conv1 = [kernel_size_layer1,] * min(self.d, 3) + kernel = [3,] * min(self.d, 3) + if self.d == 4: + kernel = kernel + [1,] + kernel_conv1 = kernel + [1,] + + kwargs = {'d': self.d, 'bn_momentum': 0.1} + self.conv1 = minkconv_conv_block(self.enc_dim, self.enc_dim, kernel_conv1, + 1, **kwargs) + self.conv2 = get_conv_block([self.enc_dim, self.enc_dim, self.enc_dim], kernel, **kwargs) + self.conv3 = get_conv_block([self.enc_dim, self.enc_dim * 2, self.enc_dim * 2], kernel, **kwargs) + self.conv4 = get_conv_block([self.enc_dim * 2, self.enc_dim * 4, self.enc_dim * 4], kernel, **kwargs) + + if self.max_resolution <= 4: + self.trconv4 = get_conv_block([self.enc_dim * 4, self.enc_dim * 2, self.enc_dim * 2], kernel, tr=True, **kwargs) + if self.max_resolution <= 2: + self.trconv3 = get_conv_block([self.enc_dim * 4, self.enc_dim * 2, self.enc_dim * 2], kernel, tr=True, **kwargs) + if self.max_resolution <= 1: + self.trconv2 = get_conv_block([self.enc_dim * 3, self.enc_dim * 2, self.enc_dim], kernel, tr=True, **kwargs) + self.out_layer = minkconv_layer(self.enc_dim * 2, self.enc_dim, kernel, 1, d=self.d) + + def _init_height_compression_layers(self, planes): + self.stride_size_dict = {} + for k, v in planes.items(): + self.stride_size_dict[int(k[1])] = self.grid_size(int(k[1])) + layers = [] + steps = v['steps'] + channels = v['channels'] + for i, s in enumerate(steps): + kernel = [self.compression_kernel_size_xy] * 2 + [s] + stride = [1] * 2 + [s] + layers.append( + minkconv_conv_block(channels[i], channels[i+1], + kernel, stride, self.d, 0.1) + ) + layers = nn.Sequential(*layers) + setattr(self, f'{k}_compression', layers) + +
[docs] def init_weights(self): + for n, p in self.named_parameters(): + if ('mlp' in n and 'weight' in n) or 'kernel' in n: + if p.ndim == 1: + continue + nn.init.xavier_uniform_(p)
+ +
[docs] def to_gpu(self, gpu_id): + self.to(gpu_id) + return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm
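The batchnorm comment in __init__ is the reason to_gpu returns ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm here instead of None. A sketch of the trainer-side pattern this implies (assumed usage, not code from this repo):

def move_to_gpu(module, gpu_id, use_ddp=True):
    convert_fn = module.to_gpu(gpu_id)   # MinkowskiSyncBatchNorm conversion for MinkUnet, None for most modules
    if use_ddp and convert_fn is not None:
        module = convert_fn(module)      # swap BN layers for synchronized variants before DDP wrapping
    return module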
+ +
[docs] def forward(self, points: list, **kwargs): + res = self.forward_unet(points, **kwargs) + + if self.height_compression is not None: + res = self.forward_height_compression(res) + + res = self.format_output(res, len(points)) + return res
+ +
[docs] def forward_unet(self, points, **kwargs): + N = len(points) + points = [torch.cat([torch.ones_like(pts[:, :1]) * i, pts], dim=-1 + ) for i, pts in enumerate(points)] + x = prepare_input_data(points, self.voxel_size, self.QMODE, self.floor_height, + self.d, self.in_dim) + x1, norm_points_p1, points_p1, count_p1, pos_embs = voxelize_with_centroids( + x, self.enc_mlp, self.lidar_range_tensor) + + # convs + x1 = self.conv1(x1) + x2 = self.conv2(x1) + x4 = self.conv3(x2) + p8 = self.conv4(x4) + p8_cat = p8 + + # transposed convs + if self.max_resolution <= 4: + p4 = self.trconv4(p8) + p4_cat = ME.cat(x4, p4) + if self.max_resolution <= 2: + p2 = self.trconv3(p4_cat) + p2_cat = ME.cat(x2, p2) + if self.max_resolution <= 1: + p1 = self.trconv2(p2_cat) + p1_cat = self.out_layer(ME.cat(x1, p1)) + if self.max_resolution == 0: + p0 = devoxelize_with_centroids(p1, x, pos_embs) + p0_cat = {'coor': torch.cat(points, dim=0), 'feat': p0} + + vars = locals() + res = {f'p{k}': vars[f'p{k}_cat'] for k in self.cache_strides} + + tmp = x4.F.max(dim=0).values + return res
+ +
[docs] def forward_height_compression(self, res): + for stride in self.stride_size_dict.keys(): + out_tensor = getattr(self, f'p{stride}_compression')(res[f'p{stride}']) + assert len(out_tensor.C[:, 3].unique()) == 1, \ + (f"height is not fully compressed. " + f"Unique z coords: {','.join([str(x.item()) for x in out_tensor.C[:, 3].unique()])}") + if self.to_dense: + out_tensor = self.stensor_to_dense(out_tensor).permute(0, 3, 1, 2) + res[f'p{stride}'] = out_tensor + else: + ctr = indices2metric(out_tensor.C, self.voxel_size) + res[f'p{stride}'] = {'coor': out_tensor.C[:, :3], 'feat': out_tensor.F, 'ctr': ctr[:, 1:3]} + return res
+ +
[docs] def format_output(self, res, N): + out_dict = {self.scatter_keys[0]: self.decompose_stensor(res, N)} + return out_dict
+ +
[docs] def stensor_to_dense(self, stensor): + mask, indices = self.valid_coords(stensor) + b = int(stensor.C[:, 0].max()) + 1 + d = stensor.F.shape[1] + features = stensor.F[mask].view(-1, d) + s = self.stride_size_dict[stensor.tensor_stride[0]] + dtensor = features.new_zeros((b, s[0], s[1], d)) + dtensor[indices[0], indices[1], indices[2]] = features + return dtensor
+ +
[docs] def valid_coords(self, stensor): + stride = stensor.tensor_stride + s = self.stride_size_dict[stride[0]] + # remove voxels that are outside range + xi = torch.div(stensor.C[:, 1], stride[0], rounding_mode='floor') + s[0] / 2 + yi = torch.div(stensor.C[:, 2], stride[1], rounding_mode='floor') + s[1] / 2 + + mask = (xi >= 0) * (xi < s[0]) * (yi >= 0) * (yi < s[1]) + indices = (stensor.C[:, 0][mask].long(), + xi[mask].long(), + yi[mask].long() + ) + # if the backbone uses 4d convs, last dim is time + if stensor.C.shape[1] == 5: + ti = stensor.C[:, 4] + mask = mask * (ti >= 0) * (ti < self.seq_len) + indices = indices + ti[mask].long() + return mask, indices
+ +
[docs] def grid_size(self, stride): + x_range = self.lidar_range[3] - self.lidar_range[0] + y_range = self.lidar_range[4] - self.lidar_range[1] + x_size = int(x_range / self.voxel_size[0]) // stride + y_size = int(y_range / self.voxel_size[1]) // stride + return (x_size, y_size)
+ + + + + +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/backbone3d/pillar_bev.html b/docs/_build/html/_modules/cosense3d/modules/backbone3d/pillar_bev.html new file mode 100644 index 00000000..f60e7175 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/backbone3d/pillar_bev.html @@ -0,0 +1,263 @@ + + + + + + cosense3d.modules.backbone3d.pillar_bev — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.backbone3d.pillar_bev

+import torch
+from torch import nn
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.utils.common import *
+from cosense3d.modules.utils.me_utils import *
+
+
+
[docs]class PillarBEV(BaseModule): + def __init__(self, + in_channels, + layer_nums, + layer_strides, + downsample_channels, + upsample_channels, + upsample_strides, + voxel_generator, + pillar_encoder, + bev_shrinker=None, + bev_compressor=None, + **kwargs): + super(PillarBEV, self).__init__(**kwargs) + self.pillar_encoder = plugin.build_plugin_module(pillar_encoder) + self.voxel_generator = plugin.build_plugin_module(voxel_generator) + self.grid_size = self.voxel_generator.grid_size + + if bev_shrinker is not None: + self.bev_shrinker = plugin.build_plugin_module(bev_shrinker) + if bev_compressor is not None: + self.bev_compressor = plugin.build_plugin_module(bev_compressor) + + num_levels = len(layer_nums) + c_in_list = [in_channels, *downsample_channels[:-1]] + + self.blocks = nn.ModuleList() + self.deblocks = nn.ModuleList() + + for idx in range(num_levels): + cur_layers = [ + nn.ZeroPad2d(1), + nn.Conv2d( + c_in_list[idx], downsample_channels[idx], kernel_size=3, + stride=layer_strides[idx], padding=0, bias=False + ), + nn.BatchNorm2d(downsample_channels[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ] + for k in range(layer_nums[idx]): + cur_layers.extend([ + nn.Conv2d(downsample_channels[idx], downsample_channels[idx], + kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(downsample_channels[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ]) + + self.blocks.append(nn.Sequential(*cur_layers)) + if len(upsample_strides) > 0: + stride = upsample_strides[idx] + if stride >= 1: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d( + downsample_channels[idx], upsample_channels[idx], + upsample_strides[idx], + stride=upsample_strides[idx], bias=False + ), + nn.BatchNorm2d(upsample_channels[idx], + eps=1e-3, momentum=0.01), + nn.ReLU() + )) + else: + stride = np.round(1 / stride).astype(np.int) + self.deblocks.append(nn.Sequential( + nn.Conv2d( + downsample_channels[idx], upsample_channels[idx], + stride, + stride=stride, bias=False + ), + nn.BatchNorm2d(upsample_channels[idx], eps=1e-3, + momentum=0.01), + nn.ReLU() + )) + + c_in = sum(upsample_channels) + if len(upsample_strides) > num_levels: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], + stride=upsample_strides[-1], bias=False), + nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01), + nn.ReLU(), + )) + + self.num_bev_features = c_in + +
[docs] def forward(self, points: list, **kwargs): + N = len(points) + voxels, coords, num_points = self.voxel_generator([x[:, :4] for x in points]) + coords = self.cat_data_from_list(coords, pad_idx=True) + voxels = self.cat_data_from_list(voxels) + num_points = self.cat_data_from_list(num_points) + pillar_features = self.pillar_encoder(voxels, coords, num_points) + bev_feat = self.to_dense_bev(coords, pillar_features, N) + + ups = [] + ret_dict = {} + x = bev_feat + + for i in range(len(self.blocks)): + x = self.blocks[i](x) + + stride = int(bev_feat.shape[2] / x.shape[2]) + ret_dict[f'p{stride}'] = x + + if len(self.deblocks) > 0: + ups.append(self.deblocks[i](x)) + else: + ups.append(x) + + if len(ups) > 1: + x = torch.cat(ups, dim=1) + elif len(ups) == 1: + x = ups[0] + + if len(self.deblocks) > len(self.blocks): + x = self.deblocks[-1](x) + + if hasattr(self, 'bev_shrinker'): + x = self.bev_shrinker(x) + if hasattr(self, 'bev_compressor'): + x = self.bev_compressor(x) + + out = {self.scatter_keys[0]: x} + if 'multi_scale_bev_feat' in self.scatter_keys: + stride = int(bev_feat.shape[2] / x.shape[2]) + ret_dict[f'p{stride}'] = x + out['multi_scale_bev_feat'] = [{k: v[i] for k, v in ret_dict.items()} for i in range(N)] + return out
+ +
[docs] def format_output(self, res, N): + out_dict = {self.scatter_keys[0]: self.decompose_stensor(res, N)} + return out_dict
+ +
[docs] def to_dense_bev(self, coor, feat, N): + bev_feat = torch.zeros(N, + self.grid_size[2], + self.grid_size[1], + self.grid_size[0], + feat.shape[-1], + dtype=feat.dtype, + device=feat.device) + coor = coor.long() + bev_feat[coor[:, 0], coor[:, 1], coor[:, 2], coor[:, 3]] = feat + bev_feat = bev_feat.permute(0, 4, 1, 2, 3) + assert bev_feat.shape[2] == 1 + return bev_feat.squeeze(dim=2)
+ + + +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/backbone3d/spconv.html b/docs/_build/html/_modules/cosense3d/modules/backbone3d/spconv.html new file mode 100644 index 00000000..c4a2a934 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/backbone3d/spconv.html @@ -0,0 +1,301 @@ + + + + + + cosense3d.modules.backbone3d.spconv — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.backbone3d.spconv

+from functools import partial
+from typing import List
+
+import spconv
+import torch
+import torch.nn as nn
+
+from spconv.pytorch import  SparseSequential, SubMConv3d, SparseConv3d, SparseInverseConv3d, SparseConvTensor
+from cosense3d.modules import BaseModule, plugin
+
+
+
[docs]def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, + conv_type='subm', norm_fn=None): + + if conv_type == 'subm': + conv = SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key) + elif conv_type == 'spconv': + conv = SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key) + elif conv_type == 'inverseconv': + conv = SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False) + else: + raise NotImplementedError + + m = SparseSequential( + conv, + norm_fn(out_channels), + nn.ReLU(), + ) + + return m
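For reference, the factory above is invoked like this inside Spconv._init_layers below; the arguments mirror existing calls and are repeated here only to make the conv_type switch concrete:

norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
down = post_act_block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1,
                      indice_key='spconv2', conv_type='spconv')  # strided sparse conv
subm = post_act_block(32, 32, 3, norm_fn=norm_fn, padding=1,
                      indice_key='subm2')                        # submanifold conv (default)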
+ + +
[docs]class Spconv(BaseModule): + def __init__(self, + in_channels, + out_channels, + voxel_generator, + voxel_encoder, + bev_neck=None, + bev_compressor=None, + cache_coords=True, + cache_strides=[1, 2, 4, 8], + **kwargs): + super(Spconv, self).__init__(**kwargs) + self.num_point_features = out_channels + self.cache_keys = [] + if cache_coords: + self.cache_keys.append('coords') + for s in cache_strides: + self.cache_keys.append(f'p{s}') + self.voxel_generator = plugin.build_plugin_module(voxel_generator) + self.voxel_encoder = plugin.build_plugin_module(voxel_encoder) + self.grid_size = self.voxel_generator.grid_size + if bev_neck is not None: + self.bev_neck = plugin.build_plugin_module(bev_neck) + if bev_compressor is not None: + self.bev_compressor = plugin.build_plugin_module(bev_compressor) + self._init_layers(in_channels, out_channels) + + def _init_layers(self, in_channels, out_channels): + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + + self.sparse_shape = self.grid_size.tolist()[::-1] + self.sparse_shape[0] += 1 + + self.conv_input = SparseSequential( + SubMConv3d(in_channels, 16, 3, + padding=1, bias=False, indice_key='subm1'), + norm_fn(16), + nn.ReLU(), + ) + block = post_act_block + + self.conv1 = SparseSequential( + block(16, 16, 3, + norm_fn=norm_fn, padding=1, indice_key='subm1'), + ) + + self.conv2 = SparseSequential( + # [1600, 1408, 41] <- [800, 704, 21] + block(16, 32, 3, + norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + ) + + self.conv3 = SparseSequential( + # [800, 704, 21] <- [400, 352, 11] + block(32, 64, 3, + norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + ) + + self.conv4 = SparseSequential( + # [400, 352, 11] <- [200, 176, 5] + block(64, 64, 3, + norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + ) + + last_pad = 0 + self.conv_out = SparseSequential( + # [200, 150, 5] -> [200, 150, 2] + SparseConv3d(64, out_channels, (3, 1, 1), + stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), + norm_fn(out_channels), + nn.ReLU(), + ) + + self.backbone_channels = { + 'x_conv1': 16, + 'x_conv2': 32, + 'x_conv3': 64, + 'x_conv4': 64, + 'out': out_channels + } + +
[docs] def forward(self, points: list, **kwargs): + B = len(points) + res_dict = {} + voxels, coords, num_points = self.voxel_generator(x[:, :4] for x in points) + res_dict['coords'] = coords + coords = self.cat_data_from_list(coords, pad_idx=True) + voxels = self.cat_data_from_list(voxels) + num_points = self.cat_data_from_list(num_points) + voxel_features = self.voxel_encoder(voxels, num_points) + + input_sp_tensor = SparseConvTensor( + features=voxel_features, + indices=coords.int(), + spatial_shape=self.sparse_shape, + batch_size=B + ) + + x = self.conv_input(input_sp_tensor) + + res_dict['p1'] = self.conv1(x) + res_dict['p2'] = self.conv2(res_dict['p1']) + res_dict['p4'] = self.conv3(res_dict['p2'] ) + res_dict['p8'] = self.conv4(res_dict['p4'] ) + + res_dict['p8_out'] = self.conv_out(res_dict['p8']) + res_dict['bev'] = self.to_dense(res_dict['p8_out']) + + multi_scale_bev_feat = {} + if hasattr(self, 'bev_neck'): + res = self.bev_neck(res_dict['bev']) + if isinstance(res, tuple): + res_dict['bev'] = res[0] + multi_scale_bev_feat = res[1] + else: + res_dict['bev'] = res + if hasattr(self, 'bev_compressor'): + res_dict['bev'] = self.bev_compressor(res_dict['bev']) + + out_dict = {} + if 'voxel_feat' in self.scatter_keys: + out_dict['voxel_feat'] = self.format_output( + {k: res_dict[k] for k in self.cache_keys}, B) + if 'bev_feat' in self.scatter_keys: + out_dict['bev_feat'] = res_dict['bev'] + if 'multi_scale_bev_feat' in self.scatter_keys: + multi_scale_bev_feat[1] = res_dict['bev'] + out_dict['multi_scale_bev_feat'] = \ + [{f'p{k * 8}': v[i] for k, v in multi_scale_bev_feat.items()} for i in range(B)] + return out_dict
+ +
[docs] def format_output(self, out_dict, B): + out_list = [] + for i in range(B): + new_dict = {} + for k, v in out_dict.items(): + if isinstance(v, list) or isinstance(v, torch.Tensor): + new_dict[k] = v[i] + else: + coor = v.indices + feat = v.features.contiguous() + mask = coor[:, 0] == i + new_dict[k] = {'coor': coor[mask, 1:], 'feat': feat[mask]} + out_list.append(new_dict) + + return out_list
+ +
[docs] def to_dense(self, stensor): + spatial_features = stensor.dense() + N, C, D, H, W = spatial_features.shape + bev_featrues = spatial_features.reshape(N, C * D, H, W) + return bev_featrues.contiguous()
+ + + +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/backbone3d/voxelnet.html b/docs/_build/html/_modules/cosense3d/modules/backbone3d/voxelnet.html new file mode 100644 index 00000000..4f9a4525 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/backbone3d/voxelnet.html @@ -0,0 +1,192 @@ + + + + + + cosense3d.modules.backbone3d.voxelnet — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.backbone3d.voxelnet

+import torch
+from torch import nn
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.utils.common import *
+from cosense3d.modules.utils.me_utils import *
+
+
+
[docs]class VoxelNet(BaseModule): + def __init__(self, + voxel_generator, + voxel_encoder, + cml, + neck=None, + bev_compressor=None, + **kwargs): + super(VoxelNet, self).__init__(**kwargs) + self.voxel_generator = plugin.build_plugin_module(voxel_generator) + self.voxel_encoder = plugin.build_plugin_module(voxel_encoder) + self.grid_size = self.voxel_generator.grid_size + self.cml = plugin.build_plugin_module(cml) + + if neck is not None: + self.neck = plugin.build_plugin_module(neck) + if bev_compressor is not None: + self.bev_compressor = plugin.build_plugin_module(bev_compressor) + +
[docs] def forward(self, points: list, **kwargs): + N = len(points) + voxels, coords, num_points = self.voxel_generator(points) + coords = self.cat_data_from_list(coords, pad_idx=True) + voxels = self.cat_data_from_list(voxels) + num_points = self.cat_data_from_list(num_points) + voxel_features = self.voxel_encoder(voxels, coords, num_points) + if self.cml.dense: + voxel_features = self.to_dense(coords, voxel_features, N) + voxel_features = self.cml(voxel_features) + else: + voxel_features, voxel_coords = self.cml(voxel_features, coords) + voxel_features = self.to_dense(voxel_coords, voxel_features, N, filter_range=True) + # 3d to 2d feature + bev_feat = voxel_features.flatten(1, 2) + x = bev_feat + ret_dict = {} + if hasattr(self, 'neck'): + res = self.neck(x) + if isinstance(res, torch.Tensor): + x = res + else: + x = res[0] + ret_dict = res[1] + if hasattr(self, 'bev_compressor'): + x = self.bev_compressor(x) + + out = {self.scatter_keys[0]: x} + if 'multi_scale_bev_feat' in self.scatter_keys: + stride = int(bev_feat.shape[2] / x.shape[2]) + ret_dict[f'p{stride}'] = x + out['multi_scale_bev_feat'] = [{k: v[i] for k, v in ret_dict.items()} for i in range(N)] + + return out
+ +
[docs] def to_dense(self, coor, feat, N, filter_range=False): + if filter_range: + strides = self.cml.out_strides.cpu() + grid_size = torch.ceil(self.grid_size[[2, 1, 0]] / strides).int().tolist() + mask = (coor[:, 1] >= 0) & (coor[:, 1] < grid_size[0]) & \ + (coor[:, 2] >= 0) & (coor[:, 2] < grid_size[1]) & \ + (coor[:, 3] >= 0) & (coor[:, 3] < grid_size[2]) + coor, feat = coor[mask], feat[mask] + else: + grid_size = self.grid_size[[2, 1, 0]].tolist() + bev_feat = torch.zeros(N, + grid_size[0], + grid_size[1], + grid_size[2], + feat.shape[-1], + dtype=feat.dtype, + device=feat.device) + coor = coor.long() + bev_feat[coor[:, 0], coor[:, 1], coor[:, 2], coor[:, 3]] = feat + + return bev_feat.permute(0, 4, 1, 2, 3)
+ + + +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/attn_fusion.html b/docs/_build/html/_modules/cosense3d/modules/fusion/attn_fusion.html new file mode 100644 index 00000000..13eab46a --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/attn_fusion.html @@ -0,0 +1,201 @@ + + + + + + cosense3d.modules.fusion.attn_fusion — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.fusion.attn_fusion

+import warnings
+from typing import Dict
+
+import torch
+
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.plugin.attn import ScaledDotProductAttention
+from cosense3d.modules.utils.me_utils import update_me_essentials
+from cosense3d.modules.utils.common import cat_coor_with_idx
+
+
+
[docs]class SparseAttentionFusion(BaseModule): + def __init__(self, stride, in_channels, **kwargs): + super(SparseAttentionFusion, self).__init__(**kwargs) + if isinstance(stride, int): + self.stride = [stride] + else: + self.stride = stride + self.attn = ScaledDotProductAttention(in_channels) + +
[docs] def forward(self, ego_feats, coop_feats=None, **kwargs): + fused_feat = [] + fuse_key = self.gather_keys[0] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + batch_feat = {} + for stride in self.stride: + coor, feat, ctr = self.fuse_feature_at_stride(ego_feat, coop_feat, stride, fuse_key) + batch_feat[f'p{stride}'] = {'coor': coor, 'feat': feat, 'ctr': ctr} + fused_feat.append(batch_feat) + return self.format_output(fused_feat)
+ +
[docs] def format_output(self, output): + return {self.scatter_keys[0]: output}
+ +
[docs] def fuse_feature_at_stride(self, ego_feat, coop_feat, stride, fuse_key): + coor = [ego_feat[f'p{stride}']['coor']] + feat = [ego_feat[f'p{stride}']['feat']] + ctr = [ego_feat[f'p{stride}']['ctr']] + if len(coop_feat) == 0: + return coor[0], feat[0], ctr[0] + else: + # fuse coop to ego + for cpfeat in coop_feat.values(): + if fuse_key not in cpfeat: + continue + cpm = cpfeat[fuse_key][f'p{stride}'] + coor.append(cpm['coor']) + feat.append(cpm['feat']) + ctr.append(cpm['ctr']) + + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + ctr_cat = torch.cat(ctr, dim=0) + uniq_coor, reverse_inds = torch.unique(coor_cat[:, 1:], dim=0, + return_inverse=True) + uniq_ctr = ctr_cat[reverse_inds.unique()] + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_zeros(len(uniq_coor), feat_cat.shape[-1]) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + q = feats_pad[0].unsqueeze(1) # num_pts, 1, d + kv = torch.stack(feats_pad[1:], dim=1) # num_pts, num_coop_cav, d + feat_out = self.attn(q, kv, kv).squeeze(1) + return uniq_coor, feat_out, uniq_ctr
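To illustrate the padding step above with toy numbers (not from the repo): if the ego agent has features at 100 voxels and a single cooperating agent at 80 voxels, 30 of them shared, the union holds 150 unique coordinates; each agent's features are scattered into a zero-padded (150, C) tensor, the ego tensor becomes the query of shape (150, 1, C), the cooperating tensors are stacked into keys/values of shape (150, 1, C), and the scaled dot-product attention returns one fused feature per unique voxel.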
+ + +
[docs]class DenseAttentionFusion(BaseModule): + def __init__(self, feature_dim, neck=None, **kwargs): + super(DenseAttentionFusion, self).__init__(**kwargs) + self.attn = ScaledDotProductAttention(feature_dim) + if neck is not None: + self.neck = plugin.build_plugin_module(neck) + +
[docs] def forward(self, ego_feats, coop_feats=None, **kwargs): + out = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + feat = [ego_feat] + for cpfeat in coop_feat.values(): + if 'bev_feat' not in cpfeat: + continue + feat.append(cpfeat['bev_feat']) + xx = torch.stack(feat, dim=0) + N, C, H, W = xx.shape + xx = xx.view(N, C, -1).permute(2, 0, 1) + h = self.attn(xx, xx, xx) + h = h.permute(1, 2, 0).view(N, C, H, W)[0, ...] + out.append(h) + out = torch.stack(out) + if hasattr(self, 'neck'): + out = self.neck(out) + return {self.scatter_keys[0]: out}
+ +
+ +
+
+
+ +
+ +
+

+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/box_fusion.html b/docs/_build/html/_modules/cosense3d/modules/fusion/box_fusion.html new file mode 100644 index 00000000..daaaa4f1 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/box_fusion.html @@ -0,0 +1,291 @@ + + + + + + cosense3d.modules.fusion.box_fusion — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.fusion.box_fusion

+import torch
+import torch.nn as nn
+
+from cosense3d.modules import BaseModule
+from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu
+pi = 3.141592653
+
+
+
[docs]def limit_period(val, offset=0.5, period=2 * pi): + return val - torch.floor(val / period + offset) * period
+ + +
[docs]class BoxFusion(BaseModule): + def __init__(self, lidar_range, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + +
[docs] def forward(self, ego_preds, coop_preds, memory, global_times, **kwargs): + out_dict = {'box': [], 'scr': [], 'lbl': [], 'time': [], 'idx': []} + for ego_pred, coop_pred, mem, global_time in zip(ego_preds, coop_preds, memory, global_times): + boxes = [ego_pred['preds']['box']] + scores = [ego_pred['preds']['scr']] + labels = [ego_pred['preds']['lbl']] + times = [ego_pred['preds']['time']] + if len(mem) > 0: + boxes.append(mem['preds']['box']) + scores.append(mem['preds']['scr']) + labels.append(mem['preds']['lbl']) + times.append(mem['preds']['time']) + for cppred in coop_pred.values(): + boxes.append(cppred['detection_local']['preds']['box']) + scores.append(cppred['detection_local']['preds']['scr']) + labels.append(cppred['detection_local']['preds']['lbl']) + times.append(cppred['detection_local']['preds']['time']) + clusters_boxes, clusters_scores, cluster_labels, cluster_times = \ + self.clustering(boxes, scores, labels, times, global_time) + boxes_fused, scores_fused, labels_fused, times_fused = self.cluster_fusion( + clusters_boxes, clusters_scores, cluster_labels, cluster_times, global_time) + out_dict['box'].append(boxes_fused) + out_dict['scr'].append(scores_fused) + out_dict['lbl'].append(labels_fused) + out_dict['time'].append(times_fused) + out_dict['idx'].append(torch.zeros_like(labels_fused)) + + out_list = self.compose_result_list(out_dict, len(ego_preds)) + return {self.scatter_keys[0]: [{'preds': x} for x in out_list]}
+ +
[docs] def clustering(self, boxes, scores, labels, times, global_time): + times_cat = torch.cat(times, dim=0) + # remove boxes outside the maximum time length + mask = (global_time - times_cat) < 0.15 + pred_boxes_cat = torch.cat(boxes, dim=0)[mask] + pred_boxes_cat[:, -1] = limit_period(pred_boxes_cat[:, -1]) + pred_scores_cat = torch.cat(scores, dim=0)[mask] + pred_labels_cat = torch.cat(labels, dim=0)[mask] + times_cat = times_cat[mask] + + if len(pred_scores_cat) == 0: + clusters = [torch.Tensor([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.57]). + to(boxes[0].device).view(1, 7)] + scores= [torch.Tensor([0.01]).to(boxes[0].device).view(-1)] + labels = [torch.Tensor([-1]).to(boxes[0].device).view(-1)] + times = [torch.Tensor([-1]).to(boxes[0].device).view(-1)] + return clusters, scores, labels, times + + ious = boxes_iou3d_gpu(pred_boxes_cat, pred_boxes_cat) + cluster_indices = torch.zeros(len(ious)).int() # gt assignments of preds + cur_cluster_id = 1 + while torch.any(cluster_indices == 0): + cur_idx = torch.where(cluster_indices == 0)[0][ + 0] # find the idx of the first pred which is not assigned yet + cluster_indices[torch.where(ious[cur_idx] > 0.1)[0]] = cur_cluster_id + cur_cluster_id += 1 + clusters = [] + scores = [] + labels = [] + times = [] + for j in range(1, cur_cluster_id): + clusters.append(pred_boxes_cat[cluster_indices == j]) + scores.append(pred_scores_cat[cluster_indices == j]) + labels.append(pred_labels_cat[cluster_indices == j]) + times.append(times_cat[cluster_indices == j]) + + return clusters, scores, labels, times
+ +
[docs] @torch.no_grad() + def cluster_fusion(self, clusters, scores, labels, times, global_time): + """ + Merge boxes in each cluster with scores as weights for merging + """ + for i, (c, s, l, t) in enumerate(zip(clusters, scores, labels, times)): + assert len(c) == len(s) + if len(c) == 1: + labels[i] = l[0] + times[i] = t[0] + continue + uniq_lbls, cnt = l.mode(keepdim=True) + labels[i] = uniq_lbls[cnt.argmax()] + + + box_fused, s_fused = self.merge_sync_boxes(c, s) + scores[i] = s_fused + clusters[i] = box_fused + times[i] = t.mean() + + return torch.cat(clusters, dim=0), torch.cat(scores), torch.stack(labels), torch.stack(times)
+ +
[docs] @torch.no_grad() + def temporal_cluster_fusion(self, clusters, scores, labels, times, global_time): + """ + Merge boxes in each cluster with scores as weights for merging + """ + for i, (c, s, l, t) in enumerate(zip(clusters, scores, labels, times)): + assert len(c) == len(s) + if len(c) == 1: + labels[i] = l[0] + times[i] = t[0] + continue + uniq_lbls, cnt = l.mode(keepdim=True) + labels[i] = uniq_lbls[cnt.argmax()] + + t_idx = (t * 100).round().int() + uniq_ts = torch.unique(t_idx) + ts = [] + boxes = [] + scrs = [] + for idx in uniq_ts: + mask = t_idx == idx + cur_cluster = c[mask] + cur_scores = s[mask] + box_fused, s_fused = self.merge_sync_boxes(cur_cluster, cur_scores) + ts.append(t[mask].mean()) + boxes.append(box_fused) + scrs.append(s_fused) + + if len(ts) == 1: + scores[i] = scrs[0] + clusters[i] = boxes[0] + times[i] = ts[0] + else: + # interpolate to global time + ts = torch.stack(ts) + sort_inds = torch.argsort(ts) + ts = ts[sort_inds] + boxes = torch.cat(boxes, dim=0)[sort_inds] + scrs = torch.cat(scrs)[sort_inds] + velo = (boxes[-1, :2] - boxes[-2, :2]) / (ts[-1] - ts[-2]) + out_box = boxes[scrs.argmax()] + out_box[:2] += velo * (global_time - ts[-1]) + scores[i] = torch.mean(scrs, dim=0, keepdim=True) + clusters[i] = out_box.reshape(1, -1) + times[i] = torch.tensor(global_time, device=ts.device) + + return torch.cat(clusters, dim=0), torch.cat(scores), torch.stack(labels), torch.stack(times)
+ +
[docs] def merge_sync_boxes(self, c, s): + # reverse direction for non-dominant direction of boxes + dirs = c[:, -1] + max_score_idx = torch.argmax(s) + dirs_diff = torch.abs(dirs - dirs[max_score_idx].item()) + lt_pi = (dirs_diff > pi).int() + dirs_diff = dirs_diff * (1 - lt_pi) + ( + 2 * pi - dirs_diff) * lt_pi + score_lt_half_pi = s[dirs_diff > pi / 2].sum() # larger than + score_set_half_pi = s[ + dirs_diff <= pi / 2].sum() # small equal than + # select larger scored direction as final direction + if score_lt_half_pi <= score_set_half_pi: + dirs[dirs_diff > pi / 2] += pi + else: + dirs[dirs_diff <= pi / 2] += pi + dirs = limit_period(dirs) + s_normalized = s / s.sum() + sint = torch.sin(dirs) * s_normalized + cost = torch.cos(dirs) * s_normalized + theta = torch.atan2(sint.sum(), cost.sum()).view(1, ) + center_dim = c[:, :-1] * s_normalized[:, None] + box_fused = torch.cat([center_dim.sum(dim=0), theta]).unsqueeze(0) + s_sorted = torch.sort(s, descending=True).values + s_fused = 0 + for j, ss in enumerate(s_sorted): + s_fused += ss ** (j + 1) + s_fused = torch.tensor([min(s_fused, 1.0)], device=s.device) + return box_fused, s_fused
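A small worked example of the score fusion at the end of merge_sync_boxes (illustrative numbers): for a cluster with sorted scores [0.9, 0.8] the fused score is 0.9**1 + 0.8**2 = 0.9 + 0.64 = 1.54, clipped to 1.0; for [0.5, 0.4] it is 0.5 + 0.16 = 0.66. Agreement between agents therefore raises the fused confidence without ever exceeding 1.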
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/fax.html b/docs/_build/html/_modules/cosense3d/modules/fusion/fax.html new file mode 100644 index 00000000..8c46c593 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/fax.html @@ -0,0 +1,459 @@ + + + + + + cosense3d.modules.fusion.fax — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.fusion.fax

+"""
+This module implements swap fusion (local window + grid attention) for cooperative BEV feature fusion.
+"""
+import torch
+from einops import rearrange
+from torch import nn, einsum
+from einops.layers.torch import Rearrange, Reduce
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.plugin.cobevt import NaiveDecoder
+
+
+
[docs]class PreNormResidual(nn.Module): + def __init__(self, dim, fn): + super().__init__() + self.norm = nn.LayerNorm(dim) + self.fn = fn + +
[docs] def forward(self, x, **kwargs): + return self.fn(self.norm(x), **kwargs) + x
+ + +
[docs]class FeedForward(nn.Module): + def __init__(self, dim, hidden_dim, dropout=0.): + super().__init__() + self.net = nn.Sequential( + nn.Linear(dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, dim), + nn.Dropout(dropout) + ) + +
[docs] def forward(self, x): + return self.net(x)
+ + +# swap attention -> max_vit +
[docs]class Attention(nn.Module): + """ + Unit Attention class. Todo: mask is not added yet. + + Parameters + ---------- + dim: int + Input feature dimension. + dim_head: int + The head dimension. + dropout: float + Dropout rate + agent_size: int + The agent can be different views, timestamps or vehicles. + """ + + def __init__( + self, + dim, + dim_head=32, + dropout=0., + agent_size=6, + window_size=7 + ): + super().__init__() + assert (dim % dim_head) == 0, \ + 'dimension should be divisible by dimension per head' + + self.heads = dim // dim_head + self.scale = dim_head ** -0.5 + self.window_size = [agent_size, window_size, window_size] + + self.to_qkv = nn.Linear(dim, dim * 3, bias=False) + self.attend = nn.Sequential( + nn.Softmax(dim=-1) + ) + + self.to_out = nn.Sequential( + nn.Linear(dim, dim, bias=False), + nn.Dropout(dropout) + ) + + self.relative_position_bias_table = nn.Embedding( + (2 * self.window_size[0] - 1) * + (2 * self.window_size[1] - 1) * + (2 * self.window_size[2] - 1), + self.heads) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for + # each token inside the window + coords_d = torch.arange(self.window_size[0]) + coords_h = torch.arange(self.window_size[1]) + coords_w = torch.arange(self.window_size[2]) + # 3, Wd, Wh, Ww + coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w, indexing='ij')) + coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww + + # 3, Wd*Wh*Ww, Wd*Wh*Ww + relative_coords = \ + coords_flatten[:, :, None] - coords_flatten[:, None, :] + # Wd*Wh*Ww, Wd*Wh*Ww, 3 + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + # shift to start from 0 + relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 2] += self.window_size[2] - 1 + + relative_coords[:, :, 0] *= \ + (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1) + relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1) + relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww + self.register_buffer("relative_position_index", + relative_position_index) + +
[docs] def forward(self, x, mask=None): + # x shape: b, l, h, w, w_h, w_w, c + batch, agent_size, height, width, window_height, window_width, _, device, h \ + = *x.shape, x.device, self.heads + + # flatten + x = rearrange(x, 'b l x y w1 w2 d -> (b x y) (l w1 w2) d') + # project for queries, keys, values + q, k, v = self.to_qkv(x).chunk(3, dim=-1) + # split heads + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), + (q, k, v)) + # scale + q = q * self.scale + + # sim + sim = einsum('b h i d, b h j d -> b h i j', q, k) + + # add positional bias + L = agent_size * window_height * window_width + bias = self.relative_position_bias_table(self.relative_position_index[:L, :L]) + sim = sim + rearrange(bias, 'i j h -> h i j') + + # mask shape if exist: b x y w1 w2 e l + if mask is not None: + # b x y w1 w2 e l -> (b x y) 1 (l w1 w2) + mask = rearrange(mask, 'b x y w1 w2 e l -> (b x y) e (l w1 w2)') + # (b x y) 1 1 (l w1 w2) = b h 1 n + mask = mask.unsqueeze(1) + sim = sim.masked_fill(mask == 0, -float('inf')) + + # attention + attn = self.attend(sim) + # aggregate + out = einsum('b h i j, b h j d -> b h i d', attn, v) + # merge heads + out = rearrange(out, 'b h (l w1 w2) d -> b l w1 w2 (h d)', + l=agent_size, w1=window_height, w2=window_width) + + # combine heads out + out = self.to_out(out) + return rearrange(out, '(b x y) l w1 w2 d -> b l x y w1 w2 d', + b=batch, x=height, y=width)
+ + +
[docs]class SwapFusionBlockMask(nn.Module): + """ + Swap Fusion Block contains window attention and grid attention with + mask enabled for multi-vehicle cooperation. + """ + + def __init__(self, + input_dim, + mlp_dim, + dim_head, + window_size, + agent_size, + drop_out): + super(SwapFusionBlockMask, self).__init__() + + self.window_size = window_size + + self.window_attention = PreNormResidual(input_dim, + Attention(input_dim, dim_head, + drop_out, + agent_size, + window_size)) + self.window_ffd = PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, + drop_out)) + self.grid_attention = PreNormResidual(input_dim, + Attention(input_dim, dim_head, + drop_out, + agent_size, + window_size)) + self.grid_ffd = PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, + drop_out)) + +
[docs] def forward(self, x, mask): + # x: b l c h w + # mask: b h w 1 l + # window attention -> grid attention + mask_swap = mask + + # mask b h w 1 l -> b x y w1 w2 1 L + mask_swap = rearrange(mask_swap, + 'b (x w1) (y w2) e l -> b x y w1 w2 e l', + w1=self.window_size, w2=self.window_size) + x = rearrange(x, 'b m d (x w1) (y w2) -> b m x y w1 w2 d', + w1=self.window_size, w2=self.window_size) + x = self.window_attention(x, mask=mask_swap) + x = self.window_ffd(x) + x = rearrange(x, 'b m x y w1 w2 d -> b m d (x w1) (y w2)') + + # grid attention + mask_swap = mask + mask_swap = rearrange(mask_swap, + 'b (w1 x) (w2 y) e l -> b x y w1 w2 e l', + w1=self.window_size, w2=self.window_size) + x = rearrange(x, 'b m d (w1 x) (w2 y) -> b m x y w1 w2 d', + w1=self.window_size, w2=self.window_size) + x = self.grid_attention(x, mask=mask_swap) + x = self.grid_ffd(x) + x = rearrange(x, 'b m x y w1 w2 d -> b m d (w1 x) (w2 y)') + + return x
+ + +
[docs]class SwapFusionBlock(nn.Module): + """ + Swap Fusion Block contains window attention and grid attention. + """ + + def __init__(self, + input_dim, + mlp_dim, + dim_head, + window_size, + agent_size, + drop_out): + super(SwapFusionBlock, self).__init__() + # b = batch * max_cav + self.block = nn.Sequential( + Rearrange('b m d (x w1) (y w2) -> b m x y w1 w2 d', + w1=window_size, w2=window_size), + PreNormResidual(input_dim, Attention(input_dim, dim_head, drop_out, + agent_size, window_size)), + PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, drop_out)), + Rearrange('b m x y w1 w2 d -> b m d (x w1) (y w2)'), + + Rearrange('b m d (w1 x) (w2 y) -> b m x y w1 w2 d', + w1=window_size, w2=window_size), + PreNormResidual(input_dim, Attention(input_dim, dim_head, drop_out, + agent_size, window_size)), + PreNormResidual(input_dim, + FeedForward(input_dim, mlp_dim, drop_out)), + Rearrange('b m x y w1 w2 d -> b m d (w1 x) (w2 y)'), + ) + +
[docs] def forward(self, x, mask=None): + # todo: add mask operation later for multi-agents + x = self.block(x) + return x
+ + +
[docs]class SwapFusionEncoder(BaseModule): + """ + Data rearrange -> swap block -> mlp_head + """ + + def __init__(self, + input_dim=128, + mlp_dim=256, + agent_size=5, + window_size=8, + dim_head=32, + drop_out=0.1, + depth=3, + mask=False, + decoder=None, + **kwargs): + super(SwapFusionEncoder, self).__init__(**kwargs) + self.layers = nn.ModuleList([]) + self.depth = depth + self.mask = mask + swap_fusion_block = SwapFusionBlockMask if self.mask else SwapFusionBlock + + for i in range(self.depth): + block = swap_fusion_block(input_dim, + mlp_dim, + dim_head, + window_size, + agent_size, + drop_out) + self.layers.append(block) + + # mlp head + self.mlp_head = nn.Sequential( + Reduce('b m d h w -> b d h w', 'mean'), + Rearrange('b d h w -> b h w d'), + nn.LayerNorm(input_dim), + nn.Linear(input_dim, input_dim), + Rearrange('b h w d -> b d h w') + ) + + if decoder is not None: + self.decoder = NaiveDecoder(decoder) + +
[docs] def forward(self, ego_feat, coop_cpm, **kwargs): + B = len(ego_feat) + C, H, W = ego_feat[0].shape + x = [] + mask = [] + num_cavs = [] + for xe, xc in zip(ego_feat, coop_cpm): + values = xc.values() + ego_mask = torch.ones_like(xe[:1]) + x.append([xe,] + [v['bev_feat'] for v in values]) + mask.append([ego_mask,] + [v['bev_mask'] for v in values]) + num_cavs.append(len(values) + 1) + l = max(num_cavs) + x_pad = ego_feat[0].new_zeros(B, l, C, H, W) + mask_pad = ego_feat[0].new_zeros(B, H, W, 1, l) + for i in range(B): + x_pad[i, :len(x[i])] = torch.stack(x[i], dim=0) + mask_pad[i, :, :, :, :len(x[i])] = torch.stack(mask[i], dim=-1).permute(1, 2, 0, 3) + for stage in self.layers: + x_pad = stage(x_pad, mask=mask_pad) + out = self.mlp_head(x_pad) + + if hasattr(self, 'decoder'): + out = self.decoder(out.unsqueeze(1)) + out = rearrange(out, 'b l c h w -> (b l) c h w') + return {self.scatter_keys[0]: out}
+ + +if __name__ == "__main__": + import os + + os.environ['CUDA_VISIBLE_DEVICES'] = '1' + args = {'input_dim': 512, + 'mlp_dim': 512, + 'agent_size': 4, + 'window_size': 8, + 'dim_head': 4, + 'drop_out': 0.1, + 'depth': 2, + 'mask': True + } + block = SwapFusionEncoder(args) + block.cuda() + test_data = torch.rand(1, 4, 512, 32, 32) + test_data = test_data.cuda() + mask = torch.ones(1, 32, 32, 1, 4) + mask = mask.cuda() + + output = block(test_data, mask) + print(output) +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/keypoints.html b/docs/_build/html/_modules/cosense3d/modules/fusion/keypoints.html new file mode 100644 index 00000000..934d208c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/keypoints.html @@ -0,0 +1,219 @@ + + + + + + cosense3d.modules.fusion.keypoints — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.fusion.keypoints

+import torch
+import torch.nn as nn
+
+from cosense3d.modules import BaseModule
+from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu
+pi = 3.141592653
+
+
+
[docs]def limit_period(val, offset=0.5, period=2 * pi): + return val - torch.floor(val / period + offset) * period
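With the default offset of 0.5, limit_period wraps angles into [-pi, pi). A quick check that mirrors the function above:

import torch

pi = 3.141592653

def limit_period(val, offset=0.5, period=2 * pi):
    return val - torch.floor(val / period + offset) * period

print(limit_period(torch.tensor([0.0, pi + 0.1, -pi - 0.1])))
# -> approximately [0.0, -pi + 0.1, pi - 0.1]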
+ + +
[docs]class KeypointsFusion(BaseModule): + def __init__(self, lidar_range, train_from_epoch=0, **kwargs): + super().__init__(**kwargs) + self.lidar_range = lidar_range + self.train_from_epoch = train_from_epoch + +
[docs] def forward(self, ego_feats, coop_feats, **kwargs): + epoch = kwargs.get('epoch', self.train_from_epoch + 1) + if epoch < self.train_from_epoch: + return {self.scatter_keys[0]: [None for _ in ego_feats]} + out_dict = {'boxes': [], 'scores': [], 'feat': [], 'coor': []} + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + feat = [ego_feat['point_features']] + coor = [ego_feat['point_coords']] + boxes = [ego_feat['boxes']] + scores = [ego_feat['scores']] + for cpfeat in coop_feat.values(): + if 'keypoint_feat' not in cpfeat: + continue + feat.append(cpfeat['keypoint_feat']['point_features']) + coor.append(cpfeat['keypoint_feat']['point_coords']) + boxes.append(cpfeat['keypoint_feat']['boxes']) + scores.append(cpfeat['keypoint_feat']['scores']) + clusters_boxes, clusters_scores = self.clustering(boxes, scores) + boxes_fused, scores_fused = self.cluster_fusion(clusters_boxes, clusters_scores) + out_dict['boxes'].append(boxes_fused) + out_dict['scores'].append(scores_fused) + out_dict['feat'].append(torch.cat(feat, dim=0)) + out_dict['coor'].append(torch.cat(coor, dim=0)) + + return {self.scatter_keys[0]: self.compose_result_list(out_dict, len(ego_feats))}
+ +
[docs] def clustering(self, boxes, scores): + pred_boxes_cat = torch.cat(boxes, dim=0) + pred_boxes_cat[:, -1] = limit_period(pred_boxes_cat[:, -1]) + pred_scores_cat = torch.cat(scores, dim=0) + + if len(pred_scores_cat) == 0: + clusters = [torch.Tensor([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.57]). + to(boxes[0].device).view(1, 7)] + scores= [torch.Tensor([0.01]).to(boxes[0].device).view(-1)] + return clusters, scores + + ious = boxes_iou3d_gpu(pred_boxes_cat, pred_boxes_cat) + cluster_indices = torch.zeros(len(ious)).int() # gt assignments of preds + cur_cluster_id = 1 + while torch.any(cluster_indices == 0): + cur_idx = torch.where(cluster_indices == 0)[0][ + 0] # find the idx of the first pred which is not assigned yet + cluster_indices[torch.where(ious[cur_idx] > 0.1)[0]] = cur_cluster_id + cur_cluster_id += 1 + clusters = [] + scores = [] + for j in range(1, cur_cluster_id): + clusters.append(pred_boxes_cat[cluster_indices == j]) + scores.append(pred_scores_cat[cluster_indices == j]) + + return clusters, scores
+ +
[docs] @torch.no_grad() + def cluster_fusion(self, clusters, scores): + """ + Merge boxes in each cluster with scores as weights for merging + """ + for i, (c, s) in enumerate(zip(clusters, scores)): + assert len(c) == len(s) + if len(c) == 1: + continue + # reverse direction for non-dominant direction of boxes + dirs = c[:, -1] + max_score_idx = torch.argmax(s) + dirs_diff = torch.abs(dirs - dirs[max_score_idx].item()) + lt_pi = (dirs_diff > pi).int() + dirs_diff = dirs_diff * (1 - lt_pi) + ( + 2 * pi - dirs_diff) * lt_pi + score_lt_half_pi = s[dirs_diff > pi / 2].sum() # larger than + score_set_half_pi = s[ + dirs_diff <= pi / 2].sum() # small equal than + # select larger scored direction as final direction + if score_lt_half_pi <= score_set_half_pi: + dirs[dirs_diff > pi / 2] += pi + else: + dirs[dirs_diff <= pi / 2] += pi + dirs = limit_period(dirs) + s_normalized = s / s.sum() + sint = torch.sin(dirs) * s_normalized + cost = torch.cos(dirs) * s_normalized + theta = torch.atan2(sint.sum(), cost.sum()).view(1, ) + center_dim = c[:, :-1] * s_normalized[:, None] + clusters[i] = torch.cat([center_dim.sum(dim=0), theta]).unsqueeze(0) + s_sorted = torch.sort(s, descending=True).values + s_fused = 0 + for j, ss in enumerate(s_sorted): + s_fused += ss ** (j + 1) + s_fused = torch.tensor([min(s_fused, 1.0)], device=s.device) + scores[i] = s_fused + + return torch.cat(clusters, dim=0), torch.cat(scores)
+ + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/maxout_fusion.html b/docs/_build/html/_modules/cosense3d/modules/fusion/maxout_fusion.html new file mode 100644 index 00000000..d5f24259 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/maxout_fusion.html @@ -0,0 +1,178 @@ + + + + + + cosense3d.modules.fusion.maxout_fusion — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.fusion.maxout_fusion

+import torch
+import torch.nn as nn
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.utils.common import cat_coor_with_idx
+
+
+
[docs]class BEVMaxoutFusion(BaseModule): + def __init__(self, **kwargs): + super().__init__(**kwargs) + +
[docs] def forward(self, ego_feats, coop_feats, **kwargs): + out_feat = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + feat = [ego_feat] + for cpfeat in coop_feat.values(): + if 'bev_feat' not in cpfeat: + continue + feat.append(cpfeat['bev_feat']) + feat = torch.stack(feat, dim=0).max(dim=0).values + out_feat.append(feat) + + return {self.scatter_keys[0]: out_feat}
+ + +
[docs]class SparseBEVMaxoutFusion(BaseModule): + def __init__(self, + pc_range, + resolution, + **kwargs): + super().__init__(**kwargs) + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.resolution = resolution + +
[docs] def forward(self, ego_feats, coop_feats, **kwargs): + fused_feat = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + coor = [ego_feat['ref_pts']] + feat = [ego_feat['outs_dec'][-1]] + if len(coop_feat) == 0: + fused_feat.append({ + 'ref_pts': coor[0], + 'outs_dec': feat[0].unsqueeze(1) + }) + continue + + # fuse coop to ego + for cpfeat in coop_feat.values(): + coor.append(cpfeat[self.gather_keys[0]]['ref_pts']) + feat.append(cpfeat[self.gather_keys[0]]['outs_dec'][-1]) + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + # coor_int = coor_cat[:, 1:] * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + # coor_int = (coor_int * (1 / self.resolution)).int() + uniq_coor, reverse_inds = torch.unique(coor_cat[:, 1:], dim=0, + return_inverse=True) + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_full((len(uniq_coor), feat_cat.shape[-1]), -torch.inf) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + out = torch.stack(feats_pad, dim=0).max(dim=0).values + fused_feat.append({ + 'ref_pts': uniq_coor, + 'outs_dec': out.unsqueeze(1) + }) + return self.format_output(fused_feat)
+ +
[docs] def format_output(self, output): + return {self.scatter_keys[0]: output}
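The sparse variant first unions the per-agent reference points with torch.unique(..., return_inverse=True), scatters each agent's features into that union padded with -inf, and then takes the element-wise maximum, so a position observed by only one agent keeps that agent's feature. A small self-contained sketch of the alignment trick (toy coordinates and 2-dim features; the agent index is prepended as in cat_coor_with_idx above):

import torch

coor = [torch.tensor([[0, 0], [1, 2]]),      # ego reference coordinates
        torch.tensor([[1, 2], [3, 4]])]      # coop reference coordinates
feat = [torch.tensor([[1.0, 1.0], [2.0, 2.0]]),
        torch.tensor([[5.0, 0.0], [3.0, 3.0]])]

coor_cat = torch.cat([torch.cat([torch.full((len(c), 1), i), c], dim=1)
                      for i, c in enumerate(coor)], dim=0)
uniq_coor, reverse_inds = torch.unique(coor_cat[:, 1:], dim=0, return_inverse=True)

feats_pad = []
for i in range(len(coor)):
    pad = torch.full((len(uniq_coor), feat[i].shape[-1]), float('-inf'))
    pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i]
    feats_pad.append(pad)
fused = torch.stack(feats_pad, dim=0).max(dim=0).values
print(uniq_coor)  # 3 unique coordinates: [0, 0], [1, 2], [3, 4]
print(fused)      # [1, 2] takes the element-wise max of both agents; the others keep their single observation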
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/naive_fusion.html b/docs/_build/html/_modules/cosense3d/modules/fusion/naive_fusion.html new file mode 100644 index 00000000..17f251aa --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/naive_fusion.html @@ -0,0 +1,167 @@ + + + + + + cosense3d.modules.fusion.naive_fusion — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.fusion.naive_fusion

+import warnings
+from typing import Dict
+
+import torch
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.utils.me_utils import update_me_essentials
+
+
+
[docs]class NaiveFusion(BaseModule): + def __init__(self, stride, **kwargs): + super(NaiveFusion, self).__init__(**kwargs) + if isinstance(stride, int): + self.stride = [stride] + else: + self.stride = stride + +
[docs] def forward(self, ego_feats, coop_feats=None, **kwargs): + fused_feat = [] + fuse_key = self.gather_keys[0] + + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + batch_feat = {} + for stride in self.stride: + coor, feat, ctr = self.fuse_feature_at_stride( + ego_feat, coop_feat, stride, fuse_key + ) + batch_feat[f'p{stride}'] = { + 'coor': coor, + 'feat': feat, + 'ctr': ctr, + } + fused_feat.append(batch_feat) + return self.format_output(fused_feat)
+ +
[docs] def fuse_feature_at_stride(self, ego_feat, coop_feat, stride, fuse_key): + coor = [ego_feat[f'p{stride}']['coor']] + feat = [ego_feat[f'p{stride}']['feat']] + ctr = [ego_feat[f'p{stride}']['ctr']] + # fuse coop to ego + for cpfeat in coop_feat.values(): + if fuse_key not in cpfeat: + continue + cpm = cpfeat[fuse_key][f'p{stride}'] + coor.append(cpm['coor']) + feat.append(cpm['feat']) + ctr.append(cpm['ctr']) + coor = torch.cat(coor, dim=0) + feat = torch.cat(feat, dim=0) + ctr = torch.cat(ctr, dim=0) + return coor, feat, ctr
+ + +
[docs] def format_output(self, output): + return {self.scatter_keys[0]: output}
+ + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/spatial_query_fusion.html b/docs/_build/html/_modules/cosense3d/modules/fusion/spatial_query_fusion.html new file mode 100644 index 00000000..3f8125ee --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/spatial_query_fusion.html @@ -0,0 +1,322 @@ + + + + + + cosense3d.modules.fusion.spatial_query_fusion — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.fusion.spatial_query_fusion

+from typing import Mapping, Any
+
+import torch
+import torch.nn as nn
+
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.utils.common import cat_coor_with_idx
+from cosense3d.modules.plugin.attn import ScaledDotProductAttention
+from cosense3d.modules.utils.localization_utils import register_points
+from cosense3d.modules.utils.common import pad_r
+from cosense3d.modules.utils.misc import MLN
+import cosense3d.modules.utils.positional_encoding as PE
+
+
+
[docs]class SpatialQueryFusion(BaseModule): + def __init__(self, + in_channels, + pc_range, + resolution, + **kwargs): + super().__init__(**kwargs) + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.resolution = resolution + self.attn = ScaledDotProductAttention(in_channels) + +
[docs] def forward(self, ego_feats, coop_feats, **kwargs): + fused_feat = [] + for ego_feat, coop_feat in zip(ego_feats, coop_feats): + coor = [ego_feat['ref_pts']] + feat = [ego_feat['outs_dec'][-1]] + if len(coop_feat) == 0: + fused_feat.append({ + 'ref_pts': coor[0], + 'outs_dec': feat[0].unsqueeze(1) + }) + continue + + # fuse coop to ego + for cpfeat in coop_feat.values(): + coor.append(cpfeat[self.gather_keys[0]]['ref_pts']) + feat.append(cpfeat[self.gather_keys[0]]['outs_dec'][-1]) + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + # coor_int = coor_cat[:, 1:] * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + # coor_int = (coor_int * (1 / self.resolution)).int() + uniq_coor, reverse_inds = torch.unique(coor_cat[:, 1:], dim=0, + return_inverse=True) + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_zeros(len(uniq_coor), feat_cat.shape[-1]) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + q = feats_pad[0].unsqueeze(1) # num_pts, 1, d + kv = torch.stack(feats_pad, dim=1) # num_pts, num_coop_cav, d + out = self.attn(q, kv, kv).squeeze(1) + fused_feat.append({ + 'ref_pts': uniq_coor, + 'outs_dec': out.unsqueeze(1) + }) + return self.format_output(fused_feat)
+ +
[docs] def format_output(self, output): + return {self.scatter_keys[0]: output}
+ + +
[docs]class SpatialQueryAlignFusionRL(BaseModule): + def __init__(self, + in_channels, + pc_range, + resolution, + num_pose_feat=64, + **kwargs): + super().__init__(**kwargs) + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.resolution = resolution + self.emb_dim = in_channels + self.attn = ScaledDotProductAttention(in_channels) + self.pose_pe = MLN(4 * 12, f_dim=self.emb_dim) + self.num_pose_feat = num_pose_feat + self.position_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + +
[docs] def forward(self, det_local, roadline, roadline_preds, ego_queries, + ego_pose_corrected, ego_poses, ego_poses_aug, + cpms, **kwargs): + fused_feat = [] + for i, cpm in enumerate(cpms): + det = det_local[i] + ego_rl, ego_rl_pred, ego_query = roadline[i], roadline_preds[i], ego_queries[i] + ego_pose_corr, ego_pose, pose_aug2g = ego_pose_corrected[i], ego_poses[i], ego_poses_aug[i] + # augment-frame to ego-aligned-world frame + Taug2eaw = ego_pose_corr @ ego_pose.inverse() @ pose_aug2g + ego_bctr = det['preds']['box'][:, :2] + ego_coor = ego_query['ref_pts'] + ego_coor_emb = self.query_embedding(PE.pos2posemb2d(ego_coor[:, :2], self.num_pose_feat)) + ego_feat = ego_query['outs_dec'][-1] + ego_coor_emb + ego_coor = ego_coor * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + coor = [ego_coor] # in augment-frame + feat = [ego_feat] + if len(cpm) == 0: + fused_feat.append({ + 'ref_pts': coor[0], + 'outs_dec': feat[0].unsqueeze(1) + }) + continue + + # fuse coop to ego + for cpfeat in cpm.values(): + if len(cpfeat['box_ctrs']) == 0: + continue + # transformation matrix coop-aligned-world frame to ego-aligned-world frame + if self.training: + # during training, ground-truth poses are used, caw-frame==eaw-frame + Tcaw2aug = Taug2eaw.inverse() + else: + Tcaw2eaw = self.align_coordinates(ego_bctr, ego_rl, ego_rl_pred, Taug2eaw, cpfeat) + Tcaw2aug = Taug2eaw.inverse() @ Tcaw2eaw + T = Tcaw2aug @ cpfeat['Taug2caw'] + + # encode the transformation matrix that transforms feature points + # from erroneous ego-frame to the corrected ego-frame + ref_pts = (T @ pad_r(cpfeat['ref_pts'], 1.0).T)[:3].T + ref_pts_norm = (ref_pts - self.pc_range[:3]) / (self.pc_range[3:] - self.pc_range[:3]) + rot_emb = PE.nerf_positional_encoding(T[:2, :2].flatten(-2)).repeat(len(ref_pts), 1) + pos_emb = self.position_embedding(PE.pos2posemb2d(ref_pts_norm[:, :2], self.num_pose_feat)) + transform_emb = self.pose_pe(pos_emb, rot_emb) + coor.append(ref_pts) + feat.append(cpfeat['feat'][-1] + transform_emb) + + # inplace transformation for coop point cloud: only for visualization in GLViewer + cpfeat['points'][:, :3] = (T @ pad_r(cpfeat['points'][:, :3], 1.0).T)[:3].T + + coor_cat = cat_coor_with_idx(coor) + feat_cat = torch.cat(feat, dim=0) + # coor_int = coor_cat[:, 1:] * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + coor_int = (coor_cat[:, 1:] * (1 / self.resolution)).int() + uniq_coor, reverse_inds = torch.unique(coor_int, dim=0, return_inverse=True) + uniq_coor = (uniq_coor * self.resolution - self.pc_range[:3]) / (self.pc_range[3:] - self.pc_range[:3]) + + feats_pad = [] + for i, c in enumerate(coor): + feat_pad = feat_cat.new_zeros(len(uniq_coor), feat_cat.shape[-1]) + feat_pad[reverse_inds[coor_cat[:, 0] == i]] = feat[i] + feats_pad.append(feat_pad) + q = feats_pad[0].unsqueeze(1) # num_pts, 1, d + kv = torch.stack(feats_pad, dim=1) # num_pts, num_coop_cav, d + out = self.attn(q, kv, kv).squeeze(1) + fused_feat.append({ + 'ref_pts': uniq_coor, + 'outs_dec': out.unsqueeze(1) + }) + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # ax = draw_points_boxes_plt(pc_range=self.pc_range.tolist(), return_ax=True) + # for pts in coor: + # pts = pts.detach().cpu().numpy() + # ax.plot(pts[:, 0], pts[:, 1], '.', markersize=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + return self.format_output(fused_feat)
+ +
[docs] def format_output(self, output, **kwargs): + return {self.scatter_keys[0]: output}
+ +
[docs] def align_coordinates(self, ego_bctr, ego_rl, ego_rl_pred, ego_pose, cpfeat): + coop_bctr = cpfeat['box_ctrs'] + coop_rl = cpfeat['roadline'] + + # transform ego points from aug-frame to ego-aligned world-frame + ego_bctr = (ego_pose @ pad_r(pad_r(ego_bctr, 0.0), 1.0).T).T + ego_rl_pred = (ego_pose @ pad_r(pad_r(ego_rl_pred, 0.0), 1.0).T).T + coop_pts = pad_r(torch.cat([coop_rl, coop_bctr], dim=0)) + ego_pts = torch.cat([pad_r(ego_rl[:, :3]), ego_bctr[:, :3]], dim=0) + + transform, coop_pts_tf = register_points(coop_pts, ego_pts, thr=0.8) + + # import matplotlib.pyplot as plt + # ego_bctr_vis = ego_bctr.detach().cpu().numpy() + # ego_rl_pred_vis = ego_rl_pred.detach().cpu().numpy() + # ego_rl_vis = ego_rl.detach().cpu().numpy() + # coop_bctr_vis = coop_bctr.detach().cpu().numpy() + # coop_rl_vis = coop_rl.detach().cpu().numpy() + # + # plt.plot(ego_rl_vis[:, 0], ego_rl_vis[:, 1], 'g.', markersize=1) + # plt.plot(ego_rl_pred_vis[:, 0], ego_rl_pred_vis[:, 1], 'y.', markersize=1) + # plt.plot(ego_bctr_vis[:, 0], ego_bctr_vis[:, 1], 'yo', markersize=5, markerfacecolor='none') + # plt.plot(coop_rl_vis[:, 0], coop_rl_vis[:, 1], 'r.', markersize=1) + # plt.plot(coop_bctr_vis[:, 0], coop_bctr_vis[:, 1], 'ro', markersize=5, markerfacecolor='none', alpha=0.5) + # # plt.plot(coop_pts_tf[:, 0], coop_pts_tf[:, 1], 'b.', markersize=1) + # plt.savefig("/home/yys/Downloads/tmp.png") + # plt.close() + + return torch.from_numpy(transform).float().to(ego_pose.device)
+ + + + + + + + + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/fusion/temporal_fusion.html b/docs/_build/html/_modules/cosense3d/modules/fusion/temporal_fusion.html new file mode 100644 index 00000000..9011e9b7 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/fusion/temporal_fusion.html @@ -0,0 +1,1046 @@ + + + + + + cosense3d.modules.fusion.temporal_fusion — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.fusion.temporal_fusion

+from typing import Mapping, Any
+
+import torch
+import torch.nn as nn
+
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.utils.misc import SELayer_Linear, MLN, MLN2
+import cosense3d.modules.utils.positional_encoding as PE
+
+
+
[docs]class TemporalLidarFusion(BaseModule): + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + pos_dim=3, + num_pose_feat=64, + topk=2048, + num_propagated=256, + memory_len=1024, + num_query=644, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk = topk + self.num_query = num_query + self.num_propagated = num_propagated + self.memory_len = memory_len + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.reference_points = nn.Embedding(self.num_query, self.pos_dim) + self.pseudo_reference_points = nn.Embedding(self.num_propagated, self.pos_dim) + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + +
[docs] def init_weights(self): + nn.init.uniform_(self.reference_points.weight.data, 0, 1) + nn.init.uniform_(self.pseudo_reference_points.weight.data, 0, 1) + self.pseudo_reference_points.weight.requires_grad = False + self.transformer.init_weights()
+ +
[docs] def forward(self, rois, bev_feat, mem_dict, **kwargs): + feat, ctr = self.gather_topk(rois, bev_feat) + + pos = ((ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + pos_emb = self.position_embeding(self.embed_pos(pos)) + memory = self.memory_embed(feat) + pos_emb = self.featurized_pe(pos_emb, memory) + + reference_points = self.reference_points.weight.unsqueeze(0).repeat(memory.shape[0], 1, 1) + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos = \ + self.temporal_alignment(query_pos, tgt, reference_points, mem_dict) + mask_dict = [None, None] + outs_dec, _ = self.transformer(memory, tgt, query_pos, pos_emb, + mask_dict, temp_memory, temp_pos) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(rois)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def gather_topk(self, rois, bev_feats): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{self.feature_stride}']['ctr'] + feat = bev_feat[f'p{self.feature_stride}']['feat'] + scores = roi['scr'] + if scores.shape[0] < self.topk: + raise NotImplementedError + else: + topk_inds = torch.topk(scores, k=self.topk).indices + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr
+ +
[docs] def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim)
+ +
[docs] def temporal_alignment(self, query_pos, tgt, ref_pts, mem_dict): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + if not x.all(): + # pad the recent memory ref pts with pseudo points + pseudo_ref_pts = self.pseudo_reference_points.weight.unsqueeze(0).repeat(B, 1, 1) + x = x.view(*((-1,) + (1,) * (pseudo_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + pseudo_ref_pts * (1 - x) + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + temp_memory = mem_dict['embeddings'] + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + query_pos += self.time_embedding( + self.embed_pos(torch.zeros_like(ref_pts[..., :1]), self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos
+ + +
[docs]class TemporalFusion(BaseModule): + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + pos_dim=3, + num_pose_feat=128, + topk_ref_pts=1024, + topk_feat=512, + num_propagated=256, + memory_len=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.topk_feat = topk_feat + self.ref_pts_stride = ref_pts_stride + self.num_propagated = num_propagated + self.memory_len = memory_len + self.transformer_itrs = transformer_itrs + self.global_ref_time = global_ref_time + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.LayerNorm(self.embed_dims * 4), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + +
[docs] def init_weights(self): + self.transformer.init_weights()
+ +
[docs] def forward(self, rois, bev_feat, mem_dict, time_scale=None, **kwargs): + ref_feat, ref_ctr = self.gather_topk(rois, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + mem_feat, mem_ctr = self.gather_topk(rois, bev_feat, self.feature_stride, self.topk_feat) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos = ((mem_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos_emb = self.position_embeding(self.embed_pos(mem_pos)) + memory = self.memory_embed(mem_feat) + pos_emb = self.featurized_pe(mem_pos_emb, memory) + + if time_scale is not None: + ref_time = torch.rad2deg(torch.arctan2(ref_ctr[..., 1:2], ref_ctr[..., 0:1])) + 180 + ref_time = torch.stack([ts[inds.long()] for inds, ts in zip(ref_time, time_scale)], dim=0) + else: + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = [None, None] + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(memory, tgt, query_pos, pos_emb, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(rois)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + scores = roi[f'p{stride}']['conf'][:, + roi[f'p{stride}']['reg'].shape[-1] - 1:].sum(dim=-1) + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr
+ +
[docs] def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim)
+ +
[docs] def temporal_alignment(self, query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + temp_memory = mem_dict['embeddings'] + + if not x.all(): + # pad the recent memory ref pts with pseudo points + ext_inds = torch.randperm(self.topk_ref_pts)[:self.num_propagated] + ext_ref_pts = ref_pts[:, ext_inds] + ext_feat = ref_feat[:, ext_inds] + # pseudo_ref_pts = pseudo_ref_pts + torch.rand_like(pseudo_ref_pts) + x = x.view(*((-1,) + (1,) * (ext_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + ext_ref_pts * (1 - x) + ext_feat = temp_memory[:, 0] * x + ext_feat * (1 - x) + else: + ext_feat = temp_memory[:, 0] + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + if ref_time is None: + ref_time = torch.zeros_like(ref_pts[..., :1]) + self.global_ref_time + query_pos += self.time_embedding(self.embed_pos(ref_time, self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos, ext_feat
+ + +
[docs]class LocalTemporalFusion(BaseModule): + """Modified from TemporalFusion to standardize input and output keys""" + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + pos_dim=3, + num_pose_feat=128, + topk_ref_pts=1024, + topk_feat=512, + num_propagated=256, + memory_len=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + norm_fusion=False, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.topk_feat = topk_feat + self.ref_pts_stride = ref_pts_stride + self.num_propagated = num_propagated + self.memory_len = memory_len + self.transformer_itrs = transformer_itrs + self.global_ref_time = global_ref_time + self.norm_fusion = norm_fusion + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + if self.norm_fusion: + self.local_global_fusion = nn.Sequential( + nn.Linear(self.embed_dims * 2, self.embed_dims), + nn.LayerNorm(self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims), + ) + + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.LayerNorm(self.embed_dims * 4), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + +
[docs] def init_weights(self): + self.transformer.init_weights()
+ +
[docs] def forward(self, local_roi, global_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + mem_feat, mem_ctr = self.gather_topk(global_roi, bev_feat, self.feature_stride, self.topk_feat) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos = ((mem_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos_emb = self.position_embeding(self.embed_pos(mem_pos)) + memory = self.memory_embed(mem_feat) + pos_emb = self.featurized_pe(mem_pos_emb, memory) + + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = [None, None] + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(memory, tgt, query_pos, pos_emb, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + if self.norm_fusion: + outs_dec = self.local_global_fusion(torch.cat([local_feat, global_feat], dim=-1)) + else: + # simple addition will lead to large values in long sequences + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + if 'scr' in roi: + scores = roi['scr'] + else: + scores = roi[f'p{stride}']['scr'] + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr
+ +
[docs] def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim)
+ +
[docs] def temporal_alignment(self, query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + temp_memory = mem_dict['embeddings'] + + if not x.all(): + # pad the recent memory ref pts with pseudo points + ext_inds = torch.randperm(self.topk_ref_pts)[:self.num_propagated] + ext_ref_pts = ref_pts[:, ext_inds] + ext_feat = ref_feat[:, ext_inds] + # pseudo_ref_pts = pseudo_ref_pts + torch.rand_like(pseudo_ref_pts) + x = x.view(*((-1,) + (1,) * (ext_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + ext_ref_pts * (1 - x) + ext_feat = temp_memory[:, 0] * x + ext_feat * (1 - x) + else: + ext_feat = temp_memory[:, 0] + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + if ref_time is None: + ref_time = torch.zeros_like(ref_pts[..., :1]) + self.global_ref_time + query_pos += self.time_embedding(self.embed_pos(ref_time, self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos, ext_feat
+ + +
[docs]class LocalTemporalFusionV1(LocalTemporalFusion): +
[docs] def forward(self, rois, bev_feat, mem_dict, **kwargs): + return super().forward(rois, rois, bev_feat, mem_dict, **kwargs)
+ + +
[docs]class LocalTemporalFusionV2(LocalTemporalFusion): +
[docs] def forward(self, local_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = None + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(None, tgt, query_pos, None, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs}
+ + +
[docs]class LocalTemporalFusionV3(BaseModule): + """TemporalFusion with feature flow""" + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + pos_dim=3, + num_pose_feat=128, + topk_ref_pts=1024, + topk_feat=512, + num_propagated=256, + memory_len=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + norm_fusion=False, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = num_pose_feat + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.topk_feat = topk_feat + self.ref_pts_stride = ref_pts_stride + self.num_propagated = num_propagated + self.memory_len = memory_len + self.transformer_itrs = transformer_itrs + self.global_ref_time = global_ref_time + self.norm_fusion = norm_fusion + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + if self.norm_fusion: + self.local_global_fusion = nn.Sequential( + nn.Linear(self.embed_dims * 2, self.embed_dims), + nn.LayerNorm(self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims), + ) + + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.LayerNorm(self.embed_dims * 4), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.LayerNorm(self.embed_dims), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + + self.time_embedding = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims) + ) + + # encoding ego pose + pose_nerf_dim = (3 + 3 * 4) * 12 + self.ego_pose_pe = MLN(pose_nerf_dim, f_dim=self.embed_dims) + self.ego_pose_memory = MLN(pose_nerf_dim, f_dim=self.embed_dims) + +
[docs] def init_weights(self): + self.transformer.init_weights()
+ +
[docs] def forward(self, local_roi, global_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + mem_feat, mem_ctr = self.gather_topk(global_roi, bev_feat, self.feature_stride, self.topk_feat) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos = ((mem_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + mem_pos_emb = self.position_embeding(self.embed_pos(mem_pos)) + memory = self.memory_embed(mem_feat) + pos_emb = self.featurized_pe(mem_pos_emb, memory) + + ref_time = None + reference_points = ref_pos.clone() + query_pos = self.query_embedding(self.embed_pos(reference_points)) + tgt = torch.zeros_like(query_pos) + + tgt, query_pos, reference_points, temp_memory, temp_pos, ext_feat = \ + self.temporal_alignment(query_pos, tgt, reference_points, + ref_feat, mem_dict, ref_time) + mask_dict = [None, None] + global_feat = [] + + for _ in range(self.transformer_itrs): + tgt = self.transformer(memory, tgt, query_pos, pos_emb, + mask_dict, temp_memory, temp_pos)[0][-1] + global_feat.append(tgt) + global_feat = torch.stack(global_feat, dim=0) + local_feat = torch.cat([ref_feat, ext_feat], dim=1) + local_feat = local_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + if self.norm_fusion: + outs_dec = self.local_global_fusion(torch.cat([local_feat, global_feat], dim=-1)) + else: + # simple addition will lead to large values in long sequences + outs_dec = local_feat + global_feat + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + if 'scr' in roi: + scores = roi['scr'] + else: + scores = roi[f'p{stride}']['scr'] + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr
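The selection logic in gather_topk can be exercised in isolation. A small standalone sketch (not part of the module API) of the score-sorted top-k pick with repeat-padding when fewer candidates than topk are available:

import torch

def select_topk(ctr, feat, scores, topk):
    sort_inds = scores.argsort(descending=True)
    if scores.shape[0] < topk:
        # repeat the sorted indices until at least topk entries exist
        n_repeat = topk // len(scores) + 1
        sort_inds = torch.cat([sort_inds] * n_repeat, dim=0)
    topk_inds = sort_inds[:topk]
    return ctr[topk_inds], feat[topk_inds]

ctr, feat, scores = torch.rand(5, 2), torch.rand(5, 16), torch.rand(5)
sel_ctr, sel_feat = select_topk(ctr, feat, scores, topk=8)
assert sel_ctr.shape == (8, 2) and sel_feat.shape == (8, 16)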
+ +
[docs] def embed_pos(self, pos, dim=None): + dim = self.num_pose_feat if dim is None else dim + return getattr(PE, f'pos2posemb{pos.shape[-1]}d')(pos, dim)
+ +
[docs] def temporal_alignment(self, query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None): + B = ref_pts.shape[0] + mem_dict = self.stack_dict_list(mem_dict) + x = mem_dict['prev_exists'].view(-1) + # metric coords --> normalized coords + temp_ref_pts = ((mem_dict['ref_pts'] - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:3+self.pos_dim] - self.lidar_range[:self.pos_dim])) + temp_memory = mem_dict['embeddings'] + + if not x.all(): + # pad the recent memory ref pts with pseudo points + ext_inds = torch.randperm(self.topk_ref_pts)[:self.num_propagated] + ext_ref_pts = ref_pts[:, ext_inds] + ext_feat = ref_feat[:, ext_inds] + # pseudo_ref_pts = pseudo_ref_pts + torch.rand_like(pseudo_ref_pts) + x = x.view(*((-1,) + (1,) * (ext_ref_pts.ndim - 1))) + temp_ref_pts[:, 0] = temp_ref_pts[:, 0] * x + ext_ref_pts * (1 - x) + ext_feat = temp_memory[:, 0] * x + ext_feat * (1 - x) + else: + ext_feat = temp_memory[:, 0] + + temp_pos = self.query_embedding(self.embed_pos(temp_ref_pts)) + rec_pose = torch.eye( + 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + B, query_pos.size(1), 1, 1) + + # Get ego motion-aware tgt and query_pos for the current frame + rec_motion = torch.cat( + [torch.zeros_like(tgt[..., :3]), + rec_pose[..., :3, :].flatten(-2)], dim=-1) + rec_motion = PE.nerf_positional_encoding(rec_motion) + tgt = self.ego_pose_memory(tgt, rec_motion) + query_pos = self.ego_pose_pe(query_pos, rec_motion) + + # get ego motion-aware reference points embeddings and memory for past frames + memory_ego_motion = torch.cat( + [mem_dict['velo'], mem_dict['timestamp'], + mem_dict['pose'][..., :3, :].flatten(-2)], dim=-1).float() + memory_ego_motion = PE.nerf_positional_encoding(memory_ego_motion) + temp_pos = self.ego_pose_pe(temp_pos, memory_ego_motion) + temp_memory = self.ego_pose_memory(temp_memory, memory_ego_motion) + + # get time-aware pos embeddings + if ref_time is None: + ref_time = torch.zeros_like(ref_pts[..., :1]) + self.global_ref_time + query_pos += self.time_embedding(self.embed_pos(ref_time, self.embed_dims)) + temp_pos += self.time_embedding( + self.embed_pos(mem_dict['timestamp'], self.embed_dims).float()) + + tgt = torch.cat([tgt, temp_memory[:, 0]], dim=1) + query_pos = torch.cat([query_pos, temp_pos[:, 0]], dim=1) + ref_pts = torch.cat([ref_pts, temp_ref_pts[:, 0]], dim=1) + # rec_pose = torch.eye( + # 4, device=query_pos.device).reshape(1, 1, 4, 4).repeat( + # B, query_pos.shape[1] + temp_pos[:, 0].shape[1], 1, 1) + temp_memory = temp_memory[:, 1:].flatten(1, 2) + temp_pos = temp_pos[:, 1:].flatten(1, 2) + + return tgt, query_pos, ref_pts, temp_memory, temp_pos, ext_feat
+ + +
[docs]class LocalNaiveFusion(BaseModule): + """This is a naive replacement of LocalTemporalFusion by only selecting the topk points for later spatial fusion""" + def __init__(self, + in_channels, + feature_stride, + lidar_range, + pos_dim=3, + topk_ref_pts=1024, + ref_pts_stride=2, + transformer_itrs=1, + global_ref_time=0, + **kwargs): + super().__init__(**kwargs) + self.pos_dim = pos_dim + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk_ref_pts = topk_ref_pts + self.ref_pts_stride = ref_pts_stride + self.transformer_itrs = transformer_itrs + self.global_ref_time = global_ref_time + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + +
[docs] def forward(self, local_roi, global_roi, bev_feat, mem_dict, **kwargs): + ref_feat, ref_ctr = self.gather_topk(local_roi, bev_feat, self.ref_pts_stride, self.topk_ref_pts) + + ref_pos = ((ref_ctr - self.lidar_range[:self.pos_dim]) / + (self.lidar_range[3:self.pos_dim + 3] - self.lidar_range[:self.pos_dim])) + outs_dec = ref_feat[None].repeat(self.transformer_itrs, 1, 1, 1) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': ref_pos[i], + } for i in range(len(bev_feat)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def gather_topk(self, rois, bev_feats, stride, topk): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{stride}']['ctr'] + feat = bev_feat[f'p{stride}']['feat'] + if 'scr' in roi: + scores = roi['scr'] + else: + scores = roi[f'p{stride}']['scr'] + sort_inds = scores.argsort(descending=True) + if scores.shape[0] < topk: + n_repeat = topk // len(scores) + 1 + sort_inds = torch.cat([sort_inds] * n_repeat, dim=0) + + topk_inds = sort_inds[:topk] + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + # pad 2d coordinates to 3d if needed + if topk_ctr.shape[-1] < self.pos_dim: + pad_dim = self.pos_dim - topk_ctr.shape[-1] + topk_ctr = torch.cat([topk_ctr, torch.zeros_like(topk_ctr[..., :pad_dim])], dim=-1) + return topk_feat, topk_ctr
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/bev.html b/docs/_build/html/_modules/cosense3d/modules/heads/bev.html new file mode 100644 index 00000000..cdb7b32d --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/bev.html @@ -0,0 +1,385 @@ + + + + + + cosense3d.modules.heads.bev — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.heads.bev

+import os
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.utils.me_utils import *
+from cosense3d.modules.utils.common import pad_r, linear_last, cat_coor_with_idx
+from cosense3d.ops.utils import points_in_boxes_gpu
+from cosense3d.modules.losses import edl, build_loss
+from cosense3d.modules.plugin import build_plugin_module
+from cosense3d.modules.plugin.attn import NeighborhoodAttention
+
+
+
[docs]class BEV(BaseModule): + def __init__(self, + data_info, + in_dim, + stride, + target_assigner, + loss_cls, + num_cls=1, + class_names_each_head=None, + down_sample_tgt=False, + generate_roi_scr=True, + **kwargs): + super(BEV, self).__init__(**kwargs) + self.in_dim = in_dim + self.class_names_each_head = class_names_each_head + self.down_sample_tgt = down_sample_tgt + self.stride = stride + self.num_cls = num_cls + self.generate_roi_scr = generate_roi_scr + for k, v in data_info.items(): + setattr(self, k, v) + update_me_essentials(self, data_info, self.stride) + + self.reg_layer = linear_last(in_dim, 32, num_cls, bias=True) + + self.tgt_assigner = build_plugin_module(target_assigner) + self.loss_cls = build_loss(**loss_cls) + self.is_edl = True if 'edl' in self.loss_cls.name.lower() else False + +
[docs] def forward(self, stensor_list, **kwargs): + coor, feat, ctr = self.format_input(stensor_list) + + if self.training and self.down_sample_tgt: + coor, feat = self.down_sample(coor, feat) + + centers = indices2metric(coor, self.voxel_size) + reg = self.reg_layer(feat) + + conf, unc = self.tgt_assigner.get_predictions( + reg, self.is_edl, getattr(self.loss_cls, 'activation')) + + out = { + 'ctr': centers, + 'reg': reg, + 'conf': conf, + 'unc': unc, + } + if self.generate_roi_scr: + out['scr'] = conf.max(dim=-1).values + + return self.format_output(out, len(stensor_list))
+ +
[docs] def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride)
+ +
[docs] def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['ctr'][:, 0] == i + output_new['ctr'].append(output['ctr'][mask, 1:]) + output_new['reg'].append(output['reg'][mask]) + output_new['conf'].append(output['conf'][mask]) + output_new['unc'].append(output['unc'][mask]) + if 'scr' in output_new: + output_new['scr'].append(output['scr'][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output
+ +
[docs] def down_sample(self, coor, feat): + keep = torch.rand_like(feat[:, 0]) > 0.5 + coor = coor[keep] + feat = feat[keep] + + return coor, feat
+ +
[docs] def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + tgt_pts = self.cat_data_from_list(batch_list, 'ctr', pad_idx=True) + boxes_vis = gt_boxes[0][:, :7].detach().cpu().numpy() + gt_boxes = self.cat_data_from_list(gt_boxes, pad_idx=True) + conf = self.cat_data_from_list(batch_list, 'conf') + tgt_pts, tgt_label, valid = self.tgt_assigner.assign( + tgt_pts, gt_boxes[:, :8], len(batch_list), conf, **kwargs) + epoch_num = kwargs.get('epoch', 0) + reg = self.cat_data_from_list(batch_list, 'reg') + + # if kwargs['itr'] % 100 == 0: + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # from matplotlib import colormaps + # jet = colormaps['jet'] + # points = batch_list[0]['ctr'].detach().cpu().numpy() + # scores = batch_list[0]['conf'][:, self.num_cls - 1:].detach().cpu().numpy() + # ax = draw_points_boxes_plt( + # pc_range=[-144, -41.6, -3.0, 144, 41.6, 1.0], + # # points=points, + # boxes_gt=boxes_vis, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], c=scores, cmap=jet, s=3, marker='s', vmin=0, vmax=1) + # plt.savefig(f"{os.environ['HOME']}/Downloads/tmp1.jpg") + # plt.close() + + if valid is None: + # targets are not down-sampled + avg_factor = max(tgt_label.sum(), 1) + loss_cls = self.loss_cls( + reg, + tgt_label, + temp=epoch_num, + avg_factor=avg_factor + ) + else: + # negative targets are not down-sampled to a ratio to the positive samples + loss_cls = self.loss_cls( + reg[valid], + tgt_label, + temp=epoch_num, + ) + loss_dict = {'bev_loss': loss_cls} + return loss_dict
+ + +
[docs]class BEVMultiResolution(BaseModule): + def __init__(self, strides, strides_for_loss, **kwargs): + super().__init__(**kwargs) + self.strides = strides + self.strides_for_loss = strides_for_loss + for s in strides: + kwargs['stride'] = s + setattr(self, f'head_p{s}', BEV(**kwargs)) + +
[docs] def forward(self, stensor_list, *args, **kwargs): + out_list = [{} for b in range(len(stensor_list))] + for s in self.strides: + out = getattr(self, f'head_p{s}')(stensor_list)[self.scatter_keys[0]] + for i, x in enumerate(out): + out_list[i][f'p{s}'] = x + + return {self.scatter_keys[0]: out_list}
+ +
[docs] def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + loss_dict = {} + for s in self.strides_for_loss: + ldict = getattr(self, f'head_p{s}').loss( + [l[f'p{s}'] for l in batch_list], gt_boxes, gt_labels, **kwargs) + for k, v in ldict.items(): + loss_dict[f'{k}_s{s}'] = v + return loss_dict
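The per-stride fan-out used by BEVMultiResolution is a small, reusable pattern: the same kwargs are cloned into one sub-head per stride, and the per-stride outputs are regrouped per sample. A toy sketch with a hypothetical DummyHead (not the real BEV head) to show the wiring:

import torch.nn as nn

class DummyHead(nn.Module):
    def __init__(self, in_dim, stride):
        super().__init__()
        self.stride = stride
        self.fc = nn.Linear(in_dim, 1)

class MultiRes(nn.Module):
    def __init__(self, strides, **kwargs):
        super().__init__()
        self.strides = strides
        for s in strides:
            kwargs['stride'] = s
            setattr(self, f'head_p{s}', DummyHead(**kwargs))

m = MultiRes([2, 8], in_dim=64)
print(m.head_p2.stride, m.head_p8.stride)  # 2 8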
+ + +
[docs]class ContinuousBEV(BaseModule): + def __init__(self, + out_channels, + data_info, + in_dim, + stride, + context_decoder, + target_assigner, + loss_cls, + class_names_each_head=None, + **kwargs): + super().__init__(**kwargs) + self.in_dim = in_dim + self.class_names_each_head = class_names_each_head + self.stride = stride + for k, v in data_info.items(): + setattr(self, k, v) + update_me_essentials(self, data_info, self.stride) + + self.context_decoder = build_plugin_module(context_decoder) + + self.reg_layer = linear_last(in_dim, 32, out_channels, bias=True) + + self.tgt_assigner = build_plugin_module(target_assigner) + self.loss_cls = build_loss(**loss_cls) + +
[docs] @torch.no_grad() + def sample_reference_points(self, centers, gt_boxes, gt_labels): + gt_boxes = self.cat_data_from_list(gt_boxes, pad_idx=True) + if self.training: + new_pts = centers.clone() + new_pts[:, 1:] += (torch.rand_like(centers[:, 1:]) - 0.5) * self.res[0] + ref_pts, ref_label, _ = self.tgt_assigner.assign( + new_pts, gt_boxes, len(gt_boxes)) + else: + ref_pts, ref_label, _ = self.tgt_assigner.assign( + centers, gt_boxes, len(gt_boxes), down_sample=False) + return ref_pts, ref_label
+ +
[docs] def get_evidence(self, ref_pts, coor, feat): + raise NotImplementedError
+ +
[docs] def forward(self, stensor_list, gt_boxes, gt_labels, **kwargs): + coor, feat, ctr = self.format_input(stensor_list) + centers = indices2metric(coor, self.voxel_size) + ref_pts, ref_label = self.sample_reference_points( + centers, gt_boxes, gt_labels) + evidence = self.get_evidence(ref_pts, coor, feat) + conf, unc = edl.evidence_to_conf_unc(evidence) + + out = { + 'ref_pts': ref_pts, + 'ref_lbls': ref_label, + 'evi': evidence, + 'conf': conf, + 'unc': unc + } + + return self.format_output(out, len(stensor_list))
+ +
[docs] def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride)
+ +
[docs] def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['ref_pts'][:, 0] == i + output_new['ref_pts'].append(output['ref_pts'][mask, 1:]) + output_new['ref_lbls'].append(output['ref_lbls'][mask]) + output_new['evi'].append(output['evi'][mask]) + output_new['conf'].append(output['conf'][mask]) + output_new['unc'].append(output['unc'][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output
+ +
[docs] def down_sample(self, coor, feat): + keep = torch.rand_like(feat[:, 0]) > 0.5 + coor = coor[keep] + feat = feat[keep] + + return coor, feat
+ +
[docs] def loss(self, batch_list, **kwargs): + tgt_lbl = self.cat_data_from_list(batch_list, 'ref_lbls') + epoch_num = kwargs.get('epoch', 0) + evidence = self.cat_data_from_list(batch_list, 'evi') + # avg_factor = max(tgt_label.sum(), 1) + loss_cls = self.loss_cls( + evidence, + tgt_lbl, + temp=epoch_num, + # avg_factor=avg_factor + ) + loss_dict = {'bev_loss': loss_cls} + return loss_dict
+ + +
[docs]class ContiGevBEV(ContinuousBEV): + +
[docs] def get_evidence(self, ref_pts, coor, feat): + reg = self.reg_layer(feat) + reg = self.context_decoder(ref_pts, coor, reg) + return reg
+ + +
[docs]class ContiAttnBEV(ContinuousBEV): + +
[docs] def get_evidence(self, ref_pts, coor, feat): + ref_context = self.context_decoder(ref_pts, coor, feat) + reg = self.reg_layer(ref_context) + return reg.relu()
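The ContinuousBEV heads feed non-negative evidence (note the final relu above) into edl.evidence_to_conf_unc. As a point of reference only, the standard evidential-deep-learning mapping looks like the sketch below; this is an assumption about what the library function computes, not a copy of it:

import torch

def evidence_to_conf_unc(evidence):
    alpha = evidence + 1                  # Dirichlet parameters
    strength = alpha.sum(dim=-1, keepdim=True)
    conf = alpha / strength               # expected class probabilities
    unc = evidence.shape[-1] / strength   # vacuity: K / S
    return conf, unc.squeeze(-1)

conf, unc = evidence_to_conf_unc(torch.tensor([[0.0, 4.0]]))
print(conf, unc)  # tensor([[0.1667, 0.8333]]) tensor([0.3333])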
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/bev_dense.html b/docs/_build/html/_modules/cosense3d/modules/heads/bev_dense.html new file mode 100644 index 00000000..4f9e1d5e --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/bev_dense.html @@ -0,0 +1,185 @@ + + + + + + cosense3d.modules.heads.bev_dense — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.heads.bev_dense

+"""
+Segmentation head for BEV understanding.
+"""
+
+import torch
+import torch.nn as nn
+from einops import rearrange
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.losses import build_loss
+
+
+
[docs]class BevSegHead(BaseModule): + def __init__(self, target, input_dim, output_class, loss_cls, **kwargs): + super(BevSegHead, self).__init__(**kwargs) + self.target = target + if 'dynamic' in self.target: + self.dynamic_head = nn.Conv2d(input_dim, + output_class, + kernel_size=3, + padding=1) + if 'static' in self.target: + self.static_head = nn.Conv2d(input_dim, + output_class, + kernel_size=3, + padding=1) + self.loss_cls = build_loss(**loss_cls) + +
[docs] def forward(self, x, **kwargs): + x = self.stack_data_from_list(x) + out_dict = {} + if 'dynamic' in self.target: + out_dict['dynamic_bev_pred'] = self.dynamic_head(x) + if not self.training: + out_dict['dynamic_bev_pred'] = out_dict['dynamic_bev_pred'].permute(0, 2, 3, 1).softmax(dim=-1) + if 'static' in self.target: + out_dict['static_bev_pred'] = self.static_head(x) + if not self.training: + out_dict['static_bev_pred'] = out_dict['static_bev_pred'].permute(0, 2, 3, 1).softmax(dim=-1) + + # output_list = self.compose_result_list(out_dict, len(x)) + return out_dict
+ +
[docs] def loss(self, dynamic_bev_preds, dynamic_bev, **kwargs): + dynamic_bev_preds = self.stack_data_from_list(dynamic_bev_preds) + dynamic_bev_gt = torch.stack(dynamic_bev, dim=0) + loss_dict = self.loss_cls( + dynamic_pred=dynamic_bev_preds, + dynamic_gt=dynamic_bev_gt + ) + return loss_dict
+ + +
[docs]class BevRoIDenseHead(BaseModule): + def __init__(self, in_dim, stride, num_cls=1, loss_cls=None, **kwargs): + super(BevRoIDenseHead, self).__init__(**kwargs) + self.head = nn.Conv2d(in_dim, num_cls, kernel_size=1) + self.stride = stride + if loss_cls is not None: + self.loss_cls = build_loss(**loss_cls) + +
[docs] def forward(self, input, **kwargs): + x = self.stack_data_from_list([x[f'p{self.stride}'] for x in input]) + x = self.head(x) + + # output_list = self.compose_result_list(out_dict, len(x)) + return {self.scatter_keys[0]: x}
+ +
[docs] def loss(self, bev_preds, bev_tgt, **kwargs): + bev_preds = self.stack_data_from_list(bev_preds) + dynamic_bev_gt = torch.stack(bev_tgt, dim=0) + loss_dict = self.loss_cls( + dynamic_pred=bev_preds, + dynamic_gt=dynamic_bev_gt + ) + return loss_dict
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/det_anchor_dense.html b/docs/_build/html/_modules/cosense3d/modules/heads/det_anchor_dense.html new file mode 100644 index 00000000..18469f97 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/det_anchor_dense.html @@ -0,0 +1,256 @@ + + + + + + cosense3d.modules.heads.det_anchor_dense — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.heads.det_anchor_dense

+from typing import List
+
+import torch
+from torch import nn
+from cosense3d.modules import BaseModule
+from cosense3d.modules import plugin
+from cosense3d.modules.losses import build_loss
+from cosense3d.utils.misc import multi_apply
+
+
+
[docs]class DetAnchorDense(BaseModule): + def __init__(self, + in_channels, + loss_cls, + loss_box, + num_classes=1, + stride=None, + target_assigner=None, + get_boxes_when_training=False, + box_stamper=None, + **kwargs): + super(DetAnchorDense, self).__init__(**kwargs) + assert num_classes == 1, 'currently only support binary classification.' + self.num_classes = num_classes + self.get_boxes_when_training = get_boxes_when_training + self.target_assigner = plugin.build_plugin_module(target_assigner) + self.stride = stride + if self.stride is None: + assert target_assigner is not None + self.stride = self.target_assigner.stride + self.num_anchors = self.target_assigner.num_anchors + self.code_size = self.target_assigner.box_coder.code_size + self.cls_head = nn.Conv2d(in_channels, self.num_anchors, kernel_size=1) + self.reg_head = nn.Conv2d(in_channels, self.code_size * self.num_anchors, kernel_size=1) + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + if box_stamper is not None: + self.box_stamper = plugin.build_plugin_module(box_stamper) + +
[docs] def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.init.xavier_uniform_(m.weight) + self._is_init = True
+ +
[docs] def forward(self, bev_feat_list, points=None, **kwargs): + if isinstance(bev_feat_list[0], torch.Tensor): + bev_feat = torch.stack(bev_feat_list, dim=0) + elif isinstance(bev_feat_list[0], dict): + bev_feat = torch.stack([x[f'p{self.stride}'] for x in bev_feat_list], dim=0) + else: + raise NotImplementedError + + cls = self.cls_head(bev_feat) + reg = self.reg_head(bev_feat) + + out = {'cls': cls, 'reg': reg} + + if self.get_boxes_when_training or not self.training: + preds = self.predictions(out) + if hasattr(self, 'box_stamper'): + assert points is not None + preds = self.box_stamper(preds, points) + out['preds'] = preds + + return self.format_output(out, len(bev_feat))
+ +
[docs] def format_output(self, output, B): + # decompose batch + if 'preds' in output: + preds_list = [] + for i in range(B): + preds = {} + mask = output['preds']['idx'] == i + for k, v in output['preds'].items(): + preds[k] = v[mask] + preds_list.append(preds) + output['preds'] = preds_list + output = {self.scatter_keys[0]: self.compose_result_list(output, B)} + return output
+ +
[docs] def loss(self, preds, gt_boxes, gt_labels, **kwargs): + """The dense BEV maps should have the shape (b, c, h, w).""" + pred_cls = self.stack_data_from_list(preds, 'cls') + pred_reg = self.stack_data_from_list(preds, 'reg') + # convert to shape(b, c, h, w) -> (nwh, c) to match the anchors + b, c, h, w = pred_cls.shape + pred_cls = pred_cls.permute(0, 3, 2, 1).reshape(-1) + pred_reg = pred_reg.permute(0, 3, 2, 1).reshape(-1, 7) + cls_tgt, reg_tgt, _ = multi_apply( + self.target_assigner.assign, gt_boxes) + cls_tgt = torch.cat(cls_tgt, dim=0) + reg_tgt = torch.cat(reg_tgt, dim=0) + + # vis_cls_pred = pred_cls.view(b, w, h, c).softmax(dim=-1).max(dim=-1).values[0] + # vis_cls_tgt = cls_tgt.view(b, w, h, c).max(dim=-1).values[0] + # img = torch.cat([vis_cls_pred, vis_cls_tgt], dim=1).detach().cpu().numpy().T + # import matplotlib.pyplot as plt + # + # plt.imshow(img) + # plt.show() + # plt.close() + + pos_mask = cls_tgt > 0 + cared = cls_tgt >= 0 + avg_factor = max(pos_mask.sum(), 1) + # downsample negative + # neg_inds = torch.where(cls_tgt == 0)[0] + # neg_inds = neg_inds[torch.randperm(len(neg_inds))[:avg_factor * 5]] + # cared[neg_inds] = True + + # focal loss encode the last dim of tgt as background + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = 0 + + loss_cls = self.loss_cls(pred_cls[cared].view(-1, 1), labels[cared], + avg_factor=avg_factor) + + reg_preds_sin, reg_tgts_sin = self.add_sin_difference(pred_reg[pos_mask], reg_tgt) + loss_box = self.loss_box(reg_preds_sin, reg_tgts_sin, + avg_factor=avg_factor / reg_preds_sin.shape[-1]) + + return { + 'cls_loss': loss_cls, + 'box_loss': loss_box + }
+ +
[docs] @staticmethod + def add_sin_difference(boxes1, boxes2, dim=6): + assert dim != -1 + rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * \ + torch.cos(boxes2[..., dim:dim + 1]) + rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * \ + torch.sin(boxes2[..., dim:dim + 1]) + + boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, + boxes1[..., dim + 1:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, + boxes2[..., dim + 1:]], dim=-1) + return boxes1, boxes2
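A quick numeric check of why add_sin_difference is used: after the encoding, the difference on the angle channel equals sin(pred - target), so two headings that differ by almost 2*pi produce a small regression error instead of a huge one.

import torch

a = torch.tensor([3.1])   # predicted yaw
b = torch.tensor([-3.1])  # target yaw, nearly the same heading
encoded_pred = torch.sin(a) * torch.cos(b)
encoded_tgt = torch.cos(a) * torch.sin(b)
print(encoded_pred - encoded_tgt)  # ~ sin(a - b) = sin(6.2) ~ -0.083
print(a - b)                       # the raw difference of 6.2 would dominate an L1 loss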
+ +
[docs] def predictions(self, preds): + return self.target_assigner.get_predictions(preds)
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/det_anchor_sparse.html b/docs/_build/html/_modules/cosense3d/modules/heads/det_anchor_sparse.html new file mode 100644 index 00000000..6475e47f --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/det_anchor_sparse.html @@ -0,0 +1,246 @@ + + + + + + cosense3d.modules.heads.det_anchor_sparse — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.heads.det_anchor_sparse

+from typing import List
+
+import torch
+from torch import nn
+from cosense3d.modules import BaseModule
+from cosense3d.modules import plugin
+from cosense3d.modules.losses import build_loss
+from cosense3d.utils.misc import multi_apply
+from cosense3d.modules.utils.common import linear_last
+
+
+
[docs]class DetAnchorSparse(BaseModule): + def __init__(self, + in_channels, + loss_cls, + loss_box, + num_classes=1, + target_assigner=None, + get_boxes_when_training=False, + get_roi_scores=False, + **kwargs): + super(DetAnchorSparse, self).__init__(**kwargs) + assert num_classes == 1, 'currently only support binary classification.' + self.num_classes = num_classes + self.get_boxes_when_training = get_boxes_when_training + self.get_roi_scores = get_roi_scores + self.target_assigner = plugin.build_plugin_module(target_assigner) + self.num_anchors = self.target_assigner.num_anchors + self.code_size = self.target_assigner.box_coder.code_size + self.cls_head = linear_last(in_channels, in_channels * 3, self.num_anchors) + self.reg_head = linear_last(in_channels, in_channels * 3, self.code_size * self.num_anchors) + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + +
[docs] def init_weights(self): + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.utils.init.xavier_uniform_(m) + self._is_init = True
+ +
[docs] def forward(self, stensor_list, **kwargs): + coor, feat, ctr = self.compose_stensor(stensor_list, self.target_assigner.stride) + cls = self.cls_head(feat) + reg = self.reg_head(feat) + + out = {'cls': cls, 'reg': reg, 'ctr': ctr} + + if self.get_roi_scores: + out['scr'] = cls.sigmoid().max(dim=-1).values + + if self.get_boxes_when_training or not self.training: + out['preds'] = self.predictions(coor, out) + + return self.format(out, coor, len(stensor_list))
+ +
[docs] def format(self, output, coor, B): + res_list = [] + for i in range(B): + mask = coor[:, 0] == i + res_dict = {k: v[mask] for k, v in output.items() if k!='preds'} + if 'preds' in output: + preds = {} + mask = output['preds']['idx'] == i + for k, v in output['preds'].items(): + preds[k] = v[mask] + res_dict['preds'] = preds + res_list.append(res_dict) + output = {self.scatter_keys[0]: res_list} + return output
+ +
[docs] def loss(self, preds, stensor_list, gt_boxes, gt_labels, **kwargs): + coor = [x[f'p{self.target_assigner.stride}']['coor'] for x in stensor_list] + pred_cls = self.cat_data_from_list(preds, 'cls') + pred_reg = self.cat_data_from_list(preds, 'reg') + + pred_cls = pred_cls.reshape(-1, self.num_classes) + pred_reg = pred_reg.reshape(-1, self.code_size) + cls_tgt, reg_tgt, _ = multi_apply( + self.target_assigner.assign, coor, gt_boxes) + cls_tgt = torch.cat(cls_tgt, dim=0) + reg_tgt = torch.cat(reg_tgt, dim=0) + + # vis_cls_pred = pred_cls.view(b, w, h, c).softmax(dim=-1).max(dim=-1).values[0] + # vis_cls_tgt = cls_tgt.view(b, w, h, c).max(dim=-1).values[0] + # img = torch.cat([vis_cls_pred, vis_cls_tgt], dim=1).detach().cpu().numpy().T + # import matplotlib.pyplot as plt + # + # plt.imshow(img) + # plt.show() + # plt.close() + + pos_mask = cls_tgt > 0 + cared = cls_tgt >= 0 + avg_factor = max(pos_mask.sum(), 1) + # downsample negative + # neg_inds = torch.where(cls_tgt == 0)[0] + # neg_inds = neg_inds[torch.randperm(len(neg_inds))[:avg_factor * 5]] + # cared[neg_inds] = True + + # focal loss encode the last dim of tgt as background + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = 0 + + if len(cared) != len(pred_cls): + print([x['cls'].shape for x in preds]) + print(cared.shape) + loss_cls = self.loss_cls(pred_cls[cared], labels[cared], + avg_factor=avg_factor) + + reg_preds_sin, reg_tgts_sin = self.add_sin_difference(pred_reg[pos_mask], reg_tgt) + loss_box = self.loss_box(reg_preds_sin, reg_tgts_sin, + avg_factor=avg_factor / reg_preds_sin.shape[-1]) + + return { + 'cls_loss': loss_cls, + 'box_loss': loss_box + }
+ +
[docs] @staticmethod + def add_sin_difference(boxes1, boxes2, dim=6): + assert dim != -1 + rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * \ + torch.cos(boxes2[..., dim:dim + 1]) + rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * \ + torch.sin(boxes2[..., dim:dim + 1]) + + boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, + boxes1[..., dim + 1:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, + boxes2[..., dim + 1:]], dim=-1) + return boxes1, boxes2
+ +
[docs] def predictions(self, coors, preds): + return self.target_assigner.get_predictions(coors, preds)
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/det_center_sparse.html b/docs/_build/html/_modules/cosense3d/modules/heads/det_center_sparse.html new file mode 100644 index 00000000..9bec58f6 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/det_center_sparse.html @@ -0,0 +1,561 @@ + + + + + + cosense3d.modules.heads.det_center_sparse — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.heads.det_center_sparse

+from einops import rearrange
+
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.utils.common import linear_last
+from cosense3d.utils.misc import multi_apply
+from cosense3d.modules.losses import build_loss, pred_to_conf_unc
+from cosense3d.modules.utils.me_utils import *
+from cosense3d.modules.utils.positional_encoding import ratio2coord
+
+
+
[docs]class UnitedClsHead(nn.Module): + def __init__(self, + class_names_each_head, + in_channel, + one_hot_encoding=True, + use_bias=False, + norm='BN', + **kwargs): + super().__init__() + n_cls = sum([len(c) for c in class_names_each_head]) + out_channel = n_cls + 1 if one_hot_encoding else n_cls + self.head = linear_last(in_channel, in_channel, out_channel, use_bias, norm) + +
[docs] def forward(self, x): + return [self.head(x)]
+ + +
[docs]class SeparatedClsHead(nn.Module): + def __init__(self, + class_names_each_head, + in_channel, + one_hot_encoding=True, + use_bias=False, + norm='BN', + **kwargs): + super().__init__() + self.n_head = len(class_names_each_head) + for i, cls_names in enumerate(class_names_each_head): + out_channel = len(cls_names) + if one_hot_encoding: + out_channel += 1 + setattr(self, f'head_{i}', + linear_last(in_channel, in_channel, out_channel, use_bias, norm)) + +
[docs] def forward(self, x): + out = [] + for i in range(self.n_head): + out.append(getattr(self, f'head_{i}')(x)) + return out
+ + +
[docs]class UnitedRegHead(nn.Module): + def __init__(self, + reg_channels, + in_channel, + combine_channels=True, + sigmoid_keys=None, + use_bias=False, + norm='BN', + **kwargs): + super().__init__() + self.combine_channels = combine_channels + self.sigmoid_keys = [] if sigmoid_keys is None else sigmoid_keys + self.reg_channels = {} + for c in reg_channels: + name, channel = c.split(':') + self.reg_channels[name] = int(channel) + + if combine_channels: + out_channel = sum(list(self.reg_channels.values())) + self.head = linear_last(in_channel, in_channel, out_channel, use_bias, norm) + else: + for name, channel in self.reg_channels.items(): + setattr(self, f'head_{name}', + linear_last(in_channel, in_channel, int(channel), use_bias, norm)) + +
[docs] def forward(self, x): + out_dict = {} + if self.combine_channels: + out_tensor = self.head(x) + ptr = 0 + for k, v in self.reg_channels.items(): + out = out_tensor[:, ptr:ptr+v] + if k in self.sigmoid_keys: + out = out.sigmoid() + out_dict[k] = [out] # list compatible with separated head + ptr += v + else: + for k in self.reg_channels.keys(): + out_dict[k] = [getattr(self, f'head_{k}')(x)] + return out_dict
+ + +
[docs]class DetCenterSparse(BaseModule): + def __init__(self, + data_info, + stride, + class_names_each_head, + shared_conv_channel, + cls_head_cfg, + reg_head_cfg, + reg_channels, + cls_assigner, + box_assigner, + loss_cls, + loss_box, + center_threshold=0.5, + generate_roi_scr=False, + norm='BN', + **kwargs): + super(DetCenterSparse, self).__init__(**kwargs) + update_me_essentials(self, data_info, stride) + self.center_threshold = center_threshold + self.n_heads = len(class_names_each_head) + self.class_names_each_head = class_names_each_head + self.generate_roi_scr = generate_roi_scr + self.reg_heads = [] + + self.cls_head = globals()[cls_head_cfg['name']]( + class_names_each_head, + shared_conv_channel, + one_hot_encoding=cls_head_cfg.get('one_hot_encoding', True), + norm=norm + ) + self.reg_head = globals()[reg_head_cfg['name']]( + reg_channels, + shared_conv_channel, + combine_channels=reg_head_cfg['combine_channels'], + sigmoid_keys=reg_head_cfg['sigmoid_keys'], + norm=norm + ) + + self.cls_assigner = plugin.build_plugin_module(cls_assigner) + self.box_assigner = plugin.build_plugin_module(box_assigner) + + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + + self.out_dict = {'cls': []} + for name in self.reg_heads: + self.out_dict[f'reg_{name}'] = [] + + self.temp = 1 + +
[docs] def forward(self, stensor_list, **kwargs): + self.temp += 1 + B = len(stensor_list) + coor, feat, centers = self.format_input(stensor_list) + if centers is not None: + centers = indices2metric(coor, self.voxel_size) + cls = self.cls_head(feat) + reg = self.reg_head(feat) + + out_dict = { + 'ctr': centers, + 'cls': cls, + 'reg': reg, + } + + if self.generate_roi_scr: + is_edl = 'edl' in self.loss_cls.name.lower() + conf = [pred_to_conf_unc(x, self.loss_cls.activation, edl=is_edl)[0] for x in cls] + conf = torch.stack(conf, dim=0).max(dim=0).values + if len(conf) == 0: + print('det_coor', coor.shape) + print('det_feat', feat.shape) + if is_edl: + out_dict['scr'] = conf[:, 1:].max(dim=-1).values + else: + out_dict['scr'] = conf.max(dim=-1).values + if not self.training: + out_dict['preds'], out_dict['conf'] = self.predictions(out_dict) + + return self.format_output(out_dict, B)
+ +
[docs] def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride)
+ +
[docs] def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['ctr'][:, 0] == i + output_new['ctr'].append(output['ctr'][mask, 1:]) + output_new['cls'].append([h_cls[mask] for h_cls in output['cls']]) + output_new['reg'].append({k:[vi[mask] for vi in v] for k, v in output['reg'].items()}) + if 'conf' in output: + output_new['conf'].append(output['conf'][mask]) + if 'scr' in output: + output_new['scr'].append(output['scr'][mask]) + if 'preds' in output: + mask = output['preds']['idx'][:, 0] == i + preds = {} + for k, v in output['preds'].items(): + if k in ['idx', 'box']: + preds[k] = v[mask][:, 1:] + else: + preds[k] = v[mask] + output_new['preds'].append(preds) + + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output
+ +
[docs] def loss(self, batch_list, gt_boxes, gt_labels, gt_mask=None, **kwargs): + epoch = kwargs.get('epoch', 0) + centers = [batch['ctr'] for batch in batch_list] + pred_cls_list = [torch.stack(batch['cls'], dim=0) for batch in batch_list] + if 'scr' in batch_list[0]: + pred_scores = [batch['scr'] for batch in batch_list] + else: + pred_scores = [pred_to_conf_unc(x)[0][..., 1:].sum(dim=-1) for x in pred_cls_list] + if gt_mask is not None: + for i, m in enumerate(gt_mask): + gt_boxes[i] = gt_boxes[i][m] + gt_labels[i] = gt_labels[i][m] + cls_tgt = multi_apply(self.cls_assigner.assign, + centers, gt_boxes, gt_labels, pred_scores, **kwargs) + + # import matplotlib.pyplot as plt + # ctrs_vis = centers[0].detach().cpu().numpy().T + # scrs_vis = pred_cls_list[0][0].softmax(dim=-1).detach().cpu().numpy().T + # gt_vis = (cls_tgt[0] == 1).squeeze().detach().cpu().numpy() + # fig = plt.figure() + # ax = fig.add_subplot() + # ax.scatter(ctrs_vis[0], ctrs_vis[1], c=scrs_vis[1], edgecolors='none', marker='.', vmin=0, vmax=1, cmap='jet') + # ax.scatter(ctrs_vis[0][gt_vis], ctrs_vis[1][gt_vis], c='g', edgecolors='none', marker='.', alpha=0.5) + # plt.show() + # plt.close() + + cls_tgt = torch.cat(cls_tgt, dim=0) + + n_classes = [len(n) for n in self.class_names_each_head] + + # get reg target + box_tgt = self.box_assigner.assign( + self.cat_data_from_list(centers, pad_idx=True), + self.cat_data_from_list(gt_boxes, pad_idx=True), + self.cat_data_from_list(gt_labels) + ) + + ptr = 0 + loss_cls = 0 + loss_box = 0 + for h in range(self.n_heads): + # center loss + cur_cls_src = torch.cat([x[h] for x in pred_cls_list], dim=0).contiguous() + cur_cls_tgt = cls_tgt[..., ptr:ptr+n_classes[h]].contiguous() # one hot foreground labels + + cared = (cur_cls_tgt >= 0).any(dim=-1) + cur_cls_src = cur_cls_src[cared] + cur_cls_tgt = cur_cls_tgt[cared] + ptr += n_classes[h] + + # convert one-hot to labels + cur_labels = torch.zeros_like(cur_cls_tgt[..., 0]).long() + lbl_inds, cls_inds = torch.where(cur_cls_tgt) + if 'edl' in self.loss_cls.name.lower(): + cur_labels[lbl_inds] = cls_inds + 1 + cur_num_cls = n_classes[h] + 1 + avg_factor = None if self.cls_assigner.pos_neg_ratio else max((cur_labels > 0).sum(), 1) + elif 'focal' in self.loss_cls.name.lower(): + cur_num_cls = n_classes[h] + cur_labels += n_classes[h] + cur_labels[lbl_inds] = cls_inds + avg_factor = max(len(cls_inds), 1) + else: + raise NotImplementedError + + # focal loss encode the last dim of tgt as background + # labels = pos_mask.new_full((len(pos_mask),), self.num_classes, dtype=torch.long) + # labels[pos_mask] = 0 + + lcenter = self.loss_cls( + cur_cls_src, + cur_labels, + temp=epoch, + n_cls_override=cur_num_cls, + avg_factor=avg_factor + ) + loss_cls = loss_cls + lcenter + + # reg loss + ind = box_tgt['idx'][h] + if ind.shape[1] > 0: + for reg_name in self.reg_head.reg_channels.keys(): + pred_reg = torch.cat([x['reg'][reg_name][h] for x in batch_list], dim=0) + cur_reg_src = rearrange(pred_reg, 'n d ... -> n ... d').contiguous() + cur_reg_src = cur_reg_src[box_tgt['valid_mask'][h]] + cur_reg_tgt = box_tgt[reg_name][h] # N, C + cur_loss = self.loss_box(cur_reg_src, cur_reg_tgt) + + loss_box = loss_box + cur_loss + + loss_dict = {'ctr_loss': loss_cls, 'box_loss': loss_box} + return loss_dict
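The loss above converts one-hot class targets into integer labels, reserving label 0 for background in the EDL branch. In isolation the conversion looks like this:

import torch

cls_tgt_onehot = torch.tensor([[0, 0],   # background point
                               [1, 0],   # class 1
                               [0, 1]])  # class 2
labels = torch.zeros(cls_tgt_onehot.shape[0], dtype=torch.long)
lbl_inds, cls_inds = torch.where(cls_tgt_onehot)
labels[lbl_inds] = cls_inds + 1
print(labels)  # tensor([0, 1, 2])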
+ +
[docs] def predictions(self, preds): + return self.box_assigner.get_predictions(preds)
+ + +
[docs]class MultiLvlDetCenterSparse(DetCenterSparse): + def __init__(self, nlvls, sparse, *args, **kwargs): + super(MultiLvlDetCenterSparse, self).__init__(*args, **kwargs) + self.nlvls = nlvls + self.sparse = sparse + self.lidar_range_cuda = nn.Parameter(torch.tensor(self.lidar_range), requires_grad=False) + +
[docs] def forward(self, feat_in, **kwargs): + outs_dec, reference_points, reference_inds = self.format_input(feat_in) + + assert outs_dec.isnan().sum() == 0, "found nan in outs_dec." + pos_dim = reference_points.shape[-1] + shape = outs_dec.shape + centers = ratio2coord(reference_points, self.lidar_range_cuda) + + cls = self.cls_head(outs_dec.view(-1, shape[-1])) + reg = self.reg_head(outs_dec.view(-1, shape[-1])) + + cls = torch.stack(cls, dim=0).view(self.n_heads, *shape[:-1], -1) # (nhead, nlvl, nbatch, nsample, ncls) + reg = {k: torch.stack(v, dim=0).view(self.n_heads, *shape[:-1], -1) for k, v in reg.items()} + pred_boxes = self.box_assigner.box_coder.decode( + centers.unsqueeze(0).unsqueeze(0).repeat((self.n_heads, self.nlvls,) + (1,) * len(shape[1:])), reg) + + out_dict = { + 'ctr': centers, + 'cls': cls, + 'reg': reg, + 'pred_boxes': pred_boxes + } + + out_dict['conf'] = pred_to_conf_unc(cls, self.loss_cls.activation)[0] + if 'edl' in self.loss_cls.name.lower(): + out_dict['scr'] = out_dict['conf'][..., 1:].max(dim=-1).values + else: + out_dict['scr'] = out_dict['conf'].max(dim=-1).values + + if not self.training: + out_dict['preds'], _ = self.predictions(out_dict) + + return self.format_output(out_dict, len(feat_in), reference_inds)
+ +
[docs] def format_input(self, feat_in): + if self.sparse: + outs_dec = self.cat_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2) + reference_points = self.cat_data_from_list(feat_in, 'ref_pts', pad_idx=True) + reference_inds = reference_points[..., 0] + reference_points = reference_points[..., 1:] + else: + outs_dec = self.stack_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2, 3) + reference_points = self.stack_data_from_list(feat_in, 'ref_pts') + reference_inds = None + return outs_dec, reference_points, reference_inds
+ +
[docs] def format_output(self, output, B=None, reference_inds=None): + outs = [] + for i in range(B): + if self.sparse: + m = reference_inds == i + else: + m = i + out = { + 'cls': output['cls'][:, :, m], + 'reg': {k: v[:, :, m] for k, v in output['reg'].items()}, + 'ctr': output['ctr'][m], + 'pred_boxes': output['pred_boxes'][:, :, m], + } + if 'scr' in output: + out['scr'] = output['scr'][:, :, m] + if 'preds' in output: + mask = output['preds']['idx'][:, 0] == i + preds = {} + for k, v in output['preds'].items(): + if k in ['idx', 'box']: + preds[k] = v[mask][:, 1:] + else: + preds[k] = v[mask] + out['preds'] = preds + outs.append(out) + + return {self.scatter_keys[0]: outs}
+ +
[docs] def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + epoch = kwargs.get('epoch', 0) + centers = [batch['ctr'] for batch in batch_list for _ in range(self.nlvls)] + pred_cls_list = [x for batch in batch_list for x in batch['cls'].transpose(1, 0)] + pred_scores = [x for batch in batch_list for x in batch['scr'].transpose(1, 0)] + + cls_tgt = multi_apply(self.cls_assigner.assign, + centers, gt_boxes, gt_labels, pred_scores, **kwargs) + cls_tgt = torch.cat(cls_tgt, dim=0) + + n_classes = [len(n) for n in self.class_names_each_head] + + # get reg target + box_tgt = self.box_assigner.assign( + self.cat_data_from_list([batch['ctr'] for batch in batch_list], pad_idx=True), + self.cat_data_from_list(gt_boxes, pad_idx=True), + self.cat_data_from_list(gt_labels) + ) + + ptr = 0 + loss_cls = 0 + loss_box = 0 + for h in range(self.n_heads): + # center loss + cur_cls_src = torch.cat([x[h] for x in pred_cls_list], dim=0).contiguous() + cur_cls_tgt = cls_tgt[..., ptr:ptr+n_classes[h]].contiguous() # one hot foreground labels + + cared = (cur_cls_tgt >= 0).any(dim=-1) + cur_cls_src = cur_cls_src[cared] + cur_cls_tgt = cur_cls_tgt[cared] + ptr += n_classes[h] + + # convert one-hot to labels + cur_labels = torch.zeros_like(cur_cls_tgt[..., 0]).long() + lbl_inds, cls_inds = torch.where(cur_cls_tgt) + cur_labels[lbl_inds] = cls_inds + 1 + + if self.cls_assigner.pos_neg_ratio: + avg_factor = None + else: + avg_factor = max((cur_labels > 0).sum(), 1) + lcenter = self.loss_cls( + cur_cls_src, + cur_labels, + temp=epoch, + n_cls_override=n_classes[h] + 1, + avg_factor=avg_factor + ) + loss_cls = loss_cls + lcenter + + # reg loss + ind = box_tgt['idx'][h] + if ind.shape[1] > 0: + for reg_name, reg_dim in self.reg_head.reg_channels.items(): + pred_reg = torch.cat([x['reg'][reg_name][h].view(-1, reg_dim) for x in batch_list], dim=0) + cur_reg_src = rearrange(pred_reg, 'n d ... -> n ... d').contiguous() + cur_reg_src = cur_reg_src[torch.cat([box_tgt['valid_mask'][h]] * self.nlvls, dim=0)] + cur_reg_tgt = torch.cat([box_tgt[reg_name][h]] * self.nlvls, dim=0) # N, C + cur_loss = self.loss_box(cur_reg_src, cur_reg_tgt) + + loss_box = loss_box + cur_loss + + loss_dict = {'ctr_loss': loss_cls, 'box_loss': loss_box} + return loss_dict
+ +
[docs] def predictions(self, preds): + return self.box_assigner.get_predictions({ + 'ctr': preds['ctr'], + 'cls': preds['cls'][:, -1], + 'reg': {k: v[:, -1] for k, v in preds['reg'].items()} + })
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/det_roi_refine.html b/docs/_build/html/_modules/cosense3d/modules/heads/det_roi_refine.html new file mode 100644 index 00000000..b2b75737 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/det_roi_refine.html @@ -0,0 +1,372 @@ + + + + + + cosense3d.modules.heads.det_roi_refine — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.heads.det_roi_refine

+import copy
+
+import torch.nn as nn
+import torch
+import numpy as np
+from cosense3d.ops import pointnet2_utils
+from cosense3d.utils.pclib import rotate_points_along_z_torch
+from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu
+from cosense3d.utils import box_utils
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.losses.common import (weighted_smooth_l1_loss,
+                                             weighted_sigmoid_binary_cross_entropy)
+
+
+
[docs]class KeypointRoIHead(BaseModule): + def __init__(self, + num_cls, + in_channels, + n_fc_channels, + roi_grid_pool, + target_assigner, + dp_ratio=0.3, + train_from_epoch=0, + **kwargs): + super().__init__(**kwargs) + self.code_size = 7 + self.dp_ratio = dp_ratio + self.train_from_epoch = train_from_epoch + self.target_assigner = plugin.build_plugin_module(target_assigner) + mlps = copy.copy(roi_grid_pool['mlps']) + for k in range(len(mlps)): + mlps[k] = [in_channels] + mlps[k] + + self.roi_grid_pool_layer = pointnet2_utils.StackSAModuleMSG( + radii=roi_grid_pool['pool_radius'], + nsamples=roi_grid_pool['n_sample'], + mlps=mlps, + use_xyz=True, + pool_method=roi_grid_pool['pool_method'], + ) + + grid_size = roi_grid_pool['grid_size'] + self.grid_size = grid_size + c_out = sum([x[-1] for x in mlps]) + pre_channel = grid_size * grid_size * grid_size * c_out + fc_layers = [n_fc_channels] * 2 + self.shared_fc_layers, pre_channel = self._make_fc_layers(pre_channel, + fc_layers) + + self.cls_layers, pre_channel = self._make_fc_layers(pre_channel, + fc_layers, + output_channels= + num_cls) + self.iou_layers, _ = self._make_fc_layers(pre_channel, fc_layers, + output_channels= + num_cls) + self.reg_layers, _ = self._make_fc_layers(pre_channel, fc_layers, + output_channels=num_cls * 7) + + self._init_weights(weight_init='xavier') + + def _init_weights(self, weight_init='xavier'): + if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ + elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ + elif weight_init == 'normal': + init_func = nn.init.normal_ + else: + raise NotImplementedError + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) + else: + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001) + + def _make_fc_layers(self, input_channels, fc_list, output_channels=None): + fc_layers = [] + pre_channel = input_channels + for k in range(len(fc_list)): + fc_layers.extend([ + nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False), + # nn.BatchNorm1d(fc_list[k]), + nn.ReLU() + ]) + pre_channel = fc_list[k] + if self.dp_ratio > 0: + fc_layers.append(nn.Dropout(self.dp_ratio)) + if output_channels is not None: + fc_layers.append( + nn.Conv1d(pre_channel, output_channels, kernel_size=1, + bias=True)) + fc_layers = nn.Sequential(*fc_layers) + return fc_layers, pre_channel + +
[docs] def get_global_grid_points_of_roi(self, rois): + rois = rois.view(-1, rois.shape[-1]) + batch_size_rcnn = rois.shape[0] + + # (B, 6x6x6, 3) + local_roi_grid_points = self.get_dense_grid_points(rois, + batch_size_rcnn, + self.grid_size) + global_roi_grid_points = rotate_points_along_z_torch( + local_roi_grid_points.clone(), rois[:, 6] + ).squeeze(dim=1) + global_center = rois[:, 0:3].clone() + global_roi_grid_points += global_center.unsqueeze(dim=1) + return global_roi_grid_points, local_roi_grid_points
+ +
[docs] @staticmethod + def get_dense_grid_points(rois, batch_size_rcnn, grid_size): + """ + Get the local coordinates of each grid point of an RoI in the coordinate + system of that RoI (the origin lies at the center of the RoI). + """ + faked_features = rois.new_ones((grid_size, grid_size, grid_size)) + dense_idx = torch.stack(torch.where(faked_features), + dim=1) # (N, 3) [x_idx, y_idx, z_idx] + dense_idx = dense_idx.repeat(batch_size_rcnn, 1, + 1).float() # (B, 6x6x6, 3) + + local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] + roi_grid_points = ( + dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze( + dim=1) \ + - (local_roi_size.unsqueeze( + dim=1) / 2) # (B, 6x6x6, 3) + return roi_grid_points
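A worked example of the grid-point formula above: with grid_size=2 and an RoI of size (4, 2, 2), the cell centers land at +/-1 along x and +/-0.5 along y and z, expressed in the RoI's own coordinate frame.

import torch

grid_size = 2
local_roi_size = torch.tensor([4.0, 2.0, 2.0])
idx = torch.stack(torch.where(torch.ones(grid_size, grid_size, grid_size)), dim=1).float()
pts = (idx + 0.5) / grid_size * local_roi_size - local_roi_size / 2
print(pts)  # rows range from [-1.0, -0.5, -0.5] to [1.0, 0.5, 0.5]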
+ +
[docs] def roi_grid_pool(self, preds): + B = len(preds) + rois = torch.cat([p['boxes'] for p in preds], dim=0) + point_features = torch.cat([p['feat'] for p in preds], dim=0) + # (BxN, 6x6x6, 3) + global_roi_grid_points, local_roi_grid_points = \ + self.get_global_grid_points_of_roi(rois) + + xyz = torch.cat([p['coor'] for p in preds], dim=0) + xyz_batch_cnt = xyz.new_zeros(B).int() + for bs_idx in range(B): + xyz_batch_cnt[bs_idx] = len(preds[bs_idx]['coor']) + new_xyz = global_roi_grid_points.view(-1, 3) + new_xyz_batch_cnt = xyz.new_zeros(B).int() + for bs_idx in range(B): + new_xyz_batch_cnt[bs_idx] = len(preds[bs_idx]['boxes']) * self.grid_size ** 3 + + pooled_points, pooled_features = self.roi_grid_pool_layer( + xyz=xyz[:, :3].contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz[:, :3].contiguous(), + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=point_features.contiguous(), # weighted point features + ) # (M1 + M2 ..., C) + # (BxN, 6x6x6, C) + pooled_features = pooled_features.view(-1, self.grid_size ** 3, + pooled_features.shape[-1]) + + return pooled_features
+ +
[docs] def forward(self, preds, **kwargs): + epoch = kwargs.get('epoch', self.train_from_epoch + 1) + if epoch < self.train_from_epoch: + return {self.scatter_keys[0]: [None for _ in preds]} + # RoI aware pooling + pooled_features = self.roi_grid_pool(preds) + + batch_size_rcnn = pooled_features.shape[0] + pooled_features = pooled_features.permute(0, 2, 1). \ + contiguous().view(batch_size_rcnn, -1, self.grid_size, + self.grid_size, + self.grid_size) # (BxN, C, 6, 6, 6) + shared_features = self.shared_fc_layers( + pooled_features.view(batch_size_rcnn, -1, 1)) + rcnn_cls = self.cls_layers(shared_features).transpose( + 1, 2).contiguous().squeeze( dim=1) # (B, 1 or 2) + rcnn_iou = self.iou_layers(shared_features).transpose( + 1, 2).contiguous().squeeze( dim=1) # (B, 1) + rcnn_reg = self.reg_layers(shared_features).transpose( + 1, 2).contiguous().squeeze( dim=1) # (B, C) + + roi_preds = None + if not self.training: + rois = torch.cat([p['boxes'] for p in preds], dim=0) + roi_preds = self.target_assigner.get_predictions( + rcnn_cls, rcnn_iou, rcnn_reg, rois + ) + + idx = 0 + out_list = [] + for p in preds: + num = len(p['boxes']) + out_dict = { + 'rois': p['boxes'], + 'rcnn_cls': rcnn_cls[idx:idx+num], + 'rcnn_iou': rcnn_iou[idx:idx+num], + 'rcnn_reg': rcnn_reg[idx:idx+num], + } + if roi_preds is not None: + out_dict['preds'] = {k: v[idx:idx+num] for k, v in roi_preds.items()} + out_list.append(out_dict) + idx += num + + return {self.scatter_keys[0]: out_list}
+ +
[docs] def loss(self, out, gt_boxes, epoch, **kwargs): + """ + Parameters + ---------- + output_dict : dict + target_dict : dict + """ + if epoch < self.train_from_epoch: + return {} + rois = [x['rois'] for x in out] + label_dict = self.target_assigner.assign(rois, gt_boxes) + + # rcnn out + rcnn_cls = self.cat_data_from_list(out, 'rcnn_cls').view(1, -1, 1) + rcnn_iou = self.cat_data_from_list(out, 'rcnn_iou').view(1, -1, 1) + rcnn_reg = self.cat_data_from_list(out, 'rcnn_reg').view(1, -1, 7) + + tgt_cls = label_dict['cls_tgt'].view(1, -1, 1) + tgt_iou = label_dict['iou_tgt'].view(1, -1, 1) + tgt_reg = label_dict['reg_tgt'].view(1, -1, 7) + + pos_norm = tgt_cls.sum() + # cls loss + loss_cls = weighted_sigmoid_binary_cross_entropy(rcnn_cls, tgt_cls) + + # iou loss + # TODO: also count the negative samples + loss_iou = weighted_smooth_l1_loss(rcnn_iou, tgt_iou, + weights=tgt_cls).mean() + + # regression loss + # Target resampling : Generate a weights mask to force the regressor concentrate on low iou predictions + # sample 50% with iou>0.7 and 50% < 0.7 + weights = torch.ones(tgt_iou.shape, device=tgt_iou.device) + weights[tgt_cls == 0] = 0 + neg = torch.logical_and(tgt_iou < 0.7, tgt_cls != 0) + pos = torch.logical_and(tgt_iou >= 0.7, tgt_cls != 0) + num_neg = int(neg.sum(dim=1)) + num_pos = int(pos.sum(dim=1)) + num_pos_smps = max(num_neg, 2) + pos_indices = torch.where(pos)[1] + not_selsected = torch.randperm(num_pos)[:num_pos - num_pos_smps] + # not_selsected_indices = pos_indices[not_selsected] + weights[:, pos_indices[not_selsected]] = 0 + loss_reg = weighted_smooth_l1_loss(rcnn_reg, tgt_reg, + weights=weights / max(weights.sum(), + 1)).sum() + + loss_dict = { + 'rcnn_cls_loss': loss_cls, + 'rcnn_iou_loss': loss_iou, + 'rcnn_reg_loss': loss_reg, + } + + return loss_dict
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/img_focal.html b/docs/_build/html/_modules/cosense3d/modules/heads/img_focal.html new file mode 100644 index 00000000..370c404b --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/img_focal.html @@ -0,0 +1,321 @@ + + + + + + cosense3d.modules.heads.img_focal — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.heads.img_focal

+import torch
+from torch import nn
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.utils.init import bias_init_with_prob
+from cosense3d.modules.utils.common import inverse_sigmoid, clip_sigmoid
+from cosense3d.utils.box_utils import bbox_xyxy_to_cxcywh
+from cosense3d.utils.iou2d_calculator import bbox_overlaps
+from cosense3d.utils.misc import multi_apply
+from cosense3d.modules.losses import build_loss
+
+
+
[docs]class ImgFocal(BaseModule): + def __init__(self, in_channels, embed_dims, num_classes, center_assigner, box_assigner, + loss_cls2d, loss_centerness, loss_bbox2d, loss_iou2d, loss_centers2d, + with_depth=False, **kwargs): + super().__init__(**kwargs) + self.in_channels = in_channels + self.embed_dims = embed_dims + self.num_classes = num_classes + self.with_depth = with_depth + + self.center_assigner = plugin.build_plugin_module(center_assigner) + self.box_assigner = plugin.build_plugin_module(box_assigner) + + self.loss_cls2d = build_loss(**loss_cls2d) + self.loss_centerness = build_loss(**loss_centerness) + self.loss_bbox2d = build_loss(**loss_bbox2d) + self.loss_iou2d = build_loss(**loss_iou2d) + self.loss_centers2d = build_loss(**loss_centers2d) + + self._init_layers() + + def _init_layers(self): + self.cls = nn.Conv2d(self.embed_dims, self.num_classes, kernel_size=1) + self.shared_cls = nn.Sequential( + nn.Conv2d(self.in_channels, self.embed_dims, kernel_size=(3, 3), padding=1), + nn.GroupNorm(32, num_channels=self.embed_dims), + nn.ReLU(),) + self.centerness = nn.Conv2d(self.embed_dims, 1, kernel_size=1) + bias_init = bias_init_with_prob(0.01) + nn.init.constant_(self.cls.bias, bias_init) + nn.init.constant_(self.centerness.bias, bias_init) + + self.shared_reg = nn.Sequential( + nn.Conv2d(self.in_channels, self.embed_dims, kernel_size=(3, 3), padding=1), + nn.GroupNorm(32, num_channels=self.embed_dims), + nn.ReLU(), ) + self.ltrb = nn.Conv2d(self.embed_dims, 4, kernel_size=1) + self.center2d = nn.Conv2d(self.embed_dims, 2, kernel_size=1) + if self.with_depth: + self.depth = nn.Conv2d(self.embed_dims, 1, kernel_size=1) + +
[docs] def forward(self, img_feat, img_coor, **kwargs): + out_dict = {} + x = self.cat_data_from_list(img_feat) + N, c, h, w = x.shape + n_pixels = h * w + + cls_feat = self.shared_cls(x) + cls = self.cls(cls_feat) + centerness = self.centerness(cls_feat) + cls_logits = cls.permute(0,2,3,1).reshape(-1, n_pixels, self.num_classes) + centerness = centerness.permute(0,2,3,1).reshape(-1, n_pixels, 1) + cls_score = cls_logits.topk(1, dim=2).values[..., 0].view(-1, n_pixels, 1) + sample_weight = cls_score.detach().sigmoid() * centerness.detach().view(-1, n_pixels, 1).sigmoid() + + out_dict.update({ + 'feat_size': [h, w], + 'centerness': centerness, + 'cls_score': cls_score, + 'sample_weight': sample_weight + }) + + + img_coor = self.cat_data_from_list(img_coor) + reg_feat = self.shared_reg(x) + ltrb = self.ltrb(reg_feat).permute(0, 2, 3, 1).contiguous() + ltrb = ltrb.sigmoid() + centers2d_offset = self.center2d(reg_feat).permute(0, 2, 3, 1).contiguous() + centers2d = self.apply_center_offset(img_coor, centers2d_offset) + bboxes = self.apply_ltrb(img_coor, ltrb) + + pred_bboxes = bboxes.view(-1, n_pixels, 4) + pred_centers2d = centers2d.view(-1, n_pixels, 2) + out_dict.update({ + 'pred_boxes': pred_bboxes, + 'pred_centers2d': pred_centers2d + }) + + if self.with_depth: + # TODO + raise NotImplementedError + + return self.format_output(out_dict, img_feat)
+ +
[docs] def format_output(self, out_dict, img_feat): + ptr = 0 + output_list = [] + for imgs in img_feat: + n = imgs.shape[0] + output_list.append({k: v[ptr:ptr+n] for k, v in out_dict.items()}) + ptr += n + return {self.scatter_keys[0]: output_list}
+ +
[docs] def loss(self, batch_list, labels2d, centers2d, bboxes2d, img_size, **kwargs): + feat_size = batch_list[0]['feat_size'] + centerness = self.cat_data_from_list(batch_list, 'centerness') + cls_score = self.cat_data_from_list(batch_list, 'cls_score') + pred_boxes = self.cat_data_from_list(batch_list, 'pred_boxes') + pred_centers2d = self.cat_data_from_list(batch_list, 'pred_centers2d') + labels2d = self.cat_list(labels2d) + centers2d = self.cat_list(centers2d) + bboxes2d = self.cat_list(bboxes2d) + img_size = self.cat_list(img_size) + B = len(img_size) + + num_gts, assigned_gt_inds, assigned_labels = multi_apply( + self.box_assigner.assign, + pred_boxes, cls_score, pred_centers2d, + bboxes2d, labels2d, centers2d, img_size) + + cared_pred_boxes = [] + cared_centers = [] + aligned_bboxes_gt = [] + aligned_centers_gt = [] + aligned_labels = [] + factors = [] + mask = [] + for i, s in enumerate(img_size): + pos_mask = assigned_gt_inds[i] > 0 + mask.append(pos_mask) + pos_inds = assigned_gt_inds[i][pos_mask] - 1 + boxes = pred_boxes[i][pos_mask] + cared_pred_boxes.append(boxes) + factors.append(pred_boxes.new_tensor( + [s[1], s[0], s[1], s[0]]).unsqueeze(0).repeat(boxes.shape[0], 1)) + aligned_bboxes_gt.append(bboxes2d[i][pos_inds]) + cared_centers.append(pred_centers2d[i][pos_mask]) + aligned_centers_gt.append(centers2d[i][pos_inds]) + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = labels2d[i][pos_inds] + aligned_labels.append(labels) + + factors = torch.cat(factors, dim=0) + cared_pred_boxes = torch.cat(cared_pred_boxes, dim=0) + cared_pred_boxes_pix = cared_pred_boxes * factors + cared_centers = torch.cat(cared_centers, dim=0) + factors_inv = 1 / factors + aligned_bboxes_gt = torch.cat(aligned_bboxes_gt, dim=0) + aligned_centers_gt = torch.cat(aligned_centers_gt, dim=0) + aligned_labels = torch.cat(aligned_labels, dim=0) + mask = torch.cat(mask, dim=0) + + loss_iou = self.loss_iou2d(cared_pred_boxes_pix, aligned_bboxes_gt) + + cls_score = cls_score.reshape(-1, cls_score.shape[-1]) + iou_score = torch.zeros_like(cls_score[..., 0]) + iou_score[mask] = bbox_overlaps(aligned_bboxes_gt, cared_pred_boxes_pix, + is_aligned=True).reshape(-1) + cls_avg_factor = max(sum(num_gts), 1) + loss_cls = self.loss_cls2d( + cls_score, (aligned_labels, iou_score.detach()), avg_factor=cls_avg_factor) + + loss_box = self.loss_bbox2d(cared_pred_boxes, aligned_bboxes_gt * factors_inv) + loss_center = self.loss_centers2d(cared_centers, aligned_centers_gt * factors_inv[:, :2]) + + heatmaps = multi_apply(self.center_assigner.assign, centers2d, bboxes2d, + img_size, [img_size[0][0] // feat_size[0]] * B) + heatmaps = torch.stack(heatmaps, dim=0).view(B, -1, 1) + centerness = clip_sigmoid(centerness).view(B, -1, 1) + loss_centerness = self.loss_centerness(centerness, heatmaps, avg_factor=cls_avg_factor) + return { + 'img_cls_loss': loss_cls, + 'img_iou_loss': loss_iou, + 'img_box_loss': loss_box, + 'img_ctr_loss': loss_center, + 'img_ctrness_loss': loss_centerness, + }
+ + +
[docs] @staticmethod + def apply_center_offset(locations, center_offset): + """ + :param locations: (1, H, W, 2) + :param center_offset: (N, H, W, 2) + """ + centers_2d = torch.zeros_like(center_offset) + locations = inverse_sigmoid(locations) + centers_2d[..., 0] = locations[..., 0] + center_offset[..., 0] # x1 + centers_2d[..., 1] = locations[..., 1] + center_offset[..., 1] # y1 + centers_2d = centers_2d.sigmoid() + + return centers_2d
+ +
[docs] @staticmethod + def apply_ltrb(locations, pred_ltrb): + """ + :param locations: (1, H, W, 2) + :param pred_ltrb: (N, H, W, 4) + """ + pred_boxes = torch.zeros_like(pred_ltrb) + pred_boxes[..., 0] = (locations[..., 0] - pred_ltrb[..., 0]) # x1 + pred_boxes[..., 1] = (locations[..., 1] - pred_ltrb[..., 1]) # y1 + pred_boxes[..., 2] = (locations[..., 0] + pred_ltrb[..., 2]) # x2 + pred_boxes[..., 3] = (locations[..., 1] + pred_ltrb[..., 3]) # y2 + min_xy = pred_boxes[..., 0].new_tensor(0) + max_xy = pred_boxes[..., 0].new_tensor(1) + pred_boxes = torch.where(pred_boxes < min_xy, min_xy, pred_boxes) + pred_boxes = torch.where(pred_boxes > max_xy, max_xy, pred_boxes) + pred_boxes = bbox_xyxy_to_cxcywh(pred_boxes) + + return pred_boxes
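A minimal usage sketch of the two static helpers above; tensor shapes follow their docstrings, and the import assumes the cosense3d package is installed (illustrative only, not part of the module):

# Illustrative only: shapes follow the docstrings of apply_ltrb / apply_center_offset.
import torch
from cosense3d.modules.heads.img_focal import ImgFocal

locations = torch.rand(1, 16, 24, 2)       # normalized (x, y) grid locations in [0, 1]
pred_ltrb = torch.rand(4, 16, 24, 4)       # predicted left/top/right/bottom offsets in [0, 1]
boxes = ImgFocal.apply_ltrb(locations, pred_ltrb)            # (4, 16, 24, 4) cxcywh boxes clamped to [0, 1]
offsets = torch.randn(4, 16, 24, 2) * 0.1
centers = ImgFocal.apply_center_offset(locations, offsets)   # (4, 16, 24, 2) refined 2D centers in [0, 1]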
+ + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/lidar_petr_head.html b/docs/_build/html/_modules/cosense3d/modules/heads/lidar_petr_head.html new file mode 100644 index 00000000..eaba5c44 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/lidar_petr_head.html @@ -0,0 +1,220 @@ + + + + + + cosense3d.modules.heads.lidar_petr_head — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.heads.lidar_petr_head

+import torch
+import torch.nn as nn
+
+from cosense3d.modules import BaseModule, plugin
+from cosense3d.modules.utils.misc import SELayer_Linear, MLN
+from cosense3d.modules.utils.positional_encoding import pos2posemb2d
+
+
+
[docs]class LidarPETRHead(BaseModule): + def __init__(self, + in_channels, + transformer, + feature_stride, + lidar_range, + topk=2048, + memory_len=256, + num_query=644, + **kwargs): + super().__init__(**kwargs) + self.transformer = plugin.build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.num_pose_feat = 64 + self.pos_dim = 2 + self.in_channels = in_channels + self.feature_stride = feature_stride + self.topk = topk + self.num_query = num_query + + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + self.reference_points = nn.Embedding(self.num_query, self.pos_dim) + + self._init_layers() + + def _init_layers(self): + self.position_embeding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims * 4), + nn.ReLU(), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat * self.pos_dim, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + +
[docs] def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.init.xavier_uniform_(m.weight) + self._is_init = True
+ +
[docs] def forward(self, rois, bev_feat, memory, **kwargs): + feat, ctr = self.gather_topk(rois, bev_feat) + + pos = ((ctr - self.lidar_range[:2]) / + (self.lidar_range[3:5] - self.lidar_range[:2])) + pos_emb = self.position_embeding(pos2posemb2d(pos, self.num_pose_feat)) + memory = self.memory_embed(feat) + pos_emb = self.featurized_pe(pos_emb, memory) + + reference_points = (self.reference_points.weight).unsqueeze(0).repeat(memory.shape[0], 1, 1) + query_pos = self.query_embedding(pos2posemb2d(reference_points, self.num_pose_feat)) + tgt = torch.zeros_like(query_pos) + outs_dec, _ = self.transformer(memory, tgt, query_pos, pos_emb) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(rois)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def format_input(self, input): + memory = [] + for x in input: + x = x.permute(0, 2, 3, 1).flatten(0, 2) + memory.append(x) + max_l = max([m.shape[0] for m in memory]) + out = x.new_zeros(len(memory), max_l, x.shape[-1]) + mask = x.new_ones(len(memory), max_l) + for i, m in enumerate(memory): + out[i, :len(m)] = m + mask[i, :len(m)] = False + return out, mask
+ +
[docs] def gather_topk(self, rois, bev_feats): + topk_feat, topk_ctr = [], [] + for roi, bev_feat in zip(rois, bev_feats): + ctr = bev_feat[f'p{self.feature_stride}']['ctr'] + feat = bev_feat[f'p{self.feature_stride}']['feat'] + scores = roi['scr'] + if scores.shape[0] < self.topk: + raise NotImplementedError + else: + topk_inds = torch.topk(scores, k=self.topk).indices + topk_ctr.append(ctr[topk_inds]) + topk_feat.append(feat[topk_inds]) + topk_ctr = torch.stack(topk_ctr, dim=0) + topk_feat = torch.stack(topk_feat, dim=0) + return topk_feat, topk_ctr
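The top-k gathering above keeps only the k highest-scoring BEV cells per CAV before feeding them to the transformer; a standalone sketch of that selection with stand-in tensors (independent of the head itself):

# Standalone illustration of the selection done in gather_topk (stand-in tensors).
import torch

scores = torch.rand(5000)            # per-cell confidence from the RoI input ('scr')
ctr = torch.rand(5000, 2) * 100.0    # BEV cell centers (x, y)
feat = torch.rand(5000, 256)         # per-cell features
topk_inds = torch.topk(scores, k=2048).indices
topk_ctr, topk_feat = ctr[topk_inds], feat[topk_inds]    # (2048, 2), (2048, 256)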
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/multitask_head.html b/docs/_build/html/_modules/cosense3d/modules/heads/multitask_head.html new file mode 100644 index 00000000..d61b40d9 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/multitask_head.html @@ -0,0 +1,169 @@ + + + + + + cosense3d.modules.heads.multitask_head — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.heads.multitask_head

+from torch import nn
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules import build_module
+from cosense3d.modules.plugin import build_plugin_module
+
+
+
[docs]class MultiTaskHead(BaseModule): + def __init__(self, + heads, + strides, + losses, + formatting=None, + **kwargs): + super().__init__(**kwargs) + self.losses = losses + modules = [] + gt_keys = set(self.gt_keys) + for i, h in enumerate(heads): + if len(h.get('gt_keys', [])) == 0: + cur_gt_keys = self.gt_keys + else: + cur_gt_keys = h['gt_keys'] + gt_keys.update(set(cur_gt_keys)) + h.update(dict( + stride=strides[i], + gather_keys=self.gather_keys, + scatter_keys=[self.scatter_keys[i]], + gt_keys=cur_gt_keys, + )) + modules.append(build_module(h)) + self.heads = nn.ModuleList(modules) + self.gt_keys = list(gt_keys) + if formatting is None: + self.formatting = [None] * len(self.heads) + else: + assert len(formatting) == len(self.heads) + self.formatting = [] + for fmt in formatting: + self.formatting.append(build_plugin_module(fmt)) + +
[docs] def forward(self, tensor_list, *args, **kwargs): + out = {} + for i, h in enumerate(self.heads): + x = h(tensor_list, *args, **kwargs) + if self.formatting[i] is not None: + for k, v in x.items(): + x[k] = self.formatting[i](x[k]) + out.update(x) + + return out
+ +
[docs] def loss(self, *args, **kwargs): + kl = len(self.scatter_keys) + heads_out = args[:kl] + gt_dict = {k:args[kl+i] for i, k in enumerate(self.gt_keys)} + loss_dict = {} + for i, h in enumerate(self.heads): + if self.losses[i]: + gt_list = [gt_dict[k] for k in h.gt_keys] + loss_dict.update(h.loss(heads_out[i], *gt_list, **kwargs)) + return loss_dict
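A hypothetical configuration sketch for MultiTaskHead, only to illustrate how the heads, strides and losses lists are expected to line up one-to-one; the sub-head type names and their arguments are placeholders, not actual cosense3d configs:

# Placeholder config: 'SubHeadA' / 'SubHeadB' are hypothetical module names.
multitask_head_cfg = dict(
    heads=[dict(type='SubHeadA'), dict(type='SubHeadB')],  # one config dict per sub-head
    strides=[2, 2],         # strides[i] is injected into heads[i] as 'stride'
    losses=[True, False],   # losses[i] decides whether heads[i] contributes to loss()
    formatting=None,        # or a list with one plugin config per sub-head
)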
+
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/nbr_attn_bev.html b/docs/_build/html/_modules/cosense3d/modules/heads/nbr_attn_bev.html new file mode 100644 index 00000000..b64df2ca --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/nbr_attn_bev.html @@ -0,0 +1,295 @@ + + + + + + cosense3d.modules.heads.nbr_attn_bev — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.heads.nbr_attn_bev

+import torch
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.utils.me_utils import *
+from cosense3d.modules.utils.common import pad_r, linear_last, cat_coor_with_idx
+from cosense3d.ops.utils import points_in_boxes_gpu
+from cosense3d.modules.losses.edl import edl_mse_loss, evidence_to_conf_unc
+from cosense3d.modules.utils.nbr_attn import NeighborhoodAttention
+
+
+
[docs]class NbrAttentionBEV(BaseModule): + def __init__(self, + data_info, + in_dim, + stride, + annealing_step, + sampling, + target_assigner=None, + class_names_each_head=None, + **kwargs): + super(NbrAttentionBEV, self).__init__(**kwargs) + self.in_dim = in_dim + self.class_names_each_head = class_names_each_head + self.stride = stride + self.annealing_step = annealing_step + self.sampling = sampling + for k, v in data_info.items(): + setattr(self, k, v) + update_me_essentials(self, data_info, self.stride) + + self.nbr_attn = NeighborhoodAttention(emb_dim=in_dim) + self.reg_layer = linear_last(in_dim, 32, 2, bias=True) + + if class_names_each_head is not None: + from cosense3d.model.utils.target_assigner import TargetAssigner + self.tgt_assigner = TargetAssigner(target_assigner, + class_names_each_head) + +
[docs] def forward(self, stensor_list, **kwargs): + coor, feat, ctr = self.format_input(stensor_list) + centers = indices2metric(coor, self.voxel_size) + reference_points = self.generate_reference_points(centers) + out = self.nbr_attn(feat, coor, reference_points, len(stensor_list)) + reg = self.reg_layer(out) + conf, unc = evidence_to_conf_unc(reg.relu()) + + out_dict = { + 'center': centers, + 'reg': reg, + 'conf': conf, + 'unc': unc + } + + return self.format_output(out_dict, len(stensor_list))
+ +
[docs] def format_input(self, stensor_list): + return self.compose_stensor(stensor_list, self.stride)
+ +
[docs] def format_output(self, output, B=None): + # decompose batch + output_new = {k: [] for k in output.keys()} + for i in range(B): + mask = output['center'][:, 0] == i + output_new['center'].append(output['center'][mask, 1:]) + output_new['reg'].append(output['reg'][mask]) + output_new['conf'].append(output['conf'][mask]) + output_new['unc'].append(output['unc'][mask]) + output = {self.scatter_keys[0]: self.compose_result_list(output_new, B)} + return output
+ +
[docs] def generate_reference_points(self, centers): + if self.training: + reference_points = centers[torch.rand_like(centers[:, 0]) > 0.5] + else: + reference_points = centers + noise = torch.rand_like(reference_points[:, 1:]) * self.voxel_size[0] * self.stride + reference_points[:, 1:] = reference_points[:, 1:] + noise + return reference_points
+ +
[docs] def loss(self, batch_list, gt_boxes, gt_labels, **kwargs): + tgt_pts, tgt_label, valid = self.get_tgt(batch_list, gt_boxes, gt_labels, **kwargs) + epoch_num = kwargs.get('epoch', 0) + reg = self.cat_data_from_list(batch_list, 'reg') + loss_dict = edl_mse_loss(preds=reg[valid], + tgt=tgt_label, + n_cls=2, + temp=epoch_num, + annealing_step=self.annealing_step, + model_label='bev') + return loss_dict
+ +
[docs] @torch.no_grad() + def get_tgt(self, batch_list, gt_boxes, gt_labels, **kwargs): + epoch_num = kwargs.get('epoch', 0) + B = len(batch_list) + tgt_pts = self.cat_data_from_list(batch_list, 'center', pad_idx=True) + boxes = self.cat_data_from_list(gt_boxes, pad_idx=True).clone() + boxes[:, 3] = 0 + pts = pad_r(tgt_pts) + try: + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + boxes[:, 4:6] *= 2 + _, box_idx_of_pts2 = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + except Exception: + print(boxes.shape) + print(pts.shape) + raise + # set area B: dense neg (points in the enlarged boxes) as -1 for down-sampling, differentiate from area C: sparse neg. + tgt_label = - (box_idx_of_pts2 >= 0).int() + tgt_label[box_idx_of_pts >= 0] = 1 + + n_sam = len(boxes) * 50 + if self.sampling['annealing']: + annealing_ratio = epoch_num / self.annealing_step + n_sam = n_sam + annealing_ratio * len(tgt_label) / 50 + # down-sample + mask = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + tgt_label[tgt_label == -1] = 0 # set area B to 0 + + # positive sample annealing + conf = self.cat_data_from_list(batch_list, 'conf') + labeled_pos = tgt_label == 1 + potential_pos = (conf[..., 1] > (1 - annealing_ratio * 0.5)) + unlabeled_potential_pos = torch.logical_and(potential_pos, + torch.logical_not(labeled_pos)) + if self.sampling['topk']: + k = int(labeled_pos.sum().item() * (1 + 30 * annealing_ratio)) + topk = torch.topk(conf[..., 1], k) + is_topk = torch.zeros_like(labeled_pos) + is_topk[topk.indices] = 1 + topk_potential_pos = torch.logical_and(is_topk, unlabeled_potential_pos) + unlabeled_potential_pos = topk_potential_pos + + # set potential positive samples label to ignore + tgt_label[unlabeled_potential_pos] = -1 + else: + mask = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + # mask = torch.ones_like(tgt_label).bool() + tgt_label[tgt_label == -1] = 0 # set area B to 0 + + # get final tgt + tgt_pts = tgt_pts[mask] + tgt_label = tgt_label[mask] + + # from cosense3d.utils.vislib import draw_points_boxes_plt + # boxes_src = batch_dict['objects'][:, [0, 3, 4, 5, 6, 7, 8, 11]] + # ax = draw_points_boxes_plt( + # pc_range=self.lidar_range, + # points=tgt_pts[tgt_pts[:, 0] == 0, 1:].cpu().numpy(), + # boxes_gt=boxes_src[boxes_src[:, 0] == 0, 1:], + # return_ax=True + # ) + # pts_ = tgt_pts[tgt_label==1] + # ax = draw_points_boxes_plt( + # points=pts_[pts_[:, 0] == 0, 1:].cpu().numpy(), + # points_c='r', + # ax=ax, + # return_ax=True, + # ) + # pts_ = tgt_pts[tgt_label==-1] + # draw_points_boxes_plt( + # points=pts_[pts_[:, 0] == 0, 1:].cpu().numpy(), + # points_c='orange', + # filename='/home/yuan/Downloads/tmp1.png', + # ax=ax + # ) + + return tgt_pts, tgt_label, mask
+ +
[docs] @torch.no_grad() + def downsample_tgt_pts(self, tgt_label, max_sam): + selected = torch.ones_like(tgt_label.bool()) + pos = tgt_label == 1 + if pos.sum() > max_sam: + mask = torch.rand_like(tgt_label[pos].float()) < max_sam / pos.sum() + selected[pos] = mask + + neg = tgt_label == 0 + if neg.sum() > max_sam: + mask = torch.rand_like(tgt_label[neg].float()) < max_sam / neg.sum() + selected[neg] = mask + return selected
+ + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/petr_head.html b/docs/_build/html/_modules/cosense3d/modules/heads/petr_head.html new file mode 100644 index 00000000..05ecfaad --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/petr_head.html @@ -0,0 +1,316 @@ + + + + + + cosense3d.modules.heads.petr_head — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.heads.petr_head

+from typing import List
+
+import torch
+from torch import nn
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.plugin import build_plugin_module
+from cosense3d.modules.utils.common import inverse_sigmoid
+from cosense3d.utils.misc import multi_apply
+from cosense3d.utils.box_utils import normalize_bbox, denormalize_bbox
+from cosense3d.modules.losses import build_loss
+
+
+
[docs]class PETRHead(BaseModule): + def __init__(self, + embed_dims, + pc_range, + code_weights, + num_classes, + box_assigner, + loss_cls, + loss_bbox, + loss_iou=None, + num_reg_fcs=2, + num_pred=3, + use_logits=True, + **kwargs): + super().__init__(**kwargs) + self.embed_dims = embed_dims + self.code_size = 10 + self.num_classes = num_classes + self.num_reg_fcs = num_reg_fcs + self.num_pred = num_pred + self.use_logits = use_logits + + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.code_weights = nn.Parameter(torch.tensor(code_weights), requires_grad=False) + + self.box_assigner = build_plugin_module(box_assigner) + + self.loss_cls = build_loss(**loss_cls) + self.loss_bbox = build_loss(**loss_bbox) + if loss_iou is not None: + self.loss_iou = build_loss(**loss_iou) + + self._init_layers() + self.init_weights() + + def _init_layers(self): + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(nn.Linear(self.embed_dims, self.num_classes)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(nn.Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + self.cls_branches = nn.ModuleList( + [fc_cls for _ in range(self.num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(self.num_pred)]) + +
[docs] def init_weights(self): + for m in self.cls_branches: + nn.init.constant_(m[-1].bias, 2.0) + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.init.xavier_uniform_(m.weight) + self._is_init = True
+ +
[docs] def forward(self, feat_in, **kwargs): + outs_dec = self.stack_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2, 3) + reference_points = self.stack_data_from_list(feat_in, 'ref_pts') + pos_dim = reference_points.shape[-1] + outputs_classes = [] + outputs_coords = [] + for lvl in range(len(outs_dec)): + out_dec = outs_dec[lvl] + out_dec = torch.nan_to_num(out_dec) + + pred_cls = self.cls_branches[lvl](out_dec) + pred_reg = self.reg_branches[lvl](out_dec) + + if self.use_logits: + reference = inverse_sigmoid(reference_points.clone()) + pred_reg[..., :pos_dim] += reference + pred_reg[..., :3] = pred_reg[..., :3].sigmoid() + else: + reference = reference_points.clone() + reference[..., :pos_dim] = (reference[..., :pos_dim] * ( + self.pc_range[3:3+pos_dim] - self.pc_range[0:pos_dim]) + + self.pc_range[0:pos_dim]) + pred_reg[..., :pos_dim] = pred_reg[..., :pos_dim] + reference + + outputs_classes.append(pred_cls) + outputs_coords.append(pred_reg) + + all_cls_scores = torch.stack(outputs_classes) + all_bbox_preds = torch.stack(outputs_coords) + if self.use_logits: + all_bbox_preds[..., :3] = (all_bbox_preds[..., :3] * ( + self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3]) + + outs = [ + { + 'all_cls_scores': all_cls_scores[:, i], + 'all_bbox_preds': all_bbox_preds[:, i], + 'ref_pts': reference_points[i] + } for i in range(len(feat_in)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def loss(self, petr_out, gt_boxes, gt_labels, det, **kwargs): + cls_scores = self.stack_data_from_list(petr_out, 'all_cls_scores').flatten(0, 1) + bbox_preds = self.stack_data_from_list(petr_out, 'all_bbox_preds').flatten(0, 1) + gt_boxes = [boxes for boxes in gt_boxes for _ in range(self.num_pred)] + gt_labels = [labels for labels in gt_labels for _ in range(self.num_pred)] + code_weights = [self.code_weights] * len(gt_labels) + + num_gts, assigned_gt_inds, assigned_labels = multi_apply( + self.box_assigner.assign, + bbox_preds, + cls_scores, + gt_boxes, + gt_labels, + code_weights + ) + + cared_pred_boxes = [] + aligned_bboxes_gt = [] + aligned_labels = [] + mask = [] + for i in range(len(cls_scores)): + pos_mask = assigned_gt_inds[i] > 0 + mask.append(pos_mask) + pos_inds = assigned_gt_inds[i][pos_mask] - 1 + boxes = bbox_preds[i][pos_mask] + cared_pred_boxes.append(boxes) + aligned_bboxes_gt.append(gt_boxes[i][pos_inds]) + labels = pos_mask.new_full((len(pos_mask), ), self.num_classes, dtype=torch.long) + labels[pos_mask] = gt_labels[i][pos_inds] + # ignore part of negative samples, set labels of them to -1 + inds = torch.where(labels == self.num_classes)[0] + inds = inds[torch.randperm(len(inds))][pos_mask.sum() * 5] + labels[inds] = -1 + aligned_labels.append(labels) + + # # plot + # if i > 0: + # continue + # ref_pts = petr_out[0]['ref_pts'] + # ref_pts = (ref_pts * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3]) + # ref_pts_pos = ref_pts[pos_mask].detach().cpu().numpy() + # ref_pts = ref_pts.detach().cpu().numpy() + # scores = cls_scores[i].sigmoid().squeeze().detach().cpu().numpy() + # gt_boxes_vis = gt_boxes[i][pos_inds].detach().cpu().numpy() + # pred_boxes_vis = denormalize_bbox(boxes).detach().cpu().numpy() + # det_ctr = det[0]['ctr'].detach().cpu().numpy() + # det_scr = det[0]['scr'].detach().cpu().numpy() + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # fig = plt.figure(figsize=(12, 5)) + # ax = fig.add_subplot() + # # ax.scatter(det_ctr[:, 0], det_ctr[:, 1], c=det_scr, vmin=0, vmax=0.5, s=1) + # ax.scatter(ref_pts_pos[:, 0], ref_pts_pos[:, 1], c='r') + # ax.scatter(ref_pts[:, 0], ref_pts[:, 1], c=scores, s=2) + # ax = draw_points_boxes_plt( + # pc_range=self.pc_range.tolist(), + # boxes_pred=pred_boxes_vis[:, :7], + # boxes_gt=gt_boxes_vis[:, :7], + # ax=ax, + # return_ax=True + # ) + # plt.savefig("/mars/projects20/CoSense3D/cosense3d/logs/stream_lidar/tmp.png") + # plt.close() + + cared_pred_boxes = torch.cat(cared_pred_boxes, dim=0) + aligned_bboxes_gt = torch.cat(aligned_bboxes_gt, dim=0) + aligned_labels = torch.cat(aligned_labels, dim=0) + mask = torch.cat(mask, dim=0) + + cls_avg_factor = max(sum(num_gts), 1) + cared = aligned_labels >= 0 + loss_cls = self.loss_cls(cls_scores.reshape(-1, cls_scores.shape[-1])[cared], + aligned_labels[cared], avg_factor=cls_avg_factor) + + bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1))[mask] + normalized_bbox_targets = normalize_bbox(aligned_bboxes_gt) + isnotnan = torch.isfinite(bbox_preds).all(dim=-1) + bbox_weights = torch.ones_like(cared_pred_boxes) * self.code_weights + loss_box = self.loss_bbox(cared_pred_boxes[isnotnan], + normalized_bbox_targets[isnotnan], + bbox_weights[isnotnan]) + + return { + 'petr_cls_loss': loss_cls, + 'petr_box_loss': loss_box + }
+ + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/heads/query_guided_petr_head.html b/docs/_build/html/_modules/cosense3d/modules/heads/query_guided_petr_head.html new file mode 100644 index 00000000..5cf05002 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/heads/query_guided_petr_head.html @@ -0,0 +1,451 @@ + + + + + + cosense3d.modules.heads.query_guided_petr_head — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.heads.query_guided_petr_head

+from typing import List
+import os
+import torch
+from torch import nn
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.plugin import build_plugin_module
+from cosense3d.modules.utils.common import inverse_sigmoid
+from cosense3d.utils.misc import multi_apply
+from cosense3d.utils.box_utils import normalize_bbox, denormalize_bbox
+from cosense3d.modules.losses import build_loss
+from cosense3d.modules.losses.edl import pred_to_conf_unc
+
+
+
[docs]class QueryGuidedPETRHead(BaseModule): + def __init__(self, + embed_dims, + pc_range, + code_weights, + num_classes, + cls_assigner, + box_assigner, + loss_cls, + loss_box, + num_reg_fcs=3, + num_pred=3, + use_logits=False, + reg_channels=None, + sparse=False, + pred_while_training=False, + **kwargs): + super().__init__(**kwargs) + self.embed_dims = embed_dims + self.reg_channels = {} + if reg_channels is None: + self.code_size = 10 + else: + for c in reg_channels: + name, channel = c.split(':') + self.reg_channels[name] = int(channel) + self.code_size = sum(self.reg_channels.values()) + self.num_classes = num_classes + self.num_reg_fcs = num_reg_fcs + self.num_pred = num_pred + self.use_logits = use_logits + self.sparse = sparse + self.pred_while_training = pred_while_training + + self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False) + self.code_weights = nn.Parameter(torch.tensor(code_weights), requires_grad=False) + + self.box_assigner = build_plugin_module(box_assigner) + self.cls_assigner = build_plugin_module(cls_assigner) + + self.loss_cls = build_loss(**loss_cls) + self.loss_box = build_loss(**loss_box) + self.is_edl = True if 'edl' in self.loss_cls.name.lower() else False + + self._init_layers() + self.init_weights() + + def _init_layers(self): + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(nn.Linear(self.embed_dims, self.num_classes)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(nn.Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + self.cls_branches = nn.ModuleList( + [fc_cls for _ in range(self.num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(self.num_pred)]) + +
[docs] def init_weights(self): + for m in self.cls_branches: + nn.init.xavier_uniform_(m[-1].weight) + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.init.xavier_uniform_(m.weight) + self._is_init = True
+ +
[docs] def forward(self, feat_in, **kwargs): + if self.sparse: + outs_dec = self.cat_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2) + reference_points = self.cat_data_from_list(feat_in, 'ref_pts', pad_idx=True) + reference_inds = reference_points[..., 0] + reference_points = reference_points[..., 1:] + else: + outs_dec = self.stack_data_from_list(feat_in, 'outs_dec').permute(1, 0, 2, 3) + reference_points = self.stack_data_from_list(feat_in, 'ref_pts') + reference_inds = None + pos_dim = reference_points.shape[-1] + assert outs_dec.isnan().sum() == 0, "found nan in outs_dec." + # if outs_dec.isnan().any(): + # print('d') + + outputs_classes = [] + outputs_coords = [] + for lvl in range(len(outs_dec)): + out_dec = outs_dec[lvl] + # out_dec = torch.nan_to_num(out_dec) + + pred_cls = self.cls_branches[lvl](out_dec) + pred_reg = self.reg_branches[lvl](out_dec) + + if self.use_logits: + reference = inverse_sigmoid(reference_points.clone()) + pred_reg[..., :pos_dim] += reference + pred_reg[..., :3] = pred_reg[..., :3].sigmoid() + + outputs_classes.append(pred_cls) + outputs_coords.append(pred_reg) + + all_cls_logits = torch.stack(outputs_classes) + all_bbox_reg = torch.stack(outputs_coords) + if self.use_logits: + all_bbox_reg[..., :3] = (all_bbox_reg[..., :3] * ( + self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3]) + + reference_points = reference_points * (self.pc_range[3:] - self.pc_range[:3]) + self.pc_range[:3] + det_boxes, pred_boxes = self.get_pred_boxes(all_bbox_reg, reference_points) + cls_scores = pred_to_conf_unc(all_cls_logits, self.loss_cls.activation, self.is_edl)[0] + + if self.sparse: + outs = [] + for i in range(len(feat_in)): + mask = reference_inds == i + outs.append( + { + 'all_cls_logits': all_cls_logits[:, mask], + 'all_bbox_reg': all_bbox_reg[:, mask], + 'ref_pts': reference_points[mask], + 'all_cls_scores': cls_scores[:, mask], + 'all_bbox_preds': det_boxes[:, mask], + 'all_bbox_preds_t': pred_boxes[:, mask] if pred_boxes is not None else None, + } + ) + else: + outs = [ + { + 'all_cls_logits': all_cls_logits[:, i], + 'all_bbox_reg': all_bbox_reg[:, i], + 'ref_pts': reference_points[i], + 'all_cls_scores': cls_scores[:, i], + 'all_bbox_preds': det_boxes[:, i], + 'all_bbox_preds_t': pred_boxes[:, i] if pred_boxes is not None else None, + } for i in range(len(feat_in)) + ] + + if self.pred_while_training or not self.training: + dets = self.get_predictions(cls_scores, det_boxes, pred_boxes, batch_inds=reference_inds) + for i, out in enumerate(outs): + out['preds'] = dets[i] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def loss(self, petr_out, gt_boxes_global, gt_labels_global, *args, **kwargs): + aux_dict = {self.gt_keys[2:][i]: x for i, x in enumerate(args)} + epoch = kwargs.get('epoch', 0) + if self.sparse: + cls_scores = torch.cat([x for out in petr_out for x in out['all_cls_logits']], dim=0) + bbox_reg = torch.cat([x for out in petr_out for x in out['all_bbox_reg']], dim=0) + ref_pts = [x['ref_pts'] for x in petr_out for _ in range(self.num_pred)] + else: + cls_scores = self.stack_data_from_list(petr_out, 'all_cls_logits').flatten(0, 1) + bbox_reg = self.stack_data_from_list(petr_out, 'all_bbox_reg').flatten(0, 1) + ref_pts = self.stack_data_from_list(petr_out, 'ref_pts').unsqueeze(1).repeat( + 1, self.num_pred, 1, 1).flatten(0, 1) + gt_boxes_global = [x for x in gt_boxes_global for _ in range(self.num_pred)] + # gt_velos = [x[:, 7:] for x in gt_boxes for _ in range(self.num_pred)] + gt_labels_global = [x for x in gt_labels_global for _ in range(self.num_pred)] + if 'gt_preds' in aux_dict: + gt_preds = [x.transpose(1, 0) for x in aux_dict['gt_preds'] for _ in range(self.num_pred)] + else: + gt_preds = None + + # cls loss + cls_tgt = multi_apply(self.cls_assigner.assign, + ref_pts, gt_boxes_global, gt_labels_global, **kwargs) + cls_src = cls_scores.view(-1, self.num_classes) + + from cosense3d.utils.vislib import draw_points_boxes_plt, plt + points = ref_pts[0].detach().cpu().numpy() + boxes = gt_boxes_global[0][:, :7].detach().cpu().numpy() + scores = petr_out[0]['all_cls_scores'][0] + scores = scores[:, self.num_classes - 1:].squeeze().detach().cpu().numpy() + ax = draw_points_boxes_plt( + pc_range=self.pc_range.tolist(), + boxes_gt=boxes, + return_ax=True + ) + ax.scatter(points[:, 0], points[:, 1], c=scores, cmap='jet', s=3, marker='s', vmin=0.0, vmax=1) + plt.savefig(f"{os.environ['HOME']}/Downloads/tmp.jpg") + plt.close() + + # if kwargs['itr'] % 1 == 0: + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # points = ref_pts[0].detach().cpu().numpy() + # boxes = gt_boxes[0][:, :7].detach().cpu().numpy() + # scores = pred_to_conf_unc( + # cls_scores[0], getattr(self.loss_cls, 'activation'), edl=self.is_edl)[0] + # scores = scores[:, self.num_classes - 1:].squeeze().detach().cpu().numpy() + # ax = draw_points_boxes_plt( + # pc_range=self.pc_range.tolist(), + # boxes_gt=boxes, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], c=scores, cmap='jet', s=3, marker='s', vmin=0.0, vmax=1.0) + # # ax = draw_points_boxes_plt( + # # pc_range=self.pc_range.tolist(), + # # points=points[cls_tgt[0].squeeze().detach().cpu().numpy() > 0], + # # points_c="green", + # # ax=ax, + # # return_ax=True + # # ) + # # ax = draw_points_boxes_plt( + # # pc_range=self.pc_range.tolist(), + # # points=points[scores > 0.5], + # # points_c="magenta", + # # ax=ax, + # # return_ax=True + # # ) + # plt.savefig(f"{os.environ['HOME']}/Downloads/tmp.jpg") + # plt.close() + + cls_tgt = torch.cat(cls_tgt, dim=0) + cared = (cls_tgt >= 0).any(dim=-1) + cls_src = cls_src[cared] + cls_tgt = cls_tgt[cared] + + # convert one-hot to labels( + cur_labels = torch.zeros_like(cls_tgt[..., 0]).long() + lbl_inds, cls_inds = torch.where(cls_tgt) + cur_labels[lbl_inds] = cls_inds + 1 + + avg_factor = max((cur_labels > 0).sum(), 1) + loss_cls = self.loss_cls( + cls_src, + cur_labels, + temp=epoch, + avg_factor=avg_factor + ) + + # box loss + # pad ref pts with batch index + if 'gt_preds' in aux_dict: + gt_preds = self.cat_data_from_list(gt_preds) + box_tgt = self.box_assigner.assign( + 
self.cat_data_from_list(ref_pts, pad_idx=True), + self.cat_data_from_list(gt_boxes_global, pad_idx=True), + self.cat_data_from_list(gt_labels_global), + gt_preds + ) + ind = box_tgt['idx'][0] # only one head + loss_box = 0 + bbox_reg = bbox_reg.view(-1, self.code_size) + if ind.shape[1] > 0: + ptr = 0 + for reg_name, reg_dim in self.reg_channels.items(): + pred_reg = bbox_reg[:, ptr:ptr+reg_dim].contiguous() + if reg_name == 'scr': + pred_reg = pred_reg.sigmoid() + cur_reg_src = pred_reg[box_tgt['valid_mask'][0]] + if reg_name == 'vel': + cur_reg_tgt = box_tgt['vel'][0] * 0.1 + elif reg_name == 'pred': + cur_reg_tgt = box_tgt[reg_name][0] + mask = cur_reg_tgt[..., 0].bool() + cur_reg_src = cur_reg_src[mask] + cur_reg_tgt = cur_reg_tgt[mask, 1:] + else: + cur_reg_tgt = box_tgt[reg_name][0] # N, C + cur_loss = self.loss_box(cur_reg_src, cur_reg_tgt) + + loss_box = loss_box + cur_loss + ptr += reg_dim + + return { + 'cls_loss': loss_cls, + 'box_loss': loss_box, + 'cls_max': pred_to_conf_unc( + cls_src, self.loss_cls.activation, self.is_edl)[0][..., self.num_classes - 1:].max() + }
+ +
[docs] def get_pred_boxes(self, bbox_preds, ref_pts): + reg = {} + + ptr = 0 + for reg_name, reg_dim in self.reg_channels.items(): + reg[reg_name] = bbox_preds[..., ptr:ptr + reg_dim].contiguous() + ptr += reg_dim + + out = self.box_assigner.box_coder.decode(ref_pts[None], reg) + if isinstance(out, tuple): + det, pred = out + else: + det = out + pred = None + return det, pred
+ +
[docs] def get_predictions(self, cls_scores, det_boxes, pred_boxes, batch_inds=None): + if self.is_edl: + scores = cls_scores[-1][..., 1:].sum(dim=-1) + else: + scores = cls_scores[-1].sum(dim=-1) + labels = cls_scores[-1].argmax(dim=-1) + pos = scores > self.box_assigner.center_threshold + + dets = [] + if batch_inds is None: + inds = range(cls_scores.shape[1]) + for i in inds: + dets.append({ + 'box': det_boxes[-1][i][pos[i]], + 'scr': scores[i][pos[i]], + 'lbl': labels[i][pos[i]], + 'idx': torch.ones_like(labels[i][pos[i]]) * i, + }) + else: + inds = batch_inds.unique() + for i in inds: + mask = batch_inds == i + pos_mask = pos[mask] + dets.append({ + 'box': det_boxes[-1][mask][pos_mask], + 'scr': scores[mask][pos_mask], + 'lbl': labels[mask][pos_mask], + 'pred': pred_boxes[-1][mask][pos_mask] if pred_boxes is not None else None, + 'idx': batch_inds[mask][pos_mask].long() + }) + + return dets
+ + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses.html b/docs/_build/html/_modules/cosense3d/modules/losses.html new file mode 100644 index 00000000..04979b42 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses.html @@ -0,0 +1,119 @@ + + + + + + cosense3d.modules.losses — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.losses

+from .focal_loss import *
+from .l1_loss import *
+from .iou_loss import *
+from .edl import *
+from .vanilla_seg_loss import VanillaSegLoss
+
+
+
[docs]def build_loss(type, **kwargs): + return globals()[type](**kwargs)
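build_loss resolves the class by its name string from this module's namespace (populated by the wildcard imports above) and forwards the remaining keyword arguments to its constructor. A minimal usage sketch with the GaussianFocalLoss defined in focal_loss; the argument values are illustrative:

from cosense3d.modules.losses import build_loss

# Instantiates cosense3d.modules.losses.focal_loss.GaussianFocalLoss;
# the keyword arguments are passed straight to its __init__.
loss_centerness = build_loss(type='GaussianFocalLoss', reduction='mean', loss_weight=1.0)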
+ + + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses/base_loss.html b/docs/_build/html/_modules/cosense3d/modules/losses/base_loss.html new file mode 100644 index 00000000..d9679c35 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses/base_loss.html @@ -0,0 +1,180 @@ + + + + + + cosense3d.modules.losses.base_loss — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.losses.base_loss

+import torch
+from torch import nn
+
+
+
[docs]class BaseLoss(nn.Module): + def __init__(self, + reduction: str = 'mean', + activation: str = 'none', + loss_weight: float = 1.0): + """ + :param reduction: (optional) the method to reduce the loss; options are "none", "mean" and "sum". + :param activation: (optional) name of the activation applied to the predictions before computing the loss, e.g. "none", "relu", "exp" or "sigmoid". + :param loss_weight: (optional) the weight of the loss. + """ + super().__init__() + self.reduction = reduction + self.loss_weight = loss_weight + self.activation = activation + + @property + def name(self): + return self.__class__.__name__ +
[docs] def loss(self, *args, **kwargs): + raise NotImplementedError
+ +
[docs] def forward(self, + preds: torch.Tensor, + targets: torch.Tensor, + weight: torch.Tensor=None, + avg_factor: int=None, + reduction_override: str=None, + *args, **kwargs) -> torch.Tensor: + """ + + :param preds: prediction tensor. + :param targets: target tensor. + :param weight: The weight of loss for each + prediction. Defaults to None. + :param avg_factor: Average factor that is used to average + the loss. Defaults to None. + :param reduction_override: The reduction method used to + override the original reduction method of the loss. + Defaults to None. + :param args: additional arguments. + :param kwargs: + :return: weighted loss. + """ + loss = self.loss(preds, targets, *args, **kwargs) + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + if reduction == 'mean': + loss = loss.mean() + elif reduction == 'sum': + loss = loss.sum() + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + # Avoid causing ZeroDivisionError when avg_factor is 0.0, + # i.e., all labels of an image belong to ignore index. + eps = torch.finfo(torch.float32).eps + loss = loss.sum() / (avg_factor + eps) + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return self.loss_weight * loss
+
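To make the weighting and reduction logic of forward concrete, a small illustrative subclass; ToyL1Loss is not part of cosense3d, only a sketch assuming the package is importable:

import torch
from cosense3d.modules.losses.base_loss import BaseLoss

class ToyL1Loss(BaseLoss):           # illustrative subclass, not part of the library
    def loss(self, preds, targets):
        return (preds - targets).abs()     # element-wise; BaseLoss.forward applies weight/reduction

crit = ToyL1Loss(reduction='mean', loss_weight=2.0)
preds, targets = torch.zeros(4), torch.ones(4)
crit(preds, targets)                  # 2.0 * mean(|0 - 1|) = 2.0
crit(preds, targets, avg_factor=8)    # 2.0 * sum(|0 - 1|) / (8 + eps) ≈ 1.0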
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses/common.html b/docs/_build/html/_modules/cosense3d/modules/losses/common.html new file mode 100644 index 00000000..68679266 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses/common.html @@ -0,0 +1,247 @@ + + + + + + cosense3d.modules.losses.common — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.losses.common

+import torch
+import torch.nn.functional as F
+
+
+
[docs]def weighted_smooth_l1_loss(preds, targets, sigma=3.0, weights=None): + diff = preds - targets + abs_diff = torch.abs(diff) + abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff) + loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + \ + (abs_diff - 0.5 / (sigma ** 2)) * (1.0 - abs_diff_lt_1) + if weights is not None: + if len(loss.shape) > len(weights.shape): + weights = weights.unsqueeze(dim=-1) + loss *= weights + return loss
+ + +
[docs]def weighted_l1_loss(preds, targets, sigma=3.0, weights=None): + diff = preds - targets + loss = torch.abs(diff) + if weights is not None: + if len(loss.shape) > len(weights.shape): + weights = weights.unsqueeze(dim=-1) + loss *= weights + return loss
+ + +
[docs]def sigmoid_binary_cross_entropy(preds, tgts, weights=None, reduction='none'): + """ + Parameters + ---------- + preds: Tensor(d1, ..., dn) + tgts: Tensor(d1, ..., dn) + weights: Tensor(d1, ..., dn), optional + reduction: str('none' | 'mean' | 'sum') + + Returns + ------- + Tensor: binary cross entropy with logits, element-wise or reduced according to ``reduction``. + """ + assert preds.shape == tgts.shape + if weights is not None: + assert weights.shape == preds.shape + per_entry_cross_ent = F.binary_cross_entropy_with_logits( + preds, tgts, + weights, reduction=reduction + ) + return per_entry_cross_ent
+ + +
[docs]def weighted_sigmoid_binary_cross_entropy(preds, tgts, weights=None, + class_indices=None): + if weights is not None: + weights = weights.unsqueeze(-1) + if class_indices is not None: + weights *= ( + indices_to_dense_vector(class_indices, preds.shape[2]) + .view(1, 1, -1) + .type_as(preds) + ) + per_entry_cross_ent = F.binary_cross_entropy_with_logits(preds, tgts, weights) + return per_entry_cross_ent
+ + +
[docs]def indices_to_dense_vector( + indices: torch.Tensor, + size: int, + indices_value: float = 1.0, + default_value: float = 0.0 +) -> torch.Tensor: + """ + Creates dense vector with indices set to specific value and rest to zeros. + + This function exists because it is unclear if it is safe to use + tf.sparse_to_dense(indices, [size], 1, validate_indices=False) + with indices which are not ordered. + This function accepts a dynamic size (e.g. `tf.shape(tensor)[0]`) + + :param indices: 1d Tensor with integer indices which are to be set to indices_values. + :param size: size of output Tensor. + :param indices_value: values of elements specified by indices in the output vector. + :param default_value: values of other elements in the output vector. + :return: dense 1D Tensor of shape [size] with indices set to indices_values and the + rest set to default_value. + """ + dense = torch.zeros(size).fill_(default_value) + dense[indices] = indices_value + + return dense
+ + +
[docs]def cross_entroy_with_logits(preds, tgts, n_cls, weights=None, reduction='none'): + cared = tgts >= 0 + preds = preds[cared] + tgts = tgts[cared] + tgt_onehot = torch.zeros((len(tgts), n_cls), device=preds.device) + tgt_onehot[torch.arange(len(tgts), device=tgts.device), tgts.long()] = 1 + + loss = F.cross_entropy(preds, tgt_onehot, weight=weights, reduction=reduction) + return loss
+ + +
[docs]def focal_loss(preds, tgts, weights=None, reduction='none', + gamma=2.0, alpha=0.25, use_sigmoid=True): + """ + + Parameters + ---------- + preds: FloatTensor(..., n_cls) + tgts: FloatTensor(..., n_cls) or LongTensor(...,) or LongTensor(...,1), largest label is background + weights: same as preds or tgts + ------- + """ + assert len(preds.shape) == len(tgts.shape) or len(preds.shape) - 1 == len(tgts.shape) + if use_sigmoid: + pred_sigmoid = torch.sigmoid(preds) + else: + pred_sigmoid = preds + + if preds.shape[-1] != tgts.shape[-1]: + num_classes = preds.size(1) + one_hot_tgts = F.one_hot(tgts, num_classes=num_classes + 1) + one_hot_tgts = one_hot_tgts[:, :num_classes] + else: + one_hot_tgts = tgts + + alpha_weight = one_hot_tgts * alpha + (1 - one_hot_tgts) * (1 - alpha) + pt = one_hot_tgts * (1.0 - pred_sigmoid) + (1.0 - one_hot_tgts) * pred_sigmoid + focal_weight = alpha_weight * torch.pow(pt, gamma) + + bce_loss = torch.clamp(preds, min=0) - preds * one_hot_tgts + \ + torch.log1p(torch.exp(-torch.abs(preds))) + + loss = focal_weight * bce_loss + if weights is None: + return loss + elif weights.shape.__len__() < preds.shape.__len__(): + weights = weights.unsqueeze(-1) + + assert weights.shape.__len__() == loss.shape.__len__() + + return loss * weights
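The bce_loss term above is the numerically stable identity max(x, 0) - x*y + log(1 + exp(-|x|)) for binary cross-entropy with logits; a quick sanity-check sketch against PyTorch's own implementation:

import torch
import torch.nn.functional as F

x = torch.randn(8, 3)                       # logits
y = torch.randint(0, 2, (8, 3)).float()     # binary targets
manual = torch.clamp(x, min=0) - x * y + torch.log1p(torch.exp(-torch.abs(x)))
reference = F.binary_cross_entropy_with_logits(x, y, reduction='none')
assert torch.allclose(manual, reference, atol=1e-6)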
+
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses/edl.html b/docs/_build/html/_modules/cosense3d/modules/losses/edl.html new file mode 100644 index 00000000..1da0e795 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses/edl.html @@ -0,0 +1,304 @@ + + + + + + cosense3d.modules.losses.edl — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.losses.edl

+import torch
+import torch.nn.functional as F
+
+from cosense3d.modules.losses import BaseLoss
+
+
+
[docs]def relu_evidence(y): + return F.relu(y)
+ + +
[docs]def exp_evidence(y): + return torch.exp(torch.clamp(y, -6, 6))
+ + +
[docs]def softplus_evidence(y): + return F.softplus(y)
+ + +
[docs]def kl_divergence(alpha, num_classes): + device = alpha.device + ones = torch.ones([1, num_classes], dtype=torch.float32, device=device) + sum_alpha = torch.sum(alpha, dim=1, keepdim=True) + first_term = ( + torch.lgamma(sum_alpha) + - torch.lgamma(alpha).sum(dim=1, keepdim=True) + # + torch.lgamma(ones).sum(dim=1, keepdim=True) + - torch.lgamma(ones.sum(dim=1, keepdim=True)) + ) + second_term = ( + (alpha - ones) + .mul(torch.digamma(alpha) - torch.digamma(sum_alpha)) + .sum(dim=1, keepdim=True) + ) + kl = first_term + second_term + return kl
+ + +
[docs]def loglikelihood_loss(y, alpha): + S = torch.sum(alpha, dim=1, keepdim=True) + loglikelihood_err = torch.sum((y - (alpha / S)) ** 2, dim=1, keepdim=True) + loglikelihood_var = torch.sum( + alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True + ) + loglikelihood = loglikelihood_err + loglikelihood_var + return loglikelihood
+ + +
[docs]def mse_loss(y, alpha, epoch_num, num_classes, annealing_step): + loglikelihood = loglikelihood_loss(y, alpha) + + annealing_coef = torch.min( + torch.tensor(1.0, dtype=torch.float32), + torch.tensor(epoch_num / annealing_step, dtype=torch.float32), + ) + + kl_alpha = (alpha - 1) * (1 - y) + 1 + kl_div = annealing_coef * kl_divergence(kl_alpha, num_classes) + return loglikelihood + kl_div
+ + +
[docs]def edl_mse_loss(preds, tgt, n_cls, temp, annealing_step, model_label='edl'): + """ + Calculate evidential loss + :param model_label: (str) a name to distinguish edl loss of different modules + :param preds: (N, n_cls) the logits of each class + :param tgt: (N,) labels with values from 0...(n_cls - 1) or (N, n_cls) + :param n_cls: (int) number of classes, including background + :param temp: current temperature for annealing of KL Divergence term of the loss + :param annealing_step: maximum annealing step + :return: + """ + evidence = relu_evidence(preds) + if len(tgt.shape) == 1: + cared = tgt >= 0 + evidence = evidence[cared] + tgt = tgt[cared] + tgt_onehot = F.one_hot(tgt.long(), n_cls).float() + elif len(tgt.shape) == 2 and tgt.shape[1] > 1: + cared = (tgt >= 0).all(dim=-1) + evidence = evidence[cared] + tgt_onehot = tgt[cared] + else: + raise NotImplementedError + alpha = evidence + 1 + loss = mse_loss(tgt_onehot, alpha, temp, n_cls, annealing_step).mean() + + ss = evidence.detach() + tt = tgt_onehot.detach() + acc = (torch.argmax(ss, dim=1) == torch.argmax(tt, dim=1)).sum() / len(tt) * 100 + loss_dict = { + f'{model_label}_loss': loss, + f'{model_label}_ac': acc, + } + + # Uncomment to log recall of all classes + # for cls in [1, 2]: + # loss_dict[f'acc{cls}'] = torch.logical_and( + # torch.argmax(ss, dim=1) == cls, tt == cls).sum() \ + # / max((tt == cls).sum(), 1) * 100 + + return loss_dict
+ + +
[docs]def evidence_to_conf_unc(evidence, edl=True): + if edl: + # used edl loss + alpha = evidence + 1 + S = torch.sum(alpha, dim=-1, keepdim=True) + conf = torch.div(alpha, S) + K = evidence.shape[-1] + unc = torch.div(K, S) + # conf = torch.sqrt(conf * (1 - unc)) + unc = unc.squeeze(dim=-1) + else: + # use entropy as uncertainty + entropy = -evidence * torch.log2(evidence) + unc = entropy.sum(dim=-1) + # conf = torch.sqrt(evidence * (1 - unc.unsqueeze(-1))) + conf = evidence + return conf, unc
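A worked example of the Dirichlet-based mapping in evidence_to_conf_unc (with edl=True): for two classes and evidence [4, 0], alpha = [5, 1] and S = 6, so conf = alpha / S ≈ [0.83, 0.17] and unc = K / S = 2 / 6 ≈ 0.33:

import torch
from cosense3d.modules.losses.edl import evidence_to_conf_unc

evidence = torch.tensor([[4.0, 0.0]])         # strong evidence for class 0
conf, unc = evidence_to_conf_unc(evidence)    # conf ≈ [[0.833, 0.167]], unc ≈ [0.333]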
+ + +
[docs]def pred_to_conf_unc(preds, activation='relu', edl=True): + if callable(activation): + evidence = activation(preds) + elif activation == 'relu': + evidence = relu_evidence(preds) + elif activation == 'exp': + evidence = exp_evidence(preds) + elif activation == 'sigmoid': + evidence = preds.sigmoid() + elif activation == 'softmax': + evidence = preds.softmax(dim=-1) + else: + evidence = preds + + if edl: + alpha = evidence + 1 + S = torch.sum(alpha, dim=-1, keepdim=True) + conf = torch.div(alpha, S) + K = evidence.shape[-1] + unc = torch.div(K, S) + # conf = torch.sqrt(conf * (1 - unc)) + unc = unc.squeeze(dim=-1) + else: + # use entropy as uncertainty + entropy = -evidence * torch.log2(evidence) + unc = entropy.sum(dim=-1) + # conf = torch.sqrt(evidence * (1 - unc.unsqueeze(-1))) + conf = evidence + return conf, unc
+ + +
[docs]class EDLLoss(BaseLoss): + def __init__(self, + n_cls: int, + annealing_step: int, + **kwargs): + """ + Evidential loss. + + :param n_cls: number of classes, including background. + :param annealing_step: maximum temperature annealing step for KL regularization of the EDL loss. + :param kwargs: additional arguments passed to BaseLoss. + """ + super().__init__(**kwargs) + self.n_cls = n_cls + self.annealing_step = annealing_step + if self.activation == 'relu': + self.activation = relu_evidence + elif self.activation == 'exp': + self.activation = exp_evidence + else: + self.activation = None
[docs] def loss(self, preds, tgt, temp, n_cls_override=None): + if self.activation is None: + evidence = preds + else: + evidence = self.activation(preds) + if len(tgt.shape) == 1: + cared = tgt >= 0 + evidence = evidence[cared] + tgt = tgt[cared] + tgt_onehot = F.one_hot(tgt.long(), self.n_cls).float() + elif len(tgt.shape) == 2 and tgt.shape[1] > 1: + cared = (tgt >= 0).all(dim=-1) + evidence = evidence[cared] + tgt_onehot = tgt[cared] + else: + raise NotImplementedError + alpha = evidence + 1 + n_cls = self.n_cls if n_cls_override is None else n_cls_override + loss = mse_loss(tgt_onehot, alpha, temp, n_cls, self.annealing_step) + + return loss
+
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses/focal_loss.html b/docs/_build/html/_modules/cosense3d/modules/losses/focal_loss.html new file mode 100644 index 00000000..6a17a9ad --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses/focal_loss.html @@ -0,0 +1,420 @@ + + + + + + cosense3d.modules.losses.focal_loss — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.modules.losses.focal_loss

+import torch
+import torch.nn.functional as F
+
+from .base_loss import BaseLoss
+
+
+
[docs]def quality_focal_loss(pred: torch.Tensor, + target: tuple([torch.Tensor]), + beta: float = 2.0) -> torch.Tensor: + r""" + Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning + Qualified and Distributed Bounding Boxes for Dense Object Detection + <https://arxiv.org/abs/2006.04388>`_. + + :param pred: Predicted joint representation of classification + and quality (IoU) estimation with shape (N, C), C is the number of + classes. + :param target: Target category label with shape (N,) + and target quality label with shape (N,). + :param beta: The beta parameter for calculating the modulating factor. + Defaults to 2.0. + :return: Loss tensor with shape (N,). + """ + assert len(target) == 2, """target for QFL must be a tuple of two elements, + including category label and quality label, respectively""" + # label denotes the category id, score denotes the quality score + label, score = target + + # negatives are supervised by 0 quality score + pred_sigmoid = pred.sigmoid() + scale_factor = pred_sigmoid + zerolabel = scale_factor.new_zeros(pred.shape) + loss = F.binary_cross_entropy_with_logits( + pred, zerolabel, reduction='none') * scale_factor.pow(beta) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = pred.size(1) + pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) + pos_label = label[pos].long() + # positives are supervised by bbox quality (IoU) score + scale_factor = score[pos] - pred_sigmoid[pos, pos_label] + loss[pos, pos_label] = F.binary_cross_entropy_with_logits( + pred[pos, pos_label], score[pos], + reduction='none') * scale_factor.abs().pow(beta) + + loss = loss.sum(dim=1, keepdim=False) + return loss
+ + +
[docs]def quality_focal_loss_with_prob(pred: torch.Tensor, + target: tuple([torch.Tensor]), + beta: float = 2.0) -> torch.Tensor: + r""" + Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning + Qualified and Distributed Bounding Boxes for Dense Object Detection + <https://arxiv.org/abs/2006.04388>`_. + + :param pred: Predicted joint representation of classification + and quality (IoU) estimation with shape (N, C), C is the number of + classes. + :param target: Target category label with shape (N,) + and target quality label with shape (N,). + :param beta: The beta parameter for calculating the modulating factor. + Defaults to 2.0. + :return: Loss tensor with shape (N,). + """ + assert len(target) == 2, """target for QFL must be a tuple of two elements, + including category label and quality label, respectively""" + # label denotes the category id, score denotes the quality score + label, score = target + + # negatives are supervised by 0 quality score + pred_sigmoid = pred + scale_factor = pred_sigmoid + zerolabel = scale_factor.new_zeros(pred.shape) + loss = F.binary_cross_entropy( + pred, zerolabel, reduction='none') * scale_factor.pow(beta) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = pred.size(1) + pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) + pos_label = label[pos].long() + # positives are supervised by bbox quality (IoU) score + scale_factor = score[pos] - pred_sigmoid[pos, pos_label] + loss[pos, pos_label] = F.binary_cross_entropy( + pred[pos, pos_label], score[pos], + reduction='none') * scale_factor.abs().pow(beta) + + loss = loss.sum(dim=1, keepdim=False) + return loss
+ + +
[docs]class QualityFocalLoss(BaseLoss): + def __init__(self, + use_sigmoid: bool=True, + beta: float=2.0, + activated: bool=False, + **kwargs): + r""" + Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss: + Learning Qualified and Distributed Bounding Boxes for Dense Object + Detection <https://arxiv.org/abs/2006.04388>`_. + + :param use_sigmoid: Whether sigmoid operation is conducted in QFL. + Defaults to True. + :param beta: The beta parameter for calculating the modulating factor. + Defaults to 2.0. + :param activated: (optional) Whether the input is activated. + If True, it means the input has been activated and can be + treated as probabilities. Else, it should be treated as logits. + Defaults to False. + :param kwargs: + """ + super(QualityFocalLoss, self).__init__(**kwargs) + assert use_sigmoid is True, 'Only sigmoid in QFL supported now.' + self.use_sigmoid = use_sigmoid + self.beta = beta + self.activated = activated + +
[docs] def loss(self, pred: torch.Tensor, target: torch.Tensor): + """Forward function. + + :param pred: Predicted joint representation of + classification and quality (IoU) estimation with shape (N, C), + C is the number of classes. + :param target: Target category label with shape + (N,) and target quality label with shape (N,). + :return: loss result. + """ + if self.use_sigmoid: + if self.activated: + loss_cls = quality_focal_loss_with_prob(pred, target, self.beta) + else: + loss_cls = quality_focal_loss(pred, target, self.beta) + else: + raise NotImplementedError + return loss_cls
+ + +
[docs]class GaussianFocalLoss(BaseLoss): + """GaussianFocalLoss is a variant of focal loss. + + More details can be found in the `paper + <https://arxiv.org/abs/1808.01244>`_ + Code is modified from `kp_utils.py + <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501 + Please notice that the target in GaussianFocalLoss is a gaussian heatmap, + not 0/1 binary target. + """ + + def __init__(self, + alpha: float=2.0, + gamma: float=4.0, + reduction: str='mean', + loss_weight: float=1.0): + """ + + :param alpha: Power of prediction. + :param gamma: Power of target for negative samples. + :param reduction: Options are "none", "mean" and "sum". + :param loss_weight: Loss weight of current loss. + """ + super(GaussianFocalLoss, self).__init__() + self.alpha = alpha + self.gamma = gamma + self.reduction = reduction + self.loss_weight = loss_weight + +
[docs] def loss(self, pred: torch.Tensor, target: torch.Tensor): + """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian + distribution. + + :param pred: The prediction. + :param target: The learning target of the prediction + in gaussian distribution. + :return: loss result. + """ + eps = 1e-12 + pos_weights = target.eq(1) + neg_weights = (1 - target).pow(self.gamma) + pos_loss = -(pred + eps).log() * (1 - pred).pow(self.alpha) * pos_weights + neg_loss = -(1 - pred + eps).log() * pred.pow(self.alpha) * neg_weights + return pos_loss + neg_loss
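A standalone sketch of the per-pixel Gaussian focal term computed above; it repeats the formula on toy tensors so the BaseLoss constructor does not have to be assumed, and it presumes the prediction is already an activated probability heatmap.

    import torch

    alpha, gamma, eps = 2.0, 4.0, 1e-12
    pred = torch.rand(1, 64, 64)          # predicted heatmap, values in (0, 1)
    target = torch.zeros(1, 64, 64)
    target[0, 32, 32] = 1.0               # gaussian peak == positive location
    pos_weights = target.eq(1)
    neg_weights = (1 - target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    per_pixel_loss = pos_loss + neg_loss  # same tensor the loss() method above returns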
+ + +
[docs]def py_focal_loss_with_prob(pred: torch.Tensor, + target: torch.Tensor, + gamma: float=2.0, + alpha: float=0.25): + """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_. + Different from `py_sigmoid_focal_loss`, this function accepts probability + as input. + + :param pred: The prediction probability with shape (N, C), + C is the number of classes. + :param target: The learning label of the prediction. + :param gamma: The gamma for calculating the modulating + factor. Defaults to 2.0. + :param alpha: A balanced form for Focal Loss. + Defaults to 0.25. + :return: loss result. + """ + num_classes = pred.size(1) + target = F.one_hot(target, num_classes=num_classes + 1) + target = target[:, :num_classes] + + target = target.type_as(pred) + pt = (1 - pred) * target + pred * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy( + pred, target, reduction='none') * focal_weight + return loss
+ + +
[docs]def py_sigmoid_focal_loss(pred: torch.Tensor, + target: torch.Tensor, + gamma: float=2.0, + alpha: float=0.25): + """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_. + Different from `py_focal_loss_with_prob`, this function accepts raw + (un-sigmoided) logits as input. + + :param pred: The prediction logits with shape (N, C), + C is the number of classes. + :param target: The learning label of the prediction. + :param gamma: The gamma for calculating the modulating + factor. Defaults to 2.0. + :param alpha: A balanced form for Focal Loss. + Defaults to 0.25. + :return: loss result. + """ + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + return loss
+ + +
[docs]class FocalLoss(BaseLoss): + + def __init__(self, + use_sigmoid: bool=True, + gamma: float=2.0, + alpha: float=0.25, + activated: bool=False, + bg_idx: int=None, + **kwargs): + """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ + + :param use_sigmoid: Whether the prediction is + used with sigmoid or softmax. Defaults to True. + :param gamma: The gamma for calculating the modulating + factor. Defaults to 2.0. + :param alpha: A balanced form for Focal Loss. + Defaults to 0.25. + :param activated: Whether the input is activated. + If True, it means the input has been activated and can be + treated as probabilities. Else, it should be treated as logits. + Defaults to False. + :param bg_idx: background class index. + :param kwargs: + """ + super(FocalLoss, self).__init__(**kwargs) + assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.activated = activated + self.bg_idx = bg_idx + if use_sigmoid: + self.activation = 'sigmoid' + elif activated is False: + self.activation = 'softmax' +
[docs] def loss(self, pred: torch.Tensor, target: torch.Tensor, *args, **kwargs): + """ + :param pred: prediction. + :param target: ground truth targets. + :param args: + :param kwargs: + :return: + """ + if self.use_sigmoid: + if self.activated: + calculate_loss_func = py_focal_loss_with_prob + else: + num_classes = pred.size(1) + if isinstance(target, torch.cuda.FloatTensor) and target.ndim == 1: + target = torch.stack([1 - target, target], dim=1) + else: + target = F.one_hot(target, num_classes=num_classes + 1) + if self.bg_idx is None: + target = target[:, :num_classes] + else: + target = target[:, [c for c in range(num_classes + 1) if c != self.bg_idx]] + calculate_loss_func = py_sigmoid_focal_loss + + loss_cls = calculate_loss_func( + pred, + target, + gamma=self.gamma, + alpha=self.alpha) + + else: + raise NotImplementedError + return loss_cls
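A toy call of the functional sigmoid focal loss used by this class; the import path and shapes are illustrative assumptions.

    import torch
    import torch.nn.functional as F
    from cosense3d.modules.losses.focal_loss import py_sigmoid_focal_loss  # assumed module path

    logits = torch.randn(4, 3)                          # raw, un-sigmoided class scores
    labels = torch.tensor([0, 2, 1, 2])
    onehot = F.one_hot(labels, num_classes=3).float()   # same shape as logits
    loss = py_sigmoid_focal_loss(logits, onehot, gamma=2.0, alpha=0.25)
    print(loss.shape)                                   # torch.Size([4, 3]), elementwise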
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses/iou_loss.html b/docs/_build/html/_modules/cosense3d/modules/losses/iou_loss.html new file mode 100644 index 00000000..55e5f880 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses/iou_loss.html @@ -0,0 +1,154 @@

Source code for cosense3d.modules.losses.iou_loss

+from .base_loss import BaseLoss
+from cosense3d.utils.iou2d_calculator import bbox_overlaps
+
+
+
[docs]class IoULoss(BaseLoss): + def __init__(self, mode: str='log', eps:float=1e-6, **kwargs): + """ + + :param mode: Loss scaling mode, including "linear", "square", and "log". + Default: 'log' + :param eps: Eps to avoid log(0). + :param kwargs: + """ + super(IoULoss, self).__init__(**kwargs) + assert mode in ['linear', 'square', 'log'] + self.mode = mode + self.eps = eps + +
[docs] def loss(self, pred, target): + ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=self.eps) + if self.mode == 'linear': + loss = 1 - ious + elif self.mode == 'square': + loss = 1 - ious ** 2 + elif self.mode == 'log': + loss = -ious.log() + else: + raise NotImplementedError + return loss
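A standalone sketch of the three scaling modes applied to an aligned IoU; it uses a plain axis-aligned IoU on toy (x1, y1, x2, y2) boxes instead of the repository's bbox_overlaps helper.

    import torch

    pred = torch.tensor([[0., 0., 2., 2.], [1., 1., 3., 3.]])
    target = torch.tensor([[0., 0., 2., 2.], [0., 0., 2., 2.]])
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    area = lambda b: (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    ious = (inter / (area(pred) + area(target) - inter)).clamp(min=1e-6)
    loss_linear = 1 - ious          # mode 'linear'
    loss_square = 1 - ious ** 2     # mode 'square'
    loss_log = -ious.log()          # mode 'log' (default)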
+ + +
[docs]class GIoULoss(BaseLoss): + def __init__(self, eps: float=1e-7, **kwargs): + """ + + :param eps: Eps to avoid division by zero in the GIoU computation. + :param kwargs: + """ + super(GIoULoss, self).__init__(**kwargs) + self.eps = eps +
[docs] def loss(self, pred, target): + gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=self.eps) + loss = 1 - gious + return loss
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses/l1_loss.html b/docs/_build/html/_modules/cosense3d/modules/losses/l1_loss.html new file mode 100644 index 00000000..82b4032a --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses/l1_loss.html @@ -0,0 +1,144 @@

Source code for cosense3d.modules.losses.l1_loss

+import torch
+from .base_loss import BaseLoss
+
+
+
[docs]class L1Loss(BaseLoss): + +
[docs] def loss(self, pred, target): + if target.numel() == 0: + return pred.sum() * 0 + + assert pred.size() == target.size() + loss = torch.abs(pred - target) + return loss
+ + +
[docs]class SmoothL1Loss(BaseLoss): + def __init__(self, beta: float=1.0, **kwargs): + """ + :param beta: The threshold in the piecewise function. + Defaults to 1.0. + :param kwargs: + """ + super(SmoothL1Loss, self).__init__(**kwargs) + assert beta > 0 + self.beta = beta + +
[docs] def loss(self, pred, target): + if target.numel() == 0: + return pred.sum() * 0 + + assert pred.size() == target.size() + diff = torch.abs(pred - target) + loss = torch.where(diff < self.beta, + 0.5 * diff * diff / self.beta, + diff - 0.5 * self.beta) + return loss
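A quick sanity-check sketch: the piecewise form above matches PyTorch's own smooth_l1_loss when the same beta is used (the built-in beta argument needs a reasonably recent PyTorch).

    import torch
    import torch.nn.functional as F

    pred, target, beta = torch.randn(8), torch.randn(8), 1.0
    diff = torch.abs(pred - target)
    manual = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
    builtin = F.smooth_l1_loss(pred, target, reduction='none', beta=beta)
    assert torch.allclose(manual, builtin)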
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/losses/vanilla_seg_loss.html b/docs/_build/html/_modules/cosense3d/modules/losses/vanilla_seg_loss.html new file mode 100644 index 00000000..76c3fa4b --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/losses/vanilla_seg_loss.html @@ -0,0 +1,171 @@

Source code for cosense3d.modules.losses.vanilla_seg_loss

+import torch
+import torch.nn as nn
+
+from einops import rearrange
+
+
+
[docs]class VanillaSegLoss(nn.Module): + def __init__(self, d_weights, s_weights, d_coe, s_coe, l_weights=50, **kwargs): + super(VanillaSegLoss, self).__init__() + + self.d_weights = d_weights + self.s_weights = s_weights + self.l_weights = l_weights + + self.d_coe = d_coe + self.s_coe = s_coe + + self.loss_func_static = \ + nn.CrossEntropyLoss( + weight=torch.Tensor([1., self.s_weights, self.l_weights]).cuda()) + self.loss_func_dynamic = \ + nn.CrossEntropyLoss( + weight=torch.Tensor([1., self.d_weights]).cuda()) + +
[docs] def forward(self, static_pred=None, dynamic_pred=None, + static_gt=None, dynamic_gt=None): + """ + Perform loss function on the prediction. + + Parameters + ---------- + static_pred : torch.Tensor + Logits of the static segmentation head. + dynamic_pred : torch.Tensor + Logits of the dynamic segmentation head. + static_gt : torch.Tensor + Ground-truth class map for the static head. + dynamic_gt : torch.Tensor + Ground-truth class map for the dynamic head. + + Returns + ------- + Loss dictionary. + """ + loss_dict = {} + + if static_pred is not None: + # during training, only need to compute the ego vehicle's gt loss + # static_gt = rearrange(static_gt, 'b l h w -> (b l) h w') + # static_pred = rearrange(static_pred, 'b l c h w -> (b l) c h w') + static_loss = self.loss_func_static(static_pred, static_gt.long()) + loss_dict['static_loss'] = self.s_coe * static_loss + + if dynamic_pred is not None: + # dynamic_gt = rearrange(dynamic_gt, 'b l h w -> (b l) h w') + # dynamic_pred = rearrange(dynamic_pred, 'b l c h w -> (b l) c h w') + dynamic_loss = self.loss_func_dynamic(dynamic_pred, dynamic_gt.long()) + loss_dict['dynamic_loss'] = self.d_coe * dynamic_loss + + return loss_dict
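A hypothetical usage sketch; the weights, coefficients and tensor sizes are made up, and because the class builds its CrossEntropy weights with .cuda(), the sketch assumes a CUDA device is available.

    import torch
    from cosense3d.modules.losses.vanilla_seg_loss import VanillaSegLoss  # assumed module path

    criterion = VanillaSegLoss(d_weights=5., s_weights=5., d_coe=1., s_coe=1., l_weights=10.)
    static_pred = torch.randn(2, 3, 128, 128).cuda()    # 3 static classes, matching the 3 CE weights
    dynamic_pred = torch.randn(2, 2, 128, 128).cuda()   # 2 dynamic classes
    static_gt = torch.randint(0, 3, (2, 128, 128)).cuda()
    dynamic_gt = torch.randint(0, 2, (2, 128, 128)).cuda()
    losses = criterion(static_pred=static_pred, dynamic_pred=dynamic_pred,
                       static_gt=static_gt, dynamic_gt=dynamic_gt)
    # losses == {'static_loss': ..., 'dynamic_loss': ...}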
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/necks/cpm_composer.html b/docs/_build/html/_modules/cosense3d/modules/necks/cpm_composer.html new file mode 100644 index 00000000..43abc828 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/necks/cpm_composer.html @@ -0,0 +1,128 @@

Source code for cosense3d.modules.necks.cpm_composer

+import torch
+from torch import nn
+
+from cosense3d.modules import BaseModule, plugin
+
+
+
[docs]class KeypointComposer(BaseModule): + def __init__(self, vsa, train_from_epoch=0, **kwargs): + super().__init__(**kwargs) + self.train_from_epoch = train_from_epoch + self.vsa = plugin.build_plugin_module(vsa) + +
[docs] def forward(self, preds, bev_feat, voxel_feat, points, **kwargs): + epoch = kwargs.get('epoch', self.train_from_epoch + 1) + if epoch < self.train_from_epoch: + return {self.scatter_keys[0]: [None for _ in preds]} + + res = self.vsa(preds, bev_feat, voxel_feat, points) + res = self.compose_result_list(res, len(preds)) + return {self.scatter_keys[0]: res}
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/necks/dilation_spconv.html b/docs/_build/html/_modules/cosense3d/modules/necks/dilation_spconv.html new file mode 100644 index 00000000..b8933c61 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/necks/dilation_spconv.html @@ -0,0 +1,268 @@

Source code for cosense3d.modules.necks.dilation_spconv

+import functools
+import torch
+
+from cosense3d.modules import BaseModule, nn
+from cosense3d.modules.utils.me_utils import mink_coor_limit, minkconv_conv_block, ME, indices2metric
+
+
+
[docs]class DilationSpconv(BaseModule): + def __init__(self, data_info, convs, d=2, n_layers=None, **kwargs): + super(DilationSpconv, self).__init__(**kwargs) + self.det_r = data_info.get('det_r', False) + self.lidar_range = data_info.get('lidar_range', False) + self.voxel_size = data_info['voxel_size'] + self.d = d + self.n_layers = n_layers + self.conv_args = convs + self.convs = [] + for k, conv_args in convs.items(): + self.convs.append(k) + setattr(self, f'convs_{k}', self.get_conv_layer(conv_args)) + stride = int(k[1]) + + if self.det_r: + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif self.lidar_range: + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, f'mink_xylim_{k}', mink_coor_limit(lr, self.voxel_size, stride)) # relevant to ME + +
[docs] def to_gpu(self, gpu_id): + self.to(gpu_id) + return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm
+ +
[docs] def forward(self, stensor_list, **kwargs): + out_dict = {} + for k in self.convs: + stride = int(k[1]) + coor, feat, ctr = self.compose_stensor(stensor_list, stride) + stensor2d = ME.SparseTensor( + coordinates=coor[:, :3].contiguous(), + features=feat, + tensor_stride=[stride] * 2 + ) + + stensor2d = getattr(self, f'convs_{k}')(stensor2d) + # after coordinate expansion, some coordinates will exceed the maximum detection + # range, therefore they are removed here. + xylim = getattr(self, f'mink_xylim_{k}') + mask = (stensor2d.C[:, 1] > xylim[0]) & (stensor2d.C[:, 1] <= xylim[1]) & \ + (stensor2d.C[:, 2] > xylim[2]) & (stensor2d.C[:, 2] <= xylim[3]) + + coor = stensor2d.C[mask] + feat = stensor2d.F[mask] + ctr = indices2metric(coor, self.voxel_size)[:, 1:] + + out_dict[k] = { + 'coor': coor, + 'feat': feat, + 'ctr': ctr + } + return self.format_output(out_dict, len(stensor_list))
+ +
[docs] def format_output(self, out_dict, B): + out_list = self.decompose_stensor(out_dict, B) + return {self.scatter_keys[0]: out_list}
+ +
[docs] def get_conv_layer(self, args): + minkconv_layer = functools.partial( + minkconv_conv_block, d=self.d, bn_momentum=0.1, + ) + in_dim = args['in_dim'] + out_dim = args['out_dim'] + layers = [minkconv_layer(in_dim, out_dim, args['kernels'][0], 1, + expand_coordinates=True)] + for ks in args['kernels'][1:]: + layers.append(minkconv_layer(out_dim, out_dim, ks, 1, + expand_coordinates=True)) + if self.n_layers is not None and self.n_layers > len(args['kernels']): + for _ in range(self.n_layers - len(args['kernels'])): + layers.append(minkconv_layer(out_dim, out_dim, 3, 1, + expand_coordinates=False)) + return nn.Sequential(*layers)
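A hypothetical layout of the `convs` argument, inferred from forward/get_conv_layer above: each key is 'p<stride>' (the stride is parsed from the second character of the key) and each entry lists the input/output dims and per-layer kernel sizes. All numbers are made up.

    convs = {
        'p2': dict(in_dim=64, out_dim=64, kernels=[3, 3, 3]),
        'p8': dict(in_dim=128, out_dim=128, kernels=[5, 3]),
    }
    data_info = dict(lidar_range=[-100., -38.4, -3., 100., 38.4, 1.],
                     voxel_size=[0.2, 0.2, 0.2])
    # DilationSpconv(data_info, convs, d=2) then registers `convs_p2` / `convs_p8`
    # MinkowskiEngine blocks and clips their expanded coordinates with mink_xylim_*.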
+ + +
[docs]class DilationSpconvAblation(BaseModule): + def __init__(self, data_info, convs, d=2, n_layers=None, **kwargs): + super(DilationSpconvAblation, self).__init__(**kwargs) + self.det_r = data_info.get('det_r', False) + self.lidar_range = data_info.get('lidar_range', False) + self.voxel_size = data_info['voxel_size'] + self.d = d + self.n_layers = n_layers + self.conv_args = convs + self.convs = [] + for k, conv_args in convs.items(): + self.convs.append(k) + setattr(self, f'convs_{k}', self.get_conv_layer(conv_args)) + stride = int(k[1]) + + if self.det_r: + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif self.lidar_range: + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, f'mink_xylim_{k}', mink_coor_limit(lr, self.voxel_size, stride)) # relevant to ME + +
[docs] def to_gpu(self, gpu_id): + self.to(gpu_id) + return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm
+ +
[docs] def forward(self, stensor_list, **kwargs): + out_dict = {} + for k in self.convs: + stride = int(k[1]) + coor, feat, ctr = self.compose_stensor(stensor_list, stride) + stensor2d = ME.SparseTensor( + coordinates=coor[:, :3].contiguous(), + features=feat, + tensor_stride=[stride] * 2 + ) + + stensor2d = getattr(self, f'convs_{k}')(stensor2d) + # after coordinate expansion, some coordinates will exceed the maximum detection + # range, therefore they are removed here. + xylim = getattr(self, f'mink_xylim_{k}') + mask = (stensor2d.C[:, 1] > xylim[0]) & (stensor2d.C[:, 1] <= xylim[1]) & \ + (stensor2d.C[:, 2] > xylim[2]) & (stensor2d.C[:, 2] <= xylim[3]) + + coor = stensor2d.C[mask] + feat = stensor2d.F[mask] + ctr = indices2metric(coor, self.voxel_size)[:, 1:] + + out_dict[k] = { + 'coor': coor, + 'feat': feat, + 'ctr': ctr + } + return self.format_output(out_dict, len(stensor_list))
+ +
[docs] def format_output(self, out_dict, B): + out_list = self.decompose_stensor(out_dict, B) + return {self.scatter_keys[0]: out_list}
+ +
[docs] def get_conv_layer(self, args): + minkconv_layer = functools.partial( + minkconv_conv_block, d=self.d, bn_momentum=0.1, + ) + in_dim = args['in_dim'] + out_dim = args['out_dim'] + layers = [minkconv_layer(in_dim, out_dim, args['kernels'][0], 1, + expand_coordinates=False)] + for ks in args['kernels'][1:]: + layers.append(minkconv_layer(out_dim, out_dim, ks, 1, + expand_coordinates=False)) + if self.n_layers is not None and self.n_layers > len(args['kernels']): + for _ in range(self.n_layers - len(args['kernels'])): + layers.append(minkconv_layer(out_dim, out_dim, 3, 1, + expand_coordinates=False)) + return nn.Sequential(*layers)
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/necks/formatting.html b/docs/_build/html/_modules/cosense3d/modules/necks/formatting.html new file mode 100644 index 00000000..f72ba67d --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/necks/formatting.html @@ -0,0 +1,264 @@

Source code for cosense3d.modules.necks.formatting

+import torch
+from torch import nn
+
+from cosense3d.modules import BaseModule
+
+
+
[docs]class DenseToSparse(BaseModule): + def __init__(self, + data_info, + strides=None, + **kwargs): + super(DenseToSparse, self).__init__(**kwargs) + self.lidar_range = data_info['lidar_range'] + self.voxel_size = data_info['voxel_size'] + self.strides = strides + +
[docs] def forward(self, *args, **kwargs): + input_dict = {self.gather_keys[i]: x for i, x in enumerate(args)} + out_dict = {} + multi_scale_bev_feat = [] + for x in input_dict['multi_scale_bev_feat']: + tmp = {} + for s in self.strides: + tmp[f'p{s}'] = { + 'ctr': self.get_centers(s, device=x[f'p{s}'].device).flatten(0, 1), + 'feat': x[f'p{s}'].permute(1, 2, 0).flatten(0, 1) + } + multi_scale_bev_feat.append(tmp) + out_dict['multi_scale_bev_feat'] = multi_scale_bev_feat + + det_local_sparse = [] + for x in input_dict['det_local_dense']: + det_local_sparse.append({'scr': x['cls'].max(dim=0).values.flatten()}) + out_dict['det_local_sparse'] = det_local_sparse + + bev_local_sparse = [] + for x in input_dict['bev_local_dense']: + bev_local_sparse.append({'scr': x.max(dim=0).values.flatten()}) + out_dict['bev_local_sparse'] = bev_local_sparse + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # draw_points_boxes_plt( + # pc_range=self.lidar_range, + # points=input_dict['points'][0][:, :3].detach().cpu().numpy(), + # filename="/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/points.png" + # ) + # fig = plt.figure(figsize=(10, 5)) + # ax = fig.add_subplot() + # pts = multi_scale_bev_feat[0]['p2']['ctr'].detach().cpu().numpy() + # # colors = det_local_sparse[0]['scr'].sigmoid().detach().cpu().numpy() + # colors = multi_scale_bev_feat[0]['p2']['feat'].mean(dim=1).detach().cpu().numpy() + # ax.scatter(pts[:, 0], pts[:, 1], c=colors) + # plt.savefig("/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/scores.png") + return out_dict
+ +
[docs] def get_centers(self, stride, device): + pix_x = self.voxel_size[0] * stride + pix_y = self.voxel_size[1] * stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x, device=device) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y, device=device) + pix_y * 0.5 + centers = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + return centers.permute(1, 0, 2)
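A worked toy example of the center-grid computation above: a 4 m x 2 m range with 0.5 m voxels and stride 2 gives 1 m pixels whose centers sit at half-pixel offsets from the range minimum.

    import torch

    lidar_range = [-2., -1., -3., 2., 1., 1.]   # x_min, y_min, z_min, x_max, y_max, z_max
    voxel_size = [0.5, 0.5, 0.5]
    stride = 2
    pix_x = voxel_size[0] * stride              # 1.0
    pix_y = voxel_size[1] * stride              # 1.0
    x = torch.arange(lidar_range[0], lidar_range[3], pix_x) + pix_x * 0.5   # [-1.5, -0.5, 0.5, 1.5]
    y = torch.arange(lidar_range[1], lidar_range[4], pix_y) + pix_y * 0.5   # [-0.5, 0.5]
    centers = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1)      # (4, 2, 2); permuted to (2, 4, 2) above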
+ + +
[docs]class DetDenseToSparse(nn.Module): + def __init__(self, + data_info, + stride, + **kwargs): + super(DetDenseToSparse, self).__init__(**kwargs) + self.lidar_range = data_info['lidar_range'] + self.voxel_size = data_info['voxel_size'] + self.stride = stride + +
[docs] def forward(self, input): + out_list = [] + for x in input: + # select the max of two anchors at each position + h, w = x['cls'].shape[1:] + cls, max_inds = x['cls'].permute(0, 2, 1).max(dim=0) + scr = cls.sigmoid() + reg = x['reg'].view(x['cls'].shape[0], -1, h, w).permute(3, 2, 0, 1) + ctr = self.get_centers() + out_list.append({ + 'ctr': ctr.flatten(0, 1), + 'cls': cls.flatten(0, 1), + 'reg': reg.flatten(0, 1), + 'scr': scr.flatten(0, 1) + }) + + return out_list
+ +
[docs] def get_centers(self): + pix_x = self.voxel_size[0] * self.stride + pix_y = self.voxel_size[1] * self.stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y) + pix_y * 0.5 + centers = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + return centers
+ + +
[docs]class FPVRCNNToLTS(BaseModule): + def __init__(self, + data_info, + strides=None, + **kwargs): + super(FPVRCNNToLTS, self).__init__(**kwargs) + self.lidar_range = data_info['lidar_range'] + self.voxel_size = data_info['voxel_size'] + +
[docs] def forward(self, *args, **kwargs): + input_dict = {self.gather_keys[i]: x for i, x in enumerate(args)} + out_dict = {} + multi_scale_feat = [] + roi_local = [] + for x, y in zip(input_dict['multi_scale_bev_feat'], input_dict['keypoint_feat']): + multi_scale_feat.append({ + 'p2': { + 'ctr': y['point_coords'][:, 1:4], + 'feat': y['point_features'] + }, + 'p8': { + 'ctr': self.get_centers(32, device=x[f'p32'].device).flatten(0, 1), + 'feat': x['p32'].permute(1, 2, 0).flatten(0, 1) + } + }) + roi_local.append({'scr': y['point_scores']}) + out_dict['multi_scale_feat'] = multi_scale_feat + out_dict['roi_local'] = roi_local + + bev_local_sparse = [] + for x in input_dict['bev_local_dense']: + bev_local_sparse.append({'scr': x.max(dim=0).values.flatten()}) + out_dict['roi_global'] = bev_local_sparse + + # from cosense3d.utils.vislib import draw_points_boxes_plt, plt + # draw_points_boxes_plt( + # pc_range=self.lidar_range, + # points=input_dict['points'][0][:, :3].detach().cpu().numpy(), + # filename="/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/points.png" + # ) + # fig = plt.figure(figsize=(10, 5)) + # ax = fig.add_subplot() + # pts = multi_scale_bev_feat[0]['p2']['ctr'].detach().cpu().numpy() + # # colors = det_local_sparse[0]['scr'].sigmoid().detach().cpu().numpy() + # colors = multi_scale_bev_feat[0]['p2']['feat'].mean(dim=1).detach().cpu().numpy() + # ax.scatter(pts[:, 0], pts[:, 1], c=colors) + # plt.savefig("/media/yuan/luna/streamLTS/LTS_fcooper_dairv2x/scores.png") + return out_dict
+ +
[docs] def get_centers(self, stride, device): + pix_x = self.voxel_size[0] * stride + pix_y = self.voxel_size[1] * stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x, device=device) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y, device=device) + pix_y * 0.5 + centers = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + return centers.permute(1, 0, 2)
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin.html b/docs/_build/html/_modules/cosense3d/modules/plugin.html new file mode 100644 index 00000000..96baf030 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin.html @@ -0,0 +1,201 @@

Source code for cosense3d.modules.plugin

+# Copyright (c) OpenMMLab. All rights reserved. Modified by Yunshuang Yuan.
+import inspect
+from typing import Dict, Tuple, Union
+from importlib import import_module
+
+import torch.nn as nn
+import re  # type: ignore
+
+
+
[docs]def infer_abbr(class_type: type) -> str: + """Infer abbreviation from the class name. + + This method will infer the abbreviation to map class types to + abbreviations. + + Rule 1: If the class has the property "abbr", return the property. + Rule 2: Otherwise, the abbreviation falls back to snake case of class + name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``. + + :param class_type: The norm layer type. + :return: The inferred abbreviation. + """ + + def camel2snack(word): + """Convert camel case word into snack case. + + Modified from `inflection lib + <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_. + + Example:: + + >>> camel2snack("FancyBlock") + 'fancy_block' + """ + + word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word) + word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word) + word = word.replace('-', '_') + return word.lower() + + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ # type: ignore + else: + return camel2snack(class_type.__name__)
+ + +
[docs]def build_plugin_layer(cfg: Dict, + postfix: Union[int, str] = '', + **kwargs) -> Tuple[str, nn.Module]: + """Build plugin layer. + + :param cfg: cfg should contain: + + - type (str): identify plugin layer type. + - layer args: args needed to instantiate a plugin layer. + :param postfix: appended into norm abbreviation to + create named layer. Default: ''. + :param kwargs: + :return: The first one is the concatenation of + abbreviation and postfix. The second is the created plugin layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + try: + pkg, cls = layer_type.rsplit('.', 1) + plugin_layer = getattr(import_module(pkg), cls) + except: + raise KeyError(f'Unrecognized plugin type {layer_type}') + + abbr = infer_abbr(plugin_layer) + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + layer = plugin_layer(**kwargs, **cfg_) + + return name, layer
+ + +
[docs]def build_plugin_module(cfg: Dict): + cfg_ = cfg.copy() + type_ = cfg_.pop('type') + module_name, cls_name = type_.split('.') + module = import_module(f'{__package__}.{module_name}') + cls_inst = getattr(module, cls_name)(**cfg_) + return cls_inst
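A hypothetical config for build_plugin_module: `type` is '<submodule>.<ClassName>' relative to this package, so the entry below would import cosense3d.modules.plugin.attn and instantiate NeighborhoodAttention. The data_info values are made up and the instantiation is left commented because its exact requirements depend on update_me_essentials.

    cfg = dict(
        type='attn.NeighborhoodAttention',
        data_info=dict(lidar_range=[-100., -38.4, -3., 100., 38.4, 1.],
                       voxel_size=[0.2, 0.2, 0.2]),
        stride=2,
        emb_dim=128,
    )
    # attn_layer = build_plugin_module(cfg)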
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/attn.html b/docs/_build/html/_modules/cosense3d/modules/plugin/attn.html new file mode 100644 index 00000000..46c64f32 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/attn.html @@ -0,0 +1,253 @@

Source code for cosense3d.modules.plugin.attn

+import torch
+import torch.nn.functional as F
+import numpy as np
+from torch import nn
+
+from cosense3d.modules.utils.misc import SELayer_Linear
+from cosense3d.modules.utils.positional_encoding import pos2posemb2d
+from cosense3d.modules.utils.me_utils import indices2metric, metric2indices, update_me_essentials
+
+
+
[docs]class ScaledDotProductAttention(nn.Module): + """ + Scaled Dot-Product Attention proposed in "Attention Is All You Need" + Compute the dot products of the query with all keys, divide each by sqrt(dim), + and apply a softmax function to obtain the weights on the values + """ + def __init__(self, dim: int): + """ + :param dim: dimension of attention + """ + super(ScaledDotProductAttention, self).__init__() + self.sqrt_dim = np.sqrt(dim)
[docs] def forward(self, query, key, value): + """ + :param query: (batch, q_len, d_model) tensor containing projection vector for decoder. + :param key: (batch, k_len, d_model) tensor containing projection vector for encoder. + :param value: (batch, v_len, d_model) tensor containing features of the encoded input sequence. + :return: context, attn + - **context**: tensor containing the context vector from attention mechanism. + - **attn**: tensor containing the attention (alignment) from the encoder outputs. + """ + score = torch.bmm(query, key.transpose(1, 2)) / self.sqrt_dim + attn = F.softmax(score, -1) + context = torch.bmm(attn, value) + return context
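A minimal usage sketch of the attention block above with toy shapes; note the value length must match the key length.

    import torch
    from cosense3d.modules.plugin.attn import ScaledDotProductAttention

    attn = ScaledDotProductAttention(dim=64)
    query = torch.randn(2, 10, 64)   # (batch, q_len, d_model)
    key = torch.randn(2, 20, 64)     # (batch, k_len, d_model)
    value = torch.randn(2, 20, 64)   # (batch, k_len, d_model)
    context = attn(query, key, value)
    print(context.shape)             # torch.Size([2, 10, 64])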
+ + +
[docs]class NeighborhoodAttention(nn.Module): + def __init__(self, data_info, stride, emb_dim=128): + super().__init__() + self.stride = stride + update_me_essentials(self, data_info, self.stride) + self.lr = nn.Parameter(torch.tensor(self.lidar_range), requires_grad=False) + self.vs = nn.Parameter(torch.tensor(self.voxel_size), requires_grad=False) + # self.grid_size = ( + # round((lr[3] - lr[0]) / vs[0] / stride), + # round((lr[4] - lr[1]) / vs[1] / stride), + # ) + self.emb_dim = emb_dim + self.num_pos_feat = emb_dim // 2 + self.sqrt_dim = np.sqrt(emb_dim) + x = torch.arange(-1, 2) + self.nbrs = torch.stack(torch.meshgrid(x, x, indexing='ij'), + dim=-1).reshape(-1, 2) + self.nbrs = nn.Parameter(self.nbrs, requires_grad=False) + self.n_nbrs = len(self.nbrs) + + self.query_pos_encoder = nn.Sequential( + nn.Linear(emb_dim, emb_dim * 2), + nn.ReLU(), + nn.Linear(emb_dim * 2, emb_dim), + ) + self.value_pos_encoder = nn.Sequential( + nn.Linear(emb_dim, emb_dim * 2), + nn.ReLU(), + nn.Linear(emb_dim * 2, emb_dim), + ) + self.featurized_pe = SELayer_Linear(emb_dim) + +
[docs] def coor_to_indices(self, coor): + inds = coor.clone() + inds[:, 1] = inds[:, 1] / self.stride - self.offset_sz_x + inds[:, 2] = inds[:, 2] / self.stride - self.offset_sz_y + return inds.long()
+ +
[docs] def forward(self, ref_pts, ctr_coor, ctr_feat): + """ + + Parameters + ---------- + ref_pts LongTensor(Q, 3): 2d coordinates in metrics(batch_idx, x, y) + ctr_coor LongTensor(V, 3): 2d coordinates in indices (batch_idx, x, y) + ctr_feat FloatTensor(V, d): bev grid center point features + + Returns + ------- + out_features FloatTensor(Q, d): attended features + """ + Q = ref_pts.shape[0] + V, Vd = ctr_feat.shape + + ctr_pts = indices2metric(ctr_coor, self.vs) + ctr_inds = self.coor_to_indices(ctr_coor) + ref_coor = metric2indices(ref_pts, self.vs) + ref_inds = self.coor_to_indices(ref_coor) + query_pos = (ref_pts[:, 1:] - self.lr[0:2]) / (self.lr[3:5] - self.lr[0:2]) + value_pos = (ctr_pts[:, 1:] - self.lr[0:2]) / (self.lr[3:5] - self.lr[0:2]) + + qpos_emb = self.query_pos_encoder( + pos2posemb2d(query_pos, num_pos_feats=self.num_pos_feat)) + vpos_emb = self.value_pos_encoder( + pos2posemb2d(value_pos, num_pos_feats=self.num_pos_feat)) + vpos_emb = self.featurized_pe(vpos_emb, ctr_feat) + + q_inds, v_inds = self.get_nbr_mapping(ref_inds, ctr_inds) + # pad pos_embs with zeros at the 1st entry + # points outside the grid will retrieve the embedding in the 1st padded row + qpos_emb = torch.cat([torch.zeros_like(qpos_emb[:1]), qpos_emb], dim=0) + vpos_emb = torch.cat([torch.zeros_like(vpos_emb[:1]), vpos_emb], dim=0) + ctr_feat = torch.cat([torch.zeros_like(ctr_feat[:1]), ctr_feat], dim=0) + + score = (qpos_emb[q_inds] * vpos_emb[v_inds]).sum(dim=-1) / self.sqrt_dim + attn = F.softmax(score.view(-1, self.n_nbrs), dim=-1) + context = attn.unsqueeze(-1) * ctr_feat[v_inds].view(-1, self.n_nbrs, Vd) + return context.sum(1)
+ +
[docs] def get_nbr_mapping(self, query_pos, value_pos): + B = query_pos[:, 0].max() + 1 + pad_width = 2 + query_pos[:, 1:] += pad_width + value_pos[:, 1:] += pad_width + query_inds = torch.arange(len(query_pos), dtype=torch.long) + value_inds = torch.arange(len(value_pos), dtype=torch.long) + + # index -1 indicates that this nbr is outside the grid range + value_map = - torch.ones((B, self.size_x + pad_width * 2, + self.size_y + pad_width * 2), dtype=torch.long) + value_map[value_pos[:, 0], + value_pos[:, 1], + value_pos[:, 2]] = value_inds + + query_inds_nbrs = query_pos.unsqueeze(dim=1).repeat(1, self.n_nbrs, 1) + query_inds_nbrs[..., 1:] += self.nbrs.view(1, -1, 2) + query_inds_nbrs = query_inds_nbrs.view(-1, 3) + mask = ((query_inds_nbrs >= 0).all(dim=-1) & + (query_inds_nbrs[:, 1] < self.size_x + pad_width * 2) & + (query_inds_nbrs[:, 2] < self.size_y + pad_width * 2)) + assert torch.logical_not(mask).sum() == 0 + query_inds_mapped = query_inds.unsqueeze(1).repeat(1, self.n_nbrs).view(-1) + value_inds_mapped = value_map[query_inds_nbrs[:, 0], + query_inds_nbrs[:, 1], + query_inds_nbrs[:, 2]] + # shift the overall indices by 1 step, index -1 will then become 0 + return query_inds_mapped + 1, value_inds_mapped + 1
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/bev_rpn.html b/docs/_build/html/_modules/cosense3d/modules/plugin/bev_rpn.html new file mode 100644 index 00000000..5756f9b8 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/bev_rpn.html @@ -0,0 +1,211 @@

Source code for cosense3d.modules.plugin.bev_rpn

+import torch
+from torch import nn
+import torch.nn.functional as F
+
+
+
[docs]class Conv2d(nn.Module): + + def __init__(self, in_channels, out_channels, k, s, p, activation=True, + batch_norm=True): + super(Conv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=k, + stride=s, padding=p) + if batch_norm: + self.bn = nn.BatchNorm2d(out_channels) + else: + self.bn = None + self.activation = activation + +
[docs] def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + if self.activation: + return F.relu(x, inplace=True) + else: + return x
+ + +
[docs]class RPN(nn.Module): + def __init__(self, anchor_num=2): + super(RPN, self).__init__() + self.anchor_num = anchor_num + + self.block_1 = [Conv2d(128, 128, 3, 2, 1)] + self.block_1 += [Conv2d(128, 128, 3, 1, 1) for _ in range(3)] + self.block_1 = nn.Sequential(*self.block_1) + + self.block_2 = [Conv2d(128, 128, 3, 2, 1)] + self.block_2 += [Conv2d(128, 128, 3, 1, 1) for _ in range(5)] + self.block_2 = nn.Sequential(*self.block_2) + + self.block_3 = [Conv2d(128, 256, 3, 2, 1)] + self.block_3 += [nn.Conv2d(256, 256, 3, 1, 1) for _ in range(5)] + self.block_3 = nn.Sequential(*self.block_3) + + self.deconv_1 = nn.Sequential(nn.ConvTranspose2d(256, 256, 4, 4, 0), + nn.BatchNorm2d(256)) + self.deconv_2 = nn.Sequential(nn.ConvTranspose2d(128, 256, 2, 2, 0), + nn.BatchNorm2d(256)) + self.deconv_3 = nn.Sequential(nn.ConvTranspose2d(128, 256, 1, 1, 0), + nn.BatchNorm2d(256)) + +
[docs] def forward(self, x): + x = self.block_1(x) + x_skip_1 = x + x = self.block_2(x) + x_skip_2 = x + x = self.block_3(x) + x_0 = self.deconv_1(x) + x_1 = self.deconv_2(x_skip_2) + x_2 = self.deconv_3(x_skip_1) + x = torch.cat((x_0, x_1, x_2), 1) + return x
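A shape sketch for the RPN above: three stride-2 blocks followed by the three deconvs bring every branch back to half the input resolution, so the concatenated output carries 3 x 256 = 768 channels. Input sizes are made up.

    import torch
    from cosense3d.modules.plugin.bev_rpn import RPN

    rpn = RPN(anchor_num=2)
    x = torch.randn(1, 128, 64, 64)
    out = rpn(x)
    print(out.shape)   # torch.Size([1, 768, 32, 32])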
+ + +
[docs]class CustomRPN(nn.Module): + def __init__(self, strides=[2, 2, 2], down_sample=2, num_layers=3, in_channels=128, out_channels=256): + super(CustomRPN, self).__init__() + self.strides = strides + mid_channels = in_channels * 2 + self.n_blocks = len(strides) + up_stride = 1 + + for i, s in enumerate(self.strides): + channels = mid_channels if i == self.n_blocks - 1 else in_channels + block = [Conv2d(in_channels, channels, 3, s, 1)] + block += [Conv2d(channels, channels, 3, 1, 1) for _ in range(num_layers)] + setattr(self, f'block_{i + 1}', nn.Sequential(*block)) + up_stride *= s + stride = up_stride // down_sample + setattr(self, f'deconv_{self.n_blocks - i}', + nn.Sequential(nn.ConvTranspose2d(channels, mid_channels, stride, stride, 0), + nn.BatchNorm2d(mid_channels)) + ) + self.out_conv = nn.Sequential(nn.ConvTranspose2d(mid_channels * 3, out_channels, 1, 1, 0), + nn.BatchNorm2d(out_channels)) + +
[docs] def forward(self, x): + ret_dict = {} + down_stride = 1 + for i, s in enumerate(self.strides): + x = getattr(self, f'block_{i + 1}')(x) + down_stride *= s + ret_dict[f'p{down_stride}'] = x + + out = [] + for i, s in enumerate(self.strides): + x = getattr(self, f'deconv_{i + 1}')(ret_dict[f'p{down_stride}']) + down_stride = down_stride // s + out.append(x) + out = self.out_conv(torch.cat(out, 1)) + + return out, ret_dict
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/downsample_conv.html b/docs/_build/html/_modules/cosense3d/modules/plugin/downsample_conv.html new file mode 100644 index 00000000..76ec218a --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/downsample_conv.html @@ -0,0 +1,161 @@

Source code for cosense3d.modules.plugin.downsample_conv

+"""
+Class used to downsample features by 3*3 conv
+"""
+import torch.nn as nn
+
+
+
[docs]class DoubleConv(nn.Module): + """ + Double convolution + """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int, + padding: int): + super().__init__() + self.double_conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, + stride=stride, padding=padding), + nn.ReLU(inplace=True), + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True) + ) +
[docs] def forward(self, x): + return self.double_conv(x)
+ + +
[docs]class DownsampleConv(nn.Module): + def __init__(self, + in_channels, + kernel_sizes=[1], + dims=[256], + strides=[1], + paddings=[0]): + super(DownsampleConv, self).__init__() + self.layers = nn.ModuleList([]) + + for ksize, dim, stride, padding in zip( + kernel_sizes, dims, strides, paddings): + self.layers.append(DoubleConv(in_channels, + dim, + kernel_size=ksize, + stride=stride, + padding=padding)) + in_channels = dim + +
[docs] def forward(self, x): + for i in range(len(self.layers)): + x = self.layers[i](x) + return x
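A toy forward pass through the downsampling stack above (sizes are made up): one DoubleConv with stride 2 halves the spatial resolution and maps 64 to 256 channels.

    import torch
    from cosense3d.modules.plugin.downsample_conv import DownsampleConv

    net = DownsampleConv(in_channels=64, kernel_sizes=[3], dims=[256],
                         strides=[2], paddings=[1])
    x = torch.randn(1, 64, 128, 128)
    print(net(x).shape)   # torch.Size([1, 256, 64, 64])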
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/flash_attn.html b/docs/_build/html/_modules/cosense3d/modules/plugin/flash_attn.html new file mode 100644 index 00000000..d479e0e9 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/flash_attn.html @@ -0,0 +1,300 @@

Source code for cosense3d.modules.plugin.flash_attn

+# ------------------------------------------------------------------------
+# Copyright (c) 2023 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+#  Modified by Yunshuang Yuan
+# ------------------------------------------------------------------------
+# flash-attention
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.init import (
+    xavier_uniform_,
+    constant_,
+    xavier_normal_
+)
+from torch.nn.functional import linear
+
+from einops import rearrange
+
+
+from flash_attn.flash_attn_interface import flash_attn_unpadded_kvpacked_func, _get_block_size
+from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis
+from cosense3d.modules.utils.test_flash_attn import convert_flash_attn_S_to_softmax, \
+    generate_random_padding_mask
+
+
+
+
[docs]def flash_attn_unpadded_kvpacked_test(q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, dropout_p, softmax_scale, + causal, batch_size): + d = q.shape[-1] + device = q.device + output_unpad, sm_lse, S_dmask = flash_attn_unpadded_kvpacked_func( + q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + dropout_p, return_attn_probs=True, causal=causal, softmax_scale=softmax_scale + ) + query_padding_mask = generate_random_padding_mask(max_sq, batch_size, device, mode='full') + key_padding_mask = generate_random_padding_mask(max_sk, batch_size, device, mode='full') + S_dmask_converted = convert_flash_attn_S_to_softmax( + S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal + ) + return output_unpad, S_dmask_converted
+ + +def _in_projection_packed(q, k, v, w, b=None): + w_q, w_k, w_v = w.chunk(3) + if b is None: + b_q = b_k = b_v = None + else: + b_q, b_k, b_v = b.chunk(3) + return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v) + + +
[docs]class FlashAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + """ + + def __init__(self, + softmax_scale: float=None, + attention_dropout: float=0.0, + return_attn_weights: float=False, + device: str=None, + dtype: type=None): + """ + + :param softmax_scale: The temperature to use for the softmax attention. + (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + :param attention_dropout: The dropout rate to apply to the attention + (default: 0.1) + :param return_attn_weights: + :param device: + :param dtype: + """ + super().__init__() + self.softmax_scale = softmax_scale + self.dropout_p = attention_dropout + self.fp16_enabled = True + self.return_attn_weights = return_attn_weights + +
[docs] def forward(self, + q: torch.Tensor, + kv: torch.Tensor, + causal: bool=False, + key_padding_mask: torch.Tensor=None): + """Implements the multihead softmax attention. + + :param q: The tensor containing the query. (B, T, H, D) + :param kv: The tensor containing the key, and value. (B, S, 2, H, D) + :param causal: + :param key_padding_mask: a bool tensor of shape (B, S) + :return: + """ + # assert q.dtype in [torch.float16, torch.bfloat16] and kv.dtype in [torch.float16, torch.bfloat16] + assert q.is_cuda and kv.is_cuda + assert q.shape[0] == kv.shape[0] and q.shape[-2] == kv.shape[-2] and q.shape[-1] == kv.shape[-1] + + batch_size = q.shape[0] + seqlen_q, seqlen_k = q.shape[1], kv.shape[1] + if key_padding_mask is None: + q, kv = rearrange(q, 'b s ... -> (b s) ...'), rearrange(kv, 'b s ... -> (b s) ...') + max_sq, max_sk = seqlen_q, seqlen_k + cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, + device=q.device) + cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, + device=kv.device) + if self.training or not self.return_attn_weights: + output = flash_attn_unpadded_kvpacked_func( + q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) + attn_weights = None + else: + Q, K, V = q.permute(1, 0, 2), kv[:, 0].permute(1, 0, 2), kv[:, 1].permute(1, 0, 2) + attn_weights = torch.softmax((Q @ K.transpose(-2, -1) / math.sqrt(Q.size(-1))), dim=-1) + # attn_weights = torch.dropout(attn_weights, self.dropout_p, train=False) + output = attn_weights @ V + attn_weights = attn_weights.mean(dim=0) + output = output.permute(1, 0, 2) + + # output, attn_weights = flash_attn_unpadded_kvpacked_test( + # q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + # self.dropout_p if self.training else 0.0, + # softmax_scale=self.softmax_scale, causal=causal, batch_size=batch_size + # ) + output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) + attn_weights = rearrange(attn_weights, '(b s) ... -> b s ...', b=batch_size) + # attn_weights = attn_weights.mean(dim=1) + else: + nheads = kv.shape[-2] + q = rearrange(q, 'b s ... -> (b s) ...') + max_sq = seqlen_q + cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, + device=q.device) + x = rearrange(kv, 'b s two h d -> b s (two h d)') + x_unpad, indices, cu_seqlens_k, max_sk = unpad_input(x, key_padding_mask) + x_unpad = rearrange(x_unpad, 'nnz (two h d) -> nnz two h d', two=2, h=nheads) + output_unpad = flash_attn_unpadded_kvpacked_func( + q, x_unpad, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, + self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(output_unpad, '(b s) ... -> b s ...', b=batch_size) + attn_weights = None + + return output, attn_weights
+ + +
[docs]class FlashMHA(nn.Module): + + def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0, + causal=False, device=None, dtype=None, **kwargs) -> None: + assert batch_first + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.embed_dim = embed_dim + self.causal = causal + self.bias = bias + + self.num_heads = num_heads + assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads" + self.head_dim = self.embed_dim // num_heads + assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8" + + self.in_proj_weight = nn.Parameter(torch.empty((3 * embed_dim, embed_dim))) + if bias: + self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.inner_attn = FlashAttention(attention_dropout=attention_dropout, **factory_kwargs) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self._reset_parameters() + + def _reset_parameters(self) -> None: + xavier_uniform_(self.in_proj_weight) + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + +
[docs] def forward(self, q, k, v, key_padding_mask=None): + """x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) + key_padding_mask: bool tensor of shape (batch, seqlen) + """ + # q, k, v = self.Wq(q), self.Wk(k), self.Wv(v) + q, k, v = _in_projection_packed(q, k, v, self.in_proj_weight, self.in_proj_bias) + q = rearrange(q, 'b s (h d) -> b s h d', h=self.num_heads) + k = rearrange(k, 'b s (h d) -> b s h d', h=self.num_heads) + v = rearrange(v, 'b s (h d) -> b s h d', h=self.num_heads) + kv = torch.stack([k, v], dim=2) + context, attn_weights = self.inner_attn(q, kv, key_padding_mask=key_padding_mask, causal=self.causal) + return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights
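A heavily hedged usage sketch for FlashMHA: it assumes a CUDA device, half precision and the flash_attn version this module was written against; head_dim (embed_dim / num_heads) must be a multiple of 8 and at most 128.

    import torch
    from cosense3d.modules.plugin.flash_attn import FlashMHA

    mha = FlashMHA(embed_dim=256, num_heads=8).cuda().half()
    q = torch.randn(2, 100, 256, device='cuda', dtype=torch.float16)
    kv = torch.randn(2, 300, 256, device='cuda', dtype=torch.float16)
    out, attn_weights = mha(q, kv, kv)   # attn_weights is None on the fast path
    print(out.shape)                     # torch.Size([2, 100, 256])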
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/fpn.html b/docs/_build/html/_modules/cosense3d/modules/plugin/fpn.html new file mode 100644 index 00000000..5a7f973d --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/fpn.html @@ -0,0 +1,264 @@

Source code for cosense3d.modules.plugin.fpn

+from torch import nn
+import torch.nn.functional as F
+
+from cosense3d.modules.utils.conv import ConvModule
+from cosense3d.modules.utils.init import xavier_init
+
+
+
[docs]class FPN(nn.Module): + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(FPN, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + self.lateral_convs.append(l_conv) + if i == 0 : + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + +
[docs] def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True
+ +
[docs] def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] += F.interpolate(laterals[i], + **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) if i==0 else laterals[i] for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs)
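A toy forward pass through the FPN above; the import path and channel sizes are assumptions. With three input levels and num_outs=3, every output level carries out_channels features.

    import torch
    from cosense3d.modules.plugin.fpn import FPN

    fpn = FPN(in_channels=[64, 128, 256], out_channels=256, num_outs=3)
    feats = [torch.randn(1, 64, 64, 64),
             torch.randn(1, 128, 32, 32),
             torch.randn(1, 256, 16, 16)]
    outs = fpn(feats)
    print([o.shape[1] for o in outs])   # [256, 256, 256]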
\ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/gevbev_decoder.html b/docs/_build/html/_modules/cosense3d/modules/plugin/gevbev_decoder.html new file mode 100644 index 00000000..d1e4a1a6 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/gevbev_decoder.html @@ -0,0 +1,190 @@

Source code for cosense3d.modules.plugin.gevbev_decoder

+import torch
+import torch_scatter
+from torch import nn
+
+from cosense3d.modules.utils.misc import SELayer_Linear
+from cosense3d.modules.utils.gaussian_utils import weighted_mahalanobis_dists
+from cosense3d.modules.utils.me_utils import indices2metric, metric2indices, update_me_essentials
+
+
+
[docs]class GevBEVDecoder(nn.Module): + def __init__(self, data_info, stride, kernel=3, var0=0.1): + super().__init__() + update_me_essentials(self, data_info, stride) + self.lr = nn.Parameter(torch.tensor(self.lidar_range), requires_grad=False) + self.vs = nn.Parameter(torch.tensor(self.voxel_size), requires_grad=False) + self.var0 = [var0, var0] + x = torch.arange(kernel) - kernel // 2 + self.nbrs = torch.stack(torch.meshgrid(x, x, indexing='ij'), + dim=-1).reshape(-1, 2) + self.nbrs = nn.Parameter(self.nbrs, requires_grad=False) + self.n_nbrs = len(self.nbrs) + +
[docs] def coor_to_indices(self, coor): + inds = coor.clone() + inds[:, 1] = inds[:, 1] / self.stride - self.offset_sz_x + inds[:, 2] = inds[:, 2] / self.stride - self.offset_sz_y + return inds.long()
+ +
[docs] def forward(self, ref_pts, ctr_coor, ctr_reg): + """ + :param ref_pts: LongTensor(Q, 3) 2d coordinates in metrics(batch_idx, x, y) + :param ctr_coor: LongTensor(V, 3) 2d coordinates in indices (batch_idx, x, y) + :param ctr_reg: FloatTensor(V, d) bev grid center point regression result + + :return: out_evidence FloatTensor(Q, d): attended features + """ + reg = ctr_reg.relu() + reg_evi = reg[:, :2] + reg_var = reg[:, 2:].view(-1, 2, 2) + + ctr_pts = indices2metric(ctr_coor, self.vs) + ctr_inds = self.coor_to_indices(ctr_coor) + ref_coor = metric2indices(ref_pts, self.vs) + ref_inds = self.coor_to_indices(ref_coor) + + q_inds, v_inds, mask = self.get_nbr_mapping(ref_inds, ctr_inds) + + evidence = torch.zeros_like(ref_pts[:, :2]) + dists = ref_pts[q_inds[mask], 1:3] - ctr_pts[v_inds[mask], 1:3] + probs_weighted = weighted_mahalanobis_dists(reg_evi[v_inds[mask]], reg_var[v_inds[mask]], dists, self.var0) + torch_scatter.scatter(probs_weighted, q_inds[mask], + dim=0, out=evidence, reduce='sum') + return evidence.reshape(len(ref_pts), self.n_nbrs, 2)
+ +
[docs] def get_nbr_mapping(self, query_pos, value_pos): + B = query_pos[:, 0].max() + 1 + pad_width = 2 + query_pos[:, 1:] += pad_width + value_pos[:, 1:] += pad_width + query_inds = torch.arange(len(query_pos), dtype=torch.long) + value_inds = torch.arange(len(value_pos), dtype=torch.long) + + # index -1 indicates that this nbr is outside the grid range + value_map = - torch.ones((B, self.size_x + pad_width * 2, + self.size_y + pad_width * 2), dtype=torch.long) + value_map[value_pos[:, 0], + value_pos[:, 1], + value_pos[:, 2]] = value_inds + + query_inds_nbrs = query_pos.unsqueeze(dim=1).repeat(1, self.n_nbrs, 1) + query_inds_nbrs[..., 1:] += self.nbrs.view(1, -1, 2) + query_inds_nbrs = query_inds_nbrs.view(-1, 3) + mask = ((query_inds_nbrs >= 0).all(dim=-1) & + (query_inds_nbrs[:, 1] < self.size_x + pad_width * 2) & + (query_inds_nbrs[:, 2] < self.size_y + pad_width * 2)) + assert torch.logical_not(mask).sum() == 0 + query_inds_mapped = query_inds.unsqueeze(1).repeat(1, self.n_nbrs).view(-1) + value_inds_mapped = value_map[query_inds_nbrs[:, 0], + query_inds_nbrs[:, 1], + query_inds_nbrs[:, 2]] + mask = torch.logical_and(query_inds_mapped >= 0, value_inds_mapped >= 0) + return query_inds_mapped, value_inds_mapped, mask
+
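The forward above gathers per-neighbour evidence for every reference point and reduces it with a scatter-sum. A toy illustration of that reduction with torch_scatter (not part of the module source):

    import torch
    import torch_scatter

    # Five weighted evidence contributions mapped onto three query points.
    probs_weighted = torch.rand(5, 2)            # per-neighbour evidence (2 classes)
    q_inds = torch.tensor([0, 0, 1, 2, 2])       # query index of each contribution

    evidence = torch.zeros(3, 2)
    torch_scatter.scatter(probs_weighted, q_inds, dim=0, out=evidence, reduce='sum')

    # Each row of `evidence` is the sum of the contributions assigned to that query.
    assert torch.allclose(evidence[0], probs_weighted[:2].sum(0))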
\ No newline at end of file
diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/mink_spconv.html b/docs/_build/html/_modules/cosense3d/modules/plugin/mink_spconv.html
new file mode 100644
index 00000000..ff230714
--- /dev/null
+++ b/docs/_build/html/_modules/cosense3d/modules/plugin/mink_spconv.html
@@ -0,0 +1,187 @@
+cosense3d.modules.plugin.mink_spconv — Cosense3D 1.0.0 documentation
Source code for cosense3d.modules.plugin.mink_spconv

+import functools
+import torch
+
+from cosense3d.modules import BaseModule, nn
+from cosense3d.modules.utils.me_utils import mink_coor_limit, minkconv_conv_block, ME
+
+
+
[docs]class Spconv(nn.Module): + def __init__(self, data_info, convs, d=2, dilation=False, **kwargs): + super(Spconv, self).__init__() + assert d == 2, 'only support dim=2' + self.det_r = data_info.get('det_r', False) + self.lidar_range = data_info.get('lidar_range', False) + self.voxel_size = data_info['voxel_size'] + self.d = d + self.dilation = dilation + self.convs = [] + for k, conv_args in convs.items(): + self.convs.append(k) + setattr(self, f'convs_{k}', self.get_conv_layer(conv_args)) + stride = int(k[1]) + + if self.det_r: + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif self.lidar_range: + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, f'mink_xylim_{k}', mink_coor_limit(lr, self.voxel_size, stride)) # relevant to ME + +
[docs] def forward(self, stensor_dict, **kwargs): + out_dict = {} + for k in self.convs: + stride = int(k[1]) + stensor2d = self.get_2d_stensor(stensor_dict, stride) + + stensor2d = getattr(self, f'convs_{k}')(stensor2d) + # after coordinate expansion, some coordinates will exceed the maximum detection + # range, therefore they are removed here. + xylim = getattr(self, f'mink_xylim_{k}') + mask = (stensor2d.C[:, 1] > xylim[0]) & (stensor2d.C[:, 1] <= xylim[1]) & \ + (stensor2d.C[:, 2] > xylim[2]) & (stensor2d.C[:, 2] <= xylim[3]) + + coor = stensor2d.C[mask] + feat = stensor2d.F[mask] + + out_dict[k] = { + 'coor': coor, + 'feat': feat + } + return out_dict
+ +
[docs] def get_2d_stensor(self, stensor_dict, stride): + stensor = stensor_dict[f'p{stride}'] + if isinstance(stensor, ME.SparseTensor) and stensor.C.shape[-1] == 3: + return stensor + else: + if isinstance(stensor, dict): + coor, feat = stensor['coor'][:, :3], stensor['feat'] + elif isinstance(stensor, ME.SparseTensor): + coor, feat = stensor.C[:, :3], stensor.F + return ME.SparseTensor( + coordinates=coor[:, :3].contiguous(), + features=feat, + tensor_stride=[stride] * 2 + )
+ +
[docs] def get_conv_layer(self, args): + minkconv_layer = functools.partial( + minkconv_conv_block, d=self.d, bn_momentum=0.1, + ) + in_dim = args['in_dim'] + out_dim = args['out_dim'] + layers = [minkconv_layer(in_dim, out_dim, args['kernels'][0], 1, + expand_coordinates=self.dilation)] + for ks in args['kernels'][1:]: + layers.append(minkconv_layer(out_dim, out_dim, ks, 1, + expand_coordinates=self.dilation)) + return nn.Sequential(*layers)
+
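After the sparse convolutions (with coordinate expansion) the forward above drops coordinates that fall outside the detection range, using per-stride xy limits from mink_coor_limit. A pure-torch sketch of that filtering step; the limit computation below is a simplifying assumption, the repo's helper encodes the exact MinkowskiEngine convention:

    import torch

    voxel_size, stride = 0.2, 2                                   # illustrative values
    lidar_range = [-51.2, -51.2, -3.0, 51.2, 51.2, 1.0]
    cell = voxel_size * stride

    # assumed limits in (possibly negative) voxel indices: [x_min, x_max, y_min, y_max]
    xylim = [lidar_range[0] / cell, lidar_range[3] / cell,
             lidar_range[1] / cell, lidar_range[4] / cell]

    coor = torch.randint(-400, 400, (1000, 3))                    # (batch_idx, x, y)
    feat = torch.randn(1000, 64)

    mask = (coor[:, 1] > xylim[0]) & (coor[:, 1] <= xylim[1]) & \
           (coor[:, 2] > xylim[2]) & (coor[:, 2] <= xylim[3])
    coor, feat = coor[mask], feat[mask]                           # keep in-range sparse voxels only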
\ No newline at end of file
diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/naive_compressor.html b/docs/_build/html/_modules/cosense3d/modules/plugin/naive_compressor.html
new file mode 100644
index 00000000..2ef93f4e
--- /dev/null
+++ b/docs/_build/html/_modules/cosense3d/modules/plugin/naive_compressor.html
@@ -0,0 +1,139 @@
+cosense3d.modules.plugin.naive_compressor — Cosense3D 1.0.0 documentation
Source code for cosense3d.modules.plugin.naive_compressor

+import torch.nn as nn
+
+
+
[docs]class NaiveCompressor(nn.Module): + """ + A very naive compressor that only compresses along the channel dimension. + """ + def __init__(self, input_dim, compress_ratio): + super().__init__() + self.encoder = nn.Sequential( + nn.Conv2d(input_dim, input_dim//compress_ratio, kernel_size=3, + stride=1, padding=1), + nn.BatchNorm2d(input_dim//compress_ratio, eps=1e-3, momentum=0.01), + nn.ReLU() + ) + self.decoder = nn.Sequential( + nn.Conv2d(input_dim//compress_ratio, input_dim, kernel_size=3, + stride=1, padding=1), + nn.BatchNorm2d(input_dim, eps=1e-3, momentum=0.01), + nn.ReLU(), + nn.Conv2d(input_dim, input_dim, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(input_dim, eps=1e-3, + momentum=0.01), + nn.ReLU() + ) + +
[docs] def forward(self, x): + x = self.encoder(x) + x = self.decoder(x) + + return x
+
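A quick shape check for the compressor (assuming the cosense3d package is importable; channel count, ratio and map size are illustrative):

    import torch
    from cosense3d.modules.plugin.naive_compressor import NaiveCompressor

    comp = NaiveCompressor(input_dim=256, compress_ratio=4)
    x = torch.rand(2, 256, 100, 252)     # (B, C, H, W) BEV feature map
    y = comp(x)                          # encoder squeezes C to 256 // 4, decoder restores it
    assert y.shape == x.shape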
\ No newline at end of file
diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/pillar_encoder.html b/docs/_build/html/_modules/cosense3d/modules/plugin/pillar_encoder.html
new file mode 100644
index 00000000..be5c68c2
--- /dev/null
+++ b/docs/_build/html/_modules/cosense3d/modules/plugin/pillar_encoder.html
@@ -0,0 +1,249 @@
+cosense3d.modules.plugin.pillar_encoder — Cosense3D 1.0.0 documentation
Source code for cosense3d.modules.plugin.pillar_encoder

+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from cosense3d.modules.utils.conv import ConvModule
+from cosense3d.modules.utils.init import xavier_init
+
+
+
[docs]class PFNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + use_norm=True, + last_layer=False): + super().__init__() + self.last_vfe = last_layer + self.use_norm = use_norm + if not self.last_vfe: + out_channels = out_channels // 2 + + if self.use_norm: + self.linear = nn.Linear(in_channels, out_channels, bias=False) + self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01) + else: + self.linear = nn.Linear(in_channels, out_channels, bias=True) + + self.part = 50000 + +
[docs] def forward(self, inputs): + if inputs.shape[0] > self.part: + # nn.Linear performs randomly when batch size is too large + num_parts = inputs.shape[0] // self.part + part_linear_out = [self.linear( + inputs[num_part * self.part:(num_part + 1) * self.part]) + for num_part in range(num_parts + 1)] + x = torch.cat(part_linear_out, dim=0) + else: + x = self.linear(inputs) + torch.backends.cudnn.enabled = False + x = self.norm(x.permute(0, 2, 1)).permute(0, 2, + 1) if self.use_norm else x + torch.backends.cudnn.enabled = True + x = F.relu(x) + x_max = torch.max(x, dim=1, keepdim=True)[0] + + if self.last_vfe: + return x_max + else: + x_repeat = x_max.repeat(1, inputs.shape[1], 1) + x_concatenated = torch.cat([x, x_repeat], dim=2) + return x_concatenated
+ + +
[docs]class PillarEncoder(nn.Module): + def __init__(self, + features, + voxel_size, + lidar_range, + channels, + use_norm=True): + super(PillarEncoder, self).__init__() + self.voxel_size = nn.Parameter(torch.tensor(voxel_size), requires_grad=False) + self.lidar_range = nn.Parameter(torch.tensor(lidar_range), requires_grad=False) + self.offset = nn.Parameter(self.voxel_size / 2 + self.lidar_range[:3], + requires_grad=False) + self.num_point_features = sum( + [getattr(self, f"{f}_dim") for f in features]) + self.features = features + assert isinstance(channels, list) + self.channels = [self.num_point_features] + channels + self.out_channels = channels[-1] + self.use_norm = use_norm + self._init_layers(self.channels) + + def _init_layers(self, channels): + pfn_layers = [] + for i in range(len(channels) - 1): + in_filters = channels[i] + out_filters = channels[i + 1] + pfn_layers.append( + PFNLayer(in_filters, out_filters, self.use_norm, + last_layer=(i >= len(channels) - 2)) + ) + self.pfn_layers = nn.ModuleList(pfn_layers) + +
[docs] def forward(self, voxel_features, coords, voxel_num_points): + points_mean = voxel_features[..., :3].sum(dim=1, keepdim=True) / \ + voxel_num_points.view(-1, 1, 1) + f_cluster = voxel_features[..., :3] - points_mean + + coords_metric = coords[:, [3, 2, 1]].unsqueeze(1) * self.voxel_size + self.offset + f_center = voxel_features[..., :3] - coords_metric + + features = self.compose_voxel_feature(voxel_features) + [f_cluster, f_center] + features = torch.cat(features, dim=-1) + + voxel_count = features.shape[1] + mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0) + features *= mask.unsqueeze(-1) + for pfn in self.pfn_layers: + features = pfn(features) + features = features.squeeze() + return features
+ +
[docs] def compose_voxel_feature(self, voxel_features): + features = [] + if 'absolute_xyz' in self.features: + features.append(voxel_features[..., :3]) + if 'distance' in self.features: + features.append(torch.norm(voxel_features[..., :3], 2, -1, + keepdim=True)) + if 'intensity' in self.features: + assert voxel_features.shape[-1] >= 4 + features.append(voxel_features[..., 3:4]) + return features
+ +
[docs] @staticmethod + def get_paddings_indicator(actual_num, max_num, axis=0): + actual_num = torch.unsqueeze(actual_num, axis + 1) + max_num_shape = [1] * len(actual_num.shape) + max_num_shape[axis + 1] = -1 + max_num = torch.arange(max_num, + dtype=torch.int, + device=actual_num.device).view(max_num_shape) + paddings_indicator = actual_num.int() > max_num + return paddings_indicator
+ + @property + def distance_dim(self): + return 1 + + @property + def absolute_xyz_dim(self): + return 6 + + @property + def xyz_dim(self): + return 3 + @property + def intensity_dim(self): + return 1
+
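get_paddings_indicator builds the mask that zeroes out the padded slots of each pillar before the PFN max-pool. A self-contained rerun of its logic on toy numbers (not part of the diff):

    import torch

    def get_paddings_indicator(actual_num, max_num, axis=0):
        # same logic as PillarEncoder.get_paddings_indicator above
        actual_num = torch.unsqueeze(actual_num, axis + 1)
        max_num_shape = [1] * len(actual_num.shape)
        max_num_shape[axis + 1] = -1
        max_num = torch.arange(max_num, dtype=torch.int).view(max_num_shape)
        return actual_num.int() > max_num

    # Two pillars holding 2 and 4 real points, padded to 5 slots each.
    print(get_paddings_indicator(torch.tensor([2, 4]), 5))
    # tensor([[ True,  True, False, False, False],
    #         [ True,  True,  True,  True, False]])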
\ No newline at end of file
diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/ssfa.html b/docs/_build/html/_modules/cosense3d/modules/plugin/ssfa.html
new file mode 100644
index 00000000..831321f2
--- /dev/null
+++ b/docs/_build/html/_modules/cosense3d/modules/plugin/ssfa.html
@@ -0,0 +1,212 @@
+cosense3d.modules.plugin.ssfa — Cosense3D 1.0.0 documentation
Source code for cosense3d.modules.plugin.ssfa

+import torch
+from torch import nn
+
+
+
[docs]class SSFA(nn.Module): + def __init__(self, in_channels, out_channels=128, shrink_strides=None, shrink_channels=None): + super(SSFA, self).__init__() + self._num_input_features = in_channels # 128 + self.shrink_strides = shrink_strides + + seq = [nn.ZeroPad2d(1)] + get_conv_layers('Conv2d', 128, 128, + n_layers=3, kernel_size=[3, 3, 3], + stride=[1, 1, 1], padding=[0, 1, 1], + sequential=False) + self.bottom_up_block_0 = nn.Sequential(*seq) + self.bottom_up_block_1 = get_conv_layers('Conv2d', 128, 256, + n_layers=3, kernel_size=[3, 3, 3], + stride=[2, 1, 1], padding=[1, 1, 1]) + + self.trans_0 = get_conv_layers('Conv2d', 128, 128, + n_layers=1, kernel_size=[1], stride=[1], padding=[0]) + self.trans_1 = get_conv_layers('Conv2d', 256, 256, + n_layers=1, kernel_size=[1], stride=[1], padding=[0]) + + self.deconv_block_0 = get_conv_layers('ConvTranspose2d', 256, 128, + n_layers=1, kernel_size=[3], stride=[2], + padding=[1], output_padding=[1]) + self.deconv_block_1 = get_conv_layers('ConvTranspose2d', 256, 128, + n_layers=1, kernel_size=[3], stride=[2], + padding=[1], output_padding=[1]) + + self.conv_0 = get_conv_layers('Conv2d', out_channels, 128, + n_layers=1, kernel_size=[3], stride=[1], padding=[1]) + self.conv_1 = get_conv_layers('Conv2d', out_channels, 128, + n_layers=1, kernel_size=[3], stride=[1], padding=[1]) + + self.w_0 = get_conv_layers('Conv2d', out_channels, 1, + n_layers=1, kernel_size=[1], stride=[1], padding=[0], relu_last=False) + self.w_1 = get_conv_layers('Conv2d', out_channels, 1, + n_layers=1, kernel_size=[1], stride=[1], padding=[0], relu_last=False) + + if isinstance(shrink_strides, list): + assert len(shrink_channels) == len(shrink_strides) + shrink_convs = [] + in_channels = out_channels + for s, c in zip(shrink_strides, shrink_channels): + shrink_convs.append(nn.Conv2d(in_channels, c, 3, s, padding=1)) + in_channels = c + self.shrink_convs = nn.ModuleList(shrink_convs) + + # default init_weights for conv(msra) and norm in ConvModule +
[docs] def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight, gain=1) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0)
+ +
[docs] def forward(self, x): + x_0 = self.bottom_up_block_0(x) + x_1 = self.bottom_up_block_1(x_0) + x_trans_0 = self.trans_0(x_0) + x_trans_1 = self.trans_1(x_1) + x_middle_0 = self.deconv_block_0(x_trans_1) + x_trans_0 + x_middle_1 = self.deconv_block_1(x_trans_1) + x_output_0 = self.conv_0(x_middle_0) + x_output_1 = self.conv_1(x_middle_1) + + x_weight_0 = self.w_0(x_output_0) + x_weight_1 = self.w_1(x_output_1) + x_weight = torch.softmax(torch.cat([x_weight_0, x_weight_1], dim=1), dim=1) + x_output = x_output_0 * x_weight[:, 0:1, :, :] + x_output_1 * x_weight[:, 1:, :, :] + + if self.shrink_strides is None: + return x_output.contiguous() + else: + assert isinstance(self.shrink_strides, list) + downx = 1 + ret_dict = {} + x = x_output + for i, s in enumerate(self.shrink_strides): + downx *= s + x = self.shrink_convs[i](x) + ret_dict[downx] = x + return x_output.contiguous(), ret_dict
+ + +
[docs]def get_conv_layers(conv_name, in_channels, out_channels, n_layers, kernel_size, stride, + padding, relu_last=True, sequential=True, **kwargs): + """ + Build convolutional layers. kernel_size, stride and padding should be a list with the lengths that match n_layers + """ + seq = [] + for i in range(n_layers): + seq.extend([getattr(nn, conv_name)(in_channels, out_channels, kernel_size[i], stride=stride[i], + padding=padding[i], bias=False, **{k: v[i] for k, v in kwargs.items()}), + nn.BatchNorm2d(out_channels, eps=1e-3, momentum=0.01)]) + if i < n_layers - 1 or relu_last: + seq.append(nn.ReLU()) + in_channels = out_channels + if sequential: + return nn.Sequential(*seq) + else: + return seq
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/target_assigners.html b/docs/_build/html/_modules/cosense3d/modules/plugin/target_assigners.html
new file mode 100644
index 00000000..6634def3
--- /dev/null
+++ b/docs/_build/html/_modules/cosense3d/modules/plugin/target_assigners.html
@@ -0,0 +1,1738 @@
+cosense3d.modules.plugin.target_assigners — Cosense3D 1.0.0 documentation
Source code for cosense3d.modules.plugin.target_assigners

+import math
+from abc import ABCMeta, abstractmethod
+from functools import partial
+from typing import List, Dict, Optional, Tuple
+
+import torch
+from torch import nn
+import torch_scatter
+from scipy.optimize import linear_sum_assignment
+
+from cosense3d.utils.box_utils import (bbox_xyxy_to_cxcywh,
+                                       bbox_cxcywh_to_xyxy,
+                                       normalize_bbox,
+                                       boxes3d_to_standup_bboxes,
+                                       rotate_points_batch)
+from cosense3d.utils.pclib import rotate_points_along_z_torch
+from cosense3d.utils.iou2d_calculator import bbox_overlaps
+from cosense3d.modules.utils.gaussian_utils import gaussian_2d
+from cosense3d.modules.utils.gevbev_utils import draw_sample_evis, weighted_mahalanobis_dists
+from cosense3d.modules.utils.me_utils import metric2indices, update_me_essentials
+from cosense3d.modules.utils.box_coder import build_box_coder
+from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu
+from cosense3d.dataset.const import CoSenseBenchmarks as csb
+from cosense3d.modules.utils.common import pad_r, pad_l, meshgrid
+from cosense3d.ops.utils import points_in_boxes_gpu
+from cosense3d.modules.losses import pred_to_conf_unc
+from cosense3d.utils.misc import PI
+
+
+
[docs]def sample_mining(scores: torch.Tensor, + labels: torch.Tensor, + dists=None, + sample_mining_thr=0.5, + max_sample_ratio=5, + max_num_sample=None): + """ + When only a limited number of negative targets is sampled for training + and the majority of the negative samples are ignored, there is a + high probability that hard negative targets are also ignored. This + weakens the model's ability to learn from these hard negatives and leads to + a lot of false positives. + Therefore, this function mines the samples that have high predicted + scores as additional training targets. It should be used after 'pos_neg_sampling'. + + :param scores: (N1, ..., Nk) classification scores/confidences that the + sample belongs to the foreground. + :param labels: (N1, ..., Nk) class labels; -1 indicates ignore, 0 indicates negative, + positive numbers indicate classes. + :param dists: distances to the closest gt box centers. + :param sample_mining_thr: score threshold for sampling. + :param max_sample_ratio: `n_sample` / `n_pos_sample`. + :param max_num_sample: maximum number of samples. + :return: updated class labels. + """ + assert scores.ndim == labels.ndim + assert scores.shape == labels.shape + pred_pos = scores > sample_mining_thr + if dists is not None: + # only mine points that are not too close to the real positive samples + pred_pos[dists < 3] = False + not_cared = labels == -1 + sample_inds = torch.where(torch.logical_and(pred_pos, not_cared))[0] + n_pos = (labels > 0).sum() + max_num_sample = int(n_pos * max_sample_ratio) if max_num_sample is None else max_num_sample + if len(sample_inds) > max_num_sample: + sample_inds = sample_inds[torch.randperm(len(sample_inds))[:max_num_sample]] + labels[sample_inds] = 0 + return labels
+ + +
[docs]def pos_neg_sampling(labels: torch.Tensor, pos_neg_ratio: float) -> torch.Tensor: + """ + Downsample negative targets. + + :param labels: class labels. + :param pos_neg_ratio: ratio = num_neg_samples / num_pos_samples. + :return: class labels with -1 labels to be ignored during training. + """ + pos = labels > 0 + neg = labels == 0 + n_neg_sample = pos.sum(dim=0) * pos_neg_ratio + if neg.sum() > n_neg_sample: + neg_inds = torch.where(neg)[0] + perm = torch.randperm(len(neg_inds))[n_neg_sample:] + labels[neg_inds[perm]] = -1 + return labels
+ + +
[docs]class BaseAssigner(metaclass=ABCMeta): + """Base assigner.""" + +
[docs] @abstractmethod + def assign(self, *args, **kwargs): + """Assign preds to targets."""
+ + +
[docs]class MatchCost: + """This class is modified from mmdet.""" +
[docs] @staticmethod + def classification(cls_pred: torch.Tensor, + gt_labels: torch.Tensor, + weight: float=1.0) -> torch.Tensor: + """ + + :param cls_pred: Predicted classification logits, shape + (num_query, num_class). + :param gt_labels: Label of `gt_bboxes`, shape (num_gt,). + :param weight: loss_weight. + :return: cls_cost value with weight + """ + # Following the official DETR repo, contrary to the loss that + # NLL is used, we approximate it in 1 - cls_score[gt_label]. + # The 1 is a constant that doesn't change the matching, + # so it can be omitted. + cls_score = cls_pred.softmax(-1) + cls_cost = -cls_score[:, gt_labels] + return cls_cost * weight
+ +
[docs] @staticmethod + def bboxl1(bbox_pred: torch.Tensor, + gt_bboxes: torch.Tensor, + weight: float=1., + box_format: str='xyxy') -> torch.Tensor: + """ + + :param bbox_pred: Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + (num_query, 4). + :param gt_bboxes: Ground truth boxes with normalized + coordinates (x1, y1, x2, y2). Shape (num_gt, 4). + :param weight: loss_weight. + :param box_format: 'xyxy' for DETR, 'xywh' for Sparse_RCNN. + :return: bbox_cost value with weight + """ + if box_format == 'xywh': + gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) + elif box_format == 'xyxy': + bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) + else: + raise NotImplementedError + bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) + return bbox_cost * weight
+ +
[docs] @staticmethod + def giou(bboxes: torch.Tensor, + gt_bboxes: torch.Tensor, + weight: float=1.0): + """ + + :param bboxes: Predicted boxes with unnormalized coordinates + (x1, y1, x2, y2). Shape (num_query, 4). + :param gt_bboxes: Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape (num_gt, 4). + :param weight: loss weight. + :return: giou_cost value with weight + """ + # overlaps: [num_bboxes, num_gt] + overlaps = bbox_overlaps( + bboxes, gt_bboxes, mode="giou", is_aligned=False) + # The 1 is a constant that doesn't change the matching, so omitted. + iou_cost = -overlaps + return iou_cost * weight
+ +
[docs] @staticmethod + def iou(bboxes, gt_bboxes, weight=1.0): + """See giou""" + # overlaps: [num_bboxes, num_gt] + overlaps = bbox_overlaps( + bboxes, gt_bboxes, mode="iou", is_aligned=False) + # The 1 is a constant that doesn't change the matching, so omitted. + iou_cost = -overlaps + return iou_cost * weight
+ +
[docs] @staticmethod + def l1(pred, gt, weight=1.0): + """L1 distance between pred and gt Tensors""" + cost = torch.cdist(pred, gt, p=1) + return cost * weight
+ +
[docs] @staticmethod + def binary_focal_loss(cls_pred, gt_labels, weight=1., alpha=0.25, gamma=2, eps=1e-12,): + cls_pred = cls_pred.flatten(1) + gt_labels = gt_labels.flatten(1).float() + n = cls_pred.shape[1] + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + eps).log() * ( + 1 - alpha) * cls_pred.pow(gamma) + pos_cost = -(cls_pred + eps).log() * alpha * ( + 1 - cls_pred).pow(gamma) + + cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ + torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) + return cls_cost / n * weight
+ +
[docs] @staticmethod + def focal_loss(cls_pred, gt_labels, weight=1., alpha=0.25, gamma=2, eps=1e-12,): + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + eps).log() * ( + 1 - alpha) * cls_pred.pow(gamma) + pos_cost = -(cls_pred + eps).log() * alpha * ( + 1 - cls_pred).pow(gamma) + + cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] + return cls_cost * weight
+ +
[docs] def build(self, type, **kwargs): + return partial(getattr(self, type), **kwargs)
+ + +
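As a small illustration of the classification cost above (not part of the module; the logits are toy values), the cost matrix is simply the negated softmax score of each gt class for every query:

    import torch

    cls_pred = torch.tensor([[2.0, 0.5, -1.0],       # logits of 2 queries over 3 classes
                             [0.1, 1.5,  0.3]])
    gt_labels = torch.tensor([0, 2])                 # class of each of the 2 gt boxes

    cls_score = cls_pred.softmax(-1)
    cls_cost = -cls_score[:, gt_labels]              # (num_query, num_gt); lower = better match
    print(cls_cost)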
[docs]class HungarianAssigner2D(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + + This class computes an assignment between the targets and the predictions + based on the costs. The cost is a weighted sum of four components: + classification cost, regression L1 cost, regression IoU cost and 2D-center L1 cost. + The assignment is done in the following steps; the order matters. + + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + """ + + def __init__(self, + cls_cost=dict(type='classification', weight=1.), + reg_cost=dict(type='bboxl1', weight=1.0), + iou_cost=dict(type='giou', weight=1.0), + centers2d_cost=dict(type='l1', weight=1.0)): + cost_builder = MatchCost() + self.cls_cost = cost_builder.build(**cls_cost) + self.reg_cost = cost_builder.build(**reg_cost) + self.iou_cost = cost_builder.build(**iou_cost) + self.centers2d_cost = cost_builder.build(**centers2d_cost) + +
[docs] def assign(self, + bbox_pred, + cls_pred, + pred_centers2d, + gt_bboxes, + gt_labels, + centers2d, + img_size, + eps: float = 1e-7 + ): + """Computes one-to-one matching based on the weighted costs. + + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + + :param bbox_pred: Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + :param cls_pred: Predicted classification logits, shape + [num_query, num_class]. + :param pred_centers2d: prediction 2d center points. + :param gt_bboxes: ground truth bboxes. + :param gt_labels: Label of `gt_bboxes`, shape (num_gt,). + img_size: input image size. + :param centers2d: 2d center points. + :param img_size: input image size. + :param eps: A value added to the denominator for + numerical stability. Default 1e-7. + :return: + """ + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return num_gts, assigned_gt_inds, assigned_labels + img_h, img_w = img_size + factor = gt_bboxes.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + + # 2. compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalize_gt_bboxes = gt_bboxes / factor + reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) + # regression iou cost, defaultly giou is used in official DETR. + bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor + iou_cost = self.iou_cost(bboxes, gt_bboxes) + + # center2d L1 cost + normalize_centers2d = centers2d / factor[:, 0:2] + centers2d_cost = self.centers2d_cost(pred_centers2d, normalize_centers2d) + + # weighted sum of above four costs + cost = cls_cost + reg_cost + iou_cost + centers2d_cost + cost = torch.nan_to_num(cost, nan=100.0, posinf=100.0, neginf=-100.0) + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return num_gts, assigned_gt_inds, assigned_labels
+ + +
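A compact sketch of steps 2-4 above on a toy cost matrix (not part of the module; the random costs stand in for the weighted cls/reg/iou/center terms):

    import torch
    from scipy.optimize import linear_sum_assignment

    num_query, num_gt = 6, 2
    cost = torch.rand(num_query, num_gt)             # step 2: weighted sum of the individual costs

    # step 3: Hungarian matching on CPU
    matched_rows, matched_cols = linear_sum_assignment(cost.detach().cpu())
    matched_rows = torch.from_numpy(matched_rows)
    matched_cols = torch.from_numpy(matched_cols)

    # step 4: everything starts as background (0); matched queries get gt index + 1
    assigned_gt_inds = torch.zeros(num_query, dtype=torch.long)
    assigned_gt_inds[matched_rows] = matched_cols + 1
    print(assigned_gt_inds)                          # exactly num_gt non-zero entries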
[docs]class HungarianAssigner3D(BaseAssigner): + def __init__(self, + cls_cost=dict(type='focal_loss', weight=1.0), + reg_cost=dict(type='l1', weight=1.0), + iou_cost=dict(type='iou', weight=1.0)): + cost_builder = MatchCost() + self.cls_cost = cost_builder.build(**cls_cost) + self.reg_cost = cost_builder.build(**reg_cost) + self.iou_cost = cost_builder.build(**iou_cost) + +
[docs] def assign(self, + bbox_pred, + cls_pred, + gt_bboxes, + gt_labels, + code_weights=None, + eps=1e-7): + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes,), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes,), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return num_gts, assigned_gt_inds, assigned_labels + # 2. compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalized_gt_bboxes = normalize_bbox(gt_bboxes) + if code_weights is not None: + bbox_pred = bbox_pred * code_weights + normalized_gt_bboxes = normalized_gt_bboxes * code_weights + + reg_cost = self.reg_cost(bbox_pred[:, :8], normalized_gt_bboxes[:, :8]) + + # weighted sum of above two costs + cost = cls_cost + reg_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + cost = torch.nan_to_num(cost, nan=100.0, posinf=100.0, neginf=-100.0) + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + + # # 5. align matched pred and gt + # aligned_tgt_boxes = torch.zeros_like(bbox_pred) + # assign_mask = assigned_gt_inds > 0 + # aligned_tgt_boxes[assign_mask] = normalized_gt_bboxes[assigned_gt_inds[assign_mask] - 1] + + # from projects.utils.vislib import draw_points_boxes_plt + # vis_boxes_pred = denormalize_bbox(bbox_pred[assign_mask], self.pc_range)[:, :-2] + # vis_boxes_pred[:, :2] /= code_weights[:2] + # vis_boxes_gt = denormalize_bbox(aligned_tgt_boxes[assign_mask], self.pc_range)[:, :-2] + # vis_boxes_gt[:, :2] /= code_weights[:2] + # draw_points_boxes_plt( + # pc_range=51.2, + # boxes_pred=vis_boxes_pred.detach().cpu().numpy(), + # bbox_pred_label=[str(i) for i in range(vis_boxes_pred.shape[0])], + # boxes_gt=vis_boxes_gt.detach().cpu().numpy(), + # bbox_gt_label=[str(i) for i in range(vis_boxes_gt.shape[0])], + # filename='/home/yuan/Downloads/tmp.png' + # ) + + return num_gts, assigned_gt_inds, assigned_labels
+ + +
[docs]class HeatmapAssigner(BaseAssigner): + +
[docs] @staticmethod + def draw_heatmap_gaussian(heatmap, center, radius, k=1): + """Get gaussian masked heatmap. + + Args: + heatmap (torch.Tensor): Heatmap to be masked. + center (torch.Tensor): Center coord of the heatmap. + radius (int): Radius of gaussian. + k (int, optional): Multiple of masked_gaussian. Defaults to 1. + + Returns: + torch.Tensor: Masked heatmap. + """ + diameter = 2 * radius + 1 + gaussian = gaussian_2d((diameter, diameter), sigma=diameter / 6) + + x, y = int(center[0]), int(center[1]) + + height, width = heatmap.shape[0:2] + + left, right = min(x, radius), min(width - x, radius + 1) + top, bottom = min(y, radius), min(height - y, radius + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_gaussian = torch.from_numpy( + gaussian[radius - top:radius + bottom, + radius - left:radius + right]).to(heatmap.device, + torch.float32) + if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: + torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap) + return heatmap
+ +
[docs] def assign(self, obj_centers2d, obj_bboxes, img_shape, stride): + img_h, img_w = img_shape[:2] + heatmap = torch.zeros(img_h // stride, img_w // stride, device=obj_centers2d.device) + if len(obj_centers2d) != 0: + l = obj_centers2d[..., 0:1] - obj_bboxes[..., 0:1] + t = obj_centers2d[..., 1:2] - obj_bboxes[..., 1:2] + r = obj_bboxes[..., 2:3] - obj_centers2d[..., 0:1] + b = obj_bboxes[..., 3:4] - obj_centers2d[..., 1:2] + bound = torch.cat([l, t, r, b], dim=-1) + radius = torch.ceil(torch.min(bound, dim=-1)[0] / 16) + radius = torch.clamp(radius, 1.0).cpu().numpy().tolist() + for center, r in zip(obj_centers2d, radius): + heatmap = self.draw_heatmap_gaussian(heatmap, center / 16, radius=int(r), k=1) + return heatmap
+ + +
[docs]class BoxAnchorAssigner(BaseAssigner, torch.nn.Module): + def __init__(self, + box_size, + dirs, + voxel_size, + lidar_range, + stride, + box_coder, + pos_threshold=0.6, + neg_threshold=0.45, + score_thrshold=0.25, + ): + super().__init__() + self.voxel_size = voxel_size + self.lidar_range = lidar_range + self.num_anchors = len(dirs) + self.stride = stride + self.pos_threshold = pos_threshold + self.neg_threshold = neg_threshold + self.score_thrshold = score_thrshold + self.box_coder = build_box_coder(**box_coder) + anchors, standup_anchors = self.get_anchor_template(box_size, dirs) + self.anchors = nn.Parameter(anchors, requires_grad=False) + self.standup_anchors = nn.Parameter(standup_anchors, requires_grad=False) + +
[docs] def get_anchor_template(self, box_size, dirs): + pix_x = self.voxel_size[0] * self.stride + pix_y = self.voxel_size[1] * self.stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y) + pix_y * 0.5 + xys = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + xys = xys.unsqueeze(2).repeat(1, 1, self.num_anchors, 1) + zs = - torch.ones_like(xys[..., :1]) + h, w = xys.shape[:2] + lwh = torch.tensor(box_size).reshape( + 1, 1, 1, -1).repeat(h, w, self.num_anchors, 1) + rs = torch.deg2rad(torch.tensor(dirs)).reshape( + 1, 1, -1, 1).repeat(h, w, 1, 1) + # (w, h, num_anchor, 7) --> (whn, 7) + anchors = torch.cat([xys, zs, lwh, rs], dim=-1) + self.anchor_shape = anchors.shape + anchors = anchors.view(-1, 7) + standup_anchors = boxes3d_to_standup_bboxes(anchors) + return anchors, standup_anchors
+ +
[docs] def assign(self, gt_boxes): + """ + + Parameters + ---------- + gt_boxes Tensor(N, 7): [x, y, z, l, w, h, r, ...] + + Returns + ------- + reg Tensor(H, W, num_anchors, code_size): box regression targets + """ + if len(gt_boxes) == 0: + labels = gt_boxes.new_full((self.standup_anchors.shape[0],), -1) + reg_tgt = gt_boxes.new_zeros((0, self.box_coder.code_size)) + dir_scores = gt_boxes.new_zeros((0, 4)) + # Todo dir_score, gt_boxes, correct shape + return labels, reg_tgt, dir_scores + + standup_boxes = boxes3d_to_standup_bboxes(gt_boxes[:, :7]) + ious = self.box_overlaps(self.standup_anchors, standup_boxes) + iou_max, max_inds = ious.max(dim=1) + top1_inds = torch.argmax(ious, dim=0) + + pos = iou_max > self.pos_threshold + pos_inds = torch.cat([top1_inds, torch.where(pos)[0]]).unique() + neg = iou_max < self.neg_threshold + neg[pos_inds] = False + + labels = gt_boxes.new_full((ious.shape[0],), -1) + labels[neg] = 0 + labels[pos_inds] = 1 + + aligned_gt_boxes = gt_boxes[max_inds[pos_inds], :7] + aligned_anchors = self.anchors[pos_inds] + reg_tgt, dir_score = self.box_coder.encode(aligned_anchors, aligned_gt_boxes) + + return labels, reg_tgt, dir_score
+ +
[docs] def box_overlaps(self, boxes1, boxes2): + areas1 = (boxes1[:, 2] - boxes1[:, 0] + 1) * \ + (boxes1[:, 3] - boxes1[:, 1] + 1) + areas2 = (boxes2[:, 2] - boxes2[:, 0] + 1) * \ + (boxes2[:, 3] - boxes2[:, 1] + 1) + + boxes1_mat = boxes1.unsqueeze(1).repeat(1, boxes2.shape[0], 1) + boxes2_mat = boxes2.unsqueeze(0).repeat(boxes1.shape[0], 1, 1) + x_extend = torch.minimum(boxes1_mat[..., 2], boxes2_mat[..., 2]) - \ + torch.maximum(boxes1_mat[..., 0], boxes2_mat[..., 0]) + 1 + y_extend = torch.minimum(boxes1_mat[..., 3], boxes2_mat[..., 3]) - \ + torch.maximum(boxes1_mat[..., 1], boxes2_mat[..., 1]) + 1 + + overlaps = torch.zeros_like(boxes1_mat[..., 0]) + + pos = torch.logical_and(x_extend > 0, y_extend > 0) + intersection = x_extend[pos] * y_extend[pos] + union = (areas1.unsqueeze(1) + areas2.unsqueeze(0))[pos] - intersection + overlaps[pos] = intersection / union + + return overlaps
+ +
[docs] def get_predictions(self, preds): + # roi = {'box': [], 'scr': [], 'lbl': [], 'idx': []} + roi = {} + B = len(preds['cls']) + pred_cls = preds['cls'].sigmoid().permute(0, 3, 2, 1).reshape(B, -1) + pred_reg = preds['reg'].permute(0, 3, 2, 1).reshape(B, -1, 7) + indices = torch.stack([torch.ones_like(pred_cls[0]) * i for i in range(B)], dim=0) + + anchors = self.anchors.unsqueeze(0).repeat(B, 1, 1) + pos = pred_cls > self.score_thrshold + + boxes_dec = self.box_coder.decode(anchors, pred_reg) + # remove abnormal boxes + mask = (boxes_dec[..., 3:6] > 0.1) & (boxes_dec[..., 3:6] < 10) + pos = torch.logical_and(pos, mask.all(dim=-1)) + + pred_cls = pred_cls[pos] + pred_box = boxes_dec[pos] + roi['scr'] = pred_cls + roi['box'] = pred_box + # TODO currently only support class car + roi['lbl'] = torch.zeros_like(pred_cls) + roi['idx'] = indices[pos] + + return roi
+ + +
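box_overlaps above computes IoU between axis-aligned "standup" boxes (x1, y1, x2, y2) with a +1 extent, as in pixel-style IoU. The same arithmetic on a single pair of boxes (toy values):

    import torch

    b1 = torch.tensor([0., 0., 4., 4.])              # anchor standup box
    b2 = torch.tensor([2., 2., 6., 6.])              # gt standup box

    x_extend = torch.minimum(b1[2], b2[2]) - torch.maximum(b1[0], b2[0]) + 1   # 3
    y_extend = torch.minimum(b1[3], b2[3]) - torch.maximum(b1[1], b2[1]) + 1   # 3
    inter = x_extend * y_extend                                                # 9
    area1 = (b1[2] - b1[0] + 1) * (b1[3] - b1[1] + 1)                          # 25
    area2 = (b2[2] - b2[0] + 1) * (b2[3] - b2[1] + 1)                          # 25
    print(inter / (area1 + area2 - inter))                                     # 9 / 41 ~= 0.22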
[docs]class BoxSparseAnchorAssigner(BaseAssigner, torch.nn.Module): + def __init__(self, + box_size, + dirs, + voxel_size, + lidar_range, + stride, + box_coder, + me_coor=True, + pos_threshold=0.6, + neg_threshold=0.45, + score_thrshold=0.25, + ): + super().__init__() + self.voxel_size = voxel_size + self.lidar_range = lidar_range + self.num_anchors = len(dirs) + self.stride = stride + self.pos_threshold = pos_threshold + self.neg_threshold = neg_threshold + self.score_thrshold = score_thrshold + self.box_coder = build_box_coder(**box_coder) + anchors, standup_anchors = self.get_anchor_template(box_size, dirs) + self.anchors = nn.Parameter(anchors, requires_grad=False) + self.standup_anchors = nn.Parameter(standup_anchors, requires_grad=False) + if me_coor: + lr = lidar_range + res_x, res_y = stride * voxel_size[0], stride * voxel_size[1] + self.size_x = round((lr[3] - lr[0]) / res_x) + self.size_y = round((lr[4] - lr[1]) / res_y) + self.offset_sz_x = round(lr[0] / res_x) + self.offset_sz_y = round(lr[1] / res_y) + self.coor_to_inds = self.me_coor_to_grid_indices + else: + raise NotImplementedError + +
[docs] def me_coor_to_grid_indices(self, coor): + inds = coor / self.stride + inds[:, 0] -= self.offset_sz_x + inds[:, 1] -= self.offset_sz_y + in_range_mask = (inds >= 0).all(dim=-1) & (inds[:, 0] < self.size_x) & (inds[:, 1] < self.size_y) + return inds[in_range_mask].long(), in_range_mask
+ +
[docs] def get_anchor_template(self, box_size, dirs): + pix_x = self.voxel_size[0] * self.stride + pix_y = self.voxel_size[1] * self.stride + x = torch.arange(self.lidar_range[0], self.lidar_range[3], pix_x) + pix_x * 0.5 + y = torch.arange(self.lidar_range[1], self.lidar_range[4], pix_y) + pix_y * 0.5 + xys = torch.stack(torch.meshgrid(x, y, indexing='ij'), dim=-1) + xys = xys.unsqueeze(2).repeat(1, 1, self.num_anchors, 1) + zs = - torch.ones_like(xys[..., :1]) + h, w = xys.shape[:2] + lwh = torch.tensor(box_size).reshape( + 1, 1, 1, -1).repeat(h, w, self.num_anchors, 1) + rs = torch.deg2rad(torch.tensor(dirs)).reshape( + 1, 1, -1, 1).repeat(h, w, 1, 1) + # (w, h, num_anchor, 7) --> (whn, 7) + anchors = torch.cat([xys, zs, lwh, rs], dim=-1) + standup_anchors = boxes3d_to_standup_bboxes( + anchors.view(-1, 7)).reshape(h, w, self.num_anchors, 4) + return anchors, standup_anchors
+ +
[docs] def assign(self, coors: torch.Tensor, gt_boxes: torch.Tensor): + """ + + :param coors: (N, 2) 2D mink coor [x, y] + :param gt_boxes: (M, 7) [x, y, z, l, w, h, r] + :return: + - labels Tensor(N, num_anchors): box regression targets + - reg_tgt Tensor(N, num_anchors, code_size): box regression targets + - ir_score Tensor(N, num_anchors, 4) or None: direction score target + """ + gt_boxes = gt_boxes[:, :7] + if len(gt_boxes) == 0: + labels = gt_boxes.new_full((coors.shape[0] * self.num_anchors,), -1) + reg_tgt = gt_boxes.new_zeros((0, self.box_coder.code_size)) + dir_scores = gt_boxes.new_zeros((0, 4)) + # Todo dir_score, gt_boxes, correct shape + return labels, reg_tgt, dir_scores + inds, in_range_mask = self.coor_to_inds(coors) + gt_standup_boxes = boxes3d_to_standup_bboxes(gt_boxes) + standup_anchors = self.standup_anchors[inds[:, 0], inds[:, 1]].view(-1, 4) + ious = self.box_overlaps(standup_anchors, gt_standup_boxes) + iou_max, max_inds = ious.max(dim=1) + top1_inds = torch.argmax(ious, dim=0) + + pos = iou_max > self.pos_threshold + pos_inds = torch.cat([top1_inds, torch.where(pos)[0]]).unique() + neg = iou_max < self.neg_threshold + neg[pos_inds] = False + + labels = gt_boxes.new_full((ious.shape[0],), -1) + labels[neg] = 0 + labels[pos_inds] = 1 + + aligned_gt_boxes = gt_boxes[max_inds[pos_inds]] + aligned_anchors = self.anchors[inds[:, 0], inds[:, 1]].view(-1, self.box_coder.code_size)[pos_inds] + reg_tgt, dir_score = self.box_coder.encode(aligned_anchors, aligned_gt_boxes) + + labels_final = gt_boxes.new_full((in_range_mask.shape[0], self.num_anchors), -1) + labels_final[in_range_mask] = labels.view(-1, self.num_anchors) + return labels_final.view(-1), reg_tgt, dir_score
+ +
[docs] def box_overlaps(self, boxes1, boxes2): + areas1 = (boxes1[:, 2] - boxes1[:, 0] + 1) * \ + (boxes1[:, 3] - boxes1[:, 1] + 1) + areas2 = (boxes2[:, 2] - boxes2[:, 0] + 1) * \ + (boxes2[:, 3] - boxes2[:, 1] + 1) + + boxes1_mat = boxes1.unsqueeze(1).repeat(1, boxes2.shape[0], 1) + boxes2_mat = boxes2.unsqueeze(0).repeat(boxes1.shape[0], 1, 1) + x_extend = torch.minimum(boxes1_mat[..., 2], boxes2_mat[..., 2]) - \ + torch.maximum(boxes1_mat[..., 0], boxes2_mat[..., 0]) + 1 + y_extend = torch.minimum(boxes1_mat[..., 3], boxes2_mat[..., 3]) - \ + torch.maximum(boxes1_mat[..., 1], boxes2_mat[..., 1]) + 1 + + overlaps = torch.zeros_like(boxes1_mat[..., 0]) + + pos = torch.logical_and(x_extend > 0, y_extend > 0) + intersection = x_extend[pos] * y_extend[pos] + union = (areas1.unsqueeze(1) + areas2.unsqueeze(0))[pos] - intersection + overlaps[pos] = intersection / union + + return overlaps
+ +
[docs] def get_predictions(self, coors, preds): + """ + + :param coors: Tensor(N, 3) mink coor [batch_idx, x, y] + :param preds: + :return: + """ + # roi = {'box': [], 'scr': [], 'lbl': [], 'idx': []} + roi = {} + inds, in_range_mask = self.coor_to_inds(coors[:, 1:]) + pred_cls = preds['cls'][in_range_mask].sigmoid().reshape(-1) + pred_reg = preds['reg'][in_range_mask].reshape(-1, 7) + indices = coors[:, 0:1][in_range_mask].repeat(1, self.num_anchors).reshape(-1) + + anchors = self.anchors[inds[:, 0], inds[:, 1]].view(-1, self.box_coder.code_size) + pos = pred_cls > self.score_thrshold + anchors = anchors[pos] + pred_cls = pred_cls[pos] + pred_reg = pred_reg[pos] + indices = indices[pos] + + boxes_dec = self.box_coder.decode(anchors, pred_reg) + + # remove abnormal boxes + mask = (boxes_dec[..., 3:6] > 0.1) & (boxes_dec[..., 3:6] < 10) + mask = mask.all(dim=-1) + pred_cls = pred_cls[mask] + pred_box = boxes_dec[mask] + indices = indices[mask] + + roi['scr'] = pred_cls + roi['box'] = pred_box + # TODO currently only support class car + roi['lbl'] = torch.zeros_like(pred_cls) + roi['idx'] = indices + + return roi
+ + +
[docs]class BoxCenterAssigner(BaseAssigner, torch.nn.Module): + def __init__(self, + voxel_size, + lidar_range, + stride, + detection_benchmark, + class_names_each_head, + center_threshold, + box_coder, + activation='relu', + edl=True, + ): + super().__init__() + self.voxel_size = voxel_size + self.lidar_range = lidar_range + self.meter_per_pixel = (voxel_size[0] * stride, voxel_size[1] * stride) + self.csb = csb.get(detection_benchmark) + self.class_names_each_head = class_names_each_head + self.activation = activation + self.center_threshold = center_threshold + self.box_coder = build_box_coder(**box_coder) + self.edl = edl + +
[docs] def pts_to_indices(self, bev_pts: torch.Tensor): + """ + :param bev_pts: (N, 3+), BEV points, 1st column should be batch index. + :return: + """ + x = (bev_pts[:, 1] - self.meter_per_pixel[0] * 0.5 - self.lidar_range[0]) \ + / self.meter_per_pixel[0] + y = (bev_pts[:, 2] - self.meter_per_pixel[1] * 0.5 - self.lidar_range[1]) \ + / self.meter_per_pixel[1] + indices = torch.stack([bev_pts[:, 0].long(), x.long(), y.long()], dim=1) + return indices
+ +
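A worked example of pts_to_indices (all values are illustrative assumptions): with a voxel size of 0.4 m and stride 4 the map resolution is 1.6 m per pixel, so a point at x = 3.2 m, y = -38.4 m on a range starting at (-140.8, -40) lands in cell (89, 0):

    import torch

    voxel_size, stride = (0.4, 0.4), 4
    meter_per_pixel = (voxel_size[0] * stride, voxel_size[1] * stride)   # (1.6, 1.6)
    lidar_range = [-140.8, -40.0, -3.0, 140.8, 40.0, 1.0]

    bev_pts = torch.tensor([[0.0, 3.2, -38.4]])      # (batch_idx, x, y) in metres
    x = (bev_pts[:, 1] - meter_per_pixel[0] * 0.5 - lidar_range[0]) / meter_per_pixel[0]
    y = (bev_pts[:, 2] - meter_per_pixel[1] * 0.5 - lidar_range[1]) / meter_per_pixel[1]
    print(x.long(), y.long())                        # tensor([89]) tensor([0])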
[docs] @torch.no_grad() + def assign(self, centers, gt_boxes, gt_labels, gt_preds=None, **kwargs): + box_names = [self.csb[c.item()][0] for c in gt_labels] + + # cal regression targets + reg_tgt = {'box': [], 'dir': [], 'scr': [], 'idx': [], 'valid_mask': [], 'vel': [], 'pred': []} + for h, cur_cls_names in enumerate(self.class_names_each_head): + center_indices = self.pts_to_indices(centers).T + box_mask = [n in cur_cls_names for n in box_names] + cur_boxes = gt_boxes[box_mask] + res = self.box_coder.encode(centers, cur_boxes, self.meter_per_pixel, gt_preds) + reg_box, reg_dir, dir_score, valid = res[:4] + + reg_tgt['idx'].append(center_indices[:, valid]) + reg_tgt['valid_mask'].append(valid) + reg_tgt['box'].append(reg_box) + reg_tgt['dir'].append(reg_dir) + reg_tgt['scr'].append(dir_score) + if getattr(self.box_coder, 'with_velo', False): + reg_tgt['vel'].append(res[4]) + if getattr(self.box_coder, 'with_pred', False): + reg_tgt['pred'].append(res[5]) + return reg_tgt
+ +
[docs] def get_predictions(self, preds): + """Decode the center and regression maps into BBoxes. + + :param preds: + - cls: list[Tensor], each tensor is the result from a cls head with shape (B or N, Ncls, ...). + - reg: + * box: list[Tensor], one tensor per reg head with shape (B or N, 6, ...). + * dir: list[Tensor], one tensor per reg head with shape (B or N, 8, ...). + * scr: list[Tensor], one tensor per reg head with shape (B or N, 4, ...). + :return: roi: + * box: list[Tensor], one tensor per head with shape (N, 8). + * scr: list[Tensor], one tensor per head with shape (N,). + * lbl: list[Tensor], one tensor per head with shape (N,). + * idx: list[Tensor], one tensor per head with shape (3, N), center map indices of the boxes. + """ + roi = {'box': [], 'scr': [], 'lbl': [], 'idx': []} + lbl_cnt = torch.cumsum(torch.Tensor([0] + [m.shape[1] for m in preds['cls']]), dim=0) + confs = [] + for h, center_cls in enumerate(preds['cls']): + if center_cls.ndim == 4: + conf, _ = pred_to_conf_unc(center_cls.permute(0, 2, 3, 1), self.activation) + center_mask = conf[..., 1:].max(dim=-1).values > self.center_threshold # b, h, w + center_indices = torch.stack(torch.where(center_mask), dim=0) + centers = self.indices_to_pts(center_indices[1:]).T + cur_centers = torch.cat([center_indices[0].unsqueeze(-1), centers], dim=-1) + cur_reg = {k: preds['reg'][k][h].permute(0, 2, 3, 1)[center_mask] + for k in ['box', 'dir', 'scr']} + else: + conf, _ = pred_to_conf_unc(center_cls, self.activation, self.edl) + centers = preds['ctr'] + if self.edl: + center_mask = conf[..., 1:].max(dim=-1).values > self.center_threshold # b, h, w + else: + center_mask = conf.max(dim=-1).values > self.center_threshold # b, h, w + + if center_cls.ndim == 3: + indices = torch.stack([torch.zeros_like(centers[i, :, :1]) + i for i in range(centers.shape[0])], dim=0) + centers = torch.cat([indices, centers], dim=-1) + + cur_centers = centers[center_mask] + center_indices = self.pts_to_indices(cur_centers) + cur_reg = {k: preds['reg'][k][h][center_mask] + for k in preds['reg'].keys()} + + # from cosense3d.utils import vislib + # mask = cur_centers[:, 0].int() == 0 + # confs = conf[center_mask][mask, 1].detach().cpu().numpy() + # points = cur_centers[mask, 1:].detach().cpu().numpy() + # fig = vislib.plt.figure(figsize=(6, 6)) + # vislib.plt.scatter(points[:, 0], points[:, 1], c=confs, s=1) + # vislib.plt.show() + # vislib.plt.close() + + cur_box = self.box_coder.decode(cur_centers, cur_reg) + cur_scr, cur_lbl = conf[center_mask].max(dim=-1) + cur_lbl = cur_lbl + lbl_cnt[h] + roi['box'].append(cur_box) + roi['scr'].append(cur_scr) + roi['lbl'].append(cur_lbl) + roi['idx'].append(center_indices) + confs.append(conf) + + # from cosense3d.utils.vislib import draw_points_boxes_plt + # points = centers[:, 1:].detach().cpu().numpy() + # boxes = cur_box[:, 1:].detach().cpu().numpy() + # draw_points_boxes_plt( + # pc_range=self.lidar_range, + # boxes_pred=boxes, + # points=points, + # filename="/home/yuan/Pictures/tmp.png" + # ) + + # merge detections from all heads + roi['box'] = torch.cat(roi['box'], dim=0) + roi['scr'] = torch.cat(roi['scr'], dim=0) + roi['lbl'] = torch.cat(roi['lbl'], dim=0) + roi['idx'] = torch.cat(roi['idx'], dim=0) + confs = torch.stack(confs, dim=1) + return roi, confs
+ + +
[docs]class BEVCenternessAssigner(BaseAssigner): + """ + Assign center points in the BEV map to positive if the point is within 'min_radius' of any gt box center. + """ + def __init__(self, + n_cls, + min_radius=1.0, + pos_neg_ratio=5, + mining_thr=0, + max_mining_ratio=3, + mining_start_epoch=5, + merge_all_classes=False, + use_gaussian=False, + sigma=1.0 + ): + super().__init__() + self.n_cls = n_cls + self.min_radius = min_radius + self.pos_neg_ratio = pos_neg_ratio + self.sample_mining_thr = mining_thr + self.max_mining_ratio = max_mining_ratio + self.mining_start_epoch = mining_start_epoch + self.merge_all_classes = merge_all_classes + self.use_gaussian = use_gaussian + self.sigma = sigma + +
[docs] def get_labels_single_head(self, centers, gt_boxes, pred_scores=None, **kwargs): + diff = centers[:, :2].unsqueeze(1) - gt_boxes[:, :2].unsqueeze(0) + dists = torch.norm(diff, dim=-1) + dists_min, dists_min_arg = dists.min(dim=1) + if self.use_gaussian: + labels = torch.exp(-0.5 * torch.sqrt(dists_min) / self.sigma ** 2) + # sigmas = gt_boxes[:, 3:5][dists_min_arg] / 4 * self.sigma + # labels = weighted_mahalanobis_dists( + # sigmas ** 2, diff[torch.arange(len(diff)), dists_min_arg].abs().unsqueeze(1)) + labels[labels < 1e-4] = 0 + else: + labels = (dists_min < self.min_radius).float() + + if self.pos_neg_ratio: + labels = pos_neg_sampling(labels, self.pos_neg_ratio) + if self.sample_mining_thr > 0 and kwargs.get('epoch', 0) > self.mining_start_epoch: + assert pred_scores is not None + labels = sample_mining(pred_scores, labels, + dists_min, + self.sample_mining_thr, + self.max_mining_ratio) + + return labels
+ +
[docs] @torch.no_grad() + def assign(self, centers, gt_boxes, gt_labels, pred_scores=None, **kwargs): + if len(gt_boxes) == 0: + labels = torch.zeros_like(centers[:, :1]) + return labels + if self.merge_all_classes: + labels = self.get_labels_single_head(centers, gt_boxes).unsqueeze(-1) + else: + labels = [] + for n in range(self.n_cls): + cur_boxes = gt_boxes[gt_labels == n] + cur_scores = None if pred_scores is None else pred_scores[n] + labels.append(self.get_labels_single_head(centers, cur_boxes, cur_scores, **kwargs)) + labels = torch.stack(labels, dim=-1) + + # import matplotlib.pyplot as plt + # + # from cosense3d.utils import vislib + # pc_range = [-100, -41.6, -3.0, 100, 41.6, 3.0] + # label = labels.detach().cpu().numpy() + # label = label[:, 0] + # points = centers.detach().cpu().numpy() + # boxes = gt_boxes.cpu().numpy() + # ax = vislib.draw_points_boxes_plt( + # pc_range=pc_range, + # boxes_gt=boxes, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], cmap='jet', c=label, s=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + + return labels
+ + +
[docs]class BEVBoxAssigner(BaseAssigner): + """ + Assign center points in the BEV map to positive if the point lies inside any gt box. + """ + def __init__(self, + n_cls, + pos_neg_ratio=5, + mining_thr=0, + max_mining_ratio=3, + mining_start_epoch=5, + merge_all_classes=False, + ): + super().__init__() + self.n_cls = n_cls + self.pos_neg_ratio = pos_neg_ratio + self.sample_mining_thr = mining_thr + self.max_mining_ratio = max_mining_ratio + self.mining_start_epoch = mining_start_epoch + self.merge_all_classes = merge_all_classes + +
[docs] def get_labels_single_head(self, centers, gt_boxes, pred_scores=None, **kwargs): + boxes = pad_l(gt_boxes[:, :7]).clone() + boxes[:, 3] = 0 + pts = pad_r(pad_l(centers[:, :2])) + + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=1 + ) + labels = (box_idx_of_pts >= 0).float() + if self.pos_neg_ratio: + labels = pos_neg_sampling(labels, self.pos_neg_ratio) + + return labels
+ +
[docs] @torch.no_grad() + def assign(self, centers, gt_boxes, gt_labels, pred_scores=None, **kwargs): + if len(gt_boxes) == 0: + labels = torch.zeros_like(centers[:, :1]) + return labels + if self.merge_all_classes: + labels = self.get_labels_single_head(centers, gt_boxes).unsqueeze(-1) + else: + labels = [] + for n in range(self.n_cls): + cur_boxes = gt_boxes[gt_labels == n] + cur_scores = None if pred_scores is None else pred_scores[n] + labels.append(self.get_labels_single_head(centers, cur_boxes, cur_scores, **kwargs)) + labels = torch.stack(labels, dim=-1) + + # import matplotlib.pyplot as plt + # + # from cosense3d.utils import vislib + # pc_range = [-100, -41.6, -3.0, 100, 41.6, 3.0] + # label = labels.detach().cpu().numpy() + # label = label[:, 0] + # points = centers.detach().cpu().numpy() + # boxes = gt_boxes.cpu().numpy() + # ax = vislib.draw_points_boxes_plt( + # pc_range=pc_range, + # boxes_gt=boxes, + # return_ax=True + # ) + # ax.scatter(points[:, 0], points[:, 1], cmap='jet', c=label, s=1) + # plt.savefig("/home/yuan/Downloads/tmp.png") + # plt.close() + + return labels
+ + +
[docs]class BEVPointAssigner(BaseAssigner): + """ + Assign target points to BEV boxes and down-sample the target points with a buffer-based method. + """ + def __init__(self, + down_sample=True, + sample_mining_thr=0., + max_mining_ratio=3, + annealing_step=None, + topk_sampling=False, + annealing_sampling=False, + ): + super().__init__() + self.down_sample = down_sample + self.sample_mining_thr = sample_mining_thr + self.max_mining_ratio = max_mining_ratio + self.annealing_step = annealing_step + self.topk_sampling = topk_sampling + self.annealing_sampling = annealing_sampling + +
[docs] def downsample_tgt_pts(self, tgt_label, max_sam): + selected = torch.ones_like(tgt_label.bool()) + pos = tgt_label == 1 + if pos.sum() > max_sam: + mask = torch.rand_like(tgt_label[pos].float()) < max_sam / pos.sum() + selected[pos] = mask + + buffer = tgt_label == 0 + if buffer.sum() > max_sam: + mask = torch.rand_like(tgt_label[buffer].float()) < max_sam / buffer.sum() + selected[buffer] = mask + + neg = tgt_label == -1 + if neg.sum() > max_sam: + mask = torch.rand_like(tgt_label[neg].float()) < max_sam / neg.sum() + selected[neg] = mask + labels = - torch.ones_like(mask).long() + labels[mask] = 0 + tgt_label[neg] = labels + return selected, tgt_label
+ +
[docs] def assign(self, tgt_pts, gt_boxes, B, conf=None, down_sample=True, **kwargs): + boxes = gt_boxes.clone() + boxes[:, 3] = 0 + pts = pad_r(tgt_pts) + + if not down_sample or not self.down_sample: + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + tgt_label = torch.zeros_like(box_idx_of_pts) + tgt_label[box_idx_of_pts >= 0] = 1 + return tgt_pts, tgt_label, None + + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + boxes[:, 4:6] *= 2 + _, enlarged_box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + + pos_mask = box_idx_of_pts >= 0 + buffer_mask = (box_idx_of_pts < 0) & (enlarged_box_idx_of_pts >= 0) + tgt_label = - torch.ones_like(box_idx_of_pts) + tgt_label[pos_mask] = 1 + tgt_label[buffer_mask] = 0 + n_sam = len(boxes) * 50 + + # add points that have high pred scores + if self.sample_mining_thr > 0: + scores = conf[..., 1:].sum(dim=-1) + tgt_label = sample_mining(scores, tgt_label, self.sample_mining_thr, + max_num_sample=n_sam) + + mask, tgt_label = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + + # get final tgt + tgt_pts = tgt_pts[mask] + tgt_label = tgt_label[mask] + + return tgt_pts, tgt_label, mask
+ +
[docs] def get_predictions(self, x, edl=True, activation='none'): + conf, unc = pred_to_conf_unc(x, activation, edl) + return conf, unc
+ + +
[docs]class BEVSemsegAssigner(BaseAssigner): + def __init__(self, + data_info, + stride, + tgt_range=None, + down_sample=False, + annealing_step=None, + ): + super().__init__() + update_me_essentials(self, data_info, stride) + self.tgt_range = tgt_range + self.downsample = down_sample + self.annealing_step = annealing_step + +
[docs] def pts_to_inds(self, pts): + """Calculate indices of samples in the bev map""" + ixy = metric2indices(pts[:, :3], self.res).long() + ixy[:, 1] -= self.offset_sz_x + ixy[:, 2] -= self.offset_sz_y + maskx = torch.logical_and(ixy[:, 1] >= 0, ixy[:, 1] < self.size_x) + masky = torch.logical_and(ixy[:, 2] >= 0, ixy[:, 2] < self.size_y) + mask = torch.logical_and(maskx, masky) + indices = ixy[mask] + return indices.T, mask
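pts_to_inds converts metric BEV coordinates into integer map indices by a floor division with the grid resolution followed by an offset shift, the same scheme used explicitly in ContiBEVAssigner.get_predictions further below. A self-contained stand-in (the real offsets come from update_me_essentials; here they are assumed to be the grid origin expressed in cells):

import torch

def metric_to_map_inds(xy, res, x_min, y_min, size_x, size_y):
    # Convert metric BEV points (N, 2) to map indices plus a validity mask.
    ix = torch.floor(xy[:, 0] / res).long() - int(round(x_min / res))
    iy = torch.floor(xy[:, 1] / res).long() - int(round(y_min / res))
    mask = (ix >= 0) & (ix < size_x) & (iy >= 0) & (iy < size_y)
    return torch.stack([ix, iy], dim=0), mask

xy = torch.tensor([[-49.9, 0.2], [10.0, 3.7], [80.0, 0.0]])
inds, mask = metric_to_map_inds(xy, res=0.5, x_min=-50.0, y_min=-50.0, size_x=200, size_y=200)
print(inds[:, mask], mask)    # the last point falls outside the 100 m x 100 m grid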
+ +
[docs] def get_obs_mask(self, inds, B): + obs_mask = torch.zeros((B, self.size_x, self.size_y), device=inds.device) + inds = inds.clone().long().T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + obs_mask[inds[0], inds[1], inds[2]] = 1 + return obs_mask.bool()
+ +
[docs] @staticmethod + def down_sample_pred_pts(ctr_pts): + keep = torch.rand_like(ctr_pts['ctr'][:, 0]) > 0.5 + for k in ctr_pts.keys(): + ctr_pts[k] = ctr_pts[k][keep] + + return ctr_pts
+ +
[docs] @torch.no_grad() + def downsample_tgt_pts(self, tgt_label, max_sam): + selected = torch.ones_like(tgt_label.bool()) + pos = tgt_label == 1 + if pos.sum() > max_sam: + mask = torch.rand_like(tgt_label[pos].float()) < max_sam / pos.sum() + selected[pos] = mask + + neg = tgt_label == 0 + if neg.sum() > max_sam: + mask = torch.rand_like(tgt_label[neg].float()) < max_sam / neg.sum() + selected[neg] = mask + return selected
+ +
[docs] def filter_range(self, ctr_pts, samples): + mask = (ctr_pts['ctr'].abs() < self.tgt_range).all(1) + for k in ctr_pts.keys(): + ctr_pts[k] = ctr_pts[k][mask] + + mask = (samples[:, 1:3].abs() < self.tgt_range).all(1) + samples = samples[mask] + return ctr_pts, samples
+ +
[docs] def assign(self, ctr_pts, samples, B, gt_boxes=None, **kwargs): + raise NotImplementedError
+ +
[docs] def get_predictions(self, data_dict, B, edl=True, activation='none', **kwargs): + raise NotImplementedError
+ + +
[docs]class ContiBEVAssigner(BEVSemsegAssigner): + def __init__(self, + distr_r=2.0, + var0=0.1, + **kwargs): + super().__init__(**kwargs) + self.distr_r = distr_r + self.var0 = var0 + steps = int(self.distr_r / self.res[0]) * 2 + 1 + offset = meshgrid(-self.distr_r, self.distr_r, 2, + n_steps=steps).cuda().view(-1, 2) + self.nbrs = offset[torch.norm(offset, dim=1) < 2].view(1, -1, 2) + +
[docs] def sample_dynamic_tgt_pts(self, ctr_pts: dict, gt_boxes: torch.Tensor, B: int) \ + -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Given the input coordinates of the center points and the ground truth BBoxes, + sample the BEV target points for BEV semantic segmentation following the buffer-based sampling as illustrated + in the following image: + + .. image:: _static/imgs/buffer_based_sampling.png + :width: 400 + :alt: Buffer-based sampling of the BEV target + + :param ctr_pts: center points of bev maps, including indices, metric centers and regression results. + :param gt_boxes: ground truth BBoxes. + :param B: batch size. + :return: + - tgt_pts: sampled target points. + - tgt_lbl: labels of the sampled target points. + - inds: map indices of the sampled target points. + """ + tgt_pts = ctr_pts['ctr'].clone() + tgt_pts[:, :2] = tgt_pts[:, :2] + torch.randn_like(tgt_pts[:, :2]) * 3 + tgt_pts = torch.cat([ctr_pts['coor'][:, :1], tgt_pts], dim=-1) + obs_mask = self.get_obs_mask(ctr_pts['coor'], B) + inds, mask = self.pts_to_inds(tgt_pts) + tgt_pts = tgt_pts[mask] + mask = obs_mask[inds[0], inds[1], inds[2]] + tgt_pts = tgt_pts[mask] + inds = inds.T[mask] + + if len(gt_boxes) == 0 or len(tgt_pts) == 0: + tgt_label = torch.zeros_like(tgt_pts[:, 0]).int() + else: + boxes = gt_boxes.clone() + boxes[:, 3] = 0 + pts = pad_r(tgt_pts) + _, box_idx_of_pts = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + boxes[:, 4:6] *= 4 + _, box_idx_of_pts2 = points_in_boxes_gpu( + pts, boxes, batch_size=B + ) + tgt_label = - (box_idx_of_pts2 >= 0).int() + tgt_label[box_idx_of_pts >= 0] = 1 + + n_sam = len(gt_boxes) * 50 + mask = self.downsample_tgt_pts(tgt_label, max_sam=n_sam) + tgt_label = tgt_label > 0 + return tgt_pts[mask], tgt_label[mask], inds[mask].T
+ +
[docs] def assign(self, ctr_pts, samples, B, gt_boxes=None, **kwargs) -> dict: + """ + Assign target. + + :param ctr_pts: center points of bev maps, including indices, metric centers and regression results. + :param samples: BEV target point samples. + :param B: batch size. + :param gt_boxes: ground truth BBoxes. + :param kwargs: keyword arguments. + :return: target_dict that contains the static or/and dynamic target points and their corresponding labels. + """ + lr = self.lidar_range + if self.tgt_range is not None: + ctr_pts, samples = self.filter_range(ctr_pts, samples) + lr = [-self.tgt_range, -self.tgt_range, -3, self.tgt_range, self.tgt_range, 1] + if self.downsample: + ctr_pts = self.down_sample_pred_pts(ctr_pts) + + tgt = {} + if 'reg_static' in ctr_pts: + tgt['evi_static'] = draw_sample_evis( + ctr_pts, samples, 'static', self.res[0], self.distr_r, lr, B, self.var0) + tgt['lbl_static'] = samples[:, -1] + if 'reg_dynamic' in ctr_pts: + assert gt_boxes is not None + tgt_pts, tgt_label, inds = self.sample_dynamic_tgt_pts(ctr_pts, gt_boxes, B) + tgt['evi_dynamic'] = draw_sample_evis( + ctr_pts, tgt_pts, 'dynamic', self.res[0], self.distr_r, lr, B, self.var0) + tgt['lbl_dynamic'] = tgt_label + + # + # import matplotlib.pyplot as plt + # from cosense3d.modules.utils.edl_utils import logit_to_edl + # fig = plt.figure(figsize=(10, 10)) + # coor = ctr_pts['coor'] + # ctr = ctr_pts['ctr'] + # sams = samples[samples[:, 0]==0][:, 1:].cpu().numpy() + # mask = coor[:, 0] == 0 + # xy = ctr[mask].cpu().numpy() + # conf, unc = logit_to_edl(ctr_pts['reg_static'][mask, :2]) + # colors = conf[:, 1].detach().cpu().numpy() + # plt.scatter(xy[:, 0], xy[:, 1], cmap='jet', c=colors, edgecolors=None, marker='.', s=2, vmin=0, vmax=1) + # plt.show() + # plt.close() + # + # fig = plt.figure(figsize=(10, 10)) + # pos = sams[:, -1] == 1 + # plt.scatter(sams[:, 0], sams[:, 1], c='k', facecolors=None, marker='o', s=5) + # plt.scatter(sams[pos, 0], sams[pos, 1], c='r', facecolors=None, marker='o', s=5) + # plt.show() + # plt.close() + # + # fig = plt.figure(figsize=(10, 10)) + # mask = tgt_pts[:, 0] == 0 + # sams = tgt_pts[mask][:, 1:].cpu().numpy() + # pos = tgt_label[mask].cpu().numpy() == 1 + # mask = coor[:, 0] == 0 + # xy = ctr[mask].cpu().numpy() + # conf, unc = logit_to_edl(ctr_pts['reg_dynamic'][mask, :2]) + # colors = conf[:, 1].detach().cpu().numpy() + # plt.scatter(xy[:, 0], xy[:, 1], cmap='jet', c=colors, edgecolors=None, marker='.', s=2, vmin=0, vmax=1) + # plt.show() + # plt.close() + # + # fig = plt.figure(figsize=(10, 10)) + # plt.scatter(sams[:, 0], sams[:, 1], c='k', facecolors=None, marker='o', s=5) + # plt.scatter(sams[pos, 0], sams[pos, 1], c='r', facecolors=None, marker='o', s=5) + # plt.show() + # plt.close() + + return tgt
+ +
[docs] def get_predictions(self, ctr_pts, B, tag, **kwargs): + """ + Given center points and its corresponding regressions, generate the dense bev semseg maps + and its uncertainty and observation mask. + + :param ctr_pts: center points of bev maps, including indices, metric centers and regression results. + :param B: batch size. + :param tag: tag for regression key "static | dynamic". + :param kwargs: keyword arguments + :return: + - conf: confidence bev map. + - unc: uncertainty bev map. + - obs_mask: observation mask of the bev map. + """ + reg = ctr_pts[f'reg_{tag}'].relu() + reg_evi = reg[:, :2] + reg_var = reg[:, 2:].view(-1, 2, 2) + ctr = ctr_pts['ctr'] + coor = ctr_pts['coor'] + + nbrs = self.nbrs.to(reg_evi.device) + dists = torch.zeros_like(ctr.view(-1, 1, 2)) + nbrs + vars0 = [self.var0, self.var0] + probs_weighted = weighted_mahalanobis_dists(reg_evi, reg_var, dists, vars0) + voxel_new = ctr.view(-1, 1, 2) + nbrs + # convert metric voxel points to map indices + x = (torch.floor(voxel_new[..., 0] / self.res[0]) - self.offset_sz_x).long() + y = (torch.floor(voxel_new[..., 1] / self.res[1]) - self.offset_sz_y).long() + batch_indices = (torch.ones_like(probs_weighted[:, :, 0]) * coor[:, :1]).long() + mask = (x >= 0) & (x < self.size_x) & (y >= 0) & (y < self.size_y) + x, y = x[mask], y[mask] + batch_indices = batch_indices[mask] + + # copy sparse probs to the dense evidence map + indices = batch_indices * self.size_x * self.size_y + x * self.size_y + y + batch_size = coor[:, 0].max().int().item() + 1 + probs_weighted = probs_weighted[mask].view(-1, 2) + evidence = torch.zeros((batch_size, self.size_x, self.size_y, 2), + device=probs_weighted.device).view(-1, 2) + torch_scatter.scatter(probs_weighted, indices, + dim=0, out=evidence, reduce='sum') + evidence = evidence.view(batch_size, self.size_x, self.size_y, 2) + + # create observation mask + obs_mask = torch.zeros_like(evidence[..., 0]).view(-1) + obs = indices.unique().long() + obs_mask[obs] = 1 + obs_mask = obs_mask.view(batch_size, self.size_x, self.size_y).bool() + conf, unc = pred_to_conf_unc(evidence) + + # import matplotlib.pyplot as plt + # plt.imshow(conf[0, :, :, 1].T.detach().cpu().numpy()) + # plt.show() + # plt.close() + return {f'conf_map_{tag}': conf, f'unc_map_{tag}': unc, f'obs_mask_{tag}': obs_mask}
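The dense-map construction in get_predictions flattens (batch, x, y) indices into a single index and sums all sparse contributions that land in the same cell. A minimal sketch of that scatter-sum pattern using only Tensor.index_add_ (torch_scatter.scatter with reduce='sum' behaves the same here); all sizes are made up for illustration:

import torch

B, size_x, size_y, C = 2, 4, 4, 2
b = torch.tensor([0, 0, 1])                        # batch index of each sparse sample
x = torch.tensor([1, 1, 3])
y = torch.tensor([2, 2, 0])
evi = torch.tensor([[0.2, 0.8], [0.3, 0.1], [1.0, 0.0]])

flat = b * size_x * size_y + x * size_y + y        # flattened map index
dense = torch.zeros(B * size_x * size_y, C)
dense.index_add_(0, flat, evi)                     # duplicates are summed
dense = dense.view(B, size_x, size_y, C)
print(dense[0, 1, 2])                              # tensor([0.5000, 0.9000])

obs_mask = torch.zeros(B * size_x * size_y)
obs_mask[flat.unique()] = 1
obs_mask = obs_mask.view(B, size_x, size_y).bool() # which cells were observed at all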
+ + +
[docs]class DiscreteBEVAssigner(BaseAssigner): + def __init__(self, + data_info, + stride, + down_sample=False, + annealing_step=None, + ): + super().__init__() + update_me_essentials(self, data_info, stride) + self.down_sample = down_sample + self.annealing_step = annealing_step + +
[docs] def pts_to_inds(self, samples): + """Calculate indices of samples in the bev map""" + ixy = metric2indices(samples[:, :3], self.res).long() + ixy[:, 1] -= self.offset_sz_x + ixy[:, 2] -= self.offset_sz_y + maskx = torch.logical_and(ixy[:, 1] >= 0, ixy[:, 1] < self.size_x) + masky = torch.logical_and(ixy[:, 2] >= 0, ixy[:, 2] < self.size_y) + mask = torch.logical_and(maskx, masky) + indices = ixy[mask] + return indices.T, mask
+ +
[docs] def get_obs_mask(self, inds, B): + obs_mask = torch.zeros((B, self.size_x, self.size_y), device=inds.device) + inds = inds.T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + obs_mask[inds[0], inds[1], inds[2]] = 1 + return obs_mask.bool()
+ + +
[docs] def assign(self, ctr_pts, samples, B, gt_boxes=None, **kwargs): + bevmap = self.get_predictions(ctr_pts, B) + inds, mask = self.pts_to_inds(samples) + labels = samples[mask][:, -1] + preds = bevmap[inds[0], inds[1], inds[2]] + + # import matplotlib.pyplot as plt + # img = pred_to_conf_unc(bevmap)[0][..., 1].detach().cpu().numpy() + # plt.imshow(img[0].T) + # plt.show() + # plt.close() + return preds, labels
+ +
[docs] def get_predictions(self, data_dict, B, edl=True, activation='none', **kwargs): + reg = data_dict['reg'] + inds = data_dict['coor'] + reg_evi = reg.relu() + + bevmap = torch.zeros((B, self.size_x, self.size_y, reg_evi.shape[-1]), + device=reg_evi.device) + inds = inds.T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + # obs_mask = evidence[..., 0].bool() + # obs_mask[inds[0], inds[1], inds[2]] = True + bevmap[inds[0], inds[1], inds[2]] = reg_evi + return bevmap
+ + +
[docs]class RoIBox3DAssigner(BaseAssigner): + def __init__(self, + box_coder, + ): + self.box_coder = build_box_coder(**box_coder) + self.code_size = self.box_coder.code_size + +
[docs] def assign(self, pred_boxes, gt_boxes, **kwargs): + tgt_dict = { + 'rois': [], + 'gt_of_rois': [], + 'gt_of_rois_src': [], + 'cls_tgt': [], + 'reg_tgt': [], + 'iou_tgt': [], + 'rois_anchor': [], + 'record_len': [] + } + + for rois, gts in zip(pred_boxes, gt_boxes): + gts[:, -1] *= 1 + ious = boxes_iou3d_gpu(rois, gts) + max_ious, gt_inds = ious.max(dim=1) + gt_of_rois = gts[gt_inds] + rcnn_labels = (max_ious > 0.3).float() + mask = torch.logical_not(rcnn_labels.bool()) + + # set negative samples back to rois, no correction in stage2 for them + gt_of_rois[mask] = rois[mask] + gt_of_rois_src = gt_of_rois.clone().detach() + + # canoical transformation + roi_center = rois[:, 0:3] + # TODO: roi_ry > 0 in pcdet + roi_ry = rois[:, 6] % (2 * PI) + gt_of_rois[:, 0:3] = gt_of_rois[:, 0:3] - roi_center + gt_of_rois[:, 6] = gt_of_rois[:, 6] - roi_ry + + # transfer LiDAR coords to local coords + gt_of_rois = rotate_points_along_z_torch( + points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), + angle=-roi_ry.view(-1) + ).view(-1, gt_of_rois.shape[-1]) + + # flip orientation if rois have opposite orientation + heading_label = (gt_of_rois[:, 6] + ( + torch.div(torch.abs(gt_of_rois[:, 6].min()), + (2 * PI), rounding_mode='trunc') + + 1) * 2 * PI) % (2 * PI) # 0 ~ 2pi + opposite_flag = (heading_label > PI * 0.5) & ( + heading_label < PI * 1.5) + + # (0 ~ pi/2, 3pi/2 ~ 2pi) + heading_label[opposite_flag] = (heading_label[ + opposite_flag] + PI) % ( + 2 * PI) + flag = heading_label > PI + heading_label[flag] = heading_label[ + flag] - PI * 2 # (-pi/2, pi/2) + heading_label = torch.clamp(heading_label, min=-PI / 2, + max=PI / 2) + gt_of_rois[:, 6] = heading_label + + # generate regression target + rois_anchor = rois.clone().detach().view(-1, self.code_size) + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + + reg_targets, _ = self.box_coder.encode( + rois_anchor, gt_of_rois.view(-1, self.code_size) + ) + + tgt_dict['rois'].append(rois) + tgt_dict['gt_of_rois'].append(gt_of_rois) + tgt_dict['gt_of_rois_src'].append(gt_of_rois_src) + tgt_dict['cls_tgt'].append(rcnn_labels) + tgt_dict['reg_tgt'].append(reg_targets) + tgt_dict['iou_tgt'].append(max_ious) + tgt_dict['rois_anchor'].append(rois_anchor) + tgt_dict['record_len'].append(rois.shape[0]) + + # cat list to tensor + for k, v in tgt_dict.items(): + if k == 'record_len': + continue + tgt_dict[k] = torch.cat(v, dim=0) + return tgt_dict
+ +
[docs] def get_predictions(self, rcnn_cls, rcnn_iou, rcnn_reg, rois): + rcnn_cls = rcnn_cls.sigmoid().view(-1) + rcnn_iou = rcnn_iou.view(-1) + rcnn_score = rcnn_cls * rcnn_iou**4 + rcnn_reg = rcnn_reg.view(-1, 7) + + rois_anchor = rois.clone().detach().view(-1, self.code_size) + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + + roi_center = rois[:, 0:3] + roi_ry = rois[:, 6] % (2 * PI) + + boxes_local = self.box_coder.decode(rois_anchor, rcnn_reg) + # boxes_local = rcnn_reg + rois_anchor + detections = rotate_points_along_z_torch( + points=boxes_local.view(-1, 1, boxes_local.shape[-1]), angle=roi_ry.view(-1) + ).view(-1, boxes_local.shape[-1]) + detections[:, :3] = detections[:, :3] + roi_center + detections[:, 6] = detections[:, 6] + roi_ry + mask = rcnn_score >= 0.01 + detections = detections[mask] + scores = rcnn_score[mask] + + return { + 'box': detections, + 'scr': scores, + # Todo currently only support cars + 'lbl': torch.zeros_like(scores), + # map indices to be aligned with sparse detection head format + 'idx': torch.zeros_like(scores), + }
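The heading handling in RoIBox3DAssigner.assign expresses the GT yaw relative to the ROI yaw and then folds it into (-pi/2, pi/2), flipping boxes that face the opposite direction, since a box rotated by pi is geometrically identical. A compact, equivalent sketch of that folding step (fold_heading is a hypothetical helper, plain PyTorch, angles in radians):

import math
import torch

def fold_heading(rel_yaw):
    # Fold relative yaw into (-pi/2, pi/2), flipping opposite-facing boxes.
    two_pi = 2 * math.pi
    h = rel_yaw % two_pi                              # wrap to [0, 2pi)
    opposite = (h > math.pi * 0.5) & (h < math.pi * 1.5)
    h[opposite] = (h[opposite] + math.pi) % two_pi    # flip by pi
    h[h > math.pi] -= two_pi                          # back to (-pi, pi]
    return h.clamp(min=-math.pi / 2, max=math.pi / 2)

rel = torch.tensor([0.1, math.pi - 0.1, -math.pi / 2 - 0.2])
print(fold_heading(rel))    # approx tensor([ 0.1000, -0.1000,  1.3708])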
+ + +
[docs]class RoadLineAssigner(BaseAssigner): + def __init__(self, + res, + range, + pos_neg_ratio=2): + super().__init__() + self.res = res + self.range = range + self.size = int(round(range / res * 2)) + self.pos_neg_ratio = pos_neg_ratio + +
[docs] def assign(self, coor, tgt_pts, B, **kwargs): + ctr_coor = coor.clone() + ctr_coor[:, 1:] = ctr_coor[:, 1:] + self.size / 2 + ctr_coor = ctr_coor.long() + roadline_maps = torch.zeros((B, self.size, self.size), device=tgt_pts.device) + mask = (tgt_pts[:, 1:3].abs() < self.range).all(dim=-1) + tgt_pts = tgt_pts[mask] + + tgt_coor = torch.floor((tgt_pts[:, 1:3] + self.range) / self.res).long() + mask = torch.logical_and((tgt_coor >= 0).all(dim=-1), (tgt_coor < self.size).all(dim=-1)) + roadline_maps[tgt_pts[mask, 0].long(), tgt_coor[mask, 0], tgt_coor[mask, 1]] = tgt_pts[mask, -1] + + valid = torch.logical_and((ctr_coor[:, 1:3] >= 0).all(dim=-1), (ctr_coor[:, 1:3] < self.size).all(dim=-1)) + labels = roadline_maps[ctr_coor[valid, 0], ctr_coor[valid, 1], ctr_coor[valid, 2]] + + if self.pos_neg_ratio: + labels = pos_neg_sampling(labels, self.pos_neg_ratio) + + # import matplotlib.pyplot as plt + # pts_vis = ctr_coor[ctr_coor[:, 0] == 0, 1:].detach().cpu().numpy() + # lbl_vis = labels.detach().cpu().numpy() + # fig = plt.figure(figsize=(8, 8)) + # ax = fig.add_subplot() + # ax.scatter(pts_vis[:, 0], pts_vis[:, 1], c=lbl_vis, marker='.') + # plt.show() + # plt.close() + return labels, valid
+ + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/transformer.html b/docs/_build/html/_modules/cosense3d/modules/plugin/transformer.html new file mode 100644 index 00000000..11806df6 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/transformer.html @@ -0,0 +1,1008 @@ + + + + + + cosense3d.modules.plugin.transformer — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.plugin.transformer

+import warnings, copy
+from typing import List, Optional
+
+import torch
+from torch import nn
+import torch.utils.checkpoint as cp
+
+from cosense3d.modules.utils import build_torch_module
+from cosense3d.modules.utils.norm import build_norm_layer
+from cosense3d.modules.utils.init import xavier_init
+try:
+    from cosense3d.modules.plugin.flash_attn import FlashMHA
+except ImportError:
+    from cosense3d.modules.plugin.flash_attn_new import FlashMHA
+from cosense3d.modules.utils.amp import auto_fp16
+
+
+
[docs]def build_module(cfg): + cfg_ = copy.deepcopy(cfg) + attn_typ = cfg_.pop('type') + return globals()[attn_typ](**cfg_)
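build_module is a small factory: it pops the type key from the config and instantiates the class of that name from this module's namespace. A usage sketch, assuming the cosense3d package is installed:

from cosense3d.modules.plugin.transformer import build_module

attn = build_module(dict(type='MultiheadAttention', embed_dims=256, num_heads=8, batch_first=True))
ffn = build_module(dict(type='FFN', embed_dims=256, feedforward_channels=512))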
+ + +
[docs]class FFN(nn.Module): + """Implements feed-forward networks (FFNs) with residual connection. + """ + + def __init__(self, + embed_dims: int, + feedforward_channels: int, + num_fcs: int=2, + act_cfg: dict=dict(type='ReLU', inplace=True), + dropout: float=0.0, + add_residual: bool=True): + """ + + :param embed_dims: The feature dimension. Same as + `MultiheadAttention`. + :param feedforward_channels: The hidden dimension of FFNs. + :param num_fcs: The number of fully-connected layers in FFNs. + Defaults to 2. + :param act_cfg: activation config. + :param dropout: Probability of an element to be + zeroed. Default 0.0. + :param add_residual: Add residual connection. + Defaults to True. + """ + super(FFN, self).__init__() + assert num_fcs >= 2, 'num_fcs should be no less ' \ + f'than 2. got {num_fcs}.' + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.num_fcs = num_fcs + self.act_cfg = act_cfg + self.dropout = dropout + self.activate = build_torch_module(act_cfg) + + layers = nn.ModuleList() + in_channels = embed_dims + for _ in range(num_fcs - 1): + layers.append( + nn.Sequential( + nn.Linear(in_channels, feedforward_channels), + self.activate, + nn.Dropout(dropout))) + in_channels = feedforward_channels + layers.append(nn.Linear(feedforward_channels, embed_dims)) + self.layers = nn.Sequential(*layers) + self.dropout = nn.Dropout(dropout) + self.add_residual = add_residual + +
[docs] def forward(self, x, residual=None): + """Forward function for `FFN`.""" + out = self.layers(x) + if not self.add_residual: + return out + if residual is None: + residual = x + return residual + self.dropout(out)
+ + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(embed_dims={self.embed_dims}, ' + repr_str += f'feedforward_channels={self.feedforward_channels}, ' + repr_str += f'num_fcs={self.num_fcs}, ' + repr_str += f'act_cfg={self.act_cfg}, ' + repr_str += f'dropout={self.dropout}, ' + repr_str += f'add_residual={self.add_residual})' + return repr_str
+ + +
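A minimal usage sketch for FFN (assuming the cosense3d package is importable); with add_residual=True the input is added back onto the projected output, so the output keeps the input shape:

import torch
from cosense3d.modules.plugin.transformer import FFN

ffn = FFN(embed_dims=256, feedforward_channels=512, dropout=0.1).eval()
x = torch.rand(100, 2, 256)     # (num_tokens, batch, embed_dims)
out = ffn(x)                    # x + MLP(x)
print(out.shape)                # torch.Size([100, 2, 256])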
[docs]class MultiheadFlashAttention(nn.Module): + r"""A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + attn_drop: float=0., + proj_drop: float=0., + dropout: float=None, + batch_first: bool=True, + cache_attn_weights: bool=False, + **kwargs): + """ + :param embed_dims: The embedding dimension. + :param num_heads: Parallel attention heads. + :param attn_drop: A Dropout layer on attn_output_weights. Default: 0.0. + :param proj_drop: A Dropout layer after `nn.MultiheadAttention`. Default: 0.0. + :param dropout: united dropout for both attention and projection layer. + :param batch_first: When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + :param cache_attn_weights: whether to cache the intermediate attention weights. + :param kwargs: + """ + super(MultiheadFlashAttention, self).__init__() + if dropout is not None: + attn_drop = dropout + proj_drop = dropout + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = True + self.cache_attn_weights = cache_attn_weights + self.attn_weights = None + + self.attn = FlashMHA(embed_dims, num_heads, attn_drop, dtype=torch.float16, device='cuda', + **kwargs) + + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = nn.Dropout(attn_drop) + +
[docs] def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """ + Forward function for `MultiheadAttention`. + + :param query: The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else [bs, num_queries embed_dims]. + :param key: The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims]. If None, the ``query`` will be used. Defaults to None. + :param value: The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. + Defaults to None. If None, the `key` will be used. + :param identity: This tensor, with the same shape as x, will be used for the identity link. + If None, `x` will be used. Defaults to None. + :param query_pos: The positional encoding for query, with the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + :param key_pos: The positional encoding for `key`, with the same shape as `key`. Defaults to None. + If not None, it will be added to `key` before forward function. If None, and `query_pos` has the same + shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. + :param attn_mask: ByteTensor mask with shape [num_queries, num_keys]. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + :param key_padding_mask: ByteTensor with shape [bs, num_keys]. Defaults to None. + :param kwargs: allow passing a more general data flow when combining with + other operations in `transformerlayer`. + :return: forwarded results with shape [num_queries, bs, embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + """ + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + with torch.autocast(device_type='cuda', dtype=torch.float16): + # flash attention only support f16 + out, attn_weights = self.attn( + q=query, + k=key, + v=value, + key_padding_mask=None) + + if self.cache_attn_weights: + self.attn_weights = attn_weights + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out))
+ + +
[docs]class MultiHeadAttentionWrapper(nn.MultiheadAttention): + def __init__(self, *args, **kwargs): + super(MultiHeadAttentionWrapper, self).__init__(*args, **kwargs) + self.fp16_enabled = True + +
[docs] @auto_fp16(out_fp32=True) + def forward_fp16(self, *args, **kwargs): + return super(MultiHeadAttentionWrapper, self).forward(*args, **kwargs)
+ +
[docs] def forward_fp32(self, *args, **kwargs): + return super(MultiHeadAttentionWrapper, self).forward(*args, **kwargs)
+ +
[docs] def forward(self, *args, **kwargs): + if self.fp16_enabled and self.training: + return self.forward_fp16(*args, **kwargs) + else: + return self.forward_fp32(*args, **kwargs)
+ + +
[docs]class MultiheadAttention(nn.Module): + r"""A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + dropout: float=0.1, + batch_first: bool=False, + cache_attn_weights: bool=False, + fp16: bool=False, + **kwargs): + """ + :param embed_dims: The embedding dimension. + :param num_heads: Parallel attention heads. + :param dropout: probability of Dropout layer, Default: 0.0. + :param batch_first: When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + :param cache_attn_weights: whether to cache attention weights. + :param fp16: whether set precision to float16 + :param kwargs: + """ + super(MultiheadAttention, self).__init__() + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = batch_first + self.cache_attn_weights = cache_attn_weights + self.attn_weights = None + self.fp16_enabled = fp16 + if fp16: + self.attn = MultiHeadAttentionWrapper(embed_dims, num_heads, dropout, **kwargs) + else: + self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout, **kwargs) + + self.proj_drop = nn.Dropout(dropout) + self.dropout_layer = nn.Dropout(dropout) + +
[docs] def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """ + Forward function for `MultiheadAttention`. + + :param query: The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else [bs, num_queries embed_dims]. + :param key: The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, + else [bs, num_keys, embed_dims]. If None, the ``query`` will be used. Defaults to None. + :param value: The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. + Defaults to None. If None, the `key` will be used. + :param identity: This tensor, with the same shape as x, will be used for the identity link. + If None, `x` will be used. Defaults to None. + :param query_pos: The positional encoding for query, with the same shape as `x`. + If not None, it will be added to `x` before forward function. Defaults to None. + :param key_pos: The positional encoding for `key`, with the same shape as `key`. + Defaults to None. If not None, it will be added to `key` before `query_pos` has the same shape as `key`, + then `query_pos` will be used for `key_pos`. Defaults to None. + :param attn_mask: ByteTensor mask with shape [num_queries, num_keys]. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + :param key_padding_mask: ByteTensor with shape [bs, num_keys]. Defaults to None. + :param kwargs: allow passing a more general data flow when combining with other operations in `transformerlayer`. + :return: forwarded results with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else[bs, num_queries embed_dims]. + + """ + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1).contiguous() + key = key.transpose(0, 1).contiguous() + value = value.transpose(0, 1).contiguous() + + out, attn_weights = self.attn( + query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask) + if self.batch_first: + out = out.transpose(0, 1).contiguous() + + if self.cache_attn_weights: + self.attn_weights = attn_weights + + return identity + self.dropout_layer(self.proj_drop(out))
+ + +
[docs]class TransformerDecoderLayer(nn.Module): + def __init__(self, + attn_cfgs=None, + ffn_cfgs=None, + operation_order=None, + norm_cfg=dict(type='LN'), + batch_first=False, + with_cp=True, + **kwargs): + super().__init__() + assert set(operation_order) & { + 'self_attn', 'norm', 'ffn', 'cross_attn'} == \ + set(operation_order), f'The operation_order of' \ + f' {self.__class__.__name__} should ' \ + f'contains all four operation type ' \ + f"{['self_attn', 'norm', 'ffn', 'cross_attn']}" + num_attn = operation_order.count('self_attn') + operation_order.count('cross_attn') + if isinstance(attn_cfgs, dict): + attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] + else: + assert num_attn == len(attn_cfgs), f'The length ' \ + f'of attn_cfg {num_attn} is ' \ + f'not consistent with the number of attention' \ + f'in operation_order {operation_order}.' + + self.batch_first = batch_first + self.num_attn = num_attn + self.operation_order = operation_order + self.norm_cfg = norm_cfg + self.pre_norm = operation_order[0] == 'norm' + self.use_checkpoint = with_cp + + self._init_layers(operation_order, attn_cfgs, ffn_cfgs, norm_cfg) + + def _init_layers(self, operation_order, attn_cfgs, ffn_cfgs, norm_cfg): + self.attentions = nn.ModuleList() + index = 0 + for operation_name in operation_order: + if operation_name in ['self_attn', 'cross_attn']: + if 'batch_first' in attn_cfgs[index]: + assert self.batch_first == attn_cfgs[index]['batch_first'] + else: + attn_cfgs[index]['batch_first'] = self.batch_first + attention = build_module(attn_cfgs[index]) + # Some custom attentions used as `self_attn` + # or `cross_attn` can have different behavior. + attention.operation_name = operation_name + self.attentions.append(attention) + index += 1 + + self.embed_dims = self.attentions[0].embed_dims + + self.ffns = nn.ModuleList() + num_ffns = operation_order.count('ffn') + if isinstance(ffn_cfgs, dict): + ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] + assert len(ffn_cfgs) == num_ffns + for ffn_index in range(num_ffns): + if 'embed_dims' not in ffn_cfgs[ffn_index]: + ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims + else: + assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims + self.ffns.append(build_module(ffn_cfgs[ffn_index])) + + self.norms = nn.ModuleList() + num_norms = operation_order.count('norm') + for _ in range(num_norms): + self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) + + def _forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + temp_memory=None, + temp_pos=None, + attn_masks: List[torch.Tensor]=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """ + Forward function for `TransformerDecoderLayer`. + + :param query: The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, + else [bs, num_queries embed_dims]. + :param key: The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, + else [bs, num_keys, embed_dims]. + :param value: The value tensor with same shape as `key`. + :param query_pos: The positional encoding for `query`. Default: None. + :param key_pos: The positional encoding for `key`. Default: None. + :param temp_memory: 2D Tensor used in calculation of corresponding attention. The length of it should equal + to the number of `attention` in `operation_order`. Default: None. + :param temp_pos: + :param attn_masks: 2D Tensor used in calculation of corresponding attention. 
The length of it should equal + to the number of `attention` in `operation_order`. Default: None. + :param query_key_padding_mask: ByteTensor for `query`, with shape [bs, num_queries]. Only used in `self_attn` + layer. Defaults to None. + :param key_padding_mask: ByteTensor for `query`, with shape [bs, num_keys]. Default: None. + :param kwargs: contains some specific arguments of attentions. + :return: forwarded results with shape [num_queries, bs, embed_dims]. + """ + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + if layer == 'self_attn': + if temp_memory is not None: + temp_key = temp_value = torch.cat([query, temp_memory], dim=0) + temp_pos = torch.cat([query_pos, temp_pos], dim=0) + else: + temp_key = temp_value = query + temp_pos = query_pos + query = self.attentions[attn_index]( + query, + temp_key, + temp_value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=temp_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query + +
[docs] def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + temp_memory=None, + temp_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs + ): + """Forward function for `TransformerCoder`. + :returns: Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if self.use_checkpoint and self.training: + x = cp.checkpoint( + self._forward, + query, + key, + value, + query_pos, + key_pos, + temp_memory, + temp_pos, + attn_masks, + query_key_padding_mask, + key_padding_mask, + ) + else: + x = self._forward( + query, + key, + value, + query_pos, + key_pos, + temp_memory, + temp_pos, + attn_masks, + query_key_padding_mask, + key_padding_mask, + ) + return x
+ + +
[docs]class TransformerLayerSequence(nn.Module): + """ + Base class for TransformerEncoder and TransformerDecoder in vision + transformer. + + As base-class of Encoder and Decoder in vision transformer. + Support customization such as specifying different kind + of `transformer_layer` in `transformer_coder`. + """ + + def __init__(self, transformerlayers=None, num_layers=None): + """ + :param transformerlayers: (list[obj:`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict`) + Config of transformerlayer in TransformerCoder. If it is obj:`mmcv.ConfigDict`, + it would be repeated `num_layer` times to a list[`mmcv.ConfigDict`]. Default: None. + :param num_layers: The number of `TransformerLayer`. Default: None. + """ + super().__init__() + if isinstance(transformerlayers, dict): + transformerlayers = [ + copy.deepcopy(transformerlayers) for _ in range(num_layers) + ] + else: + assert isinstance(transformerlayers, list) and \ + len(transformerlayers) == num_layers + self.num_layers = num_layers + self.layers = nn.ModuleList() + for i in range(num_layers): + self.layers.append(build_module(transformerlayers[i])) + self.embed_dims = self.layers[0].embed_dims + self.pre_norm = self.layers[0].pre_norm + +
[docs] def forward(self, + query, + key, + value, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerCoder`. + + :param query: (Tensor) Input query with shape `(num_queries, bs, embed_dims)`. + :param key: (Tensor) The key tensor with shape `(num_keys, bs, embed_dims)`. + :param value: (Tensor) The value tensor with shape `(num_keys, bs, embed_dims)`. + :param query_pos: (Tensor) The positional encoding for `query`. Default: None. + :param key_pos: (Tensor) The positional encoding for `key`. Default: None. + :param attn_masks: (List[Tensor], optional) Each element is 2D Tensor which is + used in calculation of corresponding attention in operation_order. Default: None. + :param query_key_padding_mask: (Tensor) ByteTensor for `query`, with shape [bs, num_queries]. + Only used in self-attention Default: None. + :param key_padding_mask: (Tensor) ByteTensor for `query`, with shape [bs, num_keys]. Default: None. + + :returns: results with shape [num_queries, bs, embed_dims]. + """ + for layer in self.layers: + query = layer( + query, + key, + value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + **kwargs) + return query
+ + +
[docs]class TransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer.""" + + def __init__(self, + *args, + post_norm_cfg=dict(type='LN'), + return_intermediate=False, + **kwargs): + """ + :param args: + :param post_norm_cfg: Config of last normalization layer. Default: `LN`. + :param return_intermediate: Whether to return intermediate outputs. + :param kwargs: + """ + + super(TransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + if post_norm_cfg is not None: + self.post_norm = build_norm_layer(post_norm_cfg, self.embed_dims)[1] + else: + self.post_norm = None + +
[docs] def forward(self, query, *args, **kwargs): + """Forward function for `TransformerDecoder`. + + :param query: (Tensor) Input query with shape `(num_query, bs, embed_dims)`. + :return:Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape [num_layers, num_query, bs, embed_dims]. + """ + if not self.return_intermediate: + x = super().forward(query, *args, **kwargs) + if self.post_norm: + x = self.post_norm(x)[None] + return x + + intermediate = [] + for layer in self.layers: + query = layer(query, *args, **kwargs) + if self.return_intermediate: + if self.post_norm is not None: + intermediate.append(self.post_norm(query)) + else: + intermediate.append(query) + # if torch.isnan(query).any(): + # print('TransfromerDecoder: Found nan in query.') + # if torch.isnan(intermediate[-1]).any(): + # print('TransfromerDecoder: Found nan in intermediate result.') + return torch.stack(intermediate)
+ + +class PETRTemporalTransformer(nn.Module): + """Implements the DETR transformer. + + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + <https://arxiv.org/pdf/2005.12872>`_ for details. + """ + + def __init__(self, encoder=None, decoder=None, cross=False): + """ + + :param encoder: (`mmcv.ConfigDict` | Dict) Config of + TransformerEncoder. Defaults to None. + :param decoder: ((`mmcv.ConfigDict` | Dict) Config of + TransformerDecoder. Defaults to None. + :param cross: whether to use cross-attention. + """ + super(PETRTemporalTransformer, self).__init__() + if encoder is not None: + self.encoder = build_module(encoder) + else: + self.encoder = None + self.decoder = build_module(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, memory, tgt, query_pos, pos_embed, attn_masks, temp_memory=None, temp_pos=None, + mask=None, query_mask=None, reg_branch=None): + """Forward function for `Transformer`. + """ + memory = memory.transpose(0, 1).contiguous() + query_pos = query_pos.transpose(0, 1).contiguous() + pos_embed = pos_embed.transpose(0, 1).contiguous() + + n, bs, c = memory.shape + + if tgt is None: + tgt = torch.zeros_like(query_pos) + else: + tgt = tgt.transpose(0, 1).contiguous() + + if temp_memory is not None: + temp_memory = temp_memory.transpose(0, 1).contiguous() + temp_pos = temp_pos.transpose(0, 1).contiguous() + + # out_dec: [num_layers, num_query, bs, dim] + if not isinstance(attn_masks, list): + attn_masks = [attn_masks, None] + out_dec = self.decoder( + query=tgt, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_pos, + temp_memory=temp_memory, + temp_pos=temp_pos, + query_key_padding_mask=query_mask, + key_padding_mask=mask, + attn_masks=attn_masks, + reg_branch=reg_branch, + ) + out_dec = out_dec.transpose(1, 2).contiguous() + memory = memory.reshape(-1, bs, c).transpose(0, 1).contiguous() + return out_dec, memory + + +
[docs]class PETRTransformer(nn.Module): + """ + Implements the DETR transformer. + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + <https://arxiv.org/pdf/2005.12872>`_ for details. + """ + + def __init__(self, encoder=None, decoder=None, cross=False): + super(PETRTransformer, self).__init__() + if encoder is not None: + self.encoder = build_module(encoder) + else: + self.encoder = None + self.decoder = build_module(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + +
[docs] def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True
+ +
[docs] def forward(self, memory, tgt, query_pos, pos_embed, attn_masks=None, + mask=None, query_mask=None): + """Forward function for `Transformer`. + """ + memory = memory.transpose(0, 1).contiguous() + query_pos = query_pos.transpose(0, 1).contiguous() + pos_embed = pos_embed.transpose(0, 1).contiguous() + + n, bs, c = memory.shape + + if tgt is None: + tgt = torch.zeros_like(query_pos) + else: + tgt = tgt.transpose(0, 1).contiguous() + + # out_dec: [num_layers, num_query, bs, dim] + if not isinstance(attn_masks, list): + attn_masks = [attn_masks] + assert len(attn_masks) == self.decoder.layers[0].num_attn + out_dec = self.decoder( + query=tgt, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_pos, + query_key_padding_mask=query_mask, + key_padding_mask=mask, + attn_masks=attn_masks, + ) + out_dec = out_dec.transpose(1, 2).contiguous() + memory = memory.reshape(-1, bs, c).transpose(0, 1).contiguous() + return out_dec, memory
+ + +
[docs]class PETRTemporalTransformer(nn.Module): + r""" + Implements the DETR transformer. + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + <https://arxiv.org/pdf/2005.12872>`_ for details. + """ + + def __init__(self, encoder=None, decoder=None, cross=False): + super(PETRTemporalTransformer, self).__init__() + if encoder is not None: + self.encoder = build_module(encoder) + else: + self.encoder = None + self.decoder = build_module(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + +
[docs] def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True
+ +
[docs] def forward(self, memory, tgt, query_pos, pos_embed, attn_masks, temp_memory=None, temp_pos=None, + mask=None, query_mask=None, reg_branch=None): + """Forward function for `Transformer`. + """ + query_pos = query_pos.transpose(0, 1).contiguous() + if memory is not None: + memory = memory.transpose(0, 1).contiguous() + n, bs, c = memory.shape + if pos_embed is not None: + pos_embed = pos_embed.transpose(0, 1).contiguous() + + if tgt is None: + tgt = torch.zeros_like(query_pos) + else: + tgt = tgt.transpose(0, 1).contiguous() + + if temp_memory is not None: + temp_memory = temp_memory.transpose(0, 1).contiguous() + temp_pos = temp_pos.transpose(0, 1).contiguous() + + # out_dec: [num_layers, num_query, bs, dim] + if not isinstance(attn_masks, list): + attn_masks = [attn_masks] + assert len(attn_masks) == self.decoder.layers[0].num_attn + out_dec = self.decoder( + query=tgt, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_pos, + temp_memory=temp_memory, + temp_pos=temp_pos, + query_key_padding_mask=query_mask, + key_padding_mask=mask, + attn_masks=attn_masks, + ) + out_dec = out_dec.transpose(1, 2).contiguous() + if memory is not None: + memory = memory.reshape(-1, bs, c).transpose(0, 1).contiguous() + return out_dec, memory
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/voxel_encoder.html b/docs/_build/html/_modules/cosense3d/modules/plugin/voxel_encoder.html new file mode 100644 index 00000000..7263530c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/voxel_encoder.html @@ -0,0 +1,136 @@ + + + + + + cosense3d.modules.plugin.voxel_encoder — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.plugin.voxel_encoder

+import torch
+from torch import nn
+
+
+
[docs]class MeanVFE(nn.Module): + def __init__(self, num_point_features, **kwargs): + super().__init__() + self.num_point_features = num_point_features + +
[docs] def get_output_feature_dim(self): + return self.num_point_features
+ +
[docs] def forward(self, voxel_features, voxel_num_points): + """ + Args: + voxels: (num_voxels, max_points_per_voxel, C) + voxel_num_points: optional (num_voxels) + + + Returns: + vfe_features: (num_voxels, C) + """ + points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False) + normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).\ + type_as(voxel_features) + points_mean = points_mean / normalizer + + return points_mean.contiguous()
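MeanVFE simply averages the points inside each voxel (padded point slots are assumed to be zero-filled upstream, as produced by the voxel generator). A tiny shape check, assuming the package is importable:

import torch
from cosense3d.modules.plugin.voxel_encoder import MeanVFE

vfe = MeanVFE(num_point_features=4)
voxels = torch.rand(10, 32, 4)              # (num_voxels, max_points_per_voxel, C)
num_points = torch.randint(1, 33, (10,))    # valid points per voxel
feats = vfe(voxels, num_points)
print(feats.shape)                          # torch.Size([10, 4])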
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/voxel_generator.html b/docs/_build/html/_modules/cosense3d/modules/plugin/voxel_generator.html new file mode 100644 index 00000000..d8763b37 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/voxel_generator.html @@ -0,0 +1,149 @@ + + + + + + cosense3d.modules.plugin.voxel_generator — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.plugin.voxel_generator

+import torch
+from spconv.pytorch.utils import PointToVoxel
+
+
+
[docs]class VoxelGenerator: + def __init__(self, + voxel_size, + lidar_range, + max_points_per_voxel, + empty_mean=True, + mode='train', + device='cuda', + **kwargs): + self.voxel_size = torch.tensor(voxel_size) + self.lidar_range = torch.tensor(lidar_range) + self.max_points_per_voxel = max_points_per_voxel + self.max_voxels = kwargs.get(f"max_voxels_{mode}", 50000) + self.empty_mean = empty_mean + + self.grid_size = ((self.lidar_range[3:] - self.lidar_range[:3]) + / self.voxel_size).round().int() + self.voxel_generator = PointToVoxel( + vsize_xyz=self.voxel_size.tolist(), + coors_range_xyz=self.lidar_range.tolist(), + max_num_points_per_voxel=self.max_points_per_voxel, + num_point_features=4, + max_num_voxels=self.max_voxels, + device=torch.device(device) + ) + + def __call__(self, points_list): + voxels_list = [] + coordinates_list = [] + num_points_list = [] + for points in points_list: + voxels, coordinates, num_points = self.voxel_generator( + points, empty_mean=self.empty_mean) + voxels_list.append(voxels) + coordinates_list.append(coordinates) + num_points_list.append(num_points) + return voxels_list, coordinates_list, num_points_list
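A usage sketch for the voxel generator. It wraps spconv's PointToVoxel, so this assumes spconv v2 and a CUDA device; the chosen ranges and sizes are only an example:

import torch
from cosense3d.modules.plugin.voxel_generator import VoxelGenerator

gen = VoxelGenerator(
    voxel_size=[0.1, 0.1, 0.1],
    lidar_range=[-50, -50, -3, 50, 50, 1],
    max_points_per_voxel=32,
    max_voxels_train=60000,                 # picked up via kwargs as f"max_voxels_{mode}"
    mode='train')

pts = torch.rand(20000, 4, device='cuda')   # (x, y, z, intensity)
pts[:, :2] = pts[:, :2] * 100 - 50          # x, y in [-50, 50)
pts[:, 2] = pts[:, 2] * 4 - 3               # z in [-3, 1)
voxels, coords, num_points = gen([pts])     # lists with one entry per input point cloud
print(voxels[0].shape)                      # (num_voxels, 32, 4)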
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/voxnet_utils.html b/docs/_build/html/_modules/cosense3d/modules/plugin/voxnet_utils.html new file mode 100644 index 00000000..adea5706 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/voxnet_utils.html @@ -0,0 +1,178 @@ + + + + + + cosense3d.modules.plugin.voxnet_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.plugin.voxnet_utils

+# -*- coding: utf-8 -*-
+# Author: Runsheng Xu <rxx3386@ucla.edu>, OpenPCDet, modified by Yunshuang Yuan
+# License: TDG-Attribution-NonCommercial-NoDistrib
+# Modified by Yunshuang Yuan
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+import MinkowskiEngine as ME
+
+
+
[docs]class Conv3d(nn.Module): + + def __init__(self, in_channels, out_channels, k, s, p, batch_norm=True): + super(Conv3d, self).__init__() + self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=k, + stride=s, padding=p) + if batch_norm: + self.bn = nn.BatchNorm3d(out_channels) + else: + self.bn = None + +
[docs] def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + + return F.relu(x, inplace=True)
+ + +
[docs]class CML(nn.Module): + def __init__(self, in_channels): + super(CML, self).__init__() + self.dense = True + self.conv3d_1 = Conv3d(in_channels, in_channels, 3, s=(2, 1, 1), p=(1, 1, 1)) + self.conv3d_2 = Conv3d(in_channels, in_channels, 3, s=(1, 1, 1), p=(0, 1, 1)) + self.conv3d_3 = Conv3d(in_channels, in_channels, 3, s=(2, 1, 1), p=(1, 1, 1)) + self.out_strides = (4, 1, 1) + +
[docs] def forward(self, x): + x = self.conv3d_1(x) + x = self.conv3d_2(x) + x = self.conv3d_3(x) + return x
+ + +
[docs]class CMLSparse(nn.Module): + def __init__(self, in_channels): + super(CMLSparse, self).__init__() + self.dense = False + self.conv3d_1 = ME.MinkowskiConvolution( + in_channels, in_channels, 3, (2, 1, 1), dimension=3, expand_coordinates=False) + self.conv3d_2 = ME.MinkowskiConvolution( + in_channels, in_channels, 3, (2, 1, 1), dimension=3, expand_coordinates=False) + self.conv3d_3 = ME.MinkowskiConvolution( + in_channels, in_channels, 3, (2, 1, 1), dimension=3, expand_coordinates=False) + self.out_strides = nn.Parameter(torch.Tensor([8, 1, 1])) + +
[docs] def forward(self, feats, coords): + x = ME.SparseTensor(features=feats, coordinates=coords) + x = self.conv3d_1(x) + x = self.conv3d_2(x) + x = self.conv3d_3(x) + + feats_out = x.F + coords_out = x.C + coords_out[:, 1:] = coords_out[:, 1:] / self.out_strides + return feats_out, coords_out
+ + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/plugin/vsa.html b/docs/_build/html/_modules/cosense3d/modules/plugin/vsa.html new file mode 100644 index 00000000..c0632231 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/plugin/vsa.html @@ -0,0 +1,427 @@ + + + + + + cosense3d.modules.plugin.vsa — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.plugin.vsa

+import copy
+import random
+
+import torch
+import torch.nn as nn
+
+from cosense3d.ops import pointnet2_utils
+from cosense3d.ops.utils import points_in_boxes_gpu
+from cosense3d.modules.utils.common import get_voxel_centers, cat_coor_with_idx
+
+sa_layer_default=dict(
+    raw_points=dict(
+        mlps=[[16, 16], [16, 16]],
+        pool_radius=[0.4, 0.8],
+        n_sample=[16, 16],
+    ),
+    x_conv1=dict(
+        downsample_factor=1,
+        mlps=[[16, 16], [16, 16]],
+        pool_radius=[0.4, 0.8],
+        n_sample=[16, 16],
+    ),
+    x_conv2=dict(
+        downsample_factor=2,
+        mlps=[[32, 32], [32, 32]],
+        pool_radius=[0.8, 1.2],
+        n_sample=[16, 32],
+    ),
+    x_conv3=dict(
+        downsample_factor=4,
+        mlps=[[64, 64], [64, 64]],
+        pool_radius=[1.2, 2.4],
+        n_sample=[16, 32],
+    ),
+    x_conv4=dict(
+        downsample_factor=8,
+        mlps=[[64, 64], [64, 64]],
+        pool_radius=[2.4, 4.8],
+        n_sample=[16, 32],
+    )
+)
+
+default_feature_source = ['bev', 'x_conv1', 'x_conv2', 'x_conv3', 'x_conv4', 'raw_points']
+
+
[docs]def bilinear_interpolate_torch(im, x, y): + """ + Args: + im: (H, W, C) [y, x] + x: (N) + y: (N) + + Returns: + + """ + x0 = torch.floor(x).long() + x1 = x0 + 1 + + y0 = torch.floor(y).long() + y1 = y0 + 1 + + x0 = torch.clamp(x0, 0, im.shape[1] - 1) + x1 = torch.clamp(x1, 0, im.shape[1] - 1) + y0 = torch.clamp(y0, 0, im.shape[0] - 1) + y1 = torch.clamp(y1, 0, im.shape[0] - 1) + + Ia = im[y0, x0] + Ib = im[y1, x0] + Ic = im[y0, x1] + Id = im[y1, x1] + + wa = (x1.type_as(x) - x) * (y1.type_as(y) - y) + wb = (x1.type_as(x) - x) * (y - y0.type_as(y)) + wc = (x - x0.type_as(x)) * (y1.type_as(y) - y) + wd = (x - x0.type_as(x)) * (y - y0.type_as(y)) + ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd) + return ans
+ + +
[docs]class VoxelSetAbstraction(nn.Module): + def __init__(self, + voxel_size, + point_cloud_range, + num_keypoints=4096, + num_out_features=32, + point_source='raw_points', + features_source=None, + num_bev_features=128, + bev_stride=8, + num_rawpoint_features=3, + enlarge_selection_boxes=True, + sa_layer=None, + min_selected_kpts=128, + **kwargs): + super().__init__() + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.features_source = default_feature_source \ + if features_source is None \ + else features_source + self.num_keypoints = num_keypoints + self.num_out_features = num_out_features + self.point_source = point_source + self.num_bev_features = num_bev_features + self.bev_stride = bev_stride + self.num_rawpoint_features = num_rawpoint_features + self.enlarge_selection_boxes = enlarge_selection_boxes + self.min_selected_kpts = min_selected_kpts + + self.SA_layers = nn.ModuleList() + self.SA_layer_names = [] + self.downsample_times_map = {} + c_in = 0 + sa_layer = sa_layer_default if sa_layer is None else sa_layer + for src_name in self.features_source : + if src_name in ['bev', 'raw_points']: + continue + self.downsample_times_map[src_name] = sa_layer[src_name]['downsample_factor'] + mlps = copy.copy(sa_layer[src_name]['mlps']) + for k in range(len(mlps)): + mlps[k] = [mlps[k][0]] + mlps[k] + cur_layer = pointnet2_utils.StackSAModuleMSG( + radii=sa_layer[src_name]['pool_radius'], + nsamples=sa_layer[src_name]['n_sample'], + mlps=mlps, + use_xyz=True, + pool_method='max_pool', + ) + self.SA_layers.append(cur_layer) + self.SA_layer_names.append(src_name) + + c_in += sum([x[-1] for x in mlps]) + + if 'bev' in self.features_source: + c_bev = num_bev_features + c_in += c_bev + + if 'raw_points' in self.features_source: + mlps = copy.copy(sa_layer['raw_points']['mlps']) + for k in range(len(mlps)): + mlps[k] = [num_rawpoint_features - 3] + mlps[k] + + self.SA_rawpoints = pointnet2_utils.StackSAModuleMSG( + radii=sa_layer['raw_points']['pool_radius'], + nsamples=sa_layer['raw_points']['n_sample'], + mlps=mlps, + use_xyz=True, + pool_method='max_pool' + ) + c_in += sum([x[-1] for x in mlps]) + + self.vsa_point_feature_fusion = nn.Sequential( + nn.Linear(c_in, self.num_out_features, bias=False), + nn.BatchNorm1d(self.num_out_features), + nn.ReLU(), + ) + self.num_point_features = self.num_out_features + self.num_point_features_before_fusion = c_in + +
[docs] def interpolate_from_bev_features(self, keypoints_list, bev_features): + B = len(bev_features) + point_bev_features_list = [] + for i in range(B): + keypoints = keypoints_list[i][:, :3] + x_idxs = (keypoints[..., 0] - self.point_cloud_range[0]) / self.voxel_size[0] + y_idxs = (keypoints[..., 1] - self.point_cloud_range[1]) / self.voxel_size[1] + x_idxs = x_idxs / self.bev_stride + y_idxs = y_idxs / self.bev_stride + cur_bev_features = bev_features[i].permute(1, 2, 0) # (H, W, C) + point_bev_features = bilinear_interpolate_torch(cur_bev_features, x_idxs, y_idxs) + point_bev_features_list.append(point_bev_features) + + point_bev_features = torch.cat(point_bev_features_list, dim=0) # (B, N, C0) + return point_bev_features
+ +
[docs] def get_sampled_points(self, points, voxel_coords): + B = len(points) + keypoints_list = [] + for i in range(B): + if self.point_source == 'raw_points': + src_points = points[i] + else: + raise NotImplementedError + # # generate random keypoints in the perception view field + # keypoints = torch.randn((self.num_keypoints, 4), device=src_points.device) + # keypoints[..., 0] = keypoints[..., 0] * 140 + # keypoints[..., 1] = keypoints[..., 1] * 40 + # # points with height flag 10 are padding/invalid, for later filtering + # keypoints[..., 2] = 10.0 + + sampled_points = src_points.unsqueeze(dim=0) # (1, N, 3) + # sample points with FPS + # some cropped pcd may have very few points, select various number + # of points to ensure similar sample density + # 50000 is approximately the number of points in one full pcd + num_kpts = int(self.num_keypoints * sampled_points.shape[1] / 50000) + 1 + num_kpts = min(num_kpts, self.num_keypoints) + cur_pt_idxs = pointnet2_utils.furthest_point_sample( + sampled_points[..., :3].contiguous(), num_kpts + ).long() + + if sampled_points.shape[1] < num_kpts: + empty_num = num_kpts - sampled_points.shape[1] + cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num] + + keypoints = sampled_points[0][cur_pt_idxs[0]] + + # keypoints[:len(kpts[0]), :] = kpts + keypoints_list.append(keypoints) + + # keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) + return keypoints_list
+ +
[docs] def forward(self, det_out, bev_feat, voxel_feat, points): + B = len(points) + preds = [x['preds'] for x in det_out] + keypoints_list = self.get_sampled_points(points, voxel_feat) # BxNx4 + + # Only select the points that are in the predicted bounding boxes + boxes = cat_coor_with_idx([x['box'] for x in preds]) + scores = torch.cat([x['scr'] for x in preds]) + # At the early training stage, there might be too many boxes, + # we select limited number of boxes for the second stage. + if boxes.shape[0] > B * 100: + topk = scores.topk(k=100 * B).indices + scores = scores[topk] + boxes = boxes[topk] + + boxes_tmp = boxes.clone() + if self.enlarge_selection_boxes: + boxes_tmp[:, 4:7] += 0.5 + keypoints = cat_coor_with_idx(keypoints_list) + if len(boxes_tmp) > 0: + pts_idx_of_box = points_in_boxes_gpu(keypoints[:, :4], boxes_tmp, batch_size=B)[1] + else: + pts_idx_of_box = torch.full((len(keypoints),), fill_value=-1, device=keypoints.device) + kpt_mask = pts_idx_of_box >= 0 + # Ensure enough points are selected to satisfy the + # condition of batch norm in the FC layers of feature fusion module + for i in range(B): + batch_mask = keypoints[:, 0] == i + if kpt_mask[batch_mask].sum().item() < self.min_selected_kpts: + tmp = kpt_mask[batch_mask].clone() + tmp[torch.randint(0, batch_mask.sum().item(), (self.min_selected_kpts,))] = True + kpt_mask[batch_mask] = tmp + + point_features_list = [] + if 'bev' in self.features_source: + point_bev_features = self.interpolate_from_bev_features( + keypoints_list, bev_feat + ) + point_features_list.append(point_bev_features[kpt_mask]) + + new_xyz = keypoints[kpt_mask] + new_xyz_scrs = torch.zeros((kpt_mask.sum().item(),), device=keypoints.device) + valid = pts_idx_of_box[kpt_mask] >= 0 + new_xyz_scrs[valid] = scores[pts_idx_of_box[kpt_mask][valid]] + new_xyz_batch_cnt = torch.tensor([(new_xyz[:, 0] == b).sum() for b in range(B)], + device=new_xyz.device).int() + + if 'raw_points' in self.features_source: + xyz_batch_cnt = torch.tensor([len(pts) for pts in points], + device=points[0].device).int() + raw_points = cat_coor_with_idx(points) + xyz = raw_points[:, 1:4] + point_features = None + + pooled_points, pooled_features = self.SA_rawpoints( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz[:, :3].contiguous(), + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=point_features, + ) + point_features_list.append(pooled_features) + + for k, src_name in enumerate(self.SA_layer_names): + cur_stride = 2 ** (int(src_name[-1]) - 1) + cur_coords = [feat[f"p{cur_stride}"]['coor'] for feat in voxel_feat] + cur_feats = [feat[f"p{cur_stride}"]['feat'] for feat in voxel_feat] + xyz = get_voxel_centers( + torch.cat(cur_coords), + downsample_times=self.downsample_times_map[src_name], + voxel_size=self.voxel_size, + point_cloud_range=self.point_cloud_range + ) + xyz_batch_cnt = torch.tensor([len(coor) for coor in cur_coords], + device=cur_coords[0].device).int() + pooled_points, pooled_features = self.SA_layers[k]( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz[:, :3].contiguous(), + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=torch.cat(cur_feats, dim=0), + ) + + point_features_list.append(pooled_features) + + point_features = torch.cat(point_features_list, dim=1) + + out_dict = {} + # out_dict['point_features_before_fusion'] = point_features + point_features = self.vsa_point_feature_fusion(point_features) + + cur_idx = 0 + out_dict['point_features'] = [] + out_dict['point_coords'] = [] + out_dict['point_scores'] = [] + 
out_dict['boxes'] = [] + out_dict['scores'] = [] + for i, num in enumerate(new_xyz_batch_cnt): + out_dict['point_features'].append(point_features[cur_idx:cur_idx + num]) + out_dict['point_coords'].append(new_xyz[cur_idx:cur_idx + num]) + out_dict['point_scores'].append(new_xyz_scrs[cur_idx:cur_idx + num]) + mask = boxes[:, 0] == i + out_dict['boxes'].append(boxes[mask, 1:]) + out_dict['scores'].append(scores[mask]) + cur_idx += num + + return out_dict
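The keypoint budget in get_sampled_points scales with the size of each (possibly cropped) point cloud, using roughly 50000 points as the size of a full scan, so sparse crops are not oversampled. A minimal sketch of that rule with hypothetical cloud sizes:

num_keypoints = 4096      # configured sampling budget
reference_size = 50000    # approximate point count of a full point cloud

for n_points in (50000, 20000, 3000):
    num_kpts = min(int(num_keypoints * n_points / reference_size) + 1, num_keypoints)
    print(n_points, '->', num_kpts)   # 4096, 1639, 246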
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/projection/fax.html b/docs/_build/html/_modules/cosense3d/modules/projection/fax.html new file mode 100644 index 00000000..a7fef224 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/projection/fax.html @@ -0,0 +1,196 @@ + + + + + + cosense3d.modules.projection.fax — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.projection.fax

+import torch
+from torch import nn
+from einops import rearrange, repeat, reduce
+from torchvision.models.resnet import Bottleneck
+
+from cosense3d.modules.plugin.cobevt import CrossViewSwapAttention, Attention, BEVEmbedding
+from cosense3d.modules import BaseModule
+ResNetBottleNeck = lambda c: Bottleneck(c, c // 4)
+
+
+
[docs]class FAXModule(BaseModule): + def __init__( + self, + middle, + dim, + img_size, + strides, + feat_dims, + cross_view, + cross_view_swap, + bev_embedding, + self_attn, + **kwargs + ): + super().__init__(**kwargs) + self.img_size = img_size + + cross_views = list() + layers = list() + downsample_layers = list() + + for i, (stride, num_layers) in enumerate(zip(strides, middle)): + feat_dim = feat_dims[i] + feat_height, feat_width = img_size[0] // stride, img_size[1] // stride + + cva = CrossViewSwapAttention(feat_height, feat_width, feat_dim, + dim[i], i, + **cross_view, **cross_view_swap) + cross_views.append(cva) + + layer = nn.Sequential(*[ResNetBottleNeck(dim[i]) for _ in range(num_layers)]) + layers.append(layer) + + if i < len(middle) - 1: + downsample_layers.append(nn.Sequential( + nn.Sequential( + nn.Conv2d(dim[i], dim[i] // 4, + kernel_size=3, stride=1, + padding=1, bias=False), + nn.PixelUnshuffle(2), + nn.Conv2d(dim[i+1], dim[i+1], + 3, padding=1, bias=False), + nn.BatchNorm2d(dim[i+1]), + nn.ReLU(inplace=True), + nn.Conv2d(dim[i+1], + dim[i+1], 1, padding=0, bias=False), + nn.BatchNorm2d(dim[i+1]) + ))) + + self.bev_embedding = BEVEmbedding(dim[0], **bev_embedding) + self.cross_views = nn.ModuleList(cross_views) + self.layers = nn.ModuleList(layers) + self.downsample_layers = nn.ModuleList(downsample_layers) + self.self_attn = Attention(dim[-1], **self_attn) + +
[docs] def forward(self, img_feat, intrinsic, extrinsic, **kwargs): + B = len(img_feat) + N = len(intrinsic[0]) + intrinsic = self.cat_list(intrinsic, recursive=True) + extrinsic = self.cat_list(extrinsic, recursive=True) + I_inv = torch.stack([I.inverse()[:3, :3] for I in intrinsic], dim=0 + ).reshape(B, N, 3, 3) + E_inv = torch.stack([E.inverse() for E in extrinsic], dim=0 + ).reshape(B, N, 4, 4) + + x = self.bev_embedding.get_prior() # d H W + x = repeat(x, '... -> b ...', b=B) # B d H W + + for i, (cross_view, layer) in enumerate(zip(self.cross_views, self.layers)): + feature = torch.stack([feat[i] for feat in img_feat], dim=0) + + x = cross_view(i, x, self.bev_embedding, feature, I_inv, E_inv) + x = layer(x) + if i < len(img_feat[0])-1: + x = self.downsample_layers[i](x) + + x = self.self_attn(x) + return {self.scatter_keys[0]: x}
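Before the cross-view attention, the per-camera intrinsics and extrinsics are inverted and regrouped into (B, N, 3, 3) and (B, N, 4, 4) tensors. A standalone sketch of that reshaping, using identity matrices as placeholder calibrations:

import torch

B, N = 2, 4                                        # hypothetical: 2 agents with 4 cameras each
intrinsic = [torch.eye(4) for _ in range(B * N)]   # flat list, as produced by cat_list
extrinsic = [torch.eye(4) for _ in range(B * N)]

I_inv = torch.stack([I.inverse()[:3, :3] for I in intrinsic]).reshape(B, N, 3, 3)
E_inv = torch.stack([E.inverse() for E in extrinsic]).reshape(B, N, 4, 4)
print(I_inv.shape, E_inv.shape)                    # (2, 4, 3, 3) (2, 4, 4, 4)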
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/projection/petr.html b/docs/_build/html/_modules/cosense3d/modules/projection/petr.html new file mode 100644 index 00000000..4e51fad0 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/projection/petr.html @@ -0,0 +1,287 @@ + + + + + + cosense3d.modules.projection.petr — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.projection.petr

+from typing import List
+
+import torch
+from torch import nn
+
+from cosense3d.modules import BaseModule
+from cosense3d.modules.plugin import build_plugin_module
+from cosense3d.modules.utils.common import inverse_sigmoid
+from cosense3d.modules.utils.misc import SELayer_Linear, MLN
+from cosense3d.modules.utils.positional_encoding import pos2posemb3d
+
+
+
[docs]class PETR(BaseModule): + def __init__(self, + in_channels, + transformer, + position_range, + num_reg_fcs=2, + num_pred=3, + topk=2048, + num_query=644, + depth_num=64, + LID=True, + depth_start=1, + **kwargs): + super().__init__(**kwargs) + self.transformer = build_plugin_module(transformer) + self.embed_dims = self.transformer.embed_dims + self.img_position_dim = depth_num * 3 + self.num_pose_feat = 64 + self.in_channels = in_channels + self.topk = topk + self.num_query = num_query + self.LID = LID + self.num_reg_fcs = num_reg_fcs + self.num_pred = num_pred + + if self.LID: # linear-increasing discretization + index = torch.arange(start=0, end=depth_num, step=1).float() + index_1 = index + 1 + bin_size = (position_range[3] - depth_start) / (depth_num * (1 + depth_num)) + coords_d = depth_start + bin_size * index * index_1 + else: + index = torch.arange(start=0, end=depth_num, step=1).float() + bin_size = (position_range[3] - depth_start) / depth_num + coords_d = depth_start + bin_size * index + + self.coords_d = nn.Parameter(coords_d, requires_grad=False) + self.position_range = nn.Parameter(torch.tensor(position_range), requires_grad=False) + self.reference_points = nn.Embedding(self.num_query, 3) + + self._init_layers() + + def _init_layers(self): + self.img_position_encoder = nn.Sequential( + nn.Linear(self.img_position_dim, self.embed_dims * 4), + nn.ReLU(), + nn.Linear(self.embed_dims * 4, self.embed_dims), + ) + self.img_memory_embed = nn.Sequential( + nn.Linear(self.in_channels, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.query_embedding = nn.Sequential( + nn.Linear(self.num_pose_feat*3, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + self.spatial_alignment = MLN(8, f_dim=self.embed_dims) + # can be replaced with MLN + self.featurized_pe = SELayer_Linear(self.embed_dims) + +
[docs] def init_weights(self): + # follow the official DETR to initialize parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + nn.init.xavier_uniform_(m.weight) + self._is_init = True
+ +
[docs] def forward(self, img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img, **kwargs): + img_memory, img_pos, img2lidars, Is = self.gather_topk( + img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img) + + img_pos_emb, cone = self.img_position_embeding(img_memory, img_pos, Is, img2lidars) + img_memory = self.img_memory_embed(img_memory) + + # spatial_alignment in focal petr + img_memory = self.spatial_alignment(img_memory, cone) + img_pos_emb = self.featurized_pe(img_pos_emb, img_memory) + + reference_points = (self.reference_points.weight).unsqueeze(0).repeat(img_memory.shape[0], 1, 1) + query_pos = self.query_embedding(pos2posemb3d(reference_points, self.num_pose_feat)) + tgt = torch.zeros_like(query_pos) + outs_dec, _ = self.transformer(img_memory, tgt, query_pos, img_pos_emb) + + outs = [ + { + 'outs_dec': outs_dec[:, i], + 'ref_pts': reference_points[i], + } for i in range(len(img_memory)) + ] + + return {self.scatter_keys[0]: outs}
+ +
[docs] def format_input(self, input: List): + memory = [] + for x in input: + x = x.permute(0, 2, 3, 1).flatten(0, 2) + memory.append(x) + max_l = max([m.shape[0] for m in memory]) + out = x.new_zeros(len(memory), max_l, x.shape[-1]) + mask = x.new_ones(len(memory), max_l) + for i, m in enumerate(memory): + out[i, :len(m)] = m + mask[i, :len(m)] = False + return out, mask
+ +
[docs] def gather_topk(self, img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img): + B = len(img_feat) + out_feat = [] + out_coor = [] + mem_ctrs = [] + img2lidars = [] + Is = [] + for b in range(B): + topk_inds = img_roi[b]['sample_weight'].view(-1).topk(k=self.topk).indices + out_feat.append(img_feat[b].permute(0, 2, 3, 1).flatten(0, 2)[topk_inds]) + # out_coor.append(img_coor[b].flatten(0, 2)[topk_inds]) + N, _, h, w = img_feat[b].shape + H, W = img_size[b][0] + + # [alpha_x, alpha_y] + intrinsic = torch.stack(intrinsics[b], dim=0)[..., [0, 1], [0, 1]] + intrinsic = torch.abs(intrinsic) / 1e3 + intrinsic = intrinsic.view(N, -1, 2).repeat(1, h * w, 1).flatten(0, 1)[topk_inds] + Is.append(intrinsic) + + # transform memery_centers from ratio to pixel + img_coor[b][..., 0] = img_coor[b][..., 0] * W + img_coor[b][..., 1] = img_coor[b][..., 1] * H + topk_ctrs = img_coor[b].flatten(0, 2)[topk_inds] + mem_ctrs.append(topk_ctrs) + + img2lidar = torch.stack(lidar2img[b], dim=0).inverse() + img2lidar = img2lidar.view(N, 1, 4, 4).repeat(1, h * w, 1, 1) + img2lidars.append(img2lidar.flatten(0, 1)[topk_inds]) + + out_feat = torch.stack(out_feat, dim=0) + # out_coor = torch.stack(out_coor, dim=0) + mem_ctrs = torch.stack(mem_ctrs, dim=0) + img2lidars = torch.stack(img2lidars, dim=0) + Is = torch.stack(Is, dim=0) + + return out_feat, mem_ctrs, img2lidars, Is
+ +
[docs] def img_position_embeding(self, img_memory, img_pos, Is, img2lidars): + eps = 1e-5 + B = len(img_memory) + D = self.coords_d.shape[0] + coords_d = self.coords_d.view(1, 1, D, 1).repeat(B, self.topk, 1, 1) + img_pos = img_pos.unsqueeze(-2).repeat(1, 1, D, 1) + coords = torch.cat([img_pos, coords_d], dim=-1) + coords = torch.cat((coords, torch.ones_like(coords_d)), -1) + coords[..., :2] = coords[..., :2] * torch.maximum( + coords[..., 2:3], torch.ones_like(coords_d) * eps) + coords = coords.unsqueeze(-1) + + coords3d = torch.matmul(img2lidars.unsqueeze(-3), coords).squeeze(-1)[..., :3] + coords3d[..., :3] = (coords3d[..., :3] - self.position_range[:3]) / ( + self.position_range[3:] - self.position_range[:3]) + coords3d = coords3d.reshape(B, -1, D * 3) + pos_embed = inverse_sigmoid(coords3d) + coords_position_embeding = self.img_position_encoder(pos_embed) + cone = torch.cat([Is, coords3d[..., -3:], coords3d[..., -90:-87]], dim=-1) + return coords_position_embeding, cone
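With LID enabled, the depth bins used for the 3D position embedding widen linearly with the bin index, giving finer resolution close to the camera. A short sketch of the discretization from __init__, with a made-up maximum range standing in for position_range[3]:

import torch

depth_num, depth_start = 64, 1.0
max_depth = 100.0                       # assumed stand-in for position_range[3]

index = torch.arange(depth_num).float()
bin_size = (max_depth - depth_start) / (depth_num * (1 + depth_num))
coords_d = depth_start + bin_size * index * (index + 1)   # linear-increasing discretization
print(coords_d[:3])    # tight spacing near the camera
print(coords_d[-1])    # widest bin towards the far end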
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/projection/spatial_transform.html b/docs/_build/html/_modules/cosense3d/modules/projection/spatial_transform.html new file mode 100644 index 00000000..33da9125 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/projection/spatial_transform.html @@ -0,0 +1,157 @@ + + + + + + cosense3d.modules.projection.spatial_transform — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.projection.spatial_transform

+import torch.nn as nn
+import torch
+from einops import rearrange
+from cosense3d.modules import BaseModule
+from cosense3d.modules.utils import cobevt_utils as utils
+
+
+
[docs]class STTF(BaseModule): + def __init__(self, + resolution, + downsample_rate, + use_roi_mask=True, + **kwargs): + super(STTF, self).__init__(**kwargs) + self.discrete_ratio = resolution + self.downsample_rate = downsample_rate + self.use_roi_mask = use_roi_mask + +
[docs] def forward(self, bev_feat, requests, coop_poses, **kwargs): + """ + Transform the bev features to ego space. + """ + x = self.stack_data_from_list(bev_feat) + coop_poses = self.stack_data_from_list(coop_poses) + ego_poses = self.stack_data_from_list(requests, 'lidar_pose') + transform_coop2ego = ego_poses.inverse() @ coop_poses + dist_correction_matrix = utils.get_discretized_transformation_matrix( + transform_coop2ego, self.discrete_ratio, self.downsample_rate) + + # transpose and flip to make the transformation correct + x = rearrange(x, 'b c h w -> b c w h') + x = torch.flip(x, dims=(3,)) + # Only compensate non-ego vehicles + B, C, H, W = x.shape + + T = utils.get_transformation_matrix( + dist_correction_matrix.reshape(-1, 2, 3), (H, W)) + cav_features = utils.warp_affine(x.reshape(-1, C, H, W), T, + (H, W)) + cav_features = cav_features.reshape(B, C, H, W) + + # flip and transpose back + x = cav_features + x = torch.flip(x, dims=(3,)) + x = rearrange(x, 'b c w h -> b c h w') + + bev_mask = utils.get_rotated_roi((B, 1, 1, H, W), T).squeeze(1) + + return {'bev_feat': x, 'bev_mask': bev_mask}
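The core of STTF is composing the cooperative and ego LiDAR poses into one coop-to-ego transform before warping the BEV map. A self-contained sketch of that composition; pose_to_mat is a hypothetical helper and the pose values are made up:

import torch

def pose_to_mat(x, y, yaw):
    # Hypothetical helper: 4x4 homogeneous pose from planar position and heading.
    T = torch.eye(4)
    c, s = torch.cos(torch.tensor(yaw)), torch.sin(torch.tensor(yaw))
    T[0, 0], T[0, 1], T[1, 0], T[1, 1] = c, -s, s, c
    T[0, 3], T[1, 3] = x, y
    return T

ego_pose = pose_to_mat(0.0, 0.0, 0.0)
coop_pose = pose_to_mat(10.0, 2.0, 0.3)

# Same composition as in STTF.forward: bring the cooperative BEV grid into the ego frame.
transform_coop2ego = ego_pose.inverse() @ coop_pose
print(transform_coop2ego[:2, 3])   # translation of the coop sensor seen from the ego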
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils.html b/docs/_build/html/_modules/cosense3d/modules/utils.html new file mode 100644 index 00000000..1a7ace2d --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils.html @@ -0,0 +1,116 @@ + + + + + + cosense3d.modules.utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.utils

+import copy
+from torch import nn
+
+
+
[docs]def build_torch_module(cfg): + cfg_ = copy.deepcopy(cfg) + module_name = cfg_.pop('type') + module = getattr(nn, module_name)(**cfg_) + return module
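build_torch_module resolves the 'type' entry against torch.nn and passes the remaining keys to the constructor, so any torch.nn layer can be configured from a plain dict. For example, assuming the function above is imported:

act = build_torch_module(dict(type='ReLU', inplace=True))             # -> nn.ReLU(inplace=True)
norm = build_torch_module(dict(type='BatchNorm2d', num_features=64))  # -> nn.BatchNorm2d(64)
print(act, norm)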
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/box_coder.html b/docs/_build/html/_modules/cosense3d/modules/utils/box_coder.html new file mode 100644 index 00000000..b1d7fbc7 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/box_coder.html @@ -0,0 +1,511 @@ + + + + + + cosense3d.modules.utils.box_coder — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.utils.box_coder

+import copy
+import math
+import torch
+
+from cosense3d.ops.utils import points_in_boxes_gpu
+
+
+
[docs]def build_box_coder(type, **kwargs): + return globals()[type](**kwargs)
+ + +
[docs]class ResidualBoxCoder(object): + def __init__(self, mode: str='simple_dist'): + """ + :param mode: str, simple_dist | sin_cos_dist | compass_rose + """ + self.mode = mode + if mode == 'simple_dist': + self.code_size = 7 + elif mode == 'sin_cos_dist': + self.code_size = 8 + elif mode == 'compass_rose': + self.code_size = 10 + self.cls_code_size = 2 + else: + raise NotImplementedError + +
[docs] def encode_direction(self, ra, rg): + if self.mode == 'simple_dist': + reg = (rg - ra).view(-1, 1) + return reg, None + elif self.mode == 'sin_cos_dist': + rgx = torch.cos(rg) + rgy = torch.sin(rg) + rax = torch.cos(ra) + ray = torch.sin(ra) + rtx = rgx - rax + rty = rgy - ray + ret = [rtx, rty] + reg = torch.stack(ret, dim=-1) # N 2 + return reg, None + elif self.mode == 'compass_rose': + # encode box directions + rgx = torch.cos(rg).view(-1, 1) # N 1 + rgy = torch.sin(rg).view(-1, 1) # N 1 + ra_ext = torch.cat([ra, ra + math.pi], dim=-1) # N 2, invert + rax = torch.cos(ra_ext) # N 2 + ray = torch.sin(ra_ext) # N 2 + # cos(a - b) = cos(a)cos(b) + sin(a)sin(b) + # we use arccos instead of a-b to control the difference in 0-pi + diff_angle = torch.arccos(rax * rgx + ray * rgy) # N 2 + dir_score = 1 - diff_angle / math.pi # N 2 + rtx = rgx - rax # N 2 + rty = rgy - ray # N 2 + + dir_score = dir_score # N 2 + ret = [rtx, rty] + reg = torch.cat(ret, dim=-1) # N 4 + return reg, dir_score + else: + raise NotImplementedError
+ +
[docs] def decode_direction(self, ra, vt, dir_scores=None): + if self.mode == 'simple_dist': + rg = vt + ra + return rg + elif self.mode == 'sin_cos_dist': + rax = torch.cos(ra) + ray = torch.sin(ra) + va = torch.cat([rax, ray], dim=-1) + vg = vt + va + rg = torch.atan2(vg[..., 1], vg[..., 0]) + return rg + elif self.mode == 'compass_rose': + ra_ext = torch.cat([ra, ra + math.pi], dim=-1) # N 2, invert + rax = torch.cos(ra_ext) # N 2 + ray = torch.sin(ra_ext) # N 2 + va = torch.cat([rax, ray], dim=-1) + vg = vt + va + rg = torch.atan2(vg[..., 2:], vg[..., :2]).view(-1, 2) + + dirs = torch.argmax(dir_scores, dim=-1).view(-1) + rg = rg[torch.arange(len(rg)), dirs].view(len(vt), -1, 1) + return rg + else: + raise NotImplementedError
+ +
[docs] def encode(self, anchors, boxes): + xa, ya, za, la, wa, ha, ra = torch.split(anchors, 1, dim=-1) + xg, yg, zg, lg, wg, hg, rg = torch.split(boxes, 1, dim=-1) + + diagonal = torch.sqrt(la ** 2 + wa ** 2) + xt = (xg - xa) / diagonal + yt = (yg - ya) / diagonal + zt = (zg - za) / ha + + lt = torch.log(lg / la) + wt = torch.log(wg / wa) + ht = torch.log(hg / ha) + + reg_dir, dir_score = self.encode_direction(ra, rg) + ret = [xt, yt, zt, lt, wt, ht, reg_dir] + reg = torch.cat(ret, dim=1) # N 6+4 + + return reg, dir_score
+ +
[docs] def decode(self, anchors, boxes_enc, dir_scores=None): + xa, ya, za, la, wa, ha, ra = torch.split(anchors, 1, dim=-1) + xt, yt, zt, lt, wt, ht = torch.split(boxes_enc[..., :6], 1, dim=-1) + vt = boxes_enc[..., 6:] + + diagonal = torch.sqrt(la ** 2 + wa ** 2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * ha + za + + lg = torch.exp(lt) * la + wg = torch.exp(wt) * wa + hg = torch.exp(ht) * ha + + rg = self.decode_direction(ra, vt, dir_scores) + + return torch.cat([xg, yg, zg, lg, wg, hg, rg], dim=-1)
+ + +
[docs]class CenterBoxCoder(object): + def __init__(self, with_velo=False, with_pred=False, reg_radius=1.6, z_offset=1.0): + self.with_velo = with_velo + self.with_pred = with_pred + self.reg_radius = reg_radius + self.z_offset = z_offset + self.pred_max_offset = 2.0 + reg_radius + +
[docs] def encode(self, centers, gt_boxes, meter_per_pixel, gt_preds=None): + """ + + :param centers: (N, 3) + :param gt_boxes: (N, 8) [batch_idx, x, y, z, l, w, h, r] + :param meter_per_pixel: tuple with 2 elements + :param gt_preds: + :return: + """ + if isinstance(meter_per_pixel, list): + assert meter_per_pixel[0] == meter_per_pixel[1], 'only support unified pixel size for x and y' + # TODO: adapt meter per pixel + meter_per_pixel = meter_per_pixel[0] + if len(gt_boxes) == 0: + valid = torch.zeros_like(centers[:, 0]).bool() + res = None, None, None, valid + if self.with_velo: + res = res + (None,) + return res + + # match centers and gt_boxes + dist_ctr_to_box = torch.norm(centers[:, 1:3].unsqueeze(1) + - gt_boxes[:, 1:3].unsqueeze(0), dim=-1) + cc, bb = torch.meshgrid(centers[:, 0], gt_boxes[:, 0], indexing='ij') + dist_ctr_to_box[cc != bb] = 1000 + min_dists, box_idx_of_pts = dist_ctr_to_box.min(dim=1) + diagnal = torch.norm(gt_boxes[:, 4:6].mean(dim=0) / 2) + valid = min_dists < max(diagnal, meter_per_pixel[0]) + # valid = min_dists < self.reg_radius + valid_center, valid_box = centers[valid], gt_boxes[box_idx_of_pts[valid]] + valid_pred = None + if self.with_pred and gt_preds is not None: + valid_pred = gt_preds[box_idx_of_pts[valid]] + + xc, yc = torch.split(valid_center[:, 1:3], 1, dim=-1) + xg, yg, zg, lg, wg, hg, rg = torch.split(valid_box[:, 1:8], 1, dim=-1) + + xt = xg - xc + yt = yg - yc + zt = zg # + self.z_offset + + lt = torch.log(lg) + wt = torch.log(wg) + ht = torch.log(hg) + + # encode box directions + rgx = torch.cos(rg).view(-1, 1) # N 1 + rgy = torch.sin(rg).view(-1, 1) # N 1 + ra = torch.arange(0, 2, 0.5).to(xc.device) * math.pi + ra_ext = torch.ones_like(valid_box[:, :4]) * ra.view(-1, 4) # N 4 + rax = torch.cos(ra_ext) # N 4 + ray = torch.sin(ra_ext) # N 4 + # cos(a - b) = cos(a)cos(b) + sin(a)sin(b) + # we use arccos instead of a-b to control the difference in 0-pi + diff_angle = torch.arccos(rax * rgx + ray * rgy) # N 4 + dir_score = 1 - diff_angle / math.pi # N 4 + rtx = rgx - rax # N 4 + rty = rgy - ray # N 4 + + reg_box = torch.cat([xt, yt, zt, lt, wt, ht], dim=1) # N 6 + reg_dir = torch.cat([rtx, rty], dim=1) # N 8 + # reg_box[..., :3] /= self.reg_radius + + res = (reg_box, reg_dir, dir_score, valid) + + if self.with_velo: + res = res + (valid_box[:, 8:10],) + elif valid_box.shape[-1] > 8: + res = res + (valid_box[:, 8:10],) + if self.with_pred and valid_pred is not None: + prev_angles = valid_box[:, 7:8] + preds_tgt = [] + mask = [] + for i, boxes in enumerate(valid_pred.transpose(1, 0)): + # some gt_boxes do not have gt successors, zero padded virtual successors are used to align the number + # of boxes between gt_boxes and gt_preds, when calculate preds loss, these boxes should be ignored. + mask.append(boxes.any(dim=-1, keepdim=True).float()) + diff_xy = (boxes[:, :2] - valid_center[:, 1:3]) # / self.pred_max_offset + diff_z = boxes[:, 2:3] # + self.z_offset + diff_cos = torch.cos(boxes[:, 3:]) - torch.cos(prev_angles) + diff_sin = torch.sin(boxes[:, 3:]) - torch.sin(prev_angles) + preds_tgt.append(torch.cat([diff_xy, diff_z, diff_cos, diff_sin], dim=-1) / (i + 2)) + preds_tgt = torch.cat(preds_tgt, dim=-1) + mask = torch.cat(mask, dim=-1).all(dim=-1, keepdim=True) + res = res + (torch.cat([mask, preds_tgt], dim=-1),) + return res
+ +
[docs] def decode(self, centers, reg): + """ + + :param centers: Tensor (N, 3) or (B, N, 2+). + :param reg: dict, + box - (N, 6) or (B, N, 6) + dir - (N, 8) or (B, N, 8) + scr - (N, 4) or (B, N, 4) + vel - (N, 2) or (B, N, 2), optional + pred - (N, 5) or (B, N, 5), optional + :return: decoded bboxes. + """ + if centers.ndim > 2: + xc, yc = torch.split(centers[..., 0:2], 1, dim=-1) + else: + xc, yc = torch.split(centers[..., 1:3], 1, dim=-1) + # reg['box'][..., :3] *= self.reg_radius + xt, yt, zt, lt, wt, ht = torch.split(reg['box'], 1, dim=-1) + + xo = xt + xc + yo = yt + yc + zo = zt #- self.z_offset + + lo = torch.exp(lt) + wo = torch.exp(wt) + ho = torch.exp(ht) + + # decode box directions + scr_max, max_idx = reg['scr'].max(dim=-1) + shape = max_idx.shape + max_idx = max_idx.view(-1) + ii = torch.arange(len(max_idx)) + ra = max_idx.float() * 0.5 * math.pi + ct = reg['dir'][..., :4].view(-1, 4)[ii, max_idx] + torch.cos(ra) + st = reg['dir'][..., 4:].view(-1, 4)[ii, max_idx] + torch.sin(ra) + ro = torch.atan2(st.view(*shape), ct.view(*shape)).unsqueeze(-1) + + if centers.ndim > 2: + # dense tensor + ret = torch.cat([xo, yo, zo, lo, wo, ho, ro], dim=-1) + else: + # sparse tensor with batch indices + ret = torch.cat([centers[..., :1], xo, yo, zo, lo, wo, ho, ro], dim=-1) + + if self.with_velo: + ret = torch.cat([ret, reg['vel']], dim=-1) + if self.with_pred: + pred = reg['pred'].clone() + b, n, c = pred.shape + pred_len = c // 5 + mul = torch.arange(1, pred_len + 1, device=pred.device, dtype=pred.dtype) + pred = pred.view(b, n, -1, 5) * mul.view(1, 1, -1, 1) + xy = pred[..., :2] + centers[..., :2].unsqueeze(-2) + z = pred[..., 2:3] + r = torch.atan2(pred[..., 4] + st.view(*shape, 1), pred[..., 3] + ct.view(*shape, 1)).unsqueeze(-1) + lwh = torch.cat([lo, wo, ho], dim=-1).unsqueeze(-2).repeat(1, 1, pred_len, 1) + pred = torch.cat([xy, z, lwh, r], dim=-1) + ret = (ret, pred) + + return ret
+ + +
[docs]class BoxPredCoder(object): + def __init__(self, with_velo=False): + self.with_velo = with_velo + +
[docs] def encode(self, centers, gt_boxes, meter_per_pixel, gt_preds): + """ + + :param centers: (N, 3) + :param gt_boxes: (N, 8) [batch_idx, x, y, z, l, w, h, r] + :param meter_per_pixel: tuple with 2 elements + :param gt_preds: (N, 8) [batch_idx, x, y, z, l, w, h, r], gt boxes to be predicted + :return: encoded bbox targets. + """ + if isinstance(meter_per_pixel, list): + assert meter_per_pixel[0] == meter_per_pixel[1], 'only support unified pixel size for x and y' + # TODO: adapt meter per pixel + meter_per_pixel = meter_per_pixel[0] + if len(gt_boxes) == 0: + valid = torch.zeros_like(centers[:, 0]).bool() + res = None, None, None, valid + if self.with_velo: + res = res + (None,) + return res + + # match centers and gt_boxes + dist_ctr_to_box = torch.norm(centers[:, 1:3].unsqueeze(1) + - gt_boxes[:, 1:3].unsqueeze(0), dim=-1) + cc, bb = torch.meshgrid(centers[:, 0], gt_boxes[:, 0], indexing='ij') + dist_ctr_to_box[cc != bb] = 1000 + min_dists, box_idx_of_pts = dist_ctr_to_box.min(dim=1) + diagnal = torch.norm(gt_boxes[:, 4:6].mean(dim=0) / 2) + valid = min_dists < max(diagnal, meter_per_pixel[0]) + # valid = min_dists < self.reg_radius + valid_center = centers[valid] + valid_box = gt_preds[box_idx_of_pts[valid]] + + xc, yc = torch.split(valid_center[:, 1:3], 1, dim=-1) + xg, yg, zg, lg, wg, hg, rg = torch.split(valid_box[:, 1:8], 1, dim=-1) + + xt = xg - xc + yt = yg - yc + zt = zg # + self.z_offset + + lt = torch.log(lg) + wt = torch.log(wg) + ht = torch.log(hg) + + # encode box directions + rgx = torch.cos(rg).view(-1, 1) # N 1 + rgy = torch.sin(rg).view(-1, 1) # N 1 + ra = torch.arange(0, 2, 0.5).to(xc.device) * math.pi + ra_ext = torch.ones_like(valid_box[:, :4]) * ra.view(-1, 4) # N 4 + rax = torch.cos(ra_ext) # N 4 + ray = torch.sin(ra_ext) # N 4 + # cos(a - b) = cos(a)cos(b) + sin(a)sin(b) + # we use arccos instead of a-b to control the difference in 0-pi + diff_angle = torch.arccos(rax * rgx + ray * rgy) # N 4 + dir_score = 1 - diff_angle / math.pi # N 4 + rtx = rgx - rax # N 4 + rty = rgy - ray # N 4 + + reg_box = torch.cat([xt, yt, zt, lt, wt, ht], dim=1) # N 6 + reg_dir = torch.cat([rtx, rty], dim=1) # N 8 + # reg_box[..., :3] /= self.reg_radius + + res = (reg_box, reg_dir, dir_score, valid) + + if self.with_velo: + res = res + (valid_box[:, 8:10],) + elif valid_box.shape[-1] > 8: + res = res + (valid_box[:, 8:10],) + return res
+ +
[docs] def decode(self, centers, reg): + """ + + :param centers: Tensor (N, 3) or (B, N, 2+). + :param reg: dict, + box - (N, 6) or (B, N, 6) + dir - (N, 8) or (B, N, 8) + scr - (N, 4) or (B, N, 4) + vel - (N, 2) or (B, N, 2), optional + pred - (N, 5) or (B, N, 5), optional + :return: decoded bboxes. + """ + if centers.ndim > 2: + xc, yc = torch.split(centers[..., 0:2], 1, dim=-1) + else: + xc, yc = torch.split(centers[..., 1:3], 1, dim=-1) + # reg['box'][..., :3] *= self.reg_radius + xt, yt, zt, lt, wt, ht = torch.split(reg['box'], 1, dim=-1) + + xo = xt + xc + yo = yt + yc + zo = zt #- self.z_offset + + lo = torch.exp(lt) + wo = torch.exp(wt) + ho = torch.exp(ht) + + # decode box directions + scr_max, max_idx = reg['scr'].max(dim=-1) + shape = max_idx.shape + max_idx = max_idx.view(-1) + ii = torch.arange(len(max_idx)) + ra = max_idx.float() * 0.5 * math.pi + ct = reg['dir'][..., :4].view(-1, 4)[ii, max_idx] + torch.cos(ra) + st = reg['dir'][..., 4:].view(-1, 4)[ii, max_idx] + torch.sin(ra) + ro = torch.atan2(st.view(*shape), ct.view(*shape)).unsqueeze(-1) + + if centers.ndim > 2: + # dense tensor + ret = torch.cat([xo, yo, zo, lo, wo, ho, ro], dim=-1) + else: + # sparse tensor with batch indices + ret = torch.cat([centers[..., :1], xo, yo, zo, lo, wo, ho, ro], dim=-1) + + if self.with_velo: + ret = torch.cat([ret, reg['vel']], dim=-1) + + return ret
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/common.html b/docs/_build/html/_modules/cosense3d/modules/utils/common.html new file mode 100644 index 00000000..43f9ca64 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/common.html @@ -0,0 +1,447 @@ + + + + + + cosense3d.modules.utils.common — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.utils.common

+from importlib import import_module
+
+import torch
+from torch import nn
+import numpy as np
+
+from torch.distributions.multivariate_normal import _batch_mahalanobis
+from cosense3d.modules.utils.me_utils import metric2indices
+
+pi = 3.141592653
+
+
+
[docs]def clip_sigmoid(x: torch.Tensor, eps: float=1e-4) -> torch.Tensor: + """Sigmoid function for input feature. + + :param x: Input feature map with the shape of [B, N, H, W]. + :param eps: Lower bound of the range to be clamped to. + Defaults to 1e-4. + :return: Feature map after sigmoid. + """ + y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) + return y
+ + +
[docs]def cat_name_str(module_name): + """ + :param module_name: str, format in xxx_yyy_zzz + :returns: class_name: str, format in XxxYyyZzz + """ + cls_name = '' + for word in module_name.split('_'): + cls_name += word[:1].upper() + word[1:] + return cls_name
+ + +
[docs]def instantiate(module_name, cls_name=None, module_cfg=None, **kwargs): + package = import_module(f"cosense3d.model.{module_name}") + cls_name = cat_name_str(module_name) if cls_name is None else cls_name + obj_cls = getattr(package, cls_name) + if module_cfg is None: + obj_inst = obj_cls(**kwargs) + else: + obj_inst = obj_cls(module_cfg) + return obj_inst
+ + +
[docs]def bias_init_with_prob(prior_prob: float) -> float: + """initialize conv/fc bias value according to a given probability value.""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init
+ + +
[docs]def topk_gather(feat, topk_indexes): + if topk_indexes is not None: + feat_shape = feat.shape + topk_shape = topk_indexes.shape + + view_shape = [1 for _ in range(len(feat_shape))] + view_shape[:2] = topk_shape[:2] + topk_indexes = topk_indexes.view(*view_shape) + + feat = torch.gather(feat, 1, topk_indexes.repeat(1, 1, *feat_shape[2:])) + return feat
+ + +
[docs]def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + + :param x: (Tensor) The tensor to do the + inverse. + :param eps: (float) EPS avoid numerical + overflow. Defaults 1e-5. + :returns: Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2)
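inverse_sigmoid maps probabilities back to logits, clamping the input for numerical stability, so sigmoid(inverse_sigmoid(p)) reproduces p away from 0 and 1. Assuming the function above is in scope:

import torch

p = torch.tensor([0.01, 0.5, 0.99])
logits = inverse_sigmoid(p)
print(torch.sigmoid(logits))   # ~[0.01, 0.5, 0.99]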
+ + +
[docs]def xavier_init(module: nn.Module, + gain: float = 1, + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias)
+ + +
[docs]def limit_period(val, offset=0.5, period=2 * pi): + return val - torch.floor(val / period + offset) * period
+ + +
[docs]def get_conv2d_layers(conv_name, in_channels, out_channels, n_layers, kernel_size, stride, + padding, relu_last=True, sequential=True, **kwargs): + """ + Build convolutional layers. kernel_size, stride and padding should be a list with the + lengths that match n_layers + """ + seq = [] + if 'bias' in kwargs: + bias = kwargs.pop('bias') + else: + bias = False + for i in range(n_layers): + seq.extend([getattr(nn, conv_name)( + in_channels, out_channels, kernel_size[i], stride=stride[i], + padding=padding[i], bias=bias, **{k: v[i] for k, v in kwargs.items()} + ), nn.BatchNorm2d(out_channels, eps=1e-3, momentum=0.01)]) + if i < n_layers - 1 or relu_last: + seq.append(nn.ReLU()) + in_channels = out_channels + if sequential: + return nn.Sequential(*seq) + else: + return seq
+ + +
[docs]def get_norm_layer(channels, norm): + if norm == 'LN': + norm_layer = nn.LayerNorm(channels) + elif norm == 'BN': + norm_layer = nn.BatchNorm1d(channels) + else: + raise NotImplementedError + return norm_layer
+ + +
[docs]def linear_last(in_channels, mid_channels, out_channels, bias=False, norm='BN'): + return nn.Sequential( + nn.Linear(in_channels, mid_channels, bias=bias), + get_norm_layer(mid_channels, norm), + nn.ReLU(inplace=True), + nn.Linear(mid_channels, out_channels) + )
+ + +
[docs]def linear_layers(in_out, activations=None, norm='BN'): + if activations is None: + activations = ['ReLU'] * (len(in_out) - 1) + elif isinstance(activations, str): + activations = [activations] * (len(in_out) - 1) + else: + assert len(activations) == (len(in_out) - 1) + layers = [] + for i in range(len(in_out) - 1): + layers.append(nn.Linear(in_out[i], in_out[i+1], bias=False)) + layers.append(get_norm_layer(in_out[i+1], norm)) + layers.append(getattr(nn, activations[i])()) + return nn.Sequential(*layers)
+ + +
[docs]def meshgrid(xmin, xmax, ymin=None, ymax=None, dim=2, n_steps=None, step=None): + assert dim <= 3, f'dim <= 3, but dim={dim} is given.' + if ymin is not None and ymax is not None: + assert dim == 2 + if n_steps is not None: + x = torch.linspace(xmin, xmax, n_steps) + y = torch.linspace(ymin, ymax, n_steps) + elif step is not None: + x = torch.arange(xmin, xmax, step) + y = torch.arange(ymin, ymax, step) + else: + raise NotImplementedError + xs = (x, y) + else: + if n_steps is not None: + x = torch.linspace(xmin, xmax, n_steps) + if ymin is not None and ymax is not None: + y = torch.linspace(ymin, ymax, n_steps) + elif step is not None: + x = torch.arange(xmin, xmax, step) + else: + raise NotImplementedError + xs = (x, ) * dim + indexing = 'ijk' + indexing = indexing[:dim] + coor = torch.stack( + torch.meshgrid(*xs, indexing=indexing), + dim=-1 + ) + return coor
+ + +
[docs]def meshgrid_cross(xmins, xmaxs, n_steps=None, steps=None): + if n_steps is not None: + assert len(xmins) == len(n_steps) + xs = [torch.linspace(xmin, xmax + 1, nstp) for xmin, xmax, nstp \ + in zip(xmins, xmaxs, n_steps)] + elif steps is not None: + xs = [torch.arange(xmin, xmax + 1, stp) for xmin, xmax, stp \ + in zip(xmins, xmaxs, steps)] + else: + raise NotImplementedError + dim = len(xs) + indexing = 'ijk' + indexing = indexing[:dim] + coor = torch.stack( + torch.meshgrid(*xs, indexing=indexing), + dim=-1 + ) + return coor
+ +
[docs]def pad_r(tensor, value=0): + tensor_pad = torch.ones_like(tensor[..., :1]) * value + return torch.cat([tensor, tensor_pad], dim=-1)
+ + +
[docs]def pad_l(tensor, value=0): + tensor_pad = torch.ones_like(tensor[..., :1]) * value + return torch.cat([tensor_pad, tensor], dim=-1)
+ + +
[docs]def cat_coor_with_idx(tensor_list): + out = [] + for i, t in enumerate(tensor_list): + out.append(pad_l(t, i)) + return torch.cat(out, dim=0)
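cat_coor_with_idx concatenates per-sample point sets into one tensor and prepends the sample index as the first column, the layout expected by ops such as points_in_boxes_gpu. A small sketch with random points, assuming the helpers above are in scope:

import torch

pts_a = torch.rand(3, 3)   # points of sample 0
pts_b = torch.rand(2, 3)   # points of sample 1

batched = cat_coor_with_idx([pts_a, pts_b])
print(batched.shape)   # torch.Size([5, 4])
print(batched[:, 0])   # tensor([0., 0., 0., 1., 1.])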
+ + +
[docs]def fuse_batch_indices(coords, num_cav): + """ + Fusing voxels of CAVs from the same frame + :param stensor: ME sparse tensor + :param num_cav: list of number of CAVs for each frame + :return: fused coordinates and features of stensor + """ + + for i, c in enumerate(num_cav): + idx_start = sum(num_cav[:i]) + mask = torch.logical_and( + coords[:, 0] >= idx_start, + coords[:, 0] < idx_start + c + ) + coords[mask, 0] = i + + return coords
+ + +
[docs]def weighted_mahalanobis_dists(reg_evi, reg_var, dists, var0): + log_probs_list = [] + for i in range(reg_evi.shape[1]): + vars = reg_var[:, i, :] + var0[i] + covs = torch.diag_embed(vars.squeeze(), dim1=1) + unbroadcasted_scale_tril = covs.unsqueeze(1) # N 1 2 2 + + # a.shape = (i, 1, n, n), b = (..., i, j, n), + M = _batch_mahalanobis(unbroadcasted_scale_tril, dists) # N M + log_probs = -0.5 * M + log_probs_list.append(log_probs) + + log_probs = torch.stack(log_probs_list, dim=-1) + probs = log_probs.exp() # N M 2 + cls_evi = reg_evi.view(-1, 1, 2) # N 1 2 + probs_weighted = probs * cls_evi + + return probs_weighted
+ + +
[docs]def draw_sample_prob(centers, reg, samples, res, distr_r, det_r, batch_size, var0): + # from utils.vislib import draw_points_boxes_plt + # vis_ctrs = centers[centers[:, 0]==0, 1:].cpu().numpy() + # vis_sams = samples[samples[:, 0]==0, 1:].cpu().numpy() + # + # ax = draw_points_boxes_plt(50, vis_ctrs, points_c='det_r', return_ax=True) + # draw_points_boxes_plt(50, vis_sams, points_c='b', ax=ax) + reg_evi = reg[:, :2] + reg_var = reg[:, 2:].view(-1, 2, 2) + + grid_size = int(det_r / res) * 2 + centers_map = torch.ones((batch_size, grid_size, grid_size), + device=reg.device).long() * -1 + ctridx = metric2indices(centers, res).T + ctridx[1:] += int(grid_size / 2) + centers_map[ctridx[0], ctridx[1], ctridx[2]] = torch.arange(ctridx.shape[1], + device=ctridx.device) + + steps = int(distr_r / res) + offset = meshgrid(-steps, steps, 2, n_steps=steps * 2 + 1).to(samples.device) # s s 2 + samidx = metric2indices(samples, res).view(-1, 1, 3) \ + + pad_l(offset).view(1, -1, 3) # n s*s 3 + samidx = samidx.view(-1, 3).T # 3 n*s*s + samidx[1:] = (samidx[1:] + (det_r / res)) + mask1 = torch.logical_and((samidx[1:] >= 0).all(dim=0), + (samidx[1:] < (det_r / res * 2)).all(dim=0)) + + inds = samidx[:, mask1].long() + ctr_idx_of_sam = centers_map[inds[0], inds[1], inds[2]] + mask2 = ctr_idx_of_sam >= 0 + ctr_idx_of_sam = ctr_idx_of_sam[mask2] + ns = offset.shape[0]**2 + new_samples = torch.tile(samples.unsqueeze(1), + (1, ns, 1)).view(-1, 3) # n*s*s 3 + new_centers = centers[ctr_idx_of_sam] + dists_sam2ctr = new_samples[mask1][mask2][:, 1:] - new_centers[:, 1:] + + probs_weighted = weighted_mahalanobis_dists( + reg_evi[ctr_idx_of_sam], + reg_var[ctr_idx_of_sam], + dists_sam2ctr.unsqueeze(1), + var0=var0 + ).squeeze() + + sample_evis = torch.zeros_like(samidx[:2].T) + mask = mask1.clone() + mask[mask1] = mask2 + sample_evis[mask] = probs_weighted + sample_evis = sample_evis.view(-1, ns, 2).sum(dim=1) + + return sample_evis
+ + +
[docs]def get_voxel_centers(voxel_coords, + downsample_times, + voxel_size, + point_cloud_range): + """Get centers of spconv voxels. + + :param voxel_coords: (N, 3) + :param downsample_times: + :param voxel_size: + :param point_cloud_range: + :return: + """ + assert voxel_coords.shape[1] == 3 + voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz) + voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times + pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float() + voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range + return voxel_centers
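get_voxel_centers maps (z, y, x) voxel indices of a downsampled sparse feature map back to metric centers. A sketch with made-up voxel size and range, assuming the function above is in scope:

import torch

voxel_coords = torch.tensor([[0, 0, 0],   # (z, y, x) indices
                             [1, 2, 3]])
centers = get_voxel_centers(
    voxel_coords,
    downsample_times=2,
    voxel_size=[0.1, 0.1, 0.1],
    point_cloud_range=[-50, -50, -3, 50, 50, 1],
)
print(centers)   # first row: [-49.9, -49.9, -2.9]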
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/conv.html b/docs/_build/html/_modules/cosense3d/modules/utils/conv.html new file mode 100644 index 00000000..409872c2 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/conv.html @@ -0,0 +1,385 @@ + + + + + + cosense3d.modules.utils.conv — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.utils.conv

+import warnings
+from typing import Dict, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn.modules import batchnorm, instancenorm
+
+from cosense3d.modules.utils.norm import build_norm_layer
+from cosense3d.modules.utils import build_torch_module
+from cosense3d.modules.utils.init import kaiming_init, constant_init
+
+PADDING_LAYERS = dict(
+    zero=nn.ZeroPad2d,
+    reflect=nn.ReflectionPad2d,
+    replicate=nn.ReplicationPad2d
+)
+
+
+
[docs]def build_conv_layer(cfg: Optional[Dict], *args, **kwargs) -> nn.Module: + """Build convolution layer. Modified from openmmlab. + + Args: + cfg (None or dict): The conv layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate an conv layer. + args (argument list): Arguments passed to the `__init__` + method of the corresponding conv layer. + kwargs (keyword arguments): Keyword arguments passed to the `__init__` + method of the corresponding conv layer. + + Returns: + nn.Module: Created conv layer. + """ + if cfg is None: + cfg_ = dict(type='Conv2d') + else: + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if not hasattr(nn, layer_type): + raise KeyError(f'Unrecognized layer type {layer_type}') + else: + conv_layer = getattr(nn, layer_type) + + layer = conv_layer(*args, **kwargs, **cfg_) + + return layer
+ + +
[docs]def build_padding_layer(cfg: Dict, *args, **kwargs) -> nn.Module: + """Build padding layer. + + Args: + cfg (dict): The padding layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate a padding layer. + + Returns: + nn.Module: Created padding layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + + cfg_ = cfg.copy() + padding_type = cfg_.pop('type') + if padding_type not in PADDING_LAYERS: + raise KeyError(f'Unrecognized padding type {padding_type}.') + else: + padding_layer = PADDING_LAYERS.get(padding_type) + + layer = padding_layer(*args, **kwargs, **cfg_) + + return layer
+ + +
[docs]class ConvModule(nn.Module): + """A conv block that bundles conv/norm/activation layers. + + This block simplifies the usage of convolution layers, which are commonly + used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). + It is based upon three build methods: `build_conv_layer()`, + `build_norm_layer()` and `build_activation_layer()`. + + Besides, we add some additional features in this module. + 1. Automatically set `bias` of the conv layer. + 2. Spectral norm is supported. + 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only + supports zero and circular padding, and we add "reflect" padding mode. + + Args: + in_channels (int): Number of channels in the input feature map. + Same as that in ``nn._ConvNd``. + out_channels (int): Number of channels produced by the convolution. + Same as that in ``nn._ConvNd``. + kernel_size (int | tuple[int]): Size of the convolving kernel. + Same as that in ``nn._ConvNd``. + stride (int | tuple[int]): Stride of the convolution. + Same as that in ``nn._ConvNd``. + padding (int | tuple[int]): Zero-padding added to both sides of + the input. Same as that in ``nn._ConvNd``. + dilation (int | tuple[int]): Spacing between kernel elements. + Same as that in ``nn._ConvNd``. + groups (int): Number of blocked connections from input channels to + output channels. Same as that in ``nn._ConvNd``. + bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise + False. Default: "auto". + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + inplace (bool): Whether to use inplace mode for activation. + Default: True. + with_spectral_norm (bool): Whether use spectral norm in conv module. + Default: False. + padding_mode (str): If the `padding_mode` has not been supported by + current `Conv2d` in PyTorch, we will use our own padding layer + instead. Currently, we support ['zeros', 'circular'] with official + implementation and ['reflect'] with our own implementation. + Default: 'zeros'. + order (tuple[str]): The order of conv/norm/activation layers. It is a + sequence of "conv", "norm" and "act". Common examples are + ("conv", "norm", "act") and ("act", "conv", "norm"). + Default: ('conv', 'norm', 'act'). 
+ """ + + _abbr_ = 'conv_block' + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int, int]], + stride: Union[int, Tuple[int, int]] = 1, + padding: Union[int, Tuple[int, int]] = 0, + dilation: Union[int, Tuple[int, int]] = 1, + groups: int = 1, + bias: Union[bool, str] = 'auto', + conv_cfg: Optional[Dict] = None, + norm_cfg: Optional[Dict] = None, + act_cfg: Optional[Dict] = dict(type='ReLU'), + inplace: bool = True, + with_spectral_norm: bool = False, + padding_mode: str = 'zeros', + order: tuple = ('conv', 'norm', 'act')): + super().__init__() + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + assert act_cfg is None or isinstance(act_cfg, dict) + official_padding_mode = ['zeros', 'circular'] + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.inplace = inplace + self.with_spectral_norm = with_spectral_norm + self.with_explicit_padding = padding_mode not in official_padding_mode + self.order = order + assert isinstance(self.order, tuple) and len(self.order) == 3 + assert set(order) == {'conv', 'norm', 'act'} + + self.with_norm = norm_cfg is not None + self.with_activation = act_cfg is not None + # if the conv layer is before a norm layer, bias is unnecessary. + if bias == 'auto': + bias = not self.with_norm + self.with_bias = bias + + if self.with_explicit_padding: + pad_cfg = dict(type=padding_mode) + self.padding_layer = build_padding_layer(pad_cfg, padding) + + # reset padding to 0 for conv module + conv_padding = 0 if self.with_explicit_padding else padding + # build convolution layer + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=conv_padding, + dilation=dilation, + groups=groups, + bias=bias) + # export the attributes of self.conv to a higher level for convenience + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + + if self.with_spectral_norm: + self.conv = nn.utils.spectral_norm(self.conv) + + # build normalization layers + if self.with_norm: + # norm layer is after conv layer + if order.index('norm') > order.index('conv'): + norm_channels = out_channels + else: + norm_channels = in_channels + self.norm_name, norm = build_norm_layer( + norm_cfg, norm_channels) # type: ignore + self.add_module(self.norm_name, norm) + if self.with_bias: + if isinstance(norm, (batchnorm._BatchNorm, + instancenorm._InstanceNorm)): + warnings.warn( + 'Unnecessary conv bias before batch/instance norm') + else: + self.norm_name = None # type: ignore + + # build activation layer + if self.with_activation: + act_cfg_ = act_cfg.copy() # type: ignore + # nn.Tanh has no 'inplace' argument + if act_cfg_['type'] not in [ + 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish', 'GELU' + ]: + act_cfg_.setdefault('inplace', inplace) + self.activate = build_torch_module(act_cfg_) + + # Use msra init by default + self.init_weights() + + @property + def norm(self): + if self.norm_name: + return getattr(self, self.norm_name) + else: + return None + +
[docs] def init_weights(self): + # 1. It is mainly for customized conv layers with their own + # initialization manners by calling their own ``init_weights()``, + # and we do not want ConvModule to override the initialization. + # 2. For customized conv layers without their own initialization + # manners (that is, they don't have their own ``init_weights()``) + # and PyTorch's conv layers, they will be initialized by + # this method with default ``kaiming_init``. + # Note: For PyTorch's conv layers, they will be overwritten by our + # initialization implementation using default ``kaiming_init``. + if not hasattr(self.conv, 'init_weights'): + if self.with_activation and self.act_cfg['type'] == 'LeakyReLU': + nonlinearity = 'leaky_relu' + a = self.act_cfg.get('negative_slope', 0.01) + else: + nonlinearity = 'relu' + a = 0 + kaiming_init(self.conv, a=a, nonlinearity=nonlinearity) + if self.with_norm: + constant_init(self.norm, 1, bias=0)
+ +
[docs] def forward(self, + x: torch.Tensor, + activate: bool = True, + norm: bool = True) -> torch.Tensor: + for layer in self.order: + if layer == 'conv': + if self.with_explicit_padding: + x = self.padding_layer(x) + x = self.conv(x) + elif layer == 'norm' and norm and self.with_norm: + x = self.norm(x) + elif layer == 'act' and activate and self.with_activation: + x = self.activate(x) + return x
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/edl_utils.html b/docs/_build/html/_modules/cosense3d/modules/utils/edl_utils.html new file mode 100644 index 00000000..e9156449 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/edl_utils.html @@ -0,0 +1,131 @@ + + + + + + cosense3d.modules.utils.edl_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.utils.edl_utils

+import torch
+
+
+
[docs]def logit_to_edl(logits): + """ + Convert logits to evidential class probabilities and uncertainty. + + Parameters + ---------- + logits: Tensor, (..., C) + + Returns + ------- + conf: Tensor, (..., C), class probabilities derived from the evidence. + unc: Tensor, (...), predictive uncertainty; 1 means no evidence. + """ + evidence = logits.relu() + alpha = evidence + 1 + S = torch.sum(alpha, dim=-1, keepdim=True) + conf = torch.div(alpha, S) + K = evidence.shape[-1] + unc = torch.div(K, S) + # conf = torch.sqrt(conf * (1 - unc)) + unc = unc.squeeze(dim=-1) + return conf, unc
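logit_to_edl converts raw logits into evidential class probabilities plus a per-sample uncertainty; zero evidence yields the maximal uncertainty of 1. A small sketch with two samples and two classes, assuming the function above is in scope:

import torch

logits = torch.tensor([[2.0, 0.5],
                       [-1.0, -1.0]])   # the second sample produces no evidence
conf, unc = logit_to_edl(logits)
print(conf)   # rows sum to 1; the first row favours class 0
print(unc)    # tensor([0.4444, 1.0000])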
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/gaussian_utils.html b/docs/_build/html/_modules/cosense3d/modules/utils/gaussian_utils.html new file mode 100644 index 00000000..d2cfee9c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/gaussian_utils.html @@ -0,0 +1,264 @@ + + + + + + cosense3d.modules.utils.gaussian_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
Source code for cosense3d.modules.utils.gaussian_utils

+from typing import List
+import torch
+from torch.distributions.multivariate_normal import _batch_mahalanobis
+import torch_scatter
+import numpy as np
+
+
+
[docs]def weighted_mahalanobis_dists(vars, dists, weights=None): + """Compute the squared mahalanobis distances. + + :param vars: (N, 2), variances of Gaussian distribution. + :param dists: (N, 2), distances to gaussian center at each axis. + :param weights: weights to be applied to the output probability. + :return: (N), squared mahalanobis + """ + vars = vars.squeeze() + if len(vars.shape) == 1: + vars = torch.stack([vars, vars], dim=-1) + covs = torch.diag_embed(vars.squeeze(), dim1=1) + unbroadcasted_scale_tril = covs.unsqueeze(1) # N 1 2 2 + + # a.shape = (i, 1, n, n), b = (..., i, j, n), + M = _batch_mahalanobis(unbroadcasted_scale_tril, dists) # N M + log_probs = -0.5 * M + probs = log_probs.exp() # N M 2 + if weights is not None: + probs = probs * weights + + return probs
+ + +
[docs]def mahalanobis_dists_2d(sigmas, dists): + """Compute the squared mahalanobis distances. + + :param sigmas: (N, 2), standard deviation of Gaussian distribution + :param dists: (N, 2), distances to gaussian center + :return: (N), squared mahalanobis + """ + vars = sigmas ** 2 + covs = torch.diag_embed(vars, dim1=1) + unbroadcasted_scale_tril = covs.unsqueeze(1) # 1 1 2 2 + M = -0.5 * _batch_mahalanobis(unbroadcasted_scale_tril, dists.unsqueeze(0)) # N M + return M
+ + +
[docs]def center_to_img_coor(center_in, lidar_range, pixel_sz): + x, y = center_in[:, 0], center_in[:, 1] + coord_x = (x - lidar_range[0]) / pixel_sz + coord_y = (y - lidar_range[1]) / pixel_sz + map_sz_x = (lidar_range[3] - lidar_range[0]) / pixel_sz + map_sz_y = (lidar_range[4] - lidar_range[1]) / pixel_sz + # clamp to fit image size: 1e-6 does not work for center.int() + coord_x = torch.clamp(coord_x, min=0, max=map_sz_x - 0.5) + coord_y = torch.clamp(coord_y, min=0, max=map_sz_y - 0.5) + center_out = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1) + return center_out
+ + +
[docs]def cornernet_gaussian_radius(height, width, min_overlap=0.5): + a1 = 1 + b1 = (height + width) + c1 = width * height * (1 - min_overlap) / (1 + min_overlap) + sq1 = (b1 ** 2 - 4 * a1 * c1).sqrt() + r1 = (b1 + sq1) / 2 + + a2 = 4 + b2 = 2 * (height + width) + c2 = (1 - min_overlap) * width * height + sq2 = (b2 ** 2 - 4 * a2 * c2).sqrt() + r2 = (b2 + sq2) / 2 + + a3 = 4 * min_overlap + b3 = -2 * min_overlap * (height + width) + c3 = (min_overlap - 1) * width * height + sq3 = (b3 ** 2 - 4 * a3 * c3).sqrt() + r3 = (b3 + sq3) / 2 + ret = torch.min(torch.min(r1, r2), r3) + return ret
+ + +
[docs]def gaussian_radius(box_dims, pixel_sz, overlap, min_radius=2): + dx, dy = box_dims[:, 0] / pixel_sz[0], box_dims[:, 1] / pixel_sz[1] + + radius = cornernet_gaussian_radius(dx, dy, min_overlap=overlap) + radius = torch.clamp_min(radius.int(), min=min_radius) + + return radius
+ + +
[docs]def gaussian_2d(shape: List[int], sigma: float=1.0) -> np.ndarray: + """Generate gaussian map. + + :param shape: Shape of the map. + :param sigma: Sigma to generate gaussian map. + Defaults to 1. + :return: Generated gaussian map. + """ + m, n = [(ss - 1.) / 2. for ss in shape] + y, x = np.ogrid[-m:m + 1, -n:n + 1] + + h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) + h[h < np.finfo(h.dtype).eps * h.max()] = 0 + return h
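A quick illustration of gaussian_2d generating a small kernel; the size and sigma are arbitrary.

import numpy as np
from cosense3d.modules.utils.gaussian_utils import gaussian_2d

g = gaussian_2d([5, 5], sigma=1.0)
print(g.shape)         # (5, 5)
print(g[2, 2])         # 1.0 at the center cell
print(np.round(g, 3))  # values decay symmetrically towards the borders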
+ + +
[docs]def draw_gaussian_map(boxes, lidar_range, pixel_sz, batch_size, radius=None, sigma=1, min_radius=2): + size_x = int((lidar_range[3] - lidar_range[0]) // pixel_sz[0]) + size_y = int((lidar_range[4] - lidar_range[1]) // pixel_sz[1]) + if boxes.shape[0] == 0: + return torch.zeros(batch_size, size_x, size_y, device=boxes.device) + if radius is None: + radius = torch.ones_like(boxes[:, 0]) * 2 + radius_max = radius.max() + center = center_to_img_coor(boxes[:, 1:3], lidar_range, pixel_sz) + ctridx = center.int() + + # sample points for each center point + steps = radius_max * 2 + 1 + x = torch.linspace(- radius_max, radius_max, steps) + offsets = torch.stack(torch.meshgrid(x, x, indexing='ij'), dim=-1).to(center.device) + offsets = offsets[torch.norm(offsets, dim=-1) <= radius_max] + samples = ctridx.unsqueeze(1) + offsets.view(1, -1, 2) + ind = torch.tile(boxes[:, 0].unsqueeze(1), (1, samples.shape[1])).unsqueeze(-1) + samples = torch.cat([ind, samples], dim=-1) + ctr_idx_of_sam = torch.arange(len(center)).unsqueeze(1).tile(1, samples.shape[1]) + + mask = (samples[..., 1] >= 0) & (samples[..., 1] < size_x) & \ + (samples[..., 2] >= 0) & (samples[..., 2] < size_y) + + + new_center = center[ctr_idx_of_sam[mask]] + new_vars = 1 / min_radius * radius[ctr_idx_of_sam[mask]].float() + new_samples = samples[mask] + dists_sam2ctr = new_samples[:, 1:].float() - new_center + + probs = weighted_mahalanobis_dists( + new_vars, + dists_sam2ctr.unsqueeze(1), + ).squeeze() + + # probs = probs / (2 * sigma * sigma) + probs[probs < torch.finfo(probs.dtype).eps * probs.max()] = 0 + + indices = new_samples[:, 0] * size_y * size_x + \ + new_samples[:, 1] * size_x + new_samples[:, 2] + + center_map = torch.zeros(batch_size * size_x * size_y, device=center.device) + torch_scatter.scatter(probs, indices.long(), dim=0, out=center_map, reduce='max') + center_map = center_map.view(batch_size, size_x, size_y) + + # import matplotlib.pyplot as plt + # plt.imshow(center_map[0].cpu().numpy()) + # plt.show() + # plt.close() + + return center_map
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/init.html b/docs/_build/html/_modules/cosense3d/modules/utils/init.html new file mode 100644 index 00000000..ec985778 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/init.html @@ -0,0 +1,187 @@ + + + + + + cosense3d.modules.utils.init — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.utils.init

+from torch import nn
+import numpy as np
+
+
+
[docs]def bias_init_with_prob(prior_prob: float) -> float: + """initialize conv/fc bias value according to a given probability value.""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init
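This is the standard focal-loss-style prior initialization; a quick numeric check follows (the prior probability 0.01 is just a common illustrative choice).

import numpy as np
from cosense3d.modules.utils.init import bias_init_with_prob

b = bias_init_with_prob(0.01)
print(b)                     # ≈ -4.595
print(1 / (1 + np.exp(-b)))  # ≈ 0.01, i.e. sigmoid(bias) recovers the prior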
+ + +
[docs]def constant_init(module: nn.Module, val: float, bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + nn.init.constant_(module.weight, val) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias)
+ + +
[docs]def xavier_init(module: nn.Module, + gain: float = 1, + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias)
+ + +
[docs]def normal_init(module: nn.Module, + mean: float = 0, + std: float = 1, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + nn.init.normal_(module.weight, mean, std) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias)
+ + +
[docs]def trunc_normal_init(module: nn.Module, + mean: float = 0, + std: float = 1, + a: float = -2, + b: float = 2, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + trunc_normal_(module.weight, mean, std, a, b) # type: ignore + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) # type: ignore
+ + +
[docs]def uniform_init(module: nn.Module, + a: float = 0, + b: float = 1, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + nn.init.uniform_(module.weight, a, b) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias)
+ + +
[docs]def kaiming_init(module: nn.Module, + a: float = 0, + mode: str = 'fan_out', + nonlinearity: str = 'relu', + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.kaiming_uniform_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias)
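A sketch of applying these helpers to a toy head; the module shapes and the chosen initializations are illustrative, not a CoSense3D configuration.

import torch.nn as nn
from cosense3d.modules.utils.init import (bias_init_with_prob, constant_init,
                                          kaiming_init, xavier_init)

conv = nn.Conv2d(64, 128, kernel_size=3, padding=1)
cls_head = nn.Linear(128, 1)

kaiming_init(conv, nonlinearity='relu')        # Kaiming-normal conv weights
xavier_init(cls_head, distribution='uniform')  # Xavier-uniform linear weights
constant_init(cls_head, val=0.0,               # or: zero weights with a
              bias=bias_init_with_prob(0.01))  # focal-style prior bias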
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/me_utils.html b/docs/_build/html/_modules/cosense3d/modules/utils/me_utils.html new file mode 100644 index 00000000..e79055c5 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/me_utils.html @@ -0,0 +1,472 @@ + + + + + + cosense3d.modules.utils.me_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.utils.me_utils

+import torch
+from torch import nn
+import MinkowskiEngine as ME
+from MinkowskiEngine.MinkowskiKernelGenerator import KernelGenerator
+
+
+
[docs]@torch.no_grad() +def metric2indices(coor, voxel_size): + """Convert metric (batch_idx, x, y) coordinates to voxel indices, rounding towards floor.""" + indices = coor.clone() + if isinstance(voxel_size, float): + indices[:, 1:3] = indices[:, 1:3] / voxel_size + else: + indices[:, 1] = indices[:, 1] / voxel_size[0] + indices[:, 2] = indices[:, 2] / voxel_size[1] + return torch.floor(indices).long()
+ + +
[docs]@torch.no_grad() +def indices2metric(indices, voxel_size): + """Voxel indices to voxel center in meter""" + coor = indices.clone().float() + coor[:, 1] = (coor[:, 1] + 0.5) * voxel_size[0] + coor[:, 2] = (coor[:, 2] + 0.5) * voxel_size[1] + return coor
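A round-trip sketch between metric coordinates and voxel indices; coordinates are (batch_idx, x, y), the 0.2 m voxel size is illustrative, and importing me_utils requires MinkowskiEngine to be installed.

import torch
from cosense3d.modules.utils.me_utils import metric2indices, indices2metric

coor = torch.tensor([[0.0, 1.23, -0.47],
                     [1.0, 0.05,  0.39]])
inds = metric2indices(coor, voxel_size=[0.2, 0.2])  # floor(x / vx), floor(y / vy)
ctrs = indices2metric(inds, voxel_size=[0.2, 0.2])  # back to voxel centers
print(inds)  # [[0, 6, -3], [1, 0, 1]]
print(ctrs)  # ≈ [[0.0, 1.3, -0.5], [1.0, 0.1, 0.3]]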
+ + +
[docs]@torch.no_grad() +def mink_coor_limit(lidar_range, voxel_size, stride): + if not isinstance(voxel_size, list): + voxel_size = [voxel_size, voxel_size] + lr = lidar_range + x_max = (round(lr[3] / voxel_size[0]) - 1) // stride * stride # relevant to ME + x_min = (round(lr[0] / voxel_size[0]) + 1) // stride * stride - stride # relevant to ME + y_max = (round(lr[4] / voxel_size[1]) - 1) // stride * stride + y_min = (round(lr[1] / voxel_size[1]) + 1) // stride * stride - stride + return [x_min, x_max, y_min, y_max]
+ + +
[docs]def update_me_essentials(self: object, data_info: dict, stride: int=None): + """Update essential variables for ME-based models + + :param self: instance of a python class + :param data_info: + - det_r: float + - lidar_range: [xmin, ymin, zmin, xmax, ymax, zmax] + - voxel_size: [vx, vy, vz] + :param stride: + :return: + """ + for k, v in data_info.items(): + setattr(self, k, v) + + if getattr(self, 'det_r', False): + lr = [-self.det_r, -self.det_r, 0, self.det_r, self.det_r, 0] + elif getattr(self, 'lidar_range', False): + lr = self.lidar_range + else: + raise NotImplementedError + setattr(self, 'lidar_range', lr) + + if stride is not None: + setattr(self, 'stride', stride) + setattr(self, 'res', (self.stride * self.voxel_size[0], self.stride * self.voxel_size[1])) + setattr(self, 'mink_xylim', mink_coor_limit(lr, self.voxel_size, self.stride)) + setattr(self, 'size_x', round((lr[3] - lr[0]) / self.res[0])) + setattr(self, 'size_y', round((lr[4] - lr[1]) / self.res[1])) + setattr(self, 'offset_sz_x', round(lr[0] / self.res[0])) + setattr(self, 'offset_sz_y', round(lr[1] / self.res[1]))
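A minimal sketch of how update_me_essentials derives the BEV grid attributes; the lidar range, voxel size and stride are illustrative, and importing me_utils requires MinkowskiEngine.

from types import SimpleNamespace
from cosense3d.modules.utils.me_utils import update_me_essentials

head = SimpleNamespace()  # stands in for a module instance
data_info = dict(lidar_range=[-51.2, -51.2, -3.0, 51.2, 51.2, 1.0],
                 voxel_size=[0.2, 0.2, 4.0])
update_me_essentials(head, data_info, stride=4)

print(head.res)         # ≈ (0.8, 0.8): stride * voxel size per axis
print(head.size_x)      # 128 BEV cells along x at this stride
print(head.mink_xylim)  # xy limits used to clip ME coordinates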
+ + +
[docs]@torch.no_grad() +def me_coor_to_grid_indices(lr, voxel_size, stride, coor): + res_x, res_y = stride * voxel_size[0], stride * voxel_size[1] + size_x = round((lr[3] - lr[0]) / res_x) + size_y = round((lr[4] - lr[1]) / res_y) + offset_sz_x = round(lr[0] / res_x) + offset_sz_y = round(lr[1] / res_y) + inds = coor.clone() + inds[:, 0] -= offset_sz_x + inds[:, 1] -= offset_sz_y + # parentheses are required here: `&` binds tighter than `<` in Python + in_range_mask = (inds >= 0).all(dim=-1) & (inds[:, 0] < size_x) & (inds[:, 1] < size_y) + return inds, in_range_mask
+ + +
[docs]@torch.no_grad() +def bev_sparse_to_dense(self, preds): + conf, unc = preds['conf'], preds['unc'], + ctrs = preds['centers'][:, :3] # N 2 + batch_size = ctrs[:, 0].max().int() + 1 + conf_map = torch.zeros((batch_size, self.size_x, self.size_y, 2), + device=conf.device) + unc_map = torch.ones((batch_size, self.size_x, self.size_y), + device=unc.device) + inds = metric2indices(ctrs, self.res).T + inds[1] -= self.offset_sz_x + inds[2] -= self.offset_sz_y + conf_map[inds[0], inds[1], inds[2]] = conf + unc_map[inds[0], inds[1], inds[2]] = unc + return conf_map, unc_map
+ + +
[docs]def minkconv_layer(in_dim, out_dim, kernel, stride, d, tr=False): + if not isinstance(kernel, list): + kernel = [kernel] * d + else: + assert len(kernel) == d + if tr: + conv = getattr(ME, 'MinkowskiConvolutionTranspose') + else: + conv = getattr(ME, 'MinkowskiConvolution') + conv_layer = conv( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=kernel, + stride=stride, + dilation=1, + dimension=d + ) + return conv_layer
+ + +
[docs]def minkconv_conv_block(in_dim, out_dim, kernel, stride, + d=3, + bn_momentum=0.1, + activation='LeakyReLU', + tr=False, + expand_coordinates=False, + norm_before=False, + distributed=False): + if isinstance(kernel, int): + kernel = [kernel] * d + if isinstance(stride, int): + stride = [stride] * d + if tr: + conv = getattr(ME, 'MinkowskiConvolutionTranspose') + else: + conv = getattr(ME, 'MinkowskiConvolution') + conv_layer = conv( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=kernel, + stride=stride, + dilation=1, + dimension=d, + expand_coordinates=expand_coordinates + ) + activation_fn = getattr(ME, f'Minkowski{activation}')() + if distributed: + norm_layer = ME.MinkowskiSyncBatchNorm(out_dim, momentum=bn_momentum) + else: + norm_layer = ME.MinkowskiBatchNorm(out_dim, momentum=bn_momentum) + if norm_before: + layer = nn.Sequential(conv_layer, norm_layer, activation_fn) + else: + layer = nn.Sequential(conv_layer, activation_fn, norm_layer) + return layer
+ + +
[docs]def get_conv_block(nc, k=3, d=3, tr=False, bn_momentum=0.1, distributed=False): + """ + create sparse convolution block + :param nc: number of channels in each layer in [in_layer, mid_layer, out_layer] + :param k: kernel size + :param tr: transposed convolution + :return: conv block + """ + if isinstance(k, int): + k = [k,] * d + else: + assert len(k) == d + bnm = bn_momentum + assert len(nc) == 3 + return nn.Sequential( + minkconv_conv_block(nc[0], nc[1], k, 2, d, bnm, tr=tr, distributed=distributed), + minkconv_conv_block(nc[1], nc[1], k, 1, d, bnm, tr=tr, distributed=distributed), + minkconv_conv_block(nc[1], nc[2], k, 1, d, bnm, tr=tr, distributed=distributed), + )
+ + +
[docs]def sparse_to_dense(stensor, voxel_size, det_r): + b = int(stensor.C[:, 0].max()) + 1 + d = stensor.F.shape[-1] + stride = stensor.tensor_stride + h = int((det_r['x'][1] - det_r['x'][0]) / voxel_size[0]) // stride[0] + w = int((det_r['y'][1] - det_r['y'][0]) / voxel_size[1]) // stride[1] + x_offset = int(det_r['x'][0] / voxel_size[0]) + y_offset = int(det_r['y'][0] / voxel_size[1]) + assert len(stensor.C[:, 3].unique()) == 1 + dtensor = stensor.dense( + shape=torch.Size((b, d, h, w, 1)), + min_coordinate=torch.Tensor([x_offset, y_offset, 0]).int())[0].squeeze(dim=-1) + + return dtensor
+ + +
[docs]def prepare_input_data(points_list, voxel_size, QMODE, floor_height, + coor_dim=3, feat_dim=3): + device = points_list[0].device + coords = [] + features = [] + vs = torch.tensor(voxel_size).reshape(1, 3).to(device) + for i, points in enumerate(points_list): + pts = points.clone() + if floor_height is not None: + pts[:, 3] -= floor_height + pts[:, 1:4] = pts[:, 1:4] / vs + features.append(points[:, 1:feat_dim + 1]) + coords.append(pts) + coords = torch.cat(coords, dim=0) + features = torch.cat(features, dim=0) + + x = ME.TensorField( + features=features.contiguous(), + coordinates=coords[:, :coor_dim + 1].contiguous(), + quantization_mode=QMODE, + device=device + ) + # ME rounds to the floor when casting coords to integer + return x
+ + +
[docs]def voxelize_with_centroids(x: ME.TensorField, enc_mlp, pc_range): + cm = x.coordinate_manager + features = x.F + coords = x.C[:, 1:] + + out = x.sparse() + size = torch.Size([len(out), len(x)]) + tensor_map, field_map = cm.field_to_sparse_map(x.coordinate_key, out.coordinate_key) + coords_p1, count_p1 = downsample_points(coords, tensor_map, field_map, size) + features_p1, _ = downsample_points(features, tensor_map, field_map, size) + if len(features) != len(tensor_map): + print('ME: features != tensor map') + norm_features = normalize_points(features, features_p1, tensor_map) + + features[:, :3] = (features[:, :3] - pc_range[:3]) / (pc_range[3:] - pc_range[:3]) + voxel_embs = enc_mlp(torch.cat([features, norm_features], dim=1)) + down_voxel_embs = downsample_embeddings(voxel_embs, tensor_map, size, mode="avg") + out = ME.SparseTensor(features=down_voxel_embs, + coordinate_map_key=out.coordinate_key, + coordinate_manager=cm) + + norm_points_p1 = normalize_centroids(coords_p1, out.C, out.tensor_stride[0]) + return out, norm_points_p1, features_p1, count_p1, voxel_embs
+ + +
[docs]def devoxelize_with_centroids(out: ME.SparseTensor, x: ME.TensorField, h_embs): + feats = torch.cat([out.slice(x).F, h_embs], dim=1) + return feats
+ + +
[docs]@torch.no_grad() +def normalize_points(points, centroids, tensor_map): + tensor_map = tensor_map if tensor_map.dtype == torch.int64 else tensor_map.long() + norm_points = points - centroids[tensor_map] + return norm_points
+ + +
[docs]@torch.no_grad() +def normalize_centroids(down_points, coordinates, tensor_stride): + norm_points = (down_points - coordinates[:, 1:]) / tensor_stride - 0.5 + return norm_points
+ + +
[docs]@torch.no_grad() +def get_kernel_map_and_out_key(stensor, stensor_out=None, + kernel_size=3, stride=1, dilation=1, + kernel_type='cube', kernel_generator=None): + """ + Generate kernel maps for the input stensor. + The hybrid and custom kernel is not implemented in ME v0.5.x, + this function uses a kernel mask to select the kernel maps for + the customized kernel shapes. + :param stensor: ME.SparseTensor, NxC + :param kernel_type: 'cube'(default) | 'hybrid' + :return: masked kernel maps + """ + D = stensor.C.shape[-1] - 1 + if kernel_generator is None: + kernel_generator = KernelGenerator(kernel_size=kernel_size, + stride=stride, + dilation=dilation, + dimension=D) + assert D == len(kernel_generator.kernel_stride) + cm = stensor.coordinate_manager + in_key = stensor.coordinate_key + if stensor_out is None: + out_key = cm.stride(in_key, kernel_generator.kernel_stride) + else: + out_key = stensor_out.coordinate_key + region_type, region_offset, _ = kernel_generator.get_kernel( + stensor.tensor_stride, False) + + kernel_map = cm.kernel_map(in_key, + out_key, + kernel_generator.kernel_stride, + kernel_generator.kernel_size, + kernel_generator.kernel_dilation, + region_type=region_type, + region_offset=region_offset) + if kernel_type=='cube': + kernel_volume = kernel_generator.kernel_volume + elif kernel_type=='hybrid': + assert dilation == 1, "currently, hybrid kernel only support dilation=1." + xx = torch.tensor([-1, 0, 1]).int() + xx_list = [xx for i in range(D)] + kernels = torch.meshgrid([*xx_list], indexing='ij') + kernels = torch.stack([t.flatten() for t in kernels], dim=1) + kernel_mask = torch.zeros_like(kernels[:, 0]).bool() + m = torch.logical_or( + kernels[:, 0] == 0, + torch.logical_and(kernels[:, 0]==-1, (kernels[:, 1:]==0).all(dim=1)) + ) + kernel_mask[m] = True + kernel_mask_map = {ic.item(): ih for ih, ic in enumerate(torch.where(kernel_mask)[0])} + kernel_map = {kernel_mask_map[k]: v for k, v in kernel_map.items() if kernel_mask[k]} + kernel_volume = kernel_mask.sum().item() + else: + raise NotImplementedError + + return kernel_map, out_key, kernel_volume
+ + +
[docs]@torch.no_grad() +def downsample_points(points, tensor_map, field_map, size): + down_points = ME.MinkowskiSPMMAverageFunction().apply( + tensor_map, field_map, size, points + ) + _, counts = torch.unique(tensor_map, return_counts=True) + return down_points, counts.unsqueeze_(1).type_as(down_points)
+ + +
[docs]@torch.no_grad() +def stride_centroids(points, counts, rows, cols, size): + stride_centroids = ME.MinkowskiSPMMFunction().apply(rows, cols, counts, size, points) + ones = torch.ones(size[1], dtype=points.dtype, device=points.device) + stride_counts = ME.MinkowskiSPMMFunction().apply(rows, cols, ones, size, counts) + stride_counts.clamp_(min=1) + return torch.true_divide(stride_centroids, stride_counts), stride_counts
+ + +
[docs]def downsample_embeddings(embeddings, inverse_map, size, mode="avg"): + assert len(embeddings) == size[1] + assert mode in ["avg", "max"] + if mode == "max": + in_map = torch.arange(size[1], dtype=inverse_map.dtype, device=inverse_map.device) + down_embeddings = ME.MinkowskiDirectMaxPoolingFunction().apply( + in_map, inverse_map, embeddings, size[0] + ) + else: + cols = torch.arange(size[1], dtype=inverse_map.dtype, device=inverse_map.device) + down_embeddings = ME.MinkowskiSPMMAverageFunction().apply( + inverse_map, cols, size, embeddings + ) + return down_embeddings
+ + + + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/misc.html b/docs/_build/html/_modules/cosense3d/modules/utils/misc.html new file mode 100644 index 00000000..367c1cc3 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/misc.html @@ -0,0 +1,204 @@ + + + + + + cosense3d.modules.utils.misc — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.utils.misc

+from torch import nn
+
+
+
[docs]class SELayer_Linear(nn.Module): + def __init__(self, channels, act_layer=nn.ReLU, gate_layer=nn.Sigmoid, norm=False): + super().__init__() + self.conv_reduce = nn.Linear(channels, channels) + self.act1 = act_layer() + self.conv_expand = nn.Linear(channels, channels) + self.gate = gate_layer() + self.norm = norm + +
[docs] def forward(self, x, x_se): + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se)
+ + +
[docs]class MLN(nn.Module): + ''' + Args: + c_dim (int): dimension of latent code c + f_dim (int): feature dimension + ''' + + def __init__(self, c_dim, f_dim=256): + super().__init__() + self.c_dim = c_dim + self.f_dim = f_dim + + self.reduce = nn.Sequential( + nn.Linear(c_dim, f_dim), + nn.ReLU(), + ) + self.gamma = nn.Linear(f_dim, f_dim) + self.beta = nn.Linear(f_dim, f_dim) + self.ln = nn.LayerNorm(f_dim, elementwise_affine=False) + self.reset_parameters() + +
[docs] def reset_parameters(self): + nn.init.zeros_(self.gamma.weight) + nn.init.zeros_(self.beta.weight) + nn.init.ones_(self.gamma.bias) + nn.init.zeros_(self.beta.bias)
+ +
[docs] def forward(self, x, c): + x = self.ln(x) + c = self.reduce(c) + gamma = self.gamma(c) + beta = self.beta(c) + out = gamma * x + beta + return out
+ + +
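A usage sketch of MLN modulating features with a latent code; the dimensions are arbitrary.

import torch
from cosense3d.modules.utils.misc import MLN

mln = MLN(c_dim=6, f_dim=256)
x = torch.rand(10, 256)  # features to be modulated
c = torch.rand(10, 6)    # per-feature latent codes (e.g. pose embeddings)
out = mln(x, c)
print(out.shape)         # torch.Size([10, 256])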
[docs]class MLN2(nn.Module): + ''' + Args: + c_dim (int): dimension of latent code c + f_dim (int): feature dimension + ''' + + def __init__(self, c_dim, f_dim=256): + super().__init__() + self.c_dim = c_dim + self.f_dim = f_dim + + self.reduce = nn.Sequential( + nn.Linear(c_dim, f_dim), + nn.LayerNorm(f_dim), + nn.ReLU(), + ) + self.gamma = nn.Sequential( + nn.Linear(f_dim, f_dim), + nn.Sigmoid(), + ) + self.beta = nn.Sequential( + nn.Linear(f_dim, f_dim), + nn.LayerNorm(f_dim), + ) + self.ln = nn.LayerNorm(f_dim, elementwise_affine=False) + self.reset_parameters() + +
[docs] def reset_parameters(self): + nn.init.zeros_(self.gamma[0].weight) + nn.init.zeros_(self.beta[0].weight) + nn.init.ones_(self.gamma[0].bias) + nn.init.zeros_(self.beta[0].bias)
+ +
[docs] def forward(self, x, c): + x = self.ln(x) + c = self.reduce(c) + gamma = self.gamma(c) + beta = self.beta(c) + out = gamma * x + beta + return out
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/nbr_attn.html b/docs/_build/html/_modules/cosense3d/modules/utils/nbr_attn.html new file mode 100644 index 00000000..181a0ba7 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/nbr_attn.html @@ -0,0 +1,170 @@ + + + + + + cosense3d.modules.utils.nbr_attn — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.utils.nbr_attn

+import math
+import torch
+from torch import nn
+
+from cosense3d.modules.utils.positional_encoding import pos2posemb2d
+
+
+
[docs]class NeighborhoodAttention(nn.Module): + """Generate reference points and attend neighborhood features.""" + def __init__(self, emb_dim, n_nbr=16, num_pose_feat=64, **kwargs): + super(NeighborhoodAttention, self).__init__(**kwargs) + self.n_nbr = n_nbr + self.emb_dim = emb_dim + self.num_pose_feat = num_pose_feat + self.q_pos_emb = nn.Sequential( + nn.Linear(num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + self.kv_pos_emb = nn.Sequential( + nn.Linear(num_pose_feat * 2, self.emb_dim), + nn.ReLU(), + nn.Linear(self.emb_dim, self.emb_dim), + ) + +
[docs] def forward(self, memory, mem_coor, q_coor, B): + """ + + Args: + q: (S, D) + kv: (L, D) + q_coor: (S, 3), [idx, x, y] + kv_coor: (L, 3) + + Returns: + + """ + query_pos = self.q_pos_emb(pos2posemb2d(q_coor[:, 1:], self.num_pose_feat)) + memory_pos = self.kv_pos_emb(pos2posemb2d(mem_coor[:, 1:], self.num_pose_feat)) + query = query_pos + kv_pe = memory_pos + memory + + outs = [] + for b in range(B): + qm = q_coor[:, 0] == b + km = mem_coor[:, 0] == b + q = query[qm] + kv = memory[km] + S, D = q.shape + L = kv.shape[0] + dists = torch.norm(q_coor[qm].unsqueeze(1) - mem_coor[km].unsqueeze(0), dim=-1) # (B, S, L) + topk_inds = torch.topk(-dists, k=self.n_nbr, dim=-1) # (B, S, n_nbr) + kv_inds = torch.cat([topk_inds[b] + b * L for b in range(B)], dim=0) # (BS, n_nbr) + q_inds = torch.cat([torch.arange(S) + b * S for b in range(B)], dim=0 + ).view(-1, 1).repeat(1, self.n_nbr) # (BS, n_nbr) + kv_m = kv_pe[km].view(-1, D)[kv_inds.view(-1)] + product = q.view(-1, D)[q_inds.view(-1)] * kv_m # (BS*n_nbr, D) + scaled_product = product / math.sqrt(D) + attn_weights = scaled_product.softmax(dim=-1) + out = (attn_weights * kv.view(-1, D)[kv_inds.view(-1)]).view(B, S, self.n_nbr, D) + outs.append(out) + return out
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/norm.html b/docs/_build/html/_modules/cosense3d/modules/utils/norm.html new file mode 100644 index 00000000..19e0efcb --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/norm.html @@ -0,0 +1,168 @@ + + + + + + cosense3d.modules.utils.norm — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.utils.norm

+from typing import Dict, Tuple, Union
+from torch import nn
+
+NORM_LAYERS = dict(
+    BN=nn.BatchNorm2d,
+    BN1d=nn.BatchNorm1d,
+    BN2d=nn.BatchNorm2d,
+    BN3d=nn.BatchNorm3d,
+    LN=nn.LayerNorm,
+    IN=nn.InstanceNorm2d,
+)
+
+
+
[docs]def build_norm_layer(cfg: Dict, + num_features: int, + postfix: Union[int, str] = '') -> Tuple[str, nn.Module]: + """Build normalization layer. Modified from openmmlab. + + :param cfg: (dict) The norm layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate a norm layer. + - requires_grad (bool, optional): Whether stop gradient updates. + :param num_features: (int) Number of input channels. + :param postfix: (int | str) The postfix to be appended into norm abbreviation + to create named layer. + + :returns: tuple[str, nn.Module]: The first element is the layer name consisting + of abbreviation and postfix, e.g., bn1, gn. The second element is the + created norm layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in NORM_LAYERS: + raise KeyError(f'Unrecognized norm type {layer_type}') + + norm_layer = NORM_LAYERS.get(layer_type) + abbr = layer_type.lower() + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer
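A usage sketch of build_norm_layer; the channel count and postfix are illustrative.

import torch
from cosense3d.modules.utils.norm import build_norm_layer

name, bn = build_norm_layer(dict(type='BN', requires_grad=True), 64, postfix=1)
print(name)  # 'bn1': abbreviation plus postfix
print(bn)    # BatchNorm2d(64, eps=1e-05, ...)
print(bn(torch.rand(2, 64, 8, 8)).shape)  # torch.Size([2, 64, 8, 8])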
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/modules/utils/positional_encoding.html b/docs/_build/html/_modules/cosense3d/modules/utils/positional_encoding.html new file mode 100644 index 00000000..dfdb159e --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/modules/utils/positional_encoding.html @@ -0,0 +1,243 @@ + + + + + + cosense3d.modules.utils.positional_encoding — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.modules.utils.positional_encoding

+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from mmdetection (https://github.com/open-mmlab/mmdetection)
+# Copyright (c) OpenMMLab. All rights reserved.
+# ------------------------------------------------------------------------
+#  Modified by Shihao Wang
+#  Modified by Yunshuang Yuan
+# ------------------------------------------------------------------------
+import math
+import torch
+import torch.nn as nn 
+import numpy as np
+
+
+
[docs]def ratio2coord(ratio, lidar_range): + return ratio * (lidar_range[3:] - lidar_range[:3]) + lidar_range[:3]
+ + +
[docs]def coor2ratio(coor, lidar_range): + return (coor - lidar_range[:3]) / (lidar_range[3:] - lidar_range[:3])
+ + +
[docs]def img_locations(img_size, feat_size=None, stride=None): + H, W = img_size + if feat_size is None: + assert stride is not None + h, w = H // stride, W // stride + elif stride is None: + h, w = feat_size + stride = H // h + + shifts_x = (torch.arange( + 0, stride * w, step=stride, + dtype=torch.float32 + ) + stride // 2) / W + shifts_y = (torch.arange( + 0, h * stride, step=stride, + dtype=torch.float32 + ) + stride // 2) / H + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing='ij') + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + coors = torch.stack((shift_x, shift_y), dim=1) + + coors = coors.reshape(h, w, 2) + return coors
+ + +
[docs]def pos2posemb3d(pos, num_pos_feats=128, temperature=10000): + scale = 2 * math.pi + pos = pos * scale + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) + pos_x = pos[..., 0, None] / dim_t + pos_y = pos[..., 1, None] / dim_t + pos_z = pos[..., 2, None] / dim_t + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2) + pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2) + pos_z = torch.stack((pos_z[..., 0::2].sin(), pos_z[..., 1::2].cos()), dim=-1).flatten(-2) + posemb = torch.cat((pos_y, pos_x, pos_z), dim=-1) + return posemb
+ + +
[docs]def pos2posemb2d(pos, num_pos_feats=128, temperature=10000): + scale = 2 * math.pi + pos = pos * scale + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) + pos_x = pos[..., 0, None] / dim_t + pos_y = pos[..., 1, None] / dim_t + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2) + pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2) + posemb = torch.cat((pos_y, pos_x), dim=-1) + return posemb
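A shape-only sketch of pos2posemb2d; the points are random normalized coordinates in [0, 1].

import torch
from cosense3d.modules.utils.positional_encoding import pos2posemb2d

pts = torch.rand(100, 2)                   # normalized (x, y) reference points
emb = pos2posemb2d(pts, num_pos_feats=128)
print(emb.shape)                           # torch.Size([100, 256]): 128 dims per axis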
+ + +
[docs]def pos2posemb1d(pos, num_pos_feats=256, temperature=10000): + scale = 2 * math.pi + pos = pos * scale + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) + pos_x = pos[..., 0, None] / dim_t + + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2) + + return pos_x
+ + +
[docs]def nerf_positional_encoding( + tensor: torch.Tensor, + num_encoding_functions: int=6, + include_input: bool=False, + log_sampling: bool=True +) -> torch.Tensor: + r"""Apply positional encoding to the input. + + :param tensor: Input tensor to be positionally encoded. + :param num_encoding_functions: Number of encoding functions used to compute + a positional encoding (default: 6). + :param include_input: Whether or not to include the input in the + positional encoding (default: True). + :param log_sampling: + :return: Positional encoding of the input tensor. + """ + # TESTED + # Trivially, the input tensor is added to the positional encoding. + encoding = [tensor] if include_input else [] + frequency_bands = None + if log_sampling: + frequency_bands = 2.0 ** torch.linspace( + 0.0, + num_encoding_functions - 1, + num_encoding_functions, + dtype=tensor.dtype, + device=tensor.device, + ) + else: + frequency_bands = torch.linspace( + 2.0 ** 0.0, + 2.0 ** (num_encoding_functions - 1), + num_encoding_functions, + dtype=tensor.dtype, + device=tensor.device, + ) + + for freq in frequency_bands: + for func in [torch.sin, torch.cos]: + encoding.append(func(tensor * freq)) + + # Special case, for no positional encoding + if len(encoding) == 1: + return encoding[0] + else: + return torch.cat(encoding, dim=-1)
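A quick check of the NeRF-style encoding dimensions; the inputs are illustrative.

import torch
from cosense3d.modules.utils.positional_encoding import nerf_positional_encoding

xyz = torch.rand(8, 3)
enc = nerf_positional_encoding(xyz, num_encoding_functions=6, include_input=False)
print(enc.shape)  # torch.Size([8, 36]) = 3 dims * 2 (sin, cos) * 6 frequency bands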
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/box_utils.html b/docs/_build/html/_modules/cosense3d/utils/box_utils.html new file mode 100644 index 00000000..da7ba81c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/box_utils.html @@ -0,0 +1,513 @@ + + + + + + cosense3d.utils.box_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.utils.box_utils

+import numpy as np
+import torch
+from typing import Union
+
+from shapely.geometry import Polygon
+
+from cosense3d.utils.misc import check_numpy_to_torch
+from cosense3d.ops.utils import points_in_boxes_cpu
+from cosense3d.utils.pclib import rotate_points_batch, rotation_mat2euler_torch
+
+
+
+
[docs]def limit_period(val, offset=0.5, period=2 * np.pi): + return val - np.floor(val / period + offset) * period
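A small sketch of limit_period wrapping heading angles into [-pi, pi) with the default offset and period; the angles are illustrative (importing box_utils also pulls in the compiled cosense3d.ops extension).

import numpy as np
from cosense3d.utils.box_utils import limit_period

angles = np.array([3.5 * np.pi, -1.2 * np.pi, 0.3])
print(limit_period(angles) / np.pi)  # ≈ [-0.5, 0.8, 0.095]: all wrapped into [-pi, pi)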
+ + +
[docs]def decode_boxes(reg, points, lwh_mean): + assert len(reg)==len(points) + if not isinstance(lwh_mean, torch.Tensor): + lwh_mean = torch.Tensor(lwh_mean).view(1, 3) + points = points.to(reg.device) + lwh_mean = lwh_mean.to(reg.device) + + diagonal = torch.norm(lwh_mean[0, :2]) + # encode with diagonal length + xy = reg[:, :2] * diagonal + points[:, :2] + z = reg[:, 2:3] * lwh_mean[0, 2] + points[:, 2:3] + lwh = reg[:, 3:6].exp() * lwh_mean + r = torch.atan2(reg[:, 6:7], reg[:, 7:]) + + return torch.cat([xy, z, lwh, r], dim=-1)
+ + +
[docs]def boxes_to_corners_2d(boxes_np): + """ + Convert boxes to 4 corners in xy plane + :param boxes_np: np.ndarray [N, 7], cols - (x,y,z,dx,dy,dz,det_r) + :return: corners: np.ndarray [N, 4, 2], corner order is + back left, front left, front right, back right + """ + x = boxes_np[:, 0] + y = boxes_np[:, 1] + dx = boxes_np[:, 2] + dy = boxes_np[:, 3] + + x1 = - dx / 2 + y1 = - dy / 2 + x2 = + dx / 2 + y2 = + dy / 2 + theta = boxes_np[:, 6:7] + # bl, fl, fr, br + corners = np.array([[x1, y2],[x2,y2], [x2,y1], [x1, y1]]).transpose(2, 0, 1) + new_x = corners[:, :, 0] * np.cos(theta) + \ + corners[:, :, 1] * -np.sin(theta) + x[:, None] + new_y = corners[:, :, 0] * np.sin(theta) + \ + corners[:, :, 1] * (np.cos(theta)) + y[:, None] + corners = np.stack([new_x, new_y], axis=2) + + return corners
+ + +
[docs]def boxes_to_corners_3d(boxes3d: Union[np.ndarray, torch.Tensor], + order: str='lwh' + ) -> Union[np.ndarray, torch.Tensor]: + r""" + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + + :param boxes3d: (N, 7 + (2: optional)) [x, y, z, dx, dy, dz, yaw] + or [x, y, z, dx, dy, dz, roll, pitch, yaw], (x, y, z) is the box center. + :param order: 'lwh' or 'hwl'. + :return: (N, 8, 3), the 8 corners of the bounding box. + """ + assert isinstance(boxes3d, np.ndarray) \ + or isinstance(boxes3d, torch.Tensor),\ + "input boxes should be numpy array or torch tensor." + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + + if order == 'hwl': + boxes3d[:, 3:6] = boxes3d[:, [5, 4, 3]] + elif order == 'lwh': + pass + + template = boxes3d.new_tensor(( + [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1], + [1, -1, 1], [1, 1, 1], [-1, 1, 1], [-1, -1, 1], + )) / 2 + + corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :] + if boxes3d[:, 6:].shape[1] == 1: + rot_order = 'z' + elif boxes3d[:, 6:].shape[1] == 3: + rot_order = 'xyz' + else: + raise IOError("box input shape should be (N, 7) for (N, 9).") + + corners3d = rotate_points_batch(corners3d.view(-1, 8, 3), + boxes3d[:, 6:], order=rot_order).view(-1, 8, 3) + corners3d += boxes3d[:, None, 0:3] + + return corners3d.numpy() if is_numpy else corners3d
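A round-trip sketch for the 7-DoF box parametrization, mirroring the module's own self-test at the bottom of this file; the box values are illustrative.

import numpy as np
from cosense3d.utils.box_utils import boxes_to_corners_3d, corners_to_boxes_3d

# One axis-aligned box: center (0, 0, 0), l=4, w=2, h=1.5, yaw=0.
boxes = np.array([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]])
corners = boxes_to_corners_3d(boxes, order='lwh')
print(corners.shape)                         # (1, 8, 3)
print(corners[0, :, 2])                      # corner z values: -0.75 and +0.75
print(corners_to_boxes_3d(corners, mode=7))  # recovers ≈ [0, 0, 0, 4, 2, 1.5, 0]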
+ + +
[docs]def corners_to_boxes_3d(corners: Union[np.ndarray, torch.Tensor], + mode: int=9 + ) -> Union[np.ndarray, torch.Tensor]: + r""" + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + + :param corners: (N, 8, 3) + :param mode: 9 | 7 + :return: boxes, (N, 9 | 7) + """ + corners, is_numpy = check_numpy_to_torch(corners) + xyz = corners.mean(axis=1) + corners_reduced = corners - xyz.reshape(-1, 1, 3) + diff_x = corners[:, [0, 1, 5, 4], :] - corners[:, [3, 2, 6, 7], :] + diff_y = corners[:, [1, 5, 6, 2], :] - corners[:, [0, 4, 7, 3], :] + diff_z = corners[:, [4, 5, 6, 7], :] - corners[:, [0, 1, 2, 3], :] + l = torch.norm(diff_x, dim=2).mean(dim=1).reshape(-1, 1) + w = torch.norm(diff_y, dim=2).mean(dim=1).reshape(-1, 1) + h = torch.norm(diff_z, dim=2).mean(dim=1).reshape(-1, 1) + + template = corners.new_tensor(( + [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1], + [1, -1, 1], [1, 1, 1], [-1, 1, 1], [-1, -1, 1], + )).reshape(1, 8, 3) * torch.cat([l, w, h], dim=1)[:, None, :] / 2 + R, _ = find_rigid_alignment(template, corners_reduced) + euler = rotation_mat2euler_torch(R) + # yaw = torch.arctan2(dir_x[:, 1], dir_x[:, 0]).reshape(-1, 1) + if mode == 9: + boxes = torch.cat([xyz, l, w, h, euler], dim=1) + elif mode == 7: + boxes = torch.cat([xyz, l, w, h, euler[:, -1:]], dim=1) + else: + raise NotImplementedError + return boxes.numpy() if is_numpy else boxes
+ + +
[docs]def boxes3d_to_standup_bboxes(boxes): + """ + :param boxes: Tensor(N, 7) + :return: Tenosr(N, 4): [x_min, y_min, x_max, y_max) + """ + corners = boxes_to_corners_3d(boxes) + standup_boxes = torch.zeros_like(boxes[:, :4]) + standup_boxes[:, :2] = corners[..., :2].min(dim=1)[0] + standup_boxes[:, 2:] = corners[..., :2].max(dim=1)[0] + return standup_boxes
+ + +
[docs]def find_rigid_alignment(A, B): + """Find rotation and translation from A to B. + Parameters + + :param A: (B, N, 3) + :param B: (B, N, 3) + :return: + """ + A_mean = A.mean(dim=1, keepdim=True) + B_mean = B.mean(dim=1, keepdim=True) + A_c = A - A_mean + B_c = B - B_mean + # Covariance matrix + H = torch.bmm(A_c.permute(0, 2, 1), B_c) # (B, 3, N) @ (B, N, 3) = (B, 3, 3) + U, S, V = torch.svd(H) + # Rotation matrix + R = torch.bmm(V, U.permute(0, 2, 1)) + # Translation vector + t = B_mean[:, None, :] - torch.bmm(R, A_mean.permute(0, 2, 1)).permute(0, 2, 1) + return R, t
+ + +
[docs]def mask_boxes_outside_range_numpy(boxes: np.ndarray, + limit_range: list, + order: str, + min_num_corners: int=2) -> np.ndarray: + """ + + :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + :param limit_range: [minx, miny, minz, maxx, maxy, maxz] + :param order: 'lwh' or 'hwl' + :param min_num_corners: The required minimum number of corners to be considered as in range. + :return: The filtered boxes. + """ + assert boxes.shape[1] == 8 or boxes.shape[1] == 7 + + new_boxes = boxes.copy() + if boxes.shape[1] == 7: + new_boxes = boxes_to_corners_3d(new_boxes, order) + + mask = ((new_boxes >= limit_range[0:3]) & + (new_boxes <= limit_range[3:6])).all(axis=2) + mask = mask.sum(axis=1) >= min_num_corners # (N) + + return boxes[mask], mask
+ + +
[docs]def mask_boxes_outside_range_torch(boxes, lidar_range): + in_range = (boxes[:, 0] > lidar_range[0]) & \ + (boxes[:, 0] < lidar_range[3]) & \ + (boxes[:, 1] > lidar_range[1]) & \ + (boxes[:, 1] < lidar_range[4]) + return in_range
+ + +
[docs]def remove_points_in_boxes3d(points, boxes3d, x_idx=0): + """ + :param points: (num_points, x_idx + 3 + C) + :param boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps + + :return: + """ + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + points, is_numpy = check_numpy_to_torch(points) + point_masks = points_in_boxes_cpu(points[:, x_idx:x_idx+3], boxes3d) + points = points[point_masks.sum(dim=0) == 0] + + return points.numpy() if is_numpy else points
+ + +
[docs]def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)): + """ + :param boxes3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + :param extra_width: [extra_x, extra_y, extra_z] + + Returns: + + """ + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + large_boxes3d = boxes3d.clone() + + large_boxes3d[:, 3:6] += boxes3d.new_tensor(extra_width)[None, :] + return large_boxes3d
+ + +
[docs]def convert_box_to_polygon(boxes_array): + """ + Convert boxes array to shapely.geometry.Polygon format. + + :param boxes_array : np.ndarray + (N, 4, 2) or (N, 8, 3). + + :return: + list of converted shapely.geometry.Polygon object. + + """ + polygons = [Polygon([(box[i, 0], box[i, 1]) for i in range(4)]) for box in + boxes_array] + return np.array(polygons)
+ + +
[docs]def compute_iou(box, boxes): + """ + Compute iou between box and boxes list + + :param box: shapely.geometry.Polygon + Bounding box Polygon. + + :param boxes: list + List of shapely.geometry.Polygon. + + :return: iou : np.ndarray + Array of iou between box and boxes. + + """ + # Calculate intersection areas + iou = [box.intersection(b).area / box.union(b).area for b in boxes] + + return np.array(iou, dtype=np.float32)
+ + +
[docs]def bbox_cxcywh_to_xyxy(bbox): + """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). + + :param bbox (Tensor): Shape (n, 4) for bboxes. + :return: Tensor: Converted bboxes. + """ + cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) + bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] + return torch.cat(bbox_new, dim=-1)
+ + +
[docs]def bbox_xyxy_to_cxcywh(bbox): + """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). + + :param bbox (Tensor): Shape (n, 4) for bboxes. + + :return: Tensor, Converted bboxes. + """ + x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) + bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] + return torch.cat(bbox_new, dim=-1)
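A round trip between the two 2D box parametrizations; the coordinates are illustrative.

import torch
from cosense3d.utils.box_utils import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh

boxes_xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
boxes_cxcywh = bbox_xyxy_to_cxcywh(boxes_xyxy)
print(boxes_cxcywh)                       # [[30., 50., 40., 60.]]
print(bbox_cxcywh_to_xyxy(boxes_cxcywh))  # [[10., 20., 50., 80.]]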
+ + +
[docs]def transform_boxes_3d(boxes_in, transform, mode=7): + """ + :param boxes_in: (N, 7) + :param transform: (4, 4) + :param mode: 7 | 9 + """ + is_numpy = isinstance(boxes_in, np.ndarray) + assert mode == 11 or mode == 9 or mode == 7 + assert boxes_in.shape[-1] == 11 or boxes_in.shape[-1] == 9 or boxes_in.shape[-1] == 7 + if boxes_in.shape[-1] == 11: + boxes = boxes_in[:, [2, 3, 4, 5, 6, 7, 10]] + elif boxes_in.shape[-1] == 9: + boxes = boxes_in[:, [0, 1, 2, 3, 4, 5, 8]] + else: + boxes = boxes_in + boxes_corner = boxes_to_corners_3d(boxes[:, :7]) # (N, 8, 3) + boxes_corner = boxes_corner.reshape(-1, 3).T # (N*8, 3) + if is_numpy: + boxes_corner = np.concatenate([boxes_corner, np.ones_like(boxes_corner[:1])], axis=0) + else: + boxes_corner = torch.cat([boxes_corner, torch.ones_like(boxes_corner[:1])], dim=0) + # rotate bbx to augmented coords + boxes_corner = (transform @ boxes_corner)[:3].T.reshape(len(boxes), 8, 3) + if mode == 11: + boxes_ = corners_to_boxes_3d(boxes_corner, mode=9) + if is_numpy: + boxes = np.concatenate([boxes_in[:, :2], boxes_], axis=-1) + else: + boxes = torch.cat([boxes_in[:, :2], boxes_], dim=-1) + else: + boxes = corners_to_boxes_3d(boxes_corner, mode=mode) + if is_numpy and isinstance(boxes, torch.Tensor): + boxes = boxes.cpu().numpy() + return boxes
+ + +
[docs]def normalize_bbox(bboxes): + cx = bboxes[..., 0:1] + cy = bboxes[..., 1:2] + cz = bboxes[..., 2:3] + w = bboxes[..., 3:4].log() + l = bboxes[..., 4:5].log() + h = bboxes[..., 5:6].log() + + rot = bboxes[..., 6:7] + if bboxes.size(-1) > 7: + vx = bboxes[..., 7:8] + vy = bboxes[..., 8:9] + normalized_bboxes = torch.cat( + (cx, cy, cz, w, l, h, rot.sin(), rot.cos(), vx, vy), dim=-1 + ) + else: + normalized_bboxes = torch.cat( + (cx, cy, cz, w, l, h, rot.sin(), rot.cos()), dim=-1 + ) + return normalized_bboxes
+ + +
[docs]def denormalize_bbox(normalized_bboxes): + # rotation + rot_sine = normalized_bboxes[..., 6:7] + + rot_cosine = normalized_bboxes[..., 7:8] + rot = torch.atan2(rot_sine, rot_cosine) + + # center in the bev + cx = normalized_bboxes[..., 0:1] + cy = normalized_bboxes[..., 1:2] + cz = normalized_bboxes[..., 2:3] + + # size + w = normalized_bboxes[..., 3:4] + l = normalized_bboxes[..., 4:5] + h = normalized_bboxes[..., 5:6] + + w = w.exp() + l = l.exp() + h = h.exp() + if normalized_bboxes.size(-1) > 8: + # velocity + vx = normalized_bboxes[:, 8:9] + vy = normalized_bboxes[:, 9:10] + denormalized_bboxes = torch.cat([cx, cy, cz, w, l, h, rot, vx, vy], dim=-1) + else: + denormalized_bboxes = torch.cat([cx, cy, cz, w, l, h, rot], dim=-1) + return denormalized_bboxes
+ + +if __name__=="__main__": + boxes = np.random.random((1, 9)) + boxes[:, 3] *= 4 + boxes[:, 4] *= 1.8 + boxes[:, 5] *= 1.6 + boxes[:, 8] *= 3.14 + + boxes_corner = boxes_to_corners_3d(boxes) + boxes_center = corners_to_boxes_3d(boxes_corner) + print(boxes) + print(boxes_center) + print('------------------------------') + print(boxes_center - boxes) +
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/eval_detection_utils.html b/docs/_build/html/_modules/cosense3d/utils/eval_detection_utils.html new file mode 100644 index 00000000..7ef6abe1 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/eval_detection_utils.html @@ -0,0 +1,333 @@ + + + + + + cosense3d.utils.eval_detection_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.utils.eval_detection_utils

+import os
+
+import numpy as np
+import torch
+
+from cosense3d.utils.misc import torch_tensor_to_numpy
+from cosense3d.utils.box_utils import convert_box_to_polygon, compute_iou, boxes_to_corners_3d
+from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu, boxes_iou_bev, boxes_iou3d_cpu, boxes_bev_iou_cpu
+
+
+
[docs]def voc_ap(rec, prec): + """ + VOC 2010 Average Precision. + """ + rec.insert(0, 0.0) + rec.append(1.0) + mrec = rec[:] + + prec.insert(0, 0.0) + prec.append(0.0) + mpre = prec[:] + + for i in range(len(mpre) - 2, -1, -1): + mpre[i] = max(mpre[i], mpre[i + 1]) + + i_list = [] + for i in range(1, len(mrec)): + if mrec[i] != mrec[i - 1]: + i_list.append(i) + + ap = 0.0 + for i in i_list: + ap += ((mrec[i] - mrec[i - 1]) * mpre[i]) + return ap, mrec, mpre
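A toy evaluation of voc_ap on hand-made precision/recall lists; the values are illustrative only, the function mutates its inputs in place, and importing this module requires the compiled cosense3d.ops extension.

from cosense3d.utils.eval_detection_utils import voc_ap

rec = [0.25, 0.5, 0.75]
prec = [1.0, 0.8, 0.6]
ap, mrec, mpre = voc_ap(rec, prec)
print(ap)  # ≈ 0.6: area under the interpolated precision-recall curve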
+ + +
[docs]def caluclate_tp_fp(det_boxes, det_score, gt_boxes, result_stat, iou_thresh, + det_range=None): + """ + Calculate the true positive and false positive numbers of the current + frames. + + Parameters + ---------- + det_boxes : torch.Tensor + The detection bounding box, shape (N, 8, 3) or (N, 4, 2) or (N, 7). + det_score :torch.Tensor + The confidence score for each preditect bounding box. + gt_boxes : torch.Tensor + The groundtruth bounding box. + result_stat: dict + A dictionary contains fp, tp and gt number. + iou_thresh : float + The iou thresh. + range : list, [left_range, right_range] + The evaluation range left bound + """ + # fp, tp and gt in the current frame + fp = [] + tp = [] + gt = gt_boxes.shape[0] + if det_boxes is not None: + # convert bounding boxes to numpy array + det_boxes = torch_tensor_to_numpy(det_boxes) + det_score = torch_tensor_to_numpy(det_score) + gt_boxes = torch_tensor_to_numpy(gt_boxes) + # convert center format to corners + if det_boxes.ndim==2 and det_boxes.shape[1] == 7: + det_boxes = boxes_to_corners_3d(det_boxes) + if gt_boxes.ndim==2 and gt_boxes.shape[1] == 7: + gt_boxes = boxes_to_corners_3d(gt_boxes) + + # remove the bbx out of range + if det_range is not None: + pass + + # sort the prediction bounding box by score + score_order_descend = np.argsort(-det_score) + det_score = det_score[score_order_descend] # from high to low + det_polygon_list = list(convert_box_to_polygon(det_boxes)) + gt_polygon_list = list(convert_box_to_polygon(gt_boxes)) + + # match prediction and gt bounding box + for i in range(score_order_descend.shape[0]): + det_polygon = det_polygon_list[score_order_descend[i]] + ious = compute_iou(det_polygon, gt_polygon_list) + + if len(gt_polygon_list) == 0 or np.max(ious) < iou_thresh: + fp.append(1) + tp.append(0) + continue + + fp.append(0) + tp.append(1) + + gt_index = np.argmax(ious) + gt_polygon_list.pop(gt_index) + result_stat[iou_thresh]['scr'] += det_score.tolist() + else: + gt = gt_boxes.shape[0] + result_stat[iou_thresh]['fp'] += fp + result_stat[iou_thresh]['tp'] += tp + result_stat[iou_thresh]['gt'] += gt
+ + +
[docs]def calculate_ap(result_stat, iou, global_sort_detections): + """ + Calculate the average precision and recall, and save them into a txt. + + Parameters + ---------- + result_stat : dict + A dictionary contains fp, tp and gt number. + + iou : float + + global_sort_detections : bool + Whether to sort the detection results globally. + """ + iou_5 = result_stat[iou] + + if global_sort_detections: + fp = np.array(iou_5['fp']) + tp = np.array(iou_5['tp']) + score = np.array(iou_5['scr']) + + assert len(fp) == len(tp) and len(tp) == len(score) + sorted_index = np.argsort(-score) + fp = fp[sorted_index].tolist() + tp = tp[sorted_index].tolist() + + else: + fp = iou_5['fp'] + tp = iou_5['tp'] + assert len(fp) == len(tp) + + gt_total = iou_5['gt'] + + cumsum = 0 + for idx, val in enumerate(fp): + fp[idx] += cumsum + cumsum += val + + cumsum = 0 + for idx, val in enumerate(tp): + tp[idx] += cumsum + cumsum += val + + rec = tp[:] + for idx, val in enumerate(tp): + rec[idx] = float(tp[idx]) / gt_total + + prec = tp[:] + for idx, val in enumerate(tp): + prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx]) + + ap, mrec, mprec = voc_ap(rec[:], prec[:]) + + return ap, mrec, mprec
+ + +
[docs]def eval_final_results(result_stat, iou_thrs, global_sort_detections=False): + dump_dict = {} + for iou in iou_thrs: + ap, mrec, mpre = calculate_ap(result_stat, iou, global_sort_detections) + iou_str = f"{int(iou * 100)}" + dump_dict.update({f'ap_{iou_str}': ap, + f'mpre_{iou_str}': mpre, + f'mrec_{iou_str}': mrec, + }) + return dump_dict
+ + +
[docs]def ops_cal_tp(pred_boxes, gt_boxes, iou_mode='3d', IoU_thr=0.7): + if len(pred_boxes) == 0: + return torch.zeros(pred_boxes.shape[0], device=pred_boxes.device) + elif len(gt_boxes) == 0: + return torch.zeros(len(pred_boxes), device=pred_boxes.device).bool() + else: + if pred_boxes.is_cuda: + iou_func = boxes_iou3d_gpu if iou_mode == '3d' else boxes_iou_bev + else: + iou_func = boxes_iou3d_cpu if iou_mode == '3d' else boxes_bev_iou_cpu + ious = iou_func(pred_boxes, gt_boxes) + max_iou_pred_to_gts = ious.max(dim=1) + max_iou_gt_to_preds = ious.max(dim=0) + tp = max_iou_pred_to_gts[0] > IoU_thr + is_best_match = max_iou_gt_to_preds[1][max_iou_pred_to_gts[1]] \ + == torch.tensor([i for i in range(len(tp))], device=tp.device) + tp[torch.logical_not(is_best_match)] = False + return tp
+ + +
[docs]def cal_precision_recall(scores, tps, n_pred, n_gt): + order_inds = scores.argsort(descending=True) + tp_all = tps[order_inds] + list_accTP = tp_all.cumsum(dim=0) + precision = list_accTP.float() / torch.arange(1, n_pred + 1) + recall = list_accTP.float() / n_gt + return precision, recall
+ + +
[docs]def cal_ap_all_point(scores, tps, n_pred, n_gt): + ''' + source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L292 + ''' + + prec, rec = cal_precision_recall(scores, tps, n_pred, n_gt) + mrec = [] + mrec.append(0) + [mrec.append(e.item()) for e in rec] + mrec.append(1) + mpre = [] + mpre.append(0) + [mpre.append(e.item()) for e in prec] + mpre.append(0) + for i in range(len(mpre) - 1, 0, -1): + mpre[i - 1] = max(mpre[i - 1], mpre[i]) + ii = [] + for i in range(len(mrec) - 1): + if mrec[1:][i] != mrec[0:-1][i]: + ii.append(i + 1) + ap = 0 + for i in ii: + ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i]) + # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii] + return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/iou2d_calculator.html b/docs/_build/html/_modules/cosense3d/utils/iou2d_calculator.html new file mode 100644 index 00000000..74fde799 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/iou2d_calculator.html @@ -0,0 +1,312 @@ + + + + + + cosense3d.utils.iou2d_calculator — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for cosense3d.utils.iou2d_calculator

+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+
+
[docs]def cast_tensor_type(x, scale=1., dtype=None): + if dtype == 'fp16': + # scale is for preventing overflows + x = (x / scale).half() + return x
+ + +
[docs]def fp16_clamp(x, min=None, max=None): + if not x.is_cuda and x.dtype == torch.float16: + # clamp for cpu float16, tensor fp16 has no clamp implementation + return x.float().clamp(min, max).half() + + return x.clamp(min, max)
+ + +
[docs]def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6): + r"""Calculate overlap between two set of bboxes. + + FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 + Note: + Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', + there are some new generated variable when calculating IOU + using bbox_overlaps function: + + 1) is_aligned is False + area1: M x 1 + area2: N x 1 + lt: M x N x 2 + rb: M x N x 2 + wh: M x N x 2 + overlap: M x N x 1 + union: M x N x 1 + ious: M x N x 1 + + Total memory: + S = (9 x N x M + N + M) * 4 Byte, + + When using FP16, we can reduce: + R = (9 x N x M + N + M) * 4 / 2 Byte + R large than (N + M) * 4 * 2 is always true when N and M >= 1. + Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, + N + 1 < 3 * N, when N or M is 1. + + Given M = 40 (ground truth), N = 400000 (three anchor boxes + in per grid, FPN, R-CNNs), + R = 275 MB (one times) + + A special case (dense detection), M = 512 (ground truth), + R = 3516 MB = 3.43 GB + + When the batch size is B, reduce: + B x R + + Therefore, CUDA memory runs out frequently. + + Experiments on GeForce RTX 2080Ti (11019 MiB): + + | dtype | M | N | Use | Real | Ideal | + +----+----+----+----+----+----+ + | FP32 | 512 | 400000 | 8020 MiB | -- | -- | + | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | + | FP32 | 40 | 400000 | 1540 MiB | -- | -- | + | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | + + 2) is_aligned is True + area1: N x 1 + area2: N x 1 + lt: N x 2 + rb: N x 2 + wh: N x 2 + overlap: N x 1 + union: N x 1 + ious: N x 1 + + Total memory: + S = 11 x N * 4 Byte + + When using FP16, we can reduce: + R = 11 x N * 4 / 2 Byte + + So do the 'giou' (large than 'iou'). + + Time-wise, FP16 is generally faster than FP32. + + When gpu_assign_thr is not -1, it takes more time on cpu + but not reduce memory. + There, we can reduce half the memory and keep the speed. + + If ``is_aligned`` is ``False``, then calculate the overlaps between each + bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned + pair of bboxes1 and bboxes2. + + + :param bboxes1: (Tensor) shape (B, m, 4) in <x1, y1, x2, y2> format or empty. + :param bboxes2: (Tensor) shape (B, n, 4) in <x1, y1, x2, y2> format or empty. + B indicates the batch dim, in shape (B1, B2, ..., Bn). + If ``is_aligned`` is ``True``, then m and n must be equal. + :param mode: (str) "iou" (intersection over union), "iof" (intersection over + foreground) or "giou" (generalized intersection over union). + Default "iou". + :param is_aligned: (bool, optional) If True, then m and n must be equal. + Default False. + :param eps: (float, optional) A value added to the denominator for numerical + stability. Default 1e-6. 
+ + :return: + Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) + + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 10, 10], + >>> [10, 10, 20, 20], + >>> [32, 32, 38, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 10, 20], + >>> [0, 10, 10, 19], + >>> [10, 10, 20, 20], + >>> ]) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + + Example: + >>> empty = torch.empty(0, 4) + >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' + # Either the boxes are empty or the length of boxes' last dimension is 4 + assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) + + # Batch dim must be the same + # Batch dim: (B1, B2, ... Bn) + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( + bboxes1[..., 3] - bboxes1[..., 1]) + area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( + bboxes2[..., 3] - bboxes2[..., 1]) + + if is_aligned: + lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] + rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) + enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) + else: + lt = torch.max(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) # [B, rows, cols, 2] + rb = torch.min(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + else: + union = area1[..., None] + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) + enclosed_rb = torch.max(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) + + eps = union.new_tensor([eps]) + union = torch.max(union, eps) + ious = overlap / union + if mode in ['iou', 'iof']: + return ious + # calculate gious + enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) + enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] + enclose_area = torch.max(enclose_area, eps) + gious = ious - (enclose_area - union) / enclose_area + return gious
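A minimal usage sketch of bbox_overlaps, following the docstring example above; the box coordinates are illustrative:

import torch
from cosense3d.utils.iou2d_calculator import bbox_overlaps

bboxes1 = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20]])
bboxes2 = torch.FloatTensor([[0, 0, 10, 20], [10, 10, 20, 20]])
ious = bbox_overlaps(bboxes1, bboxes2)                      # (2, 2) pairwise IoU matrix
gious = bbox_overlaps(bboxes1, bboxes2, mode='giou')        # generalized IoU
aligned = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)  # (2,) element-wise IoU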
+
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/logger.html b/docs/_build/html/_modules/cosense3d/utils/logger.html new file mode 100644 index 00000000..306ec1d9 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/logger.html @@ -0,0 +1,262 @@ + + + + + + cosense3d.utils.logger — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.logger

+import os, pathlib
+from datetime import datetime
+from collections import defaultdict, deque
+import logging
+
+import torch
+from functools import partial
+from rich.logging import RichHandler
+
+from cosense3d.utils.misc import ensure_dir
+
+
+
[docs]def setup_logger(exp_name, debug):
+    from importlib import reload
+
+    reload(logging)
+    # Reload the logging module so that the basicConfig call below can re-apply
+    # the handler configuration even if logging was already configured elsewhere.
+
+    CUDA_TAG = os.environ.get("CUDA_VISIBLE_DEVICES", "0")
+    EXP_TAG = exp_name
+
+    logger_config = dict(
+        level=logging.DEBUG if debug else logging.INFO,
+        format=f"{CUDA_TAG}:[{EXP_TAG}] %(message)s",
+        handlers=[RichHandler()],
+        datefmt="[%X]",
+    )
+    logging.basicConfig(**logger_config)
+ + +
[docs]class SmoothedValue(object): + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{avg:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.count = 0 + self.total = 0.0 + self.fmt = fmt + +
[docs] def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n
+ + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value)
+ + +
[docs]class LogMeter(object): + def __init__(self, total_iter, logdir, delimiter="\t", log_every=20, wandb_project=None): + self.meters = defaultdict(partial(SmoothedValue, fmt="{avg:.4f}")) + file_name = datetime.now().strftime("%d_%m_%H_%M_%S") + ".log" + self.logdir = logdir + if not isinstance(logdir, pathlib.Path): + logdir = pathlib.Path(logdir) + self.log_fh = (logdir / file_name).open('a') + self.delimiter = delimiter + self.log_every = log_every + self.log_msg = self.delimiter.join([ + 'E:{epoch:2d}', + 'I:[{itr:4d}/' + str(total_iter) + ']', + 'lr:{lr:.6f}', + '{meters}' + ]) + if wandb_project is not None: + import wandb + wandb.init(project=wandb_project) + wandb.config.log_histo = True + wandb.config.step = 0 + wandb_project = wandb + self.wandb = wandb_project + +
[docs] def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int, str)) + self.meters[k].update(v)
+ + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.log_fh.close() + +
[docs] def add_meter(self, name, meter): + self.meters[name] = meter
+ +
[docs] def log(self, epoch, iteration, lr, **kwargs): + self.update(**kwargs) + if iteration % self.log_every == 0: + msg = self.log_msg.format( + epoch=epoch, + itr=iteration, + lr=lr, + meters=str(self) + ) + print(msg) + self.log_fh.write(msg + "\n") + if self.wandb is not None: + self.wandb.log({('avg/' + name): meter.avg for name, meter in self.meters.items()}) + self.wandb.log({('global_avg/' + name): meter.global_avg for name, meter in self.meters.items()})
+ + +
[docs]class TestLogger(object): + def __init__(self, logdir): + self.logdir = logdir + ensure_dir(self.logdir) + self.log_fh = (pathlib.Path(self.logdir) / "test.log").open('a') + +
[docs] def log(self, msg): + self.log_fh.writelines(msg)
+ + def __exit__(self, exc_type, exc_val, exc_tb): + self.log_fh.close()
+
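A brief, hedged sketch of how setup_logger and LogMeter can be wired together, assuming a writable ./logs directory (the directory name, experiment name and metric values are illustrative):

import os
from cosense3d.utils.logger import setup_logger, LogMeter

setup_logger('demo_exp', debug=False)           # configure the root logger with a RichHandler
os.makedirs('./logs', exist_ok=True)
meter = LogMeter(total_iter=100, logdir='./logs', log_every=20)
for itr in range(100):
    # scalars are smoothed by SmoothedValue windows and printed/written every 20 iterations
    meter.log(epoch=0, iteration=itr, lr=1e-3, loss=0.5, iou=0.7)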
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/lr_scheduler.html b/docs/_build/html/_modules/cosense3d/utils/lr_scheduler.html new file mode 100644 index 00000000..bd46456c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/lr_scheduler.html @@ -0,0 +1,236 @@ + + + + + + cosense3d.utils.lr_scheduler — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.lr_scheduler

+from torch.optim import lr_scheduler as torch_lr
+from torch.optim import Optimizer
+
+
+
[docs]def build_lr_scheduler(optimizer, cfg, total_iter): + return LRUpdater(optimizer, total_iter, **cfg)
+ + +
[docs]class TransformerAdaptiveScheduler(torch_lr._LRScheduler): + def __init__(self, + optimizer: Optimizer, + dim_embed: int, + warmup_steps: int, + itrs_per_epoch: int, + last_epoch: int = -1, + global_fade_ratio: float = 1, + verbose: bool = False) -> None: + self.dim_embed = dim_embed + self.warmup_steps = warmup_steps + self.num_param_groups = len(optimizer.param_groups) + self.global_fade_ratio = global_fade_ratio + super().__init__(optimizer, last_epoch, verbose) + if last_epoch > 0: + self._step_count = itrs_per_epoch * last_epoch + +
[docs] def get_lr(self) -> float: + lr = self.calc_lr(self._step_count, self.dim_embed, self.warmup_steps) * self.global_fade_ratio + return [lr] * self.num_param_groups
+ +
[docs] def calc_lr(self, step, dim_embed, warmup_steps): + return dim_embed ** (-0.5) * min(step ** (-0.5), step * warmup_steps ** (-1.5))
+ + +
[docs]class LRUpdater: + """ + Unified API for updating LR with different LR schedulers. + """ + def __init__(self, optimizer, total_iter, policy, **kwargs): + self.policy = policy + self.total_itr = total_iter + if policy == 'MultiStepLR': + # construct a learning rate scheduler + self.lr_scheduler = torch_lr.MultiStepLR(optimizer, **kwargs) + elif policy == 'CosineAnnealingWarm': + from timm.scheduler.cosine_lr import CosineLRScheduler + num_steps = kwargs['epochs'] * total_iter + warmup_lr = kwargs['warmup_lr'] + warmup_steps = kwargs['warmup_epochs'] * total_iter + lr_min = kwargs['lr_min'] + decay_rate = kwargs.get('decay_rate', 0.5) + + self.lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_steps, + lr_min=lr_min, + warmup_lr_init=warmup_lr, + warmup_t=warmup_steps, + cycle_limit=1, + t_in_epochs=False, + cycle_decay=decay_rate + ) + elif policy == 'TransformerAdaptiveScheduler': + kwargs['itrs_per_epoch'] = total_iter + self.lr_scheduler = TransformerAdaptiveScheduler(optimizer, **kwargs) + else: + raise NotImplementedError + + self.optimizer = self.lr_scheduler.optimizer + +
[docs] def step_epoch(self, epoch): + if self.policy == 'TransformerAdaptiveScheduler': + pass + elif self.policy in ['CosineAnnealingWarm',]: + self.lr_scheduler.step(epoch) + else: + self.lr_scheduler.step()
+ +
[docs] def step_itr(self, itr): + if self.policy == 'TransformerAdaptiveScheduler': + self.lr_scheduler.step()
+ +
[docs] def state_dict(self): + return self.lr_scheduler.state_dict()
+ +
[docs] def load_state_dict(self, state_dict): + self.lr_scheduler.load_state_dict(state_dict)
+ +
[docs] def get_last_lr(self): + return self.lr_scheduler.get_last_lr()
+ + +if __name__=="__main__": + import torch + import matplotlib.pyplot as plt + params = torch.nn.Parameter(torch.rand(10, 10)) + optimizer = torch.optim.AdamW([params], + lr=0.0001, + weight_decay=1e-2, + betas=(0.9, 0.98), + eps=1.0e-9, + # init_lr=0.001, + ) + lr_scheduler = TransformerAdaptiveScheduler( + optimizer, + dim_embed=256, + warmup_steps=2000, + itrs_per_epoch=2000, + last_epoch=-1, + global_fade_ratio=0.5 + ) + + # torch.save(optimizer.state_dict(), 'optimizer_checkpoint.pth') + # optimizer.load_state_dict(torch.load('optimizer_checkpoint.pth')) + # lr_scheduler = TransformerAdaptiveScheduler( + # optimizer, + # dim_embed=256, + # warmup_steps=4000, + # itrs_per_epoch=2000, + # last_epoch=3, + # ) + + lrs = [] + for epoch in range(50 * 2000): + lrs.append(lr_scheduler.get_lr()[0]) + optimizer.step() + lr_scheduler.step() + + plt.plot(torch.arange(len(lrs)).numpy(), lrs) + plt.show() + plt.close() +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/metrics.html b/docs/_build/html/_modules/cosense3d/utils/metrics.html new file mode 100644 index 00000000..2bdd86dd --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/metrics.html @@ -0,0 +1,504 @@ + + + + + + cosense3d.utils.metrics — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.metrics

+import os, logging
+import torch
+import torch.nn.functional as F
+import matplotlib.pyplot as plt
+import numpy as np
+from sklearn.metrics import precision_recall_curve
+
+from cosense3d.ops.iou3d_nms_utils import boxes_iou3d_gpu, boxes_iou_bev
+from cosense3d.ops.utils import points_in_boxes_gpu
+from cosense3d.utils.misc import save_json, update_dict
+from cosense3d.utils.box_utils import mask_boxes_outside_range_torch
+
+
+
[docs]class Metric: + def __init__(self, cfg, log_dir): + self.cfg = cfg + self.log_dir = log_dir + +
[docs] def add_samples(self, data_dict): + raise NotImplementedError
+ +
[docs] def save_detections(self, filename): + raise NotImplementedError
+ +
[docs] def summary(self): + raise NotImplementedError
+ + +
[docs]class MetricObjDet(Metric): + def __init__(self, cfg, log_dir, logger, bev=False): + super(MetricObjDet, self).__init__(cfg, log_dir) + self.eval_func = cfg['eval_func'] + self.lidar_range = cfg.get('lidar_range', None) + self.score_metric = cfg.get('score_metric', 'scr') + self.score_thr = cfg.get('score_thr', 0.0) + self.logger = logger + self.samples = [] + self.pred_boxes = {} + self.gt_boxes = {} + self.confidences = {} + self.v_ids = {} + self.bev = bev + self.iou_fn = boxes_iou_bev if self.bev else boxes_iou3d_gpu + self.file_test = os.path.join(log_dir, 'pred.json') + self.has_test_detections = False + self.result = {} + +
[docs] def add_sample(self, name, pred_boxes, gt_boxes, confidences, ids=None): + self.samples.append(name) + valid = confidences > self.score_thr + if self.lidar_range is not None: + in_range_gt = mask_boxes_outside_range_torch(gt_boxes, self.lidar_range), + in_range_pred = mask_boxes_outside_range_torch(pred_boxes, self.lidar_range) + valid = torch.logical_and(valid, in_range_pred) + gt_boxes = gt_boxes[in_range_gt] + self.pred_boxes[name] = pred_boxes[valid] + self.gt_boxes[name] = gt_boxes + self.confidences[name] = confidences[valid] + if ids is not None: + self.v_ids[name] = ids + ss = name.split("/") + scenario = ss[0] + frame = ss[1] + pred_boxes_np = pred_boxes[valid].cpu().numpy() + bbx_out = np.zeros((len(pred_boxes_np), 11)) + bbx_out[:, [2, 3, 4, 5, 6, 7, 10]] = pred_boxes_np + bbx_out[:, 0] = -1 # box id not set + conf_out = confidences[valid].cpu().numpy() + if '.' in frame: + frame, agent_id = frame.split('.') + fdict = {'agents': { + agent_id: { + 'gt_boxes': bbx_out.tolist(), + 'box_confidences': conf_out.tolist() + } + }} + else: + fdict = {'meta': {'bbx_center_global': bbx_out.tolist()}} + update_dict( + self.result, + {scenario: {frame: fdict}} + )
+ +
[docs] @torch.no_grad() + def add_samples(self, out_dict): + data_dict = out_dict['detections'] + names = data_dict['name'] + for i in range(len(names)): + self.add_sample(names[i], + data_dict['pred_boxes'][i]['box'].float(), + data_dict['gt_boxes'][i].float(), + data_dict['pred_boxes'][i][self.score_metric])
+ +
[docs] def save_detections(self, filename): + dict_detections = { + 'samples': self.samples, + 'pred_boxes': self.pred_boxes, + 'gt_boxes': self.gt_boxes, + 'confidences': self.confidences, + 'ids': self.v_ids + } + torch.save(dict_detections, filename) + self.has_test_detections = True
+ +
[docs] def cal_precision_recall(self, IoU_thr=0.5): + list_sample = [] + list_confidence = [] + list_tp = [] + N_gt = 0 + + for sample in self.samples: + if len(self.pred_boxes[sample])>0 and len(self.gt_boxes[sample])>0: + ious = self.iou_fn(self.pred_boxes[sample], self.gt_boxes[sample]) + n, m = ious.shape + list_sample.extend([sample] * n) + list_confidence.extend(self.confidences[sample]) + N_gt += len(self.gt_boxes[sample]) + max_iou_pred_to_gts = ious.max(dim=1) + max_iou_gt_to_preds = ious.max(dim=0) + tp = max_iou_pred_to_gts[0] > IoU_thr + is_best_match = max_iou_gt_to_preds[1][max_iou_pred_to_gts[1]] \ + ==torch.tensor([i for i in range(len(tp))], device=tp.device) + tp[torch.logical_not(is_best_match)] = False + list_tp.extend(tp) + elif len(self.pred_boxes[sample])==0: + N_gt += len(self.gt_boxes[sample]) + elif len(self.gt_boxes[sample])==0: + tp = torch.zeros(len(self.pred_boxes[sample]), device=self.pred_boxes[sample].device) + list_tp.extend(tp.bool()) + order_inds = torch.tensor(list_confidence).argsort(descending=True) + tp_all = torch.tensor(list_tp)[order_inds] + list_accTP = tp_all.cumsum(dim=0) + # list_accFP = torch.logical_not(tp_all).cumsum(dim=0) + list_precision = list_accTP.float() / torch.arange(1, len(list_sample) + 1) + list_recall = list_accTP.float() / N_gt + # plt.plot(list_recall.numpy(), list_precision.numpy(), 'k.') + # plt.savefig(str(model.run_path / 'auc_thr{}_ncoop{}.png' + # .format(model.cfg['score_threshold'], model.n_coop))) + # plt.close() + + return list_precision, list_recall
+ +
[docs] def cal_ap_all_point(self, IoU_thr=0.5): + ''' + source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L292 + ''' + + prec, rec = self.cal_precision_recall(IoU_thr=IoU_thr) + mrec = [] + mrec.append(0) + [mrec.append(e.item()) for e in rec] + mrec.append(1) + mpre = [] + mpre.append(0) + [mpre.append(e.item()) for e in prec] + mpre.append(0) + for i in range(len(mpre) - 1, 0, -1): + mpre[i - 1] = max(mpre[i - 1], mpre[i]) + ii = [] + for i in range(len(mrec) - 1): + if mrec[1:][i] != mrec[0:-1][i]: + ii.append(i + 1) + ap = 0 + for i in ii: + ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i]) + # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii] + return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]
+ +
[docs] def cal_ap_11_point(self, IoU_thr=0.5): + ''' + source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L315 + ''' + # 11-point interpolated average precision + prec, rec = self.cal_precision_recall(IoU_thr=IoU_thr) + mrec = [] + # mrec.append(0) + [mrec.append(e.item()) for e in rec] + # mrec.append(1) + mpre = [] + # mpre.append(0) + [mpre.append(e.item()) for e in prec] + # mpre.append(0) + recallValues = np.linspace(0, 1, 11) + recallValues = list(recallValues[::-1]) + rhoInterp = [] + recallValid = [] + # For each recallValues (0, 0.1, 0.2, ... , 1) + for r in recallValues: + # Obtain all recall values higher or equal than det_r + argGreaterRecalls = np.argwhere(mrec[:] >= r) + pmax = 0 + # If there are recalls above det_r + if argGreaterRecalls.size != 0: + pmax = max(mpre[argGreaterRecalls.min():]) + recallValid.append(r) + rhoInterp.append(pmax) + # By definition AP = sum(max(precision whose recall is above det_r))/11 + ap = sum(rhoInterp) / 11 + # Generating values for the plot + rvals = [] + rvals.append(recallValid[0]) + [rvals.append(e) for e in recallValid] + rvals.append(0) + pvals = [] + pvals.append(0) + [pvals.append(e) for e in rhoInterp] + pvals.append(0) + # rhoInterp = rhoInterp[::-1] + cc = [] + for i in range(len(rvals)): + p = (rvals[i], pvals[i - 1]) + if p not in cc: + cc.append(p) + p = (rvals[i], pvals[i]) + if p not in cc: + cc.append(p) + recallValues = [i[0] for i in cc] + rhoInterp = [i[1] for i in cc] + return [ap, rhoInterp, recallValues, None]
+ +
[docs] def summary(self): + thrs = [0.3, 0.5, 0.7] + ss = [] + for thr in thrs: + ap = getattr(self, self.eval_func)(thr)[0] + ss.append(f"AP@{thr}: {ap:.4f}") + ss = (f"Score metric: {self.score_metric}\n " + f"Score thr: {self.score_thr:.2f}\n" + f"--------------\n" + + "\n".join(ss) + "\n") + print(ss) + self.logger.write(ss) + + os.makedirs(os.path.join(self.log_dir, "jsons"), exist_ok=True) + for s, sdict in self.result.items(): + save_json(sdict, os.path.join(self.log_dir, "jsons", f'{s}.json'))
+ + +
[docs]class MetricSemSeg(Metric): + def __init__(self, cfg, run_path, name='test'): + super(MetricSemSeg, self).__init__(cfg, run_path) + self.filename = os.path.join(run_path, name) + self.n_cls = cfg['n_cls'] + # model.result = { + # 'tp': [], + # 'tn': [], + # 'fp': [], + # 'fn': [], + # 'N': 0 + # } + self.result = { + 'area_intersect': torch.zeros(self.n_cls), + 'area_label': torch.zeros(self.n_cls), + 'area_pred': torch.zeros(self.n_cls), + 'area_union': torch.zeros(self.n_cls) + } + +
[docs] def add_samples(self, data_dict):
+        preds = torch.argmax(data_dict['pred_cls'], dim=1).view(-1, 1)
+        tgts = data_dict['tgt_cls'].view(-1, 1)
+        # mask = (tgts != 0)
+        # preds = preds[mask]
+        # tgts = tgts[mask]
+        classes = torch.arange(self.n_cls, dtype=preds.dtype, device=preds.device).view(1, -1)
+        intersect = preds[preds == tgts]
+        area_intersect = (intersect.view(-1, 1) == (classes)).sum(0)
+        area_pred = (preds.view(-1, 1) == (classes)).sum(0)
+        area_label = (tgts.view(-1, 1) == (classes)).sum(0)
+        # union = pred + label - intersect
+        area_union = area_pred + area_label - area_intersect
+        self.result['area_intersect'] = self.result['area_intersect'] + area_intersect.cpu()
+        self.result['area_label'] = self.result['area_label'] + area_label.cpu()
+        self.result['area_pred'] = self.result['area_pred'] + area_pred.cpu()
+        self.result['area_union'] = self.result['area_union'] + area_union.cpu()
+ # pred_pos = preds.int() == classes + # pred_neg = torch.logical_not(pred_pos) + # tgt_pos = tgts.int() == classes + # tgt_neg = torch.logical_not(tgt_pos) + # tp = torch.logical_and(pred_pos, tgt_pos).sum(0) + # tn = torch.logical_and(pred_neg, tgt_neg).sum(0) + # fp = torch.logical_and(pred_pos, tgt_neg).sum(0) + # fn = torch.logical_and(pred_neg, tgt_pos).sum(0) + # acc_ = tp.sum() / len(tgts) + # model.result['tp'].append(tp) + # model.result['tn'].append(tn) + # model.result['fp'].append(fp) + # model.result['fn'].append(fn) + # model.result['N'] += len(tgts) + +
[docs] def cal_ious_and_accs(self): + area_intersect = self.result['area_intersect'].sum(0) + area_label = self.result['area_label'].sum(0) + area_union = self.result['area_union'].sum(0) + all_acc = area_intersect.sum() / area_label.sum() + acc = area_intersect / area_label + iou = area_intersect / area_union + + result = { + 'all_acc': all_acc, + 'acc': acc, + 'iou': iou + } + for k, v in result.items(): + print(k, v) + return result
+ +
[docs] def save_detections(self, filename): + torch.save(self.result, filename)
+ + +
[docs]class MetricBev(Metric): + def __init__(self, cfg, run_path, logger, name='test'): + super(MetricBev, self).__init__(cfg, run_path) + self.filename = os.path.join(run_path, name) + self.filename_prefix = '' + self.logger = logger + self.cfg = cfg + self.thrs = torch.arange(0.1, 1.1, 0.1) + self.iou_sum = 0 + self.iou_cnt = 0 + self.result = {} + +
[docs] def add_samples(self, out_dict): + """ + Args: + out_dict: + bev: + conf: Tensor, (B, H, W, C) or (N, C) + unc: Tensor (optional), (B, H, W, C) or (N, C) + gt: Tensor, (B, H, W, C) or (N, C) + """ + self.iou(**out_dict['bev'])
+ +
[docs] def iou(self, conf, gt, unc=None): + """ + Compare the thresholded pred BEV map with the full gt BEV map (including non + observable area) + """ + if unc is None: + pred = conf[..., 1] > 0.5 + mi = torch.logical_and(pred, gt).sum() + mu = torch.logical_or(pred, gt).sum() + self.iou_sum += mi / mu + self.iou_cnt += 1 + else: + pos_mask = conf[..., 1] > 0.5 + pos_mask = torch.logical_and(pos_mask, unc < 1.0) + mi = torch.logical_and(pos_mask, gt).sum() + mu = torch.logical_or(pos_mask, gt).sum() + + self.iou_sum += mi.item() / mu.item() + self.iou_cnt += 1
+ + # import matplotlib.pyplot as plt + # plt.imshow(conf[0, ..., 1].cpu().numpy()) + # plt.show() + # plt.close() + # plt.imshow(gt[0].cpu().numpy()) + # plt.show() + # plt.close() + +
[docs] def summary(self): + iou_mean = self.iou_sum / self.iou_cnt * 100 + + self.summary_hook() + + self.result = { + 'BEV.iou': iou_mean + } + ss = self.format_str(self.result) + print(ss) + self.logger.write(ss)
+ +
[docs] def summary_hook(self): + pass
+ +
[docs] def format_str(self, result_dict): + ss = "==================================================================================\n" + for k, vs in result_dict.items(): + s1 = f"{k:20s} : " + if isinstance(vs, float): + s2 = f"{vs:4.1f} \n" + else: + s2 = " ".join([f"{v:4.1f} " for v in vs]) + "\n" + ss += s1 + s2 + return ss
+ + + +
[docs]class MetricMOT(Metric): + def __init__(self, cfg, log_dir): + super().__init__(cfg, log_dir) + +
[docs] def add_samples(self, data_dict): + pass
+ + + + +
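A standalone sketch of the thresholded BEV IoU update performed by MetricBev.iou, using a synthetic (H, W, 2) confidence map and a binary ground-truth mask:

import torch

conf = torch.rand(128, 128, 2).softmax(dim=-1)   # per-cell class confidences
gt = torch.randint(0, 2, (128, 128)).bool()      # binary occupancy ground truth
pred = conf[..., 1] > 0.5                        # threshold the positive channel
mi = torch.logical_and(pred, gt).sum()           # intersection
mu = torch.logical_or(pred, gt).sum()            # union
iou = (mi / mu).item() * 100                     # IoU in percent, as reported by summary()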
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/misc.html b/docs/_build/html/_modules/cosense3d/utils/misc.html new file mode 100644 index 00000000..3560ed7c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/misc.html @@ -0,0 +1,324 @@ + + + + + + cosense3d.utils.misc — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.misc

+import os
+import json
+import logging
+import re
+from functools import partial
+
+import yaml
+import torch
+import numpy as np
+from rich.logging import RichHandler
+
+PI = 3.14159265358979323846
+
+
+def multi_apply(func, *args, **kwargs):
+    """Apply function to a list of arguments.
+
+    Note:
+        This function applies the ``func`` to multiple inputs and
+        map the multiple outputs of the ``func`` into different
+        list. Each list contains the same type of outputs corresponding
+        to different inputs.
+
+    Args:
+        func (Function): A function that will be applied to a list of
+            arguments
+
+    Returns:
+        tuple(list): A tuple containing multiple list, each list contains \
+            a kind of returned results by the function
+    """
+    pfunc = partial(func, **kwargs) if kwargs else func
+    map_results = map(pfunc, *args)
+    return tuple(map(list, zip(*map_results)))
+
+
+
[docs]def setup_logger(exp_name, debug):
+    from importlib import reload
+
+    reload(logging)
+    # Reload the logging module so that the basicConfig call below can re-apply
+    # the handler configuration even if logging was already configured elsewhere.
+
+    CUDA_TAG = os.environ.get("CUDA_VISIBLE_DEVICES", "0")
+    EXP_TAG = exp_name
+
+    logger_config = dict(
+        level=logging.DEBUG if debug else logging.INFO,
+        format=f"{CUDA_TAG}:[{EXP_TAG}] %(message)s",
+        handlers=[RichHandler()],
+        datefmt="[%X]",
+    )
+    logging.basicConfig(**logger_config)
+ + +
[docs]def update_dict(dict_out, dict_add):
+    """
+    Recursively merge dict_add into dict_out.
+    Existing values in dict_out are overwritten by those in dict_add.
+
+    Parameters
+    ----------
+    dict_out: dict
+    dict_add: dict
+
+    Returns
+    -------
+    dict_out: dict
+        The updated dict_out
+    """
+    for add_key, add_content in dict_add.items():
+        if add_key not in dict_out or not isinstance(add_content, dict):
+            dict_out[add_key] = add_content
+        else:
+            update_dict(dict_out[add_key], add_content)
+
+    return dict_out
+ + +
[docs]def load_json(filename): + with open(filename, 'r') as fh: + data = json.load(fh) + return data
+ + +
[docs]def save_json(data, filename): + with open(filename, 'w') as fh: + json.dump(data, fh, indent=3)
+ + +
[docs]def load_yaml(filename, cloader=False): + """ + Load yaml file into dictionary. + + Parameters + ---------- + filename : str + Full path of yaml file. + + Returns + ------- + params : dict + A dictionary that contains defined parameters. + """ + with open(filename, 'r') as stream: + if cloader: + loader = yaml.CLoader + else: + loader = yaml.Loader + loader.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(u'''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + params = yaml.load(stream, Loader=loader) + return params
+ + +
[docs]def save_yaml(data, filename, cdumper=False): + with open(filename, 'w') as fid: + if cdumper: + yaml.dump(data, fid, Dumper=yaml.CDumper, + default_flow_style=False) + else: + yaml.dump(data, fid, default_flow_style=False)
+ + +
[docs]def ensure_dir(path): + if not os.path.exists(path): + os.makedirs(path, mode=0o777, exist_ok=True)
+ + +
[docs]def list_dirs(path): + return sorted([x for x in os.listdir(path) if + os.path.isdir(os.path.join(path, x))])
+ + +# @gin.configurable +# def logged_hparams(keys): +# C = dict() +# for k in keys: +# C[k] = gin.query_parameter(f"{k}") +# return C + + +
[docs]def load_from_pl_state_dict(model, pl_state_dict): + state_dict = {} + for k, v in pl_state_dict.items(): + state_dict[k[6:]] = v + model.load_state_dict(state_dict) + return model
+ + +
[docs]def pad_list_to_array_np(data):
+    """
+    Pad a list of numpy arrays into one single numpy array.
+    :param data: list of np.ndarray
+    :return: np.ndarray
+    """
+    B = len(data)
+    cnt = [len(d) for d in data]
+    max_cnt = max(cnt)
+    out = np.zeros((B, max_cnt, *data[0].shape[1:]))
+    for b in range(B):
+        out[b, :cnt[b]] = data[b]
+    return out
+ + +
[docs]def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False
+ + +
[docs]def multi_apply(func, *args, **kwargs): + """Apply function to a list of arguments. + + Note: + This function applies the ``func`` to multiple inputs and + map the multiple outputs of the ``func`` into different + list. Each list contains the same type of outputs corresponding + to different inputs. + + Args: + func (Function): A function that will be applied to a list of + arguments + + Returns: + tuple(list): A tuple containing multiple list, each list contains \ + a kind of returned results by the function + """ + pfunc = partial(func, **kwargs) if kwargs else func + map_results = list(map(pfunc, *args)) + if isinstance(map_results[0], tuple): + return tuple(map(list, zip(*map_results))) + else: + return map_results
+ + +
[docs]def torch_tensor_to_numpy(torch_tensor): + """ + Convert a torch tensor to numpy. + + Parameters + ---------- + torch_tensor : torch.Tensor + + Returns + ------- + A numpy array. + """ + return torch_tensor.numpy() if not torch_tensor.is_cuda else \ + torch_tensor.cpu().detach().numpy()
+
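A minimal sketch of multi_apply defined above; the helper function and its inputs are illustrative:

from cosense3d.utils.misc import multi_apply

def box_stats(w, h):
    return w * h, w / h            # area, aspect ratio

areas, ratios = multi_apply(box_stats, [2.0, 4.0], [1.0, 2.0])
# areas == [2.0, 8.0], ratios == [2.0, 2.0]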
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/module_utils.html b/docs/_build/html/_modules/cosense3d/utils/module_utils.html new file mode 100644 index 00000000..cee482cc --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/module_utils.html @@ -0,0 +1,184 @@ + + + + + + cosense3d.utils.module_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.module_utils

+import copy
+import warnings
+from importlib import import_module
+from packaging.version import parse
+from torch import nn
+
+
+
[docs]def build_norm_layer(cfgs, shape): + if cfgs['type'] == 'LN': + _cfgs = copy.copy(cfgs) + _cfgs.pop('type') + norm = nn.LayerNorm(shape, **_cfgs) + else: + raise NotImplementedError + return norm
+ + +
[docs]def build_dropout(cfgs): + if cfgs['type'] == 'Dropout': + dropout = nn.Dropout(cfgs['drop_prob']) + else: + raise NotImplementedError + return dropout
+ + +
[docs]def get_target_module(target): + module, cls_name = target.rsplit('.', 1) + module = import_module(module) + cls_obj = getattr(module, cls_name) + return cls_obj
+ + +
[docs]def instantiate_target_module(target, cfg=None, **kwargs): + if cfg is not None: + return get_target_module(target)(cfg) + else: + return get_target_module(target)(**kwargs)
+ + +
[docs]def digit_version(version_str: str, length: int = 4): + """Convert a version string into a tuple of integers. + + This method is usually used for comparing two versions. For pre-release + versions: alpha < beta < rc. + + Args: + version_str (str): The version string. + length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int]: The version info in digits (integers). + """ + assert 'parrots' not in version_str + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) # type: ignore + else: + release.extend([0, 0]) + return tuple(release)
+
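A minimal sketch of the dynamic-import and version helpers above; the dotted target path points at load_json from the misc module shown earlier:

from cosense3d.utils.module_utils import get_target_module, digit_version

load_json = get_target_module('cosense3d.utils.misc.load_json')   # resolves to the function object
assert digit_version('1.10.0') > digit_version('1.9.1rc1')        # pre-releases sort below releases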
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/pclib.html b/docs/_build/html/_modules/cosense3d/utils/pclib.html new file mode 100644 index 00000000..a7521ade --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/pclib.html @@ -0,0 +1,608 @@ + + + + + + cosense3d.utils.pclib — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.pclib

+import os
+import shutil
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+import open3d as o3d
+from plyfile import PlyData, PlyElement
+from scipy.spatial.transform import Rotation as R
+
+from cosense3d.utils.misc import check_numpy_to_torch
+from cosense3d.utils.pcdio import point_cloud_from_path
+
+ply_fields = {'x': 'f4', 'y': 'f4', 'z': 'f4', 'ObjIdx': 'u4', 'ObjTag': 'u4', 'ring': 'u1', 'time': 'f4'}
+np_types = {'f4': np.float32, 'u4': np.uint32, 'u1': np.uint8}
+
+
+
+
+
+
[docs]def pose_to_transformation(pose): + """ + + :param pose: list, [x, y, z, roll, pitch, yaw] + + :return: + transformation: np.ndarray, (4, 4) + """ + transformation = np.eye(4) + r = R.from_euler('xyz', pose[3:]).as_matrix() + transformation[:3, :3] = r + transformation[:3, 3] = np.array(pose[:3]) + return transformation
+ + +
[docs]def read_ply(filename): + ply = PlyData.read(filename) + data = ply['vertex'] + properties = [prop.name for prop in data.properties] + property_types = [prop.val_dtype for prop in data.properties] + + return {name: np.array(data[name]) for name in properties}, property_types
+ + +
[docs]def save_cosense_ply(data, output_file_name): + data = { + 'x': data['x'].astype(np_types[ply_fields['x']]), + 'y': data['y'].astype(np_types[ply_fields['y']]), + 'z': data['z'].astype(np_types[ply_fields['z']]), + 'ObjIdx': data['ObjIdx'].astype(np_types[ply_fields['ObjIdx']]), + 'ObjTag': data['ObjTag'].astype(np_types[ply_fields['ObjTag']]), + 'ring': data['ring'].astype(np_types[ply_fields['ring']]), + 'time': data['time'].astype(np_types[ply_fields['time']]) + } + vertex_data = list(zip(*[data[k] for k, v in ply_fields.items()])) + vertex_type = [(k, v) for k, v in ply_fields.items()] + vertex = np.array(vertex_data, dtype=vertex_type) + el = PlyElement.describe(vertex, 'vertex') + PlyData([el]).write(output_file_name)
+ + +
[docs]def lidar_ply2bin(ply_file, bin_file,
+                  fields=['x', 'y', 'z', 'intensity'],
+                  replace=False):
+    """
+    Read a ply file and save it to the cosense3d binary format.
+
+    :param ply_file: str, input file name
+    :param bin_file: str, output file name
+    :param fields: list of str, names that indicate 'x', 'y', 'z' and 'intensity'
+    :param replace: replace the existing file if True
+    """
+    if not replace and os.path.exists(bin_file):
+        return
+    pointcloud, property_types = read_ply(ply_file)
+    pcd_out = np.stack([pointcloud[k] for k in fields], axis=1)
+    pcd_out.tofile(bin_file)
+ + +
[docs]def lidar_bin2pcd_o3d(bin_file, out_file, replace=False): + if not replace and os.path.exists(out_file): + return + bin_pcd = np.fromfile(bin_file, dtype=np.float32) + + # reshape + points = bin_pcd.reshape(-1, 4) + # remove nan points + mask = np.logical_not(np.isnan(points[:, :3]).any(axis=1)) + points = points[mask] + + o3d_pcd = o3d.geometry.PointCloud() + o3d_pcd.points = o3d.utility.Vector3dVector(points[:, :-1]) + + point_intensity = np.zeros_like(points[:, :-1]) + point_intensity[:, 0] = points[:, -1] / 255. + o3d_pcd.colors = o3d.utility.Vector3dVector(point_intensity) + + # write to pcd file + o3d.io.write_point_cloud(out_file, + pointcloud=o3d_pcd, + write_ascii=True)
+ + +
[docs]def lidar_bin2pcd(bin_file, out_file, replace=False): + if not replace and os.path.exists(out_file): + return + bin_pcd = np.fromfile(bin_file, dtype=np.float32) + # reshape + points = bin_pcd.reshape(-1, 4) + points[:, 3] /= 255 + mask = np.logical_not(np.isnan(points[:, :3]).any(axis=1)) + points = points[mask] + header_str = header(points) + with open(out_file, 'w') as fh: + # fh.write() + np.savetxt(fh, points, fmt='%f', header=header_str)
+ # shutil.copy(out_file.replace('pcd', 'txt'), out_file) + + +
[docs]def lidar_bin2bin(bin_file, out_file): + shutil.copy(bin_file, out_file)
+ + +
[docs]def load_pcd(pcd_file: str, return_o3d: bool=False): + """ + Read pcd and return numpy array. + + :param pcd_file: The pcd file that contains the point cloud. + :param return_o3d: Default returns numpy array, set True to return pcd as o3d PointCloud object + + :return: lidar_dict, + xyz: (pcd_np | pcd : np.ndarray | o3d.geometry.PointCloud) the lidar xyz coordinates in numpy format, shape:(n, 3); + intensity: (optional) np.ndarray, (n,). + label: (optional) np.ndarray, (n,). + time: (optional) np.ndarray, (n,). + ray: (optional) np.ndarray, (n,). + """ + lidar_dict = {} + ext = os.path.splitext(pcd_file)[-1] + if ext == '.pcd': + if return_o3d: + return o3d.io.read_point_cloud(pcd_file) + else: + pcd = point_cloud_from_path(pcd_file) + lidar_dict['xyz'] = np.stack([pcd.pc_data[x] for x in 'xyz'], axis=-1).astype(float) + # we save the intensity in the first channel + if 'intensity' in pcd.fields: + lidar_dict['intensity'] = pcd.pc_data['intensity'] + if 'timestamp' in pcd.fields: + lidar_dict['time'] = pcd.pc_data['timestamp'] + + elif ext == '.bin': + pcd_np = np.fromfile(pcd_file, dtype=np.float32).reshape(-1, 4) + if return_o3d: + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(pcd_np) + return pcd + lidar_dict['xyz'] = pcd_np[:, :3] + # check attribute of last column, + # num of unique labels for the datasets in this projects is less than 50, + # unique intensities is normally larger then 50 + if len(np.unique(pcd_np[:, -1])) < 50: + lidar_dict['label'] = pcd_np[:, -1] + elif pcd_np[:, -1].max() > 1: + lidar_dict['intensity'] = pcd_np[:, -1] / 255 + else: + lidar_dict['intensity'] = pcd_np[:, -1] + + elif ext == '.ply': + data = read_ply(pcd_file)[0] + xyz = np.stack([data.pop(x) for x in 'xyz'], axis=1) + lidar_dict['xyz'] = xyz + lidar_dict.update(data) + else: + raise NotImplementedError + + return lidar_dict
+ + +
[docs]def tf2pose(tf_matrix): + euler = R.from_matrix(tf_matrix[:3, :3]).as_euler('xyz') + translation = tf_matrix[:3, 3] + return translation.tolist() + euler.tolist()
+ + +
[docs]def pose2tf(pose): + tf_matrix = np.eye(4) + tf_matrix[:3, :3] = rotation_matrix(pose[3:]) + tf_matrix[:3, 3] = np.array(pose[:3]) + return tf_matrix
+ + +
[docs]def rotation_matrix(euler, degrees=True): + """ + Construct rotation matrix with the given pose. + + :param euler: list or np.ndarray + [roll, pitch, yaw] + :return: rot: np.ndarray, 3x3 + rotation matrix + """ + return R.from_euler('xyz', euler, degrees=degrees).as_matrix()
+ + +
[docs]def rotate3d(points, euler): + """ + Rotate point cloud with the euler angles given in pose. + + :param points: np.ndarray, N x (3 + C) + each point in the row has the format [x, y, z, ...] + :param euler: list or np.ndarray + [roll, pitch, yaw] + + :return: points: np.ndarray + rotated point cloud + """ + assert len(euler) == 3 + rot = rotation_matrix(euler) + points[:, :3] = (rot @ points[:, :3].T).T + return points
+ + +
[docs]def cart2cyl(input_xyz): + rho = np.sqrt(input_xyz[..., 0] ** 2 + input_xyz[..., 1] ** 2) + phi = np.arctan2(input_xyz[..., 1], input_xyz[..., 0]) + return np.concatenate((rho.reshape(-1, 1), phi.reshape(-1, 1), input_xyz[..., 2:]), axis=-1)
+ + +
[docs]def cyl2cart(input_xyz_polar): + x = input_xyz_polar[..., 0] * np.cos(input_xyz_polar[..., 1]) + y = input_xyz_polar[..., 0] * np.sin(input_xyz_polar[..., 1]) + return np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), input_xyz_polar[..., 2:]), axis=-1)
+ + +
[docs]def mat_yaw(cosa, sina, zeros=0, ones=1): + return [ + cosa, -sina, zeros, + sina, cosa, zeros, + zeros, zeros, ones + ]
+ + +
[docs]def mat_pitch(cosa, sina, zeros=0, ones=1): + return [ + cosa, zeros, sina, + zeros, ones, zeros, + -sina, zeros, cosa, + ]
+ + +
[docs]def mat_roll(cosa, sina, zeros=0, ones=1): + return [ + ones, zeros, zeros, + zeros, cosa, -sina, + zeros, sina, cosa, + ]
+ + +
[docs]def rotate_points_along_z_np(points, angle):
+    """
+    :param points: (N, 3 + C or 2 + C)
+    :param angle: float, angle along z-axis, angle increases x ==> y
+    """
+    cosa = np.cos(angle)
+    sina = np.sin(angle)
+    rot_matrix = np.array([
+        [cosa, sina, 0],
+        [-sina, cosa, 0],
+        [0, 0, 1]
+    ]).astype(np.float64)
+    if points.shape[1] == 2:
+        points_rot = np.matmul(points, rot_matrix[:2, :2])
+    elif points.shape[1] > 2:
+        points_rot = np.matmul(points[:, 0:3], rot_matrix)
+        points_rot = np.concatenate((points_rot, points[:, 3:]), axis=-1)
+    else:
+        raise IOError('Input points should have the shape: (N, 3 + C or 2 + C).')
+    return points_rot
+ + +
[docs]def rotate_points_batch(points, angles, order='xyz'): + """ + :param points: (B, N, 3 + C) + :param angles: (B, 1|3), radians + rotation = R(3)R(2)R(1) if angles shape in (B, 3) + :return: points_rot: (B, N, 3 + C) + """ + assert angles.shape[1] == len(order), \ + "angles should has the shape (len(points), len(order))." + + points, is_numpy = check_numpy_to_torch(points) + angles, _ = check_numpy_to_torch(angles) + + cosas = torch.cos(angles) + sinas = torch.sin(angles) + zeros = angles[:, 0].new_zeros(points.shape[0]) + ones = angles[:, 0].new_ones(points.shape[0]) + rot_matrix = torch.eye(3, dtype=points.dtype, device=points.device) + rot_matrix = rot_matrix.reshape((1, 3, 3)).repeat(angles.shape[0], 1, 1) + for cosa, sina, ax in zip(cosas.T, sinas.T, order): + if ax == 'z': + rot = torch.stack(mat_yaw( + cosa, sina, zeros, ones + ), dim=1).view(-1, 3, 3).float() + elif ax == 'y': + rot = torch.stack(mat_pitch( + cosa, sina, zeros, ones + ), dim=1).view(-1, 3, 3).float() + elif ax == 'x': + rot = torch.stack(mat_roll( + cosa, sina, zeros, ones + ), dim=1).view(-1, 3, 3).float() + else: + raise NotImplementedError + rot_matrix = torch.bmm(rot, rot_matrix) + points_rot = torch.bmm(rot_matrix, points[:, :, 0:3].float(). + permute(0, 2, 1)).permute(0, 2, 1) + points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1) + return points_rot.numpy() if is_numpy else points_rot
+ + +
[docs]def rotate_points_along_z_torch(points, angle): + """ + :param points: (N, 2 + C) or (B, 2 + C) + :param angle: float or tensor of shape (B), angle along z-axis, angle increases x ==> y + + """ + if len(points.shape) == 2: + points = points.unsqueeze(0) + if isinstance(angle, float): + angle = torch.tensor([angle], device=points.device) + else: + assert isinstance(angle, torch.Tensor) + assert points.shape[0] == 1 or angle.shape[0] == points.shape[0] + cosa = torch.cos(angle) + sina = torch.sin(angle) + rot_matrix = torch.stack([ + torch.stack([cosa, sina], dim=-1), + torch.stack([-sina, cosa], dim=-1) + ], dim=1).float().to(points.device) + if points.shape[0] == 1 and angle.shape[0] > 1: + points = torch.tile(points, (len(rot_matrix), 1, 1)) + points_rot = torch.bmm(points[..., 0:2], rot_matrix) + points_rot = torch.cat((points_rot, points[..., 2:]), dim=-1) + return points_rot
+ + +
[docs]def rotate_points_with_tf_np(points: np.ndarray, tf_np: np.ndarray) -> np.ndarray: + """ + Rotate points with transformation matrix. + + :param points (np.ndarray): Nx3 points array + :param tf_np (np.ndarray): 4x4 transformation matrix + :return: points (np.ndarray): Nx3 points array + """ + points_homo = np.concatenate([points, np.ones_like(points[:, :1])], axis=-1).T + points = (tf_np @ points_homo)[:3].T + return points
+ + +
[docs]def rotate_box_corners_with_tf_np(corners: np.ndarray, tf_np: np.ndarray) -> np.ndarray: + """ + Rotate points with transformation matrix + :param corners: Nx8X3 points array + :param tf_np: 4x4 transformation matrix + :return: corners, Nx8X3 points array + """ + points = rotate_points_with_tf_np(corners.reshape(-1, 3), tf_np) + corners = points.reshape(corners.shape) + return corners
+ + +
[docs]def mask_values_in_range(values, min, max): + return np.logical_and(values>min, values<max)
+ + +
[docs]def mask_points_in_box(points, pc_range): + n_ranges = len(pc_range) // 2 + list_mask = [mask_values_in_range(points[:, i], pc_range[i], + pc_range[i+n_ranges]) for i in range(n_ranges)] + return np.array(list_mask).all(axis=0)
+ + +
[docs]def mask_points_in_range(points: np.array, dist: float) -> np.array: + """ + :rtype: np.array + """ + return np.linalg.norm(points[:, :2], axis=1) < dist
+ + +
[docs]def get_tf_matrix_torch(vectors, inv=False): + device = vectors.device + n, _ = vectors.shape + xs = vectors[:, 0] + ys = vectors[:, 1] + angles = vectors[:, 2] + cosa = torch.cos(angles) + sina = torch.sin(angles) + ones = torch.ones_like(angles) + zeros = torch.zeros_like(angles) + rot_matrix = torch.zeros((n, 3, 3), device=device, requires_grad=True) + rot_matrix[:, 0, 0] = cosa + rot_matrix[:, 0, 1] = -sina + rot_matrix[:, 1, 0] = sina + rot_matrix[:, 1, 1] = cosa + shift_matrix = torch.zeros_like(rot_matrix, requires_grad=True) + shift_matrix[:, 0, 1] = xs + shift_matrix[:, 1, 0] = ys + shift_matrix[:, [0, 1, 2], [0, 1, 2]] = 1.0 + if inv: + mat = torch.einsum('...ij, ...jk', rot_matrix, shift_matrix) + else: + mat = torch.einsum('...ij, ...jk', shift_matrix, rot_matrix) + return mat, rot_matrix, shift_matrix
+ + +
[docs]def rotation_mat2euler_torch(mat): + sy = torch.norm(mat[:, :2, 0], dim=1) + singular = sy < 1e-6 + not_singular = torch.logical_not(singular) + euler = torch.zeros_like(mat[:, 0]) + + if not_singular.sum() > 0: + euler[not_singular, 0] = torch.atan2(mat[not_singular, 2, 1], mat[not_singular, 2, 2]) + euler[not_singular, 1] = torch.atan2(-mat[not_singular, 2, 0], sy) + euler[not_singular, 2] = torch.atan2(mat[not_singular, 1, 0], mat[not_singular, 0, 0]) + if singular.sum() > 0: + euler[singular, 0] = torch.atan2(-mat[singular, 1, 2], mat[singular, 1, 1]) + euler[singular, 1] = torch.atan2(-mat[singular, 2, 0], sy) + + return euler
+ + +
[docs]def pose_err_global2relative_torch(poses, errs):
+    """
+    Calculate the relative pose transformation based on erroneous global positioning.
+    :param poses: Nx2 or Nx3, the first row is the ego pose, the other rows are the coop poses
+    :param errs: Nx3, the first row is the ego pose error, the other rows are the coop pose errors
+    :return: (N-1)x3, relative localization errors between the ego and coop vehicles
+    """
+    if poses.shape[-1] == 2:
+        poses = torch.cat([poses, torch.zeros_like(poses[:, 0:1])], dim=-1)
+    poses_err = poses + errs
+
+    R01, _, _ = get_tf_matrix_torch(-poses[:1], inv=True)
+    R10_hat, _, _ = get_tf_matrix_torch(poses_err[:1])
+    R20, _, _ = get_tf_matrix_torch(poses[1:])
+    R02_hat, _, _ = get_tf_matrix_torch(-poses_err[1:], inv=True)
+
+    delta_R21 = torch.einsum('...ij, ...jk', R01, R20)
+    delta_R21 = torch.einsum('...ij, ...jk', delta_R21, R02_hat)
+    delta_R21 = torch.einsum('...ij, ...jk', delta_R21, R10_hat)
+
+    x = delta_R21[0, 2]
+    y = delta_R21[1, 2]
+    theta = torch.atan2(delta_R21[1, 0], delta_R21[0, 0])
+    return torch.stack([x, y, theta], dim=-1)
+ + +
[docs]def project_points_by_matrix_torch(points, transformation_matrix): + """ + Project the points to another coordinate system based on the + transformation matrix. + + :param points: torch.Tensor, 3D points, (N, 3) + :param transformation_matrix: torch.Tensor, Transformation matrix, (4, 4) + :return: projected_points : torch.Tensor, The projected points, (N, 3) + """ + points, is_numpy = \ + check_numpy_to_torch(points) + transformation_matrix, _ = \ + check_numpy_to_torch(transformation_matrix) + + # convert to homogeneous coordinates via padding 1 at the last dimension. + # (N, 4) + points_homogeneous = F.pad(points, (0, 1), mode="constant", value=1) + # (N, 4) + projected_points = torch.einsum("ik, jk->ij", points_homogeneous, + transformation_matrix) + + return projected_points[:, :3] if not is_numpy \ + else projected_points[:, :3].numpy()
+ +if __name__=="__main__": + for i in range(0, 300): + frame = f"{i:06d}" + ply_file = f"/koko/LUMPI/train/measurement5/lidar/{frame}.ply" + bin_file = f"/media/hdd/projects/TAL/data/lumpi_m5/lidar0/{frame}.bin" + lidar_ply2bin(ply_file, bin_file) +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/tensor_utils.html b/docs/_build/html/_modules/cosense3d/utils/tensor_utils.html new file mode 100644 index 00000000..81d5eef2 --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/tensor_utils.html @@ -0,0 +1,132 @@ + + + + + + cosense3d.utils.tensor_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.tensor_utils

+import torch
+import numpy as np
+
+
+
[docs]def pad_list_to_array_torch(data): + """ + Pad list of numpy data to one single numpy array + :param data: list of np.ndarray + :return: np.ndarray + """ + B = len(data) + cnt = [len(d) for d in data] + max_cnt = max(cnt) + out = torch.zeros((B, max_cnt,) + tuple(data[0].shape[1:]), + device=data[0].device, dtype=data[0].dtype) + for b in range(B): + out[b, :cnt[b]] = data[b] + return out
+ + +
[docs]def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False
+ + +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/train_utils.html b/docs/_build/html/_modules/cosense3d/utils/train_utils.html new file mode 100644 index 00000000..04fc3b1c --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/train_utils.html @@ -0,0 +1,234 @@ + + + + + + cosense3d.utils.train_utils — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.train_utils

+import copy
+import random
+import warnings
+
+import numpy as np
+import torch
+from torch.nn.utils.clip_grad import clip_grad_norm_
+
+
+
[docs]def get_gpu_architecture(): + if torch.cuda.is_available(): + device = torch.device("cuda") + gpu_props = torch.cuda.get_device_properties(device) + return gpu_props.major * 10 + gpu_props.minor + else: + return 0
+ + +
[docs]def seed_everything(seed): + torch.manual_seed(seed) + random.seed(seed) + np.random.seed(seed)
+ + +
[docs]def build_optimizer(model, cfg): + # construct an optimizer + params = [p for p in model.parameters() if p.requires_grad] + optimizer = torch.optim.AdamW(params, lr=cfg['lr'], + weight_decay=cfg['weight_decay'], + betas=tuple(cfg['betas'])) + + return optimizer
+ + +
[docs]def build_lr_scheduler(optimizer, cfg, steps_per_epoch): + cfg_ = copy.copy(cfg) + policy = cfg_.pop('policy', 'MultiStepLR') + if policy == 'MultiStepLR': + # construct a learning rate scheduler + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, + milestones=cfg['milestones'], + gamma=cfg['gamma']) + elif policy == 'CosineAnnealingWarm': + from timm.scheduler.cosine_lr import CosineLRScheduler + num_steps = cfg['epochs'] * steps_per_epoch + warmup_lr = cfg['warmup_lr'] + warmup_steps = cfg['warmup_epochs'] * steps_per_epoch + lr_min = cfg['lr_min'] + + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_steps, + lr_min=lr_min, + warmup_lr_init=warmup_lr, + warmup_t=warmup_steps, + cycle_limit=1, + t_in_epochs=False, + ) + else: + raise NotImplementedError + + return lr_scheduler
+ + +
[docs]def is_tensor_to_cuda(data, device=0): + if isinstance(data, dict): + for k, v in data.items(): + data[k] = is_tensor_to_cuda(v, device) + return data + elif isinstance(data, torch.Tensor): + return data.to(device) + elif isinstance(data, list) or isinstance(data, tuple): + data_t = [] + for i in range(len(data)): + data_t.append(is_tensor_to_cuda(data[i], device)) + return data_t + else: + return data
+ + +
[docs]def load_tensors_to_gpu(batch_dict, device=0): + """ + Load all tensors in batch_dict to gpu + """ + + for k, v in batch_dict.items(): + batch_dict[k] = is_tensor_to_cuda(v, device=device)
+ + +
[docs]def load_model_dict(model, pretrained_dict):
+    try:
+        model.load_state_dict(pretrained_dict)
+    except Exception:
+        UnmatchedParams = ""
+        # 1. filter out unnecessary keys
+        model_dict = model.state_dict()
+        matched_dict = {}
+
+        pretrained_keys = list()
+        for k, v in pretrained_dict.items():
+            if 'module' in k:
+                k = k.replace('module.', '')
+            if k in model_dict and v.shape == model_dict[k].shape:
+                matched_dict[k] = v
+            elif k in model_dict:
+                # key exists in the model but the parameter shapes do not match
+                UnmatchedParams += f"{k} : Unmatched shape ({v.shape} -> {model_dict[k].shape})\n"
+            else:
+                UnmatchedParams += f"{k} : Pretrained parameters not in model dict\n"
+            pretrained_keys.append(k)
+        for k in set(model_dict.keys()) - set(pretrained_keys):
+            UnmatchedParams += f"{k} : Model parameters not in pretrained dict\n"
+        if len(UnmatchedParams) > 0:
+            warnings.warn("Model state dict does not match pretrained state dict. "
+                          "Unmatched parameters are:\n" + UnmatchedParams)
+        # 2. overwrite entries in the existing state dict
+        model_dict.update(matched_dict)
+        # 3. load the new state dict
+        model.load_state_dict(model_dict)
+    return model
+ + +
[docs]def clip_grads(params, max_norm=35, norm_type=2): + params = list( + filter(lambda p: p.requires_grad and p.grad is not None, params)) + if len(params) > 0: + total_norm = clip_grad_norm_(params, max_norm=max_norm, norm_type=norm_type) + return total_norm
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/cosense3d/utils/vislib.html b/docs/_build/html/_modules/cosense3d/utils/vislib.html new file mode 100644 index 00000000..2153648a --- /dev/null +++ b/docs/_build/html/_modules/cosense3d/utils/vislib.html @@ -0,0 +1,704 @@ + + + + + + cosense3d.utils.vislib — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for cosense3d.utils.vislib

+import random
+import sys
+import os
+import time
+
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+import open3d as o3d
+
+from cosense3d.utils import pclib
+from cosense3d.utils.box_utils import corners_to_boxes_3d, boxes_to_corners_3d
+from matplotlib.patches import Polygon
+
+
+COLOR_PALETTES = {
+    'pastels_rock': {
+        'DesertSand': [238, 185, 161],
+        'DeepChampagne': [241, 213, 170],
+        'Champagne': [242, 237, 207],
+        'JetStream': [186, 224, 195],
+        'LightPeriwinkle':[190, 198, 225],
+    },
+    'calm_afternoon': {
+        'MiddleBlueGreen': [137, 204, 202],
+        'Khaki': [245, 222, 145],
+        'MacaroniAndCheese': [245, 193, 129],
+        'Middle Red': [232, 132, 107],
+        'Rose Gold': [189, 93, 115],
+        'Rackley': [101, 135, 168],
+    },
+    'objects': {
+        'vehicle': [0, 0, 142],
+        'cyclist': [200, 100, 0],
+        'pedestrian': [220, 20, 60],
+        'truck': [0, 0, 0],
+        'motorcycle': [100, 200, 0],
+        'bus': [100, 100, 0]
+    },
+    'random': {i: [random.randint(0, 255),
+                   random.randint(0, 255),
+                   random.randint(0, 255)] for i in range(20)}
+}
+
+
+
[docs]def get_palette_colors(palette): + return np.array( + list(COLOR_PALETTES[palette].values()) + ) / 255
+ + +
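For illustration, the palette helper can be used like this; the index order follows the dictionary insertion order defined above:

```python
colors = get_palette_colors('objects')  # (6, 3) array of RGB values scaled to [0, 1]
vehicle_color = colors[0]               # 'vehicle' is the first entry of the 'objects' palette
```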
[docs]def visualization(func_list, batch_data): + for func_str in func_list: + getattr(sys.modules[__name__], func_str)(batch_data)
+ + +
[docs]def draw_box_plt(boxes_dec, ax, color=None, linewidth_scale=2.0, linestyle='solid'): + """ + draw boxes in a given plt ax + :param boxes_dec: (N, 5) or (N, 7) in metric + :param ax: + :return: ax with drawn boxes + """ + if not len(boxes_dec)>0: + return ax + boxes_np= boxes_dec + if isinstance(boxes_np, torch.Tensor): + boxes_np = boxes_np.cpu().detach().numpy() + elif isinstance(boxes_np, list): + boxes_np = np.array(boxes_np) + if boxes_np.shape[-1]>5: + boxes_np = boxes_np[:, [0, 1, 3, 4, 6]] + x = boxes_np[:, 0] + y = boxes_np[:, 1] + dx = boxes_np[:, 2] + dy = boxes_np[:, 3] + + x1 = x - dx / 2 + y1 = y - dy / 2 + x2 = x + dx / 2 + y2 = y + dy / 2 + theta = boxes_np[:, 4:5] + # bl, fl, fr, br + corners = np.array([[x1, y1],[x1,y2], [x2,y2], [x2, y1]]).transpose(2, 0, 1) + new_x = (corners[:, :, 0] - x[:, None]) * np.cos(theta) + (corners[:, :, 1] + - y[:, None]) * (-np.sin(theta)) + x[:, None] + new_y = (corners[:, :, 0] - x[:, None]) * np.sin(theta) + (corners[:, :, 1] + - y[:, None]) * (np.cos(theta)) + y[:, None] + corners = np.stack([new_x, new_y], axis=2) + for corner in corners: + ax.plot(corner[[0,1,2,3,0], 0], corner[[0,1,2,3,0], 1], color=color, + linewidth=linewidth_scale, linestyle=linestyle) + # draw direction + # front = corner[[2, 3]].mean(axis=0) + # center = corner.mean(axis=0) + # ax.plot([front[0], center[0]], [front[1], center[1]], color=color, + # linewidth=linewidth_scale) + ax.plot(corner[[2, 3], 0], corner[[2, 3], 1], color=color, linewidth=1.5*linewidth_scale) + return ax
+ + +
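An illustrative call that draws BEV boxes onto an existing matplotlib axis; the box values are arbitrary:

```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(6, 6))
ax.set_aspect('equal', 'box')
boxes = np.array([[0.0, 0.0, -1.0, 4.0, 2.0, 1.5, 0.3]])  # (N, 7): x, y, z, l, w, h, yaw; only x, y, l, w, yaw are drawn
ax = draw_box_plt(boxes, ax, color='green')
plt.show()
```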
[docs]def draw_points_boxes_plt(pc_range=None, points=None, boxes_pred=None, boxes_gt=None, wandb_name=None,
                          points_c='gray', bbox_gt_c='green', bbox_pred_c='red', linewidth_scale=0.75,
                          bbox_pred_label=None, bbox_gt_label=None,
                          return_ax=False, ax=None, marker_size=2.0, filename=None):
    if pc_range is not None:
        if isinstance(pc_range, (int, float)):
            pc_range = [-pc_range, -pc_range, pc_range, pc_range]
        elif isinstance(pc_range, list) and len(pc_range) == 6:
            pc_range = [pc_range[i] for i in [0, 1, 3, 4]]
        else:
            assert isinstance(pc_range, list) and len(pc_range) == 4, \
                "pc_range should be an int, a float or a list of length 6 or 4"
    if ax is None:
        ax = plt.figure(figsize=((pc_range[2] - pc_range[0]) / 20,
                                 (pc_range[3] - pc_range[1]) / 20)).add_subplot(1, 1, 1)
        ax.set_aspect('equal', 'box')
    if pc_range is not None:
        ax.set(xlim=(pc_range[0], pc_range[2]),
               ylim=(pc_range[1], pc_range[3]))
    if points is not None:
        ax.plot(points[:, 0], points[:, 1], '.',
                color=points_c, markersize=marker_size)
    if (boxes_pred is not None) and len(boxes_pred) > 0:
        ax = draw_box_plt(boxes_pred, ax, color=bbox_pred_c, linewidth_scale=linewidth_scale)
        if bbox_pred_label is not None:
            assert len(boxes_pred) == len(bbox_pred_label)
            for box, label in zip(boxes_pred, bbox_pred_label):
                ax.annotate(label, (box[0], box[1]), textcoords="offset points", xytext=(0, 10), ha='center', color='r')
    if (boxes_gt is not None) and len(boxes_gt) > 0:
        ax = draw_box_plt(boxes_gt, ax, color=bbox_gt_c, linewidth_scale=linewidth_scale)
        if bbox_gt_label is not None:
            assert len(boxes_gt) == len(bbox_gt_label)
            for box, label in zip(boxes_gt, bbox_gt_label):
                ax.annotate(label, (box[0], box[1]), textcoords="offset points", xytext=(0, 10), ha='center', color='g')
    plt.xlabel('x')
    plt.ylabel('y')

    if return_ax:
        return ax
    if filename is not None:
        plt.savefig(filename)
    plt.close()
+ + +
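A usage sketch for the all-in-one BEV plot above, with dummy points and boxes:

```python
import numpy as np

points = np.random.rand(2000, 3) * 100 - 50                    # (N, 3) dummy point cloud
boxes_gt = np.array([[10.0, 5.0, -1.0, 4.0, 2.0, 1.5, 0.3]])   # (N, 7): x, y, z, l, w, h, yaw
boxes_pred = boxes_gt + 0.2

draw_points_boxes_plt(
    pc_range=50,                  # expands to [-50, -50, 50, 50]
    points=points,
    boxes_gt=boxes_gt,
    boxes_pred=boxes_pred,
    filename='bev_example.png',   # figure is saved and closed because return_ax is False
)
```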
[docs]def update_axis_linset(line_set, axis_len=5): + points = [ + [0, 0, 0], + [axis_len, 0, 0], + [0, axis_len, 0], + [0, 0, axis_len] + ] + lines = [[0, 1], [0, 2], [0, 3]] + colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] + line_set.points = o3d.utility.Vector3dVector(points) + line_set.lines = o3d.utility.Vector2iVector(lines) + line_set.colors = o3d.utility.Vector3dVector(colors) + return line_set
+ + +
[docs]def bbx2linset(bbx, color=(0, 1, 0)): + """ + Convert the bounding box to o3d lineset for visualization. + + :param bbx : np.ndarray + shape: (n, 7) or (n, 11) or (n, 8, 3). + :param color : tuple + The bounding box color. + + :return: line_set : open3d.LineSet + """ + if len(bbx) > 0 and len(bbx[0]) == 11: + bbx = bbx[:, 2:] + bbx_corner = boxes_to_corners_3d(bbx, 'lwh') + elif len(bbx) > 0 and len(bbx[0]) == 7: + bbx_tmp = np.zeros((len(bbx), 9)) + bbx_tmp[:, :6] = bbx[:, :6] + bbx_tmp[:, -1] = bbx[:, -1] + bbx_corner = boxes_to_corners_3d(bbx_tmp, 'lwh') + else: + bbx_corner = bbx + bbx_corner = np.array(bbx_corner) + # Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc... + lines = [[0, 1], [1, 2], [2, 3], [0, 3], + [4, 5], [5, 6], [6, 7], [4, 7], + [0, 4], [1, 5], [2, 6], [3, 7]] + + # Use the same color for all lines + colors = [list(color) for _ in range(len(lines))] + bbx_linset = [] + + for i in range(len(bbx_corner)): + bbx = bbx_corner[i] + # o3d use right-hand coordinate + bbx[:, :1] = - bbx[:, :1] + + line_set = o3d.geometry.LineSet() + line_set.points = o3d.utility.Vector3dVector(bbx) + line_set.lines = o3d.utility.Vector2iVector(lines) + line_set.colors = o3d.utility.Vector3dVector(colors) + bbx_linset.append(line_set) + + return bbx_linset
+ + +
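An illustrative conversion of (N, 7) boxes into Open3D line sets; the box values are arbitrary:

```python
import numpy as np
import open3d as o3d

boxes = np.array([[10.0, 5.0, 0.0, 4.0, 2.0, 1.5, 0.3]])  # x, y, z, l, w, h, yaw
linesets = bbx2linset(boxes, color=(1, 0, 0))
o3d.visualization.draw_geometries(linesets)  # note: the function mirrors x for the o3d coordinate convention
```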
[docs]def update_lineset_vbo(vbo, bbx, color=None): + if len(bbx) > 0 and len(bbx[0]) == 9: + bbx = bbx[:, 2:] + bbx_corner = boxes_to_corners_3d(bbx, 'lwh') + else: + bbx_corner = bbx + bbx_corner = np.array(bbx_corner) + # Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc... + lines = [[0, 1], [1, 2], [2, 3], [0, 3], + [4, 5], [5, 6], [6, 7], [4, 7], + [0, 4], [1, 5], [2, 6], [3, 7]] + lines = np.array(lines) + if isinstance(color, np.ndarray): + color = color.squeeze().tolist() + + points_all = [] + lines_all = [] + colors_all = [] + for i in range(len(bbx_corner)): + bbx = bbx_corner[i] + # o3d use right-hand coordinate + bbx[:, :1] = - bbx[:, :1] + points_all.extend(bbx) + lines_all.extend((lines + 8 * i).tolist()) + # if no color given, use green for all lines + if color is None: + box_color = [[0, 1, 0] for _ in range(len(lines))] + elif isinstance(color[0], float): + box_color = [color for _ in range(len(lines))] + else: + box_color = [color[i] for _ in range(len(lines))] + + colors_all.extend(box_color) + vbo.points = o3d.utility.Vector3dVector(points_all) + vbo.lines = o3d.utility.Vector2iVector(lines_all) + vbo.colors = o3d.utility.Vector3dVector(colors_all) + return vbo
+ + +
[docs]def o3d_draw_pcds_bbxs(pcds: list, + bbxs: list, + bbxs_colors: list=None, + pcds_colors: list=None): + """ + :param pcds: list of np array + :param bbxs: list of np array, + bounding boxes in corner format + :param bbxs_colors: list of tuples + :param pcds_colors: list of np array, shape same as pcds + """ + pcds_vis = [] + linsets = [] + for i, bbx in enumerate(bbxs): + bbx_color = (0, 1, 0) + if bbxs_colors is not None: + assert len(bbxs_colors) == len(bbxs) + bbx_color = bbxs_colors[i] + linset = bbx2linset(bbx, bbx_color) + linsets.extend(linset) + for i, points in enumerate(pcds): + points[:, 0] *= -1 + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points[:, :3]) + if pcds_colors is not None: + assert pcds_colors[i].shape == points[:, :3].shape + pcd.colors = o3d.utility.Vector3dVector(pcds_colors[i]) + else: + colors = get_palette_colors('random') + pcd.paint_uniform_color(colors[i]) + pcds_vis.append(pcd) + o3d.visualization.draw_geometries(pcds_vis + linsets)
+ + +
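A minimal sketch of passing two dummy point clouds and one box set to the viewer above:

```python
import numpy as np

pcd_a = np.random.rand(1000, 3) * 50
pcd_b = np.random.rand(1000, 3) * 50
boxes = np.array([[10.0, 5.0, 0.0, 4.0, 2.0, 1.5, 0.3]])  # (N, 7) boxes, converted to corners internally

o3d_draw_pcds_bbxs([pcd_a, pcd_b], [boxes], bbxs_colors=[(1, 0, 0)])
```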
[docs]def o3d_draw_frame_data(frame_dict, data_path): + pcds = [] + bbx_global = frame_dict['meta']['bbx_center_global'] + bbx_corners = boxes_to_corners_3d(np.array(bbx_global[:, 2:])) + linsets = [] + bbx_colors = get_palette_colors('objects') + for l in np.unique(bbx_global[:, 1]): + assert l < 3 + linsets.extend(bbx2linset(bbx_corners, bbx_colors[int(l)])) + for ai, acontent in frame_dict['agents'].items(): + for li, lidar_dict in acontent['lidar0'].items(): + lidar_file = os.path.join(data_path, lidar_dict['filename']) + points = pclib.load_pcd(lidar_file)[:, :3] + points = pclib.rotate3d(points, lidar_dict['pose'][3:]) + points = points + np.array(lidar_dict['pose'][:3]).reshape(1, 3) + # o3d use right hand: left -> right hand + points[:, 0] *= -1 + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points) + colors = get_palette_colors('calm_afternoon') + pcd.paint_uniform_color(colors[ai]) + pcds.append(pcd) + o3d.visualization.draw_geometries(pcds + linsets)
+ + +
[docs]def o3d_draw_agent_data(agent_dict, data_path): + pcds = [] + bbx_lensets = [] + for li, lidar_dict in agent_dict['lidar0'].items(): + lidar_file = os.path.join(data_path, lidar_dict['filename']) + points = pclib.load_pcd(lidar_file)[:, :3] + # o3d use right hand: left -> right hand + points[:, 0] *= -1 + bbx = np.array(agent_dict['bbx_center']) + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points) + pcd.paint_uniform_color([0.5] * 3) + linsets = bbx2linset(bbx, (0, 1, 0)) + pcds.append(pcd) + bbx_lensets.extend(linsets) + o3d.visualization.draw_geometries(pcds + bbx_lensets)
+ + +
[docs]def o3d_play_sequence(meta_dict, data_path): + vis = o3d.visualization.Visualizer() + vis.create_window() + vis.get_render_option().background_color = [0.05, 0.05, 0.05] + vis.get_render_option().point_size = 1.0 + vis.get_render_option().show_coordinate_frame = True + + vbo_pcd = o3d.geometry.PointCloud() + vbo_lineset = o3d.geometry.LineSet() + painter = get_palette_colors('pastels_rock') + + idx = 0 + while True: + for scenario, scenario_dict in meta_dict.items(): + for frame, frame_dict in scenario_dict.items(): + pcds = [] + colors = [] + for i, (ai, agent_dict) in enumerate(frame_dict['agents'].items()): + for li, lidar_dict in agent_dict['lidar0'].items(): + points = pclib.load_pcd(os.path.join( + data_path, + lidar_dict['filename']) + )[:, :3] + points = pclib.rotate3d(points, lidar_dict['pose'][3:]) + points = points + np.array(lidar_dict['pose'][:3]).reshape(1, 3) + pcds.append(points) + colors.append(np.ones_like(points) * + np.array(painter[i]).reshape(1, 3)) + pcds = np.concatenate(pcds, axis=0) + pcds[:, 0] *= -1 + colors = np.concatenate(colors, axis=0) + vbo_pcd.points = o3d.utility.Vector3dVector(pcds) + vbo_pcd.colors = o3d.utility.Vector3dVector(colors) + + # add boxes + bbxs = frame_dict['meta']['bbx_center_global'] + if len(bbxs) > 0: + bbxs = boxes_to_corners_3d(np.array(bbxs)[:, 2:]) + vbo_lineset = update_lineset_vbo(vbo_lineset, bbxs) + if idx == 0: + vis.add_geometry(vbo_lineset) + else: + vis.update_geometry(vbo_lineset) + # add pcds + if idx == 0: + vis.add_geometry(vbo_pcd) + else: + vis.update_geometry(vbo_pcd) + + vis.poll_events() + vis.update_renderer() + time.sleep(0.1) + idx += 1
+ + +
[docs]def plt_draw_frame_data(frame_dict, data_path): + fig = plt.figure(figsize=(10, 10)) + ax = fig.add_subplot() + for ai, acontent in frame_dict.items(): + for li, lidar_dict in acontent['lidar0'].items(): + lidar_file = os.path.join(data_path, lidar_dict['filename']) + points = pclib.load_pcd(lidar_file)[:, :3] + points = pclib.rotate3d(points, lidar_dict['pose']) + points = points + np.array(lidar_dict['pose'][:3]).reshape(1, 3) + # points = np.r_[points, [np.ones(points.shape[1])]] + # points = np.dot(lidar_dict['pose'], points).T[:, :3] + bbx = np.array(acontent['objects']) + assert len(bbx.shape) == 2 + bbx = bbx[:, 2:] + + ax.plot(points[:, 0], points[:, 1], '.', markersize=.5) + ax = draw_box_plt(bbx, ax) + plt.show() + plt.close()
+ + +def draw_3d_points_boxes_on_img(img, lidar2cam, I, points=None, boxes=None): + """ + 4 -------- 5 ^ z + /| /| | + 7 -------- 6 . | + | | | | | . x + . 0 -------- 1 |/ + |/ |/ +-------> y + 3 -------- 2 + + :param img: np.ndarray + :param lidar2cam: np.ndarray, (4, 4), transformation matrix from lidar to camera coordinates + :param I: np.ndarray, (3, 3), intrinsic parameters + :param points: np.ndarray, (N, 3+C) + :param boxes: np.ndarray, (N, 8, 3), corners are in lidar coordinates + """ + assert lidar2cam.shape == (4, 4) + assert I.shape == (3, 3) + # Create a figure and axis + fig, ax = plt.subplots(1) + + if points is not None: + points_homo = np.concatenate([points[:, :3], np.ones_like(points[:, :1])], axis=1).T + points_homo = lidar2cam @ points_homo + pixels = I @ points_homo[:3] + pixels[:2] = pixels[:2] / pixels[2:] + px = pixels[0].astype(int) + py = pixels[1].astype(int) + mask = (px >= 0) & (px<800) & (py >= 0) & (py < 600) & (pixels[2] > 0) + px, py = px[mask], py[mask] + dist = np.linalg.norm(points_homo[:2].T[mask], axis=1) + dist_norm = np.clip(dist, a_min=0, a_max=100) / 100. + # Create a colormap based on the numbers + cmap = plt.get_cmap('jet') + + # Convert the numbers to colors using the colormap + colors = np.array([cmap(num) for num in dist_norm])[:, :3] * 255 + img[py, px] = colors + + ax.imshow(img) + # Loop through the boxes and draw them on the image + if boxes is not None: + n_box = len(boxes) + box_points = boxes.reshape(-1, 3) + box_points_homo = np.concatenate([box_points[:, :3], np.ones_like(box_points[:, :1])], axis=1).T + box_points_homo = lidar2cam @ box_points_homo + box_pixels = I @ box_points_homo[:3] + box_pixels[:2] = box_pixels[:2] / box_pixels[2:] + box_pixels = box_pixels.T.reshape(n_box, 8, 3) + box_pixels = box_pixels[(box_pixels[:, :, 2] > 0).all(axis=1)] + for box in box_pixels: + faces = [ + [0, 1, 2, 3, 0], + [4, 5, 6, 7, 4], + [0, 1, 5, 4, 0], + [2, 3, 7, 6, 2] + ] + for face in faces: + vertices = [(box[i][0], box[i][1]) for i in face] + polygon = Polygon(vertices, fill=None, edgecolor='g') + ax.add_patch(polygon) + + plt.show() + plt.close() + + +
[docs]def draw_2d_bboxes_on_img(img, boxes2d, ax_in=None):
    """
    :param img: np.ndarray
    :param boxes2d: np.ndarray, (N, 4, 2) for 4 corners or (N, 2, 2) for top-left and bottom-right corners, in pixel coordinates
    :param ax_in: optional matplotlib axis to draw on; if None, a new figure is created and shown
    """
    if ax_in is None:
        fig, ax = plt.subplots(1)
    else:
        ax = ax_in
    ax.imshow(img)

    if boxes2d is not None and len(boxes2d) > 0:
        assert len(boxes2d.shape) == 3
        if boxes2d.shape[1] == 2:
            box_4corners = []
            for box in boxes2d:
                box_4corners.append([
                    box[0],                  # left top
                    [box[1, 0], box[0, 1]],  # right top
                    box[1],                  # right bottom
                    [box[0, 0], box[1, 1]],  # left bottom
                ])
        else:
            box_4corners = boxes2d

        for box in box_4corners:
            vertices = [(box[i][0], box[i][1]) for i in [0, 1, 2, 3, 0]]
            polygon = Polygon(vertices, fill=None, edgecolor='lime')
            ax.add_patch(polygon)

    if ax_in is None:
        plt.show()
        plt.close()
    else:
        return ax
+ + +
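For illustration, drawing a single 2D box given as top-left / bottom-right pixel corners on a dummy image:

```python
import numpy as np

img = np.zeros((600, 800, 3), dtype=np.uint8)    # dummy image (H, W, 3)
boxes2d = np.array([[[100, 120], [260, 300]]])   # (N, 2, 2): [[x1, y1], [x2, y2]]
draw_2d_bboxes_on_img(img, boxes2d)              # creates, shows and closes its own figure when ax_in is None
```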
[docs]def draw_3d_points_boxes_on_img(ax, img, lidar2img, points=None, boxes=None): + """ + 1 -------- 6 ^ z + /| /| | + 2 -------- 5. | + | | | | | . x + . 0 -------- 7 |/ + |/ |/ +-------> y + 3 -------- 4 + + :param ax: plt plot axis + :param img: np.ndarray, (H, W, 3) + :param lidar2img: np.ndarray, (4, 4), transformation matrix from lidar to camera coordinates + :param points: np.ndarray, (N, 3+C) + :param boxes: np.ndarray, (N, 8, 3) or (N, 7), in lidar coordinates + """ + H, W = img.shape[:2] + if points is not None: + points_homo = np.concatenate([points[:, :3], np.ones_like(points[:, :1])], axis=1).T + points_homo = lidar2img @ points_homo + pixels = points_homo[:3] + pixels[:2] = pixels[:2] / pixels[2:] + px = pixels[0].astype(int) + py = pixels[1].astype(int) + mask = (px >= 0) & (px<W) & (py >= 0) & (py < H) & (pixels[2] > 0) + if mask.sum() > 0: + px, py = px[mask], py[mask] + dist = np.linalg.norm(points_homo[:2].T[mask], axis=1) + dist_norm = np.clip(dist, a_min=0, a_max=100) / 100. + # Create a colormap based on the numbers + cmap = plt.get_cmap('cool') + + # Convert the numbers to colors using the colormap + colors = np.array([cmap(num) for num in dist_norm]) + colors = colors[:, :3] * 255 + img[py, px] = colors + + ax.imshow(img) + # Loop through the boxes and draw them on the image + if boxes is not None: + n_box = len(boxes) + if boxes.shape[1] == 7: + boxes = boxes_to_corners_3d(boxes) + box_points = boxes.reshape(-1, 3) + box_points_homo = np.concatenate([box_points[:, :3], np.ones_like(box_points[:, :1])], axis=1).T + box_points_homo = lidar2img @ box_points_homo + + box_pixels = box_points_homo[:3] + box_pixels[:2] = box_pixels[:2] / box_pixels[2:] + box_pixels = box_pixels.T.reshape(n_box, 8, 3) + box_pixels = box_pixels[(box_pixels[:, :, 2] > 0).all(axis=1)] + for box in box_pixels: + faces = [ + [0, 1, 2, 3, 0], + [4, 5, 6, 7, 4], + [0, 1, 5, 4, 0], + [2, 3, 7, 6, 2] + ] + for face in faces: + vertices = [(box[i][0], box[i][1]) for i in face] + polygon = Polygon(vertices, fill=None, edgecolor='lime') + ax.add_patch(polygon)
+ + +
[docs]def draw_matched_boxes(boxes1, boxes2, match, out_file=None): + fig = plt.figure(figsize=(10, 10)) + ax = fig.add_subplot() + ax.axis('equal') + + ax = draw_box_plt(boxes1, ax=ax, color='b') + ax = draw_box_plt(boxes2, ax=ax, color='r') + + for p1, p2 in match: + ax.plot([boxes1[p1][0], boxes2[p2][0]], [boxes1[p1][1], boxes2[p2][1]], c='k', markersize=3) + + if out_file is None: + plt.show() + plt.close() + else: + plt.savefig(out_file)
+ + +
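A sketch of visualizing a matching between two illustrative box sets; `match` holds index pairs into the two arrays:

```python
import numpy as np

boxes1 = np.array([[0.0, 0.0, -1.0, 4.0, 2.0, 1.5, 0.0],
                   [10.0, 4.0, -1.0, 4.0, 2.0, 1.5, 0.3]])
boxes2 = boxes1 + 0.5
match = [(0, 0), (1, 1)]   # (index in boxes1, index in boxes2)
draw_matched_boxes(boxes1, boxes2, match, out_file='matched_boxes.png')
```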
[docs]def plot_cavs_points(cavs, points_key='points'): + lidar_range = cavs[0].lidar_range.tolist() + ax = draw_points_boxes_plt( + pc_range=lidar_range, + return_ax=True + ) + colors = ['green', 'blue', 'orange', 'magenta', 'cyan'] + for i, cav in enumerate(cavs): + points = cav.data[points_key].detach().cpu().numpy() + roadline = cav.data['roadline_pred'].detach().cpu().numpy() + ax.plot(points[:, 0], points[:, 1], '.', markersize=1, color=colors[i]) + ax.plot(roadline[:, 0], roadline[:, 1], 'ro', markersize=1) + plt.savefig("/home/yys/Downloads/tmp.jpg") + plt.close()
+ +
\ No newline at end of file diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html new file mode 100644 index 00000000..d151f661 --- /dev/null +++ b/docs/_build/html/_modules/index.html @@ -0,0 +1,219 @@ Overview: module code — Cosense3D 1.0.0 documentation

All modules for which code is available

+ + + + \ No newline at end of file diff --git a/docs/_build/html/_sources/cosense3d.agents.cav_prototype.rst.txt b/docs/_build/html/_sources/cosense3d.agents.cav_prototype.rst.txt new file mode 100644 index 00000000..4bb42d0f --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.agents.cav_prototype.rst.txt @@ -0,0 +1,29 @@ +cosense3d.agents.cav\_prototype package +======================================= + +Submodules +---------- + +cosense3d.agents.cav\_prototype.base\_cav module +------------------------------------------------ + +.. automodule:: cosense3d.agents.cav_prototype.base_cav + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.cav\_prototype.streamLTS\_collection module +------------------------------------------------------------ + +.. automodule:: cosense3d.agents.cav_prototype.streamLTS_collection + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.cav_prototype + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.agents.core.rst.txt b/docs/_build/html/_sources/cosense3d.agents.core.rst.txt new file mode 100644 index 00000000..a38dabe3 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.agents.core.rst.txt @@ -0,0 +1,93 @@ +cosense3d.agents.core package +============================= + +Submodules +---------- + +cosense3d.agents.core.base\_runner module +----------------------------------------- + +.. automodule:: cosense3d.agents.core.base_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.cav\_manager module +----------------------------------------- + +.. automodule:: cosense3d.agents.core.cav_manager + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.data\_manager module +------------------------------------------ + +.. automodule:: cosense3d.agents.core.data_manager + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.forward\_runner module +-------------------------------------------- + +.. automodule:: cosense3d.agents.core.forward_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.gui module +-------------------------------- + +.. automodule:: cosense3d.agents.core.gui + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.hooks module +---------------------------------- + +.. automodule:: cosense3d.agents.core.hooks + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.task\_manager module +------------------------------------------ + +.. automodule:: cosense3d.agents.core.task_manager + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.test\_runner module +----------------------------------------- + +.. automodule:: cosense3d.agents.core.test_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.train\_runner module +------------------------------------------ + +.. automodule:: cosense3d.agents.core.train_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.vis\_runner module +---------------------------------------- + +.. automodule:: cosense3d.agents.core.vis_runner + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: cosense3d.agents.core + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.agents.rst.txt b/docs/_build/html/_sources/cosense3d.agents.rst.txt new file mode 100644 index 00000000..5db920ff --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.agents.rst.txt @@ -0,0 +1,32 @@ +cosense3d.agents package +======================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.agents.cav_prototype + cosense3d.agents.core + cosense3d.agents.utils + cosense3d.agents.viewer + +Submodules +---------- + +cosense3d.agents.center\_controller module +------------------------------------------ + +.. automodule:: cosense3d.agents.center_controller + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.agents.utils.rst.txt b/docs/_build/html/_sources/cosense3d.agents.utils.rst.txt new file mode 100644 index 00000000..3ca7e6db --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.agents.utils.rst.txt @@ -0,0 +1,29 @@ +cosense3d.agents.utils package +============================== + +Submodules +---------- + +cosense3d.agents.utils.deco module +---------------------------------- + +.. automodule:: cosense3d.agents.utils.deco + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.utils.transform module +--------------------------------------- + +.. automodule:: cosense3d.agents.utils.transform + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.agents.viewer.items.rst.txt b/docs/_build/html/_sources/cosense3d.agents.viewer.items.rst.txt new file mode 100644 index 00000000..d7d493fb --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.agents.viewer.items.rst.txt @@ -0,0 +1,21 @@ +cosense3d.agents.viewer.items package +===================================== + +Submodules +---------- + +cosense3d.agents.viewer.items.graph\_items module +------------------------------------------------- + +.. automodule:: cosense3d.agents.viewer.items.graph_items + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.viewer.items + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.agents.viewer.rst.txt b/docs/_build/html/_sources/cosense3d.agents.viewer.rst.txt new file mode 100644 index 00000000..0fcf4a70 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.agents.viewer.rst.txt @@ -0,0 +1,61 @@ +cosense3d.agents.viewer package +=============================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.agents.viewer.items + +Submodules +---------- + +cosense3d.agents.viewer.gl\_viewer module +----------------------------------------- + +.. automodule:: cosense3d.agents.viewer.gl_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.img\_anno3d\_viewer module +-------------------------------------------------- + +.. automodule:: cosense3d.agents.viewer.img_anno3d_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.img\_viewer module +------------------------------------------ + +.. 
automodule:: cosense3d.agents.viewer.img_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.output\_viewer module +--------------------------------------------- + +.. automodule:: cosense3d.agents.viewer.output_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.utils module +------------------------------------ + +.. automodule:: cosense3d.agents.viewer.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.viewer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.dataset.pipeline.rst.txt b/docs/_build/html/_sources/cosense3d.dataset.pipeline.rst.txt new file mode 100644 index 00000000..cb14002f --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.dataset.pipeline.rst.txt @@ -0,0 +1,29 @@ +cosense3d.dataset.pipeline package +================================== + +Submodules +---------- + +cosense3d.dataset.pipeline.loading module +----------------------------------------- + +.. automodule:: cosense3d.dataset.pipeline.loading + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.pipeline.transform module +------------------------------------------- + +.. automodule:: cosense3d.dataset.pipeline.transform + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.dataset.pipeline + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.dataset.rst.txt b/docs/_build/html/_sources/cosense3d.dataset.rst.txt new file mode 100644 index 00000000..470c5269 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.dataset.rst.txt @@ -0,0 +1,46 @@ +cosense3d.dataset package +========================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.dataset.pipeline + cosense3d.dataset.toolkit + +Submodules +---------- + +cosense3d.dataset.const module +------------------------------ + +.. automodule:: cosense3d.dataset.const + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.cosense\_dataset module +----------------------------------------- + +.. automodule:: cosense3d.dataset.cosense_dataset + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.temporal\_cosense\_dataset module +--------------------------------------------------- + +.. automodule:: cosense3d.dataset.temporal_cosense_dataset + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.dataset + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.dataset.toolkit.rst.txt b/docs/_build/html/_sources/cosense3d.dataset.toolkit.rst.txt new file mode 100644 index 00000000..a599c35b --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.dataset.toolkit.rst.txt @@ -0,0 +1,45 @@ +cosense3d.dataset.toolkit package +================================= + +Submodules +---------- + +cosense3d.dataset.toolkit.cosense module +---------------------------------------- + +.. automodule:: cosense3d.dataset.toolkit.cosense + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.toolkit.dairv2x module +---------------------------------------- + +.. automodule:: cosense3d.dataset.toolkit.dairv2x + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.toolkit.opv2v module +-------------------------------------- + +.. 
automodule:: cosense3d.dataset.toolkit.opv2v + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.toolkit.opv2v\_t module +----------------------------------------- + +.. automodule:: cosense3d.dataset.toolkit.opv2v_t + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.dataset.toolkit + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.backbone2d.rst.txt b/docs/_build/html/_sources/cosense3d.modules.backbone2d.rst.txt new file mode 100644 index 00000000..6a2c7fc3 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.backbone2d.rst.txt @@ -0,0 +1,21 @@ +cosense3d.modules.backbone2d package +==================================== + +Submodules +---------- + +cosense3d.modules.backbone2d.resnet\_encoder module +--------------------------------------------------- + +.. automodule:: cosense3d.modules.backbone2d.resnet_encoder + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.backbone2d + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.backbone3d.rst.txt b/docs/_build/html/_sources/cosense3d.modules.backbone3d.rst.txt new file mode 100644 index 00000000..12c1f5d1 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.backbone3d.rst.txt @@ -0,0 +1,45 @@ +cosense3d.modules.backbone3d package +==================================== + +Submodules +---------- + +cosense3d.modules.backbone3d.mink\_unet module +---------------------------------------------- + +.. automodule:: cosense3d.modules.backbone3d.mink_unet + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.backbone3d.pillar\_bev module +----------------------------------------------- + +.. automodule:: cosense3d.modules.backbone3d.pillar_bev + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.backbone3d.spconv module +------------------------------------------ + +.. automodule:: cosense3d.modules.backbone3d.spconv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.backbone3d.voxelnet module +-------------------------------------------- + +.. automodule:: cosense3d.modules.backbone3d.voxelnet + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.backbone3d + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.fusion.rst.txt b/docs/_build/html/_sources/cosense3d.modules.fusion.rst.txt new file mode 100644 index 00000000..0ba729e5 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.fusion.rst.txt @@ -0,0 +1,77 @@ +cosense3d.modules.fusion package +================================ + +Submodules +---------- + +cosense3d.modules.fusion.attn\_fusion module +-------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.attn_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.box\_fusion module +------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.box_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.fax module +----------------------------------- + +.. automodule:: cosense3d.modules.fusion.fax + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.keypoints module +----------------------------------------- + +.. 
automodule:: cosense3d.modules.fusion.keypoints + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.maxout\_fusion module +---------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.maxout_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.naive\_fusion module +--------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.naive_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.spatial\_query\_fusion module +------------------------------------------------------ + +.. automodule:: cosense3d.modules.fusion.spatial_query_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.temporal\_fusion module +------------------------------------------------ + +.. automodule:: cosense3d.modules.fusion.temporal_fusion + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.fusion + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.heads.rst.txt b/docs/_build/html/_sources/cosense3d.modules.heads.rst.txt new file mode 100644 index 00000000..43eaa832 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.heads.rst.txt @@ -0,0 +1,109 @@ +cosense3d.modules.heads package +=============================== + +Submodules +---------- + +cosense3d.modules.heads.bev module +---------------------------------- + +.. automodule:: cosense3d.modules.heads.bev + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.bev\_dense module +----------------------------------------- + +.. automodule:: cosense3d.modules.heads.bev_dense + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_anchor\_dense module +------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.det_anchor_dense + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_anchor\_sparse module +-------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.det_anchor_sparse + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_center\_sparse module +-------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.det_center_sparse + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_roi\_refine module +----------------------------------------------- + +.. automodule:: cosense3d.modules.heads.det_roi_refine + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.img\_focal module +----------------------------------------- + +.. automodule:: cosense3d.modules.heads.img_focal + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.lidar\_petr\_head module +------------------------------------------------ + +.. automodule:: cosense3d.modules.heads.lidar_petr_head + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.multitask\_head module +---------------------------------------------- + +.. automodule:: cosense3d.modules.heads.multitask_head + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.nbr\_attn\_bev module +--------------------------------------------- + +.. 
automodule:: cosense3d.modules.heads.nbr_attn_bev + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.petr\_head module +----------------------------------------- + +.. automodule:: cosense3d.modules.heads.petr_head + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.query\_guided\_petr\_head module +-------------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.query_guided_petr_head + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.heads + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.losses.rst.txt b/docs/_build/html/_sources/cosense3d.modules.losses.rst.txt new file mode 100644 index 00000000..08a241ed --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.losses.rst.txt @@ -0,0 +1,69 @@ +cosense3d.modules.losses package +================================ + +Submodules +---------- + +cosense3d.modules.losses.base\_loss module +------------------------------------------ + +.. automodule:: cosense3d.modules.losses.base_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.common module +-------------------------------------- + +.. automodule:: cosense3d.modules.losses.common + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.edl module +----------------------------------- + +.. automodule:: cosense3d.modules.losses.edl + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.focal\_loss module +------------------------------------------- + +.. automodule:: cosense3d.modules.losses.focal_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.iou\_loss module +----------------------------------------- + +.. automodule:: cosense3d.modules.losses.iou_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.l1\_loss module +---------------------------------------- + +.. automodule:: cosense3d.modules.losses.l1_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.vanilla\_seg\_loss module +-------------------------------------------------- + +.. automodule:: cosense3d.modules.losses.vanilla_seg_loss + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.losses + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.necks.rst.txt b/docs/_build/html/_sources/cosense3d.modules.necks.rst.txt new file mode 100644 index 00000000..321065e0 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.necks.rst.txt @@ -0,0 +1,37 @@ +cosense3d.modules.necks package +=============================== + +Submodules +---------- + +cosense3d.modules.necks.cpm\_composer module +-------------------------------------------- + +.. automodule:: cosense3d.modules.necks.cpm_composer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.necks.dilation\_spconv module +----------------------------------------------- + +.. automodule:: cosense3d.modules.necks.dilation_spconv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.necks.formatting module +----------------------------------------- + +.. automodule:: cosense3d.modules.necks.formatting + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: cosense3d.modules.necks + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.plugin.rst.txt b/docs/_build/html/_sources/cosense3d.modules.plugin.rst.txt new file mode 100644 index 00000000..542932ba --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.plugin.rst.txt @@ -0,0 +1,141 @@ +cosense3d.modules.plugin package +================================ + +Submodules +---------- + +cosense3d.modules.plugin.attn module +------------------------------------ + +.. automodule:: cosense3d.modules.plugin.attn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.bev\_rpn module +---------------------------------------- + +.. automodule:: cosense3d.modules.plugin.bev_rpn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.downsample\_conv module +------------------------------------------------ + +.. automodule:: cosense3d.modules.plugin.downsample_conv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.flash\_attn module +------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.flash_attn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.fpn module +----------------------------------- + +.. automodule:: cosense3d.modules.plugin.fpn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.gevbev\_decoder module +----------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.gevbev_decoder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.mink\_spconv module +-------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.mink_spconv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.naive\_compressor module +------------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.naive_compressor + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.pillar\_encoder module +----------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.pillar_encoder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.ssfa module +------------------------------------ + +.. automodule:: cosense3d.modules.plugin.ssfa + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.target\_assigners module +------------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.target_assigners + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.transformer module +------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.transformer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.voxel\_encoder module +---------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.voxel_encoder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.voxel\_generator module +------------------------------------------------ + +.. automodule:: cosense3d.modules.plugin.voxel_generator + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.voxnet\_utils module +--------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.voxnet_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.vsa module +----------------------------------- + +.. 
automodule:: cosense3d.modules.plugin.vsa + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.plugin + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.projection.rst.txt b/docs/_build/html/_sources/cosense3d.modules.projection.rst.txt new file mode 100644 index 00000000..96a562c2 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.projection.rst.txt @@ -0,0 +1,37 @@ +cosense3d.modules.projection package +==================================== + +Submodules +---------- + +cosense3d.modules.projection.fax module +--------------------------------------- + +.. automodule:: cosense3d.modules.projection.fax + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.projection.petr module +---------------------------------------- + +.. automodule:: cosense3d.modules.projection.petr + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.projection.spatial\_transform module +------------------------------------------------------ + +.. automodule:: cosense3d.modules.projection.spatial_transform + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.projection + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.rst.txt b/docs/_build/html/_sources/cosense3d.modules.rst.txt new file mode 100644 index 00000000..af915302 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.rst.txt @@ -0,0 +1,26 @@ +cosense3d.modules package +========================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.modules.backbone2d + cosense3d.modules.backbone3d + cosense3d.modules.fusion + cosense3d.modules.heads + cosense3d.modules.losses + cosense3d.modules.necks + cosense3d.modules.plugin + cosense3d.modules.projection + cosense3d.modules.utils + +Module contents +--------------- + +.. automodule:: cosense3d.modules + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.modules.utils.rst.txt b/docs/_build/html/_sources/cosense3d.modules.utils.rst.txt new file mode 100644 index 00000000..d52d7625 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.modules.utils.rst.txt @@ -0,0 +1,101 @@ +cosense3d.modules.utils package +=============================== + +Submodules +---------- + +cosense3d.modules.utils.box\_coder module +----------------------------------------- + +.. automodule:: cosense3d.modules.utils.box_coder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.common module +------------------------------------- + +.. automodule:: cosense3d.modules.utils.common + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.conv module +----------------------------------- + +.. automodule:: cosense3d.modules.utils.conv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.edl\_utils module +----------------------------------------- + +.. automodule:: cosense3d.modules.utils.edl_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.gaussian\_utils module +---------------------------------------------- + +.. automodule:: cosense3d.modules.utils.gaussian_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.init module +----------------------------------- + +.. 
automodule:: cosense3d.modules.utils.init + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.me\_utils module +---------------------------------------- + +.. automodule:: cosense3d.modules.utils.me_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.misc module +----------------------------------- + +.. automodule:: cosense3d.modules.utils.misc + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.nbr\_attn module +---------------------------------------- + +.. automodule:: cosense3d.modules.utils.nbr_attn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.norm module +----------------------------------- + +.. automodule:: cosense3d.modules.utils.norm + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.positional\_encoding module +--------------------------------------------------- + +.. automodule:: cosense3d.modules.utils.positional_encoding + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.rst.txt b/docs/_build/html/_sources/cosense3d.rst.txt new file mode 100644 index 00000000..3758fc44 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.rst.txt @@ -0,0 +1,21 @@ +cosense3d package +================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.agents + cosense3d.dataset + cosense3d.modules + cosense3d.utils + +Module contents +--------------- + +.. automodule:: cosense3d + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/cosense3d.utils.rst.txt b/docs/_build/html/_sources/cosense3d.utils.rst.txt new file mode 100644 index 00000000..af2460e5 --- /dev/null +++ b/docs/_build/html/_sources/cosense3d.utils.rst.txt @@ -0,0 +1,109 @@ +cosense3d.utils package +======================= + +Submodules +---------- + +cosense3d.utils.box\_utils module +--------------------------------- + +.. automodule:: cosense3d.utils.box_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.eval\_detection\_utils module +--------------------------------------------- + +.. automodule:: cosense3d.utils.eval_detection_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.iou2d\_calculator module +---------------------------------------- + +.. automodule:: cosense3d.utils.iou2d_calculator + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.logger module +----------------------------- + +.. automodule:: cosense3d.utils.logger + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.lr\_scheduler module +------------------------------------ + +.. automodule:: cosense3d.utils.lr_scheduler + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.metrics module +------------------------------ + +.. automodule:: cosense3d.utils.metrics + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.misc module +--------------------------- + +.. automodule:: cosense3d.utils.misc + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.module\_utils module +------------------------------------ + +.. automodule:: cosense3d.utils.module_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.pclib module +---------------------------- + +.. 
automodule:: cosense3d.utils.pclib + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.tensor\_utils module +------------------------------------ + +.. automodule:: cosense3d.utils.tensor_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.train\_utils module +----------------------------------- + +.. automodule:: cosense3d.utils.train_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.vislib module +----------------------------- + +.. automodule:: cosense3d.utils.vislib + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt new file mode 100644 index 00000000..5bbd97fe --- /dev/null +++ b/docs/_build/html/_sources/index.rst.txt @@ -0,0 +1,24 @@ +.. OpenCosense3D documentation master file, created by + sphinx-quickstart on Tue Feb 27 18:37:05 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to OpenCosense3D's documentation! +========================================= + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + md/installation + md/prepare_data + md/structure + modules + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/_build/html/_sources/md/installation.md.txt b/docs/_build/html/_sources/md/installation.md.txt new file mode 100644 index 00000000..526b23f6 --- /dev/null +++ b/docs/_build/html/_sources/md/installation.md.txt @@ -0,0 +1,69 @@ +# Installation + +## Requirements +- Ubuntu LTS 20.04 +- GPU: tested on *Nvidia RTX 3090 Ti* and *Nvidia RTX 4090* +- Python: >= 3.8 + +## Installation options + +### Via bash script +You can install the environment with our provided batch script with the following commands: +```bash +conda create -n consense3d python=3.8 +conda activate cosense3d +cd OpenCosense3D +# for Nvidia RTX 3090 +bash setup_env_3090.sh +# for Nvidia RTX 4090 +bash setup_env_4090.sh +``` + +### Step-by-step +If you confront with any errors at the script installation, please try step-by-step installation. + +1.Create conda environment and install dependencies. +```shell +conda create -n consense3d python=3.8 +conda activate cosense3d +conda install openblas-devel -c anaconda -y +conda install -c conda-forge libstdcxx-ng libffi -y +sudo apt install build-essential python3-dev libopenblas-dev -y +``` + +2.Install pytorch and compile local Pytorch Extensions (CUDA nvcc compiler needed). +```shell +# For 3090 +pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 \ +--extra-index-url https://download.pytorch.org/whl/cu113 +# For 4090 +pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 +# Install extentions +cd cosense3d/ops +pip install . && cd .. +``` + +3.Install python packages. +```shell +# for 3090 +pip install -r reququirements_cosense_3090.txt +# for 4090 +pip install -r reququirements_cosense_4090.txt +# for Graphical Interface +pip install -r requirements_ui.txt +``` + +4.Install MinkovskiEngine. +```shell +pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \ + --global-option="--blas_include_dirs=${CONDA_PREFIX}/include" \ + --global-option="--blas=openblas" +export OMP_NUM_THREADS=16 +``` + +5.Check Installation. 
+```shell +python -c "import torch; print(torch.__version__)" +python -W ignore -c "import MinkowskiEngine as ME; print(ME.__version__)" +``` + diff --git a/docs/_build/html/_sources/md/prepare_data.md.txt b/docs/_build/html/_sources/md/prepare_data.md.txt new file mode 100644 index 00000000..c8132bfb --- /dev/null +++ b/docs/_build/html/_sources/md/prepare_data.md.txt @@ -0,0 +1,32 @@ +# Prepare Datasets +> Check the dataset [page](https://data.uni-hannover.de/dataset/cosense3d) for download links or use the downloading script as following commands. +## OPV2Vt +```shell +cd CoSense3D +bash cosense3d/tools/download.sh OPV2Vt path/to/output_dir +``` + +## DairV2Xt + +Download [DAIR-V2X-C](https://thudair.baai.ac.cn/coop-dtest) dataset and extract it to the following structure. + +```shell +├── dair-v2x +│ ├── cooperative-vehicle-infrastructure +| |── 2021_08_16_22_26_54 +| |── ... +│ ├── cooperative-vehicle-infrastructure-infrastructure-side-image +│ ├── cooperative-vehicle-infrastructure-infrastructure-side-velodyne +│ ├── cooperative-vehicle-infrastructure-vehicle-side-image +│ ├── cooperative-vehicle-infrastructure-vehicle-side-velodyne +``` +Then download the meta files with +```shell +bash cosense3d/tools/download.sh DairV2xt /path/to/dair-v2x +``` + +## OPV2V + +```shell +bash cosense3d/tools/download.sh OPV2V path/to/output_dir +``` diff --git a/docs/_build/html/_sources/md/structure.md.txt b/docs/_build/html/_sources/md/structure.md.txt new file mode 100644 index 00000000..17065ec7 --- /dev/null +++ b/docs/_build/html/_sources/md/structure.md.txt @@ -0,0 +1,64 @@ +# The Structure of the framework +![framework](../_static/imgs/framework-structure.png) + +The overall framework contains four main modules, namely Dataloader, +Graphical user interface (GUI), Runner and Central Controller. +The Central Controller is the core module of the framework which contains four sub-modules: +CAV manager, Data manager, Task manager and Forward runner. Black arrows indicate the instruction flow, +green arrows show the data flow. The framework can run either with or without visualization in the GUI. + +## Dataloader +The framework standardizes the data loading API for collective perception with a predefined dictionary format +to store the meta information in JSON files. With this API, a new dataset can be easily converted to the +a standardized format without rewriting the PyTorch Dataloader and coping the large media files, such as point clouds +and images, to a new data structure. Only the meta information such as scenarios, frames, timestamps, parameters +of sensors and the annotations are parsed and saved to CoSense3D format in JSON files. This standardized Dataloader is able to load images, point cloud data, 2D annotations for images, +3D local annotations for perception without CAV cooperation and 3D global annotations for collective perception. + +## GUI +The graphical user interface can visualize the training and test data and check the training and test outcomes by one click. +This is helpful for loading new datasets and developing new models. +Before training on a new dataset, it is necessary to check if the data is converted and loaded correctly. +During and after training, visualizing the model output is also helpful to identify the drawbacks and problems +of the model and then refine or modify the model accordingly. + +The GUI can send commands to the runner to start, stop or step the runner process. 
After each runner step, +the GUI updates the visualization modules: the 3D GLViewer, ImgViewer, ImgAnno3DViewer and OutputViewer. +GLViewer is an OpenGL-based visualizer for 3D data, annotations (green boxes) and predictions (red boxes). +ImgViewer shows image data and the corresponding 2D bounding boxes. +ImgAnno3DViewer is used to check +whether the transformations and augmentations of images and 3D annotations are loaded and processed correctly. +Each row in ImgViewer and ImgAnno3DViewer shows the images of a single CAV. After training the model, +the OutputViewer can be used to visualize the test results. The OutputViewer can contain multiple canvases +that can be customized by the user. +The last image below shows an example of the BEV segmentation (top) and object detection (bottom) results. +![glviewer](../_static/imgs/glviewer.png) +![imgviewer](../_static/imgs/imgviewer.png) +![imganno2viewer](../_static/imgs/imganno2dviewer.png) +![outputviewer](../_static/imgs/outputviewer.png) + +## Runner +In this framework, three types of Runners are available, namely TrainRunner, TestRunner and VisRunner. +The user can launch these runners with or without the GUI. They are used for training, testing and input +data visualization, respectively. Runners manage the frame-wise data and dispatch orders to the Central Controller, +which then processes the orders with the provided frame data accordingly. + +## Central Controller +![controller](../_static/imgs/center_controller.png) +The Central Controller is the core module of this framework; it communicates with the order dispatcher (Runner) +and the CAVs through its CAV manager. The Data manager is responsible for data gathering and scattering +between the Central Controller and the CAVs. Similarly, the Task manager gathers the pseudo tasks generated by the CAVs, +batches these tasks and dispatches them to the Forward runner, which contains all shared deep learning modules, +for execution. In this framework, a standardized CAV prototyping API is provided to allow the user to define +a customized workflow for collective perception, including the data augmentations, CAV coordinate transformations, +CPM sharing strategies, the forwarding order of the shared neural network modules and the gradient computation strategies +of these modules. + +Given the CAV prototype, the Central Controller then executes a standardized pipeline driven by the tasks +generated by the CAV prototypes. Once the Central Controller receives the order and frame data from the Runner (step 0), +the CAV manager updates the CAVs according to the meta information in the frame data and the provided CAV prototype (step 1). +Then the Data manager distributes the input frame data to the updated CAVs (step 2). +Upon receiving the input data, the CAVs pre-process the input data, generate tasks and send them back to the +Central Controller for processing (step 3). To increase the efficiency of the forward process, the Task manager +first summarizes the tasks from all CAVs and batches them into two forward steps, one with and one without +gradient computation, for parallel processing in the Forward runner (steps 4 and 5). After these tasks are finished, +the generated results are distributed back to the individual CAVs. \ No newline at end of file diff --git a/docs/_build/html/_sources/modules.rst.txt b/docs/_build/html/_sources/modules.rst.txt new file mode 100644 index 00000000..738c8791 --- /dev/null +++ b/docs/_build/html/_sources/modules.rst.txt @@ -0,0 +1,10 @@ +CoSense3D +========= + +.. 
toctree:: + :maxdepth: 4 + + cosense3d.agents + cosense3d.dataset + cosense3d.modules + cosense3d.utils diff --git a/docs/_build/html/_static/_sphinx_javascript_frameworks_compat.js b/docs/_build/html/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 00000000..81415803 --- /dev/null +++ b/docs/_build/html/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,123 @@ +/* Compatability shim for jQuery and underscores.js. + * + * Copyright Sphinx contributors + * Released under the two clause BSD licence + */ + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. 
+ */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css new file mode 100644 index 00000000..cfc60b86 --- /dev/null +++ b/docs/_build/html/_static/basic.css @@ -0,0 +1,921 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + 
text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + 
+div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > 
li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; 
+ padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_build/html/_static/css/badge_only.css b/docs/_build/html/_static/css/badge_only.css new file mode 100644 index 00000000..c718cee4 --- /dev/null +++ b/docs/_build/html/_static/css/badge_only.css @@ -0,0 +1 @@ +.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions 
.rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 00000000..6cb60000 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 00000000..7059e231 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 00000000..f815f63f Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 00000000..f2c76e5b Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 00000000..e9f60ca9 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 00000000..855c845e --- /dev/null +++ b/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
+ [SVG glyph path definitions omitted] diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 00000000..35acda2f Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff new file mode 100644 index 00000000..400014a4 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 00000000..4d13fc60 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff new file mode 100644 index 00000000..88ad05b9 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 00000000..c4e3d804 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff b/docs/_build/html/_static/css/fonts/lato-bold.woff new file mode 100644 index 00000000..c6dff51f Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold.woff differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff2 b/docs/_build/html/_static/css/fonts/lato-bold.woff2 new file mode 100644 index 00000000..bb195043 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold.woff2 differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff new file mode 100644 index 00000000..76114bc0 Binary files /dev/null and 
b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 00000000..3404f37e Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff b/docs/_build/html/_static/css/fonts/lato-normal.woff new file mode 100644 index 00000000..ae1307ff Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-normal.woff differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff2 b/docs/_build/html/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 00000000..3bf98433 Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-normal.woff2 differ diff --git a/docs/_build/html/_static/css/theme.css b/docs/_build/html/_static/css/theme.css new file mode 100644 index 00000000..19a446a0 --- /dev/null +++ b/docs/_build/html/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media 
print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 
.fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-
vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-b
ell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-register
ed:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{c
ontent:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li 
button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn 
.headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn 
button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption 
.btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content 
.wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content 
.wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content 
.wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container 
li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 
0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves 
input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border 
.3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions 
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js new file mode 100644 index 00000000..d06a71d7 --- /dev/null +++ b/docs/_build/html/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. 
+ */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git 
a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js new file mode 100644 index 00000000..995f333f --- /dev/null +++ b/docs/_build/html/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '1.0.0', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/docs/_build/html/_static/file.png differ diff --git a/docs/_build/html/_static/imgs/buffer_based_sampling.png b/docs/_build/html/_static/imgs/buffer_based_sampling.png new file mode 100644 index 00000000..6678d0f4 Binary files /dev/null and b/docs/_build/html/_static/imgs/buffer_based_sampling.png differ diff --git a/docs/_build/html/_static/imgs/center_controller.png b/docs/_build/html/_static/imgs/center_controller.png new file mode 100644 index 00000000..3d758489 Binary files /dev/null and b/docs/_build/html/_static/imgs/center_controller.png differ diff --git a/docs/_build/html/_static/imgs/cosense_logo.png b/docs/_build/html/_static/imgs/cosense_logo.png new file mode 100644 index 00000000..d12fa8b0 Binary files /dev/null and b/docs/_build/html/_static/imgs/cosense_logo.png differ diff --git a/docs/_build/html/_static/imgs/dairv2xt.gif b/docs/_build/html/_static/imgs/dairv2xt.gif new file mode 100644 index 00000000..17be4cdd Binary files /dev/null and b/docs/_build/html/_static/imgs/dairv2xt.gif differ diff --git a/docs/_build/html/_static/imgs/download.png b/docs/_build/html/_static/imgs/download.png new file mode 100644 index 00000000..bfb8906a Binary files /dev/null and b/docs/_build/html/_static/imgs/download.png differ diff --git a/docs/_build/html/_static/imgs/framework-structure.png b/docs/_build/html/_static/imgs/framework-structure.png new file mode 100644 index 00000000..78c5136a Binary files /dev/null and b/docs/_build/html/_static/imgs/framework-structure.png differ diff --git a/docs/_build/html/_static/imgs/glviewer.png b/docs/_build/html/_static/imgs/glviewer.png new file mode 100644 index 00000000..ef75085f Binary files /dev/null and b/docs/_build/html/_static/imgs/glviewer.png differ diff --git a/docs/_build/html/_static/imgs/imganno2dviewer.png b/docs/_build/html/_static/imgs/imganno2dviewer.png new file mode 100644 index 00000000..dbd4a6cc Binary files /dev/null and b/docs/_build/html/_static/imgs/imganno2dviewer.png differ diff --git a/docs/_build/html/_static/imgs/imgviewer.png b/docs/_build/html/_static/imgs/imgviewer.png new file mode 100644 index 00000000..0eb6a691 Binary files /dev/null and b/docs/_build/html/_static/imgs/imgviewer.png differ diff --git a/docs/_build/html/_static/imgs/opv2vt.gif b/docs/_build/html/_static/imgs/opv2vt.gif new file mode 100644 index 00000000..cafe71cf Binary files /dev/null and b/docs/_build/html/_static/imgs/opv2vt.gif differ diff --git a/docs/_build/html/_static/imgs/outputviewer.png b/docs/_build/html/_static/imgs/outputviewer.png new file mode 100644 index 00000000..66efa4cf Binary files /dev/null and b/docs/_build/html/_static/imgs/outputviewer.png differ diff --git a/docs/_build/html/_static/jquery.js 
b/docs/_build/html/_static/jquery.js new file mode 100644 index 00000000..c4c6022f --- /dev/null +++ b/docs/_build/html/_static/jquery.js @@ -0,0 +1,2 @@ +/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" 
"]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var 
n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return 
G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof 
a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/_build/html/_static/js/html5shiv.min.js b/docs/_build/html/_static/js/html5shiv.min.js new file mode 100644 index 00000000..cd1c674f --- /dev/null +++ b/docs/_build/html/_static/js/html5shiv.min.js @@ -0,0 +1,4 @@ +/** +* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed +*/ +!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time 
video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/_build/html/_static/js/theme.js b/docs/_build/html/_static/js/theme.js new file mode 100644 index 00000000..1fddb6ee --- /dev/null +++ b/docs/_build/html/_static/js/theme.js @@ -0,0 +1 @@ +!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/docs/_build/html/_static/minus.png b/docs/_build/html/_static/minus.png new file mode 100644 index 00000000..d96755fd Binary files /dev/null and b/docs/_build/html/_static/minus.png differ diff --git a/docs/_build/html/_static/plus.png b/docs/_build/html/_static/plus.png new file mode 100644 index 00000000..7107cec9 Binary files /dev/null and b/docs/_build/html/_static/plus.png differ diff --git a/docs/_build/html/_static/pygments.css b/docs/_build/html/_static/pygments.css new file mode 100644 index 00000000..84ab3030 --- /dev/null +++ b/docs/_build/html/_static/pygments.css @@ -0,0 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: 
#000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #008000; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #9C6500 } /* Comment.Preproc */ +.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #E40000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #008400 } /* Generic.Inserted */ +.highlight .go { color: #717171 } /* Generic.Output */ +.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #008000 } /* Keyword.Pseudo */ +.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #B00040 } /* Keyword.Type */ +.highlight .m { color: #666666 } /* Literal.Number */ +.highlight .s { color: #BA2121 } /* Literal.String */ +.highlight .na { color: #687822 } /* Name.Attribute */ +.highlight .nb { color: #008000 } /* Name.Builtin */ +.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +.highlight .no { color: #880000 } /* Name.Constant */ +.highlight .nd { color: #AA22FF } /* Name.Decorator */ +.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #0000FF } /* Name.Function */ +.highlight .nl { color: #767600 } /* Name.Label */ +.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #19177C } /* Name.Variable */ +.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #666666 } /* Literal.Number.Bin */ +.highlight .mf { color: #666666 } /* Literal.Number.Float */ +.highlight .mh { color: #666666 } /* Literal.Number.Hex */ +.highlight .mi { color: #666666 } /* Literal.Number.Integer */ +.highlight .mo { color: #666666 } /* Literal.Number.Oct */ +.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ +.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ +.highlight .sc { color: #BA2121 } /* 
Literal.String.Char */ +.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ +.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ +.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ +.highlight .sx { color: #008000 } /* Literal.String.Other */ +.highlight .sr { color: #A45A77 } /* Literal.String.Regex */ +.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ +.highlight .ss { color: #19177C } /* Literal.String.Symbol */ +.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #0000FF } /* Name.Function.Magic */ +.highlight .vc { color: #19177C } /* Name.Variable.Class */ +.highlight .vg { color: #19177C } /* Name.Variable.Global */ +.highlight .vi { color: #19177C } /* Name.Variable.Instance */ +.highlight .vm { color: #19177C } /* Name.Variable.Magic */ +.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/docs/_build/html/_static/searchtools.js b/docs/_build/html/_static/searchtools.js new file mode 100644 index 00000000..97d56a74 --- /dev/null +++ b/docs/_build/html/_static/searchtools.js @@ -0,0 +1,566 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = docUrlRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = docUrlRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
+ */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && !terms[word]) + arr.push({ 
files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/docs/_build/html/_static/sphinx_highlight.js b/docs/_build/html/_static/sphinx_highlight.js new file mode 100644 index 00000000..aae669d7 --- /dev/null +++ b/docs/_build/html/_static/sphinx_highlight.js @@ -0,0 +1,144 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(SphinxHighlight.highlightSearchWords); +_ready(SphinxHighlight.initEscapeListener); diff --git a/docs/_build/html/cosense3d.agents.cav_prototype.html b/docs/_build/html/cosense3d.agents.cav_prototype.html new file mode 100644 index 00000000..92f695cf --- /dev/null +++ b/docs/_build/html/cosense3d.agents.cav_prototype.html @@ -0,0 +1,696 @@ + + + + + + + cosense3d.agents.cav_prototype package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.agents.cav_prototype package

+
+

Submodules

+
+
+

cosense3d.agents.cav_prototype.base_cav module

+
+
+class cosense3d.agents.cav_prototype.base_cav.BaseCAV(id: str, mapped_id: int, is_ego: bool, lidar_range: Tensor, memory_len: int, lidar_pose: Tensor | None = None, require_grad: bool = False, seq_len: int = 1, **kwargs)[source]
+

Bases: object

+
+
+apply_transform()[source]
+
+ +
+
+forward(tasks, training_mode, **kwargs)[source]
+
+ +
+
+forward_fusion(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_head(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_local(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_localization(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+get_request_cpm()[source]
+
+ +
+
+get_response_cpm()[source]
+
+ +
+
+has_request()[source]
+
+ +
+
+loss(tasks, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+post_update_memory()[source]
+

Update memory after each forward run of a single frame.

+
+ +
+
+pre_update_memory()[source]
+

Update memory before each forward run of a single frame.

+
+ +
+
+prepare_data()[source]
+
+ +
+
+receive_request(request)[source]
+
+ +
+
+receive_response(response)[source]
+
+ +
+
+reset_data(*args, **kwargs)[source]
+
+ +
+
+transform_data()[source]
+
+ +
+
+update(lidar_pose, is_ego, require_grad)[source]
+
+ +
+ +
+
+class cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV(id, mapped_id, is_ego, lidar_range, memory_len, lidar_pose=None, require_grad=False, seq_len=1, **kwargs)[source]
+

Bases: object

+
+
+apply_transform(seq_idx)[source]
+
+ +
+
+forward(tasks, training_mode, seq_idx, with_loss)[source]
+
+ +
+
+forward_fusion(tasks, training_mode, seq_idx, with_loss)[source]
+

To be overloaded.

+
+ +
+
+forward_head(tasks, training_mode, seq_idx, with_loss)[source]
+

To be overloaded.

+
+ +
+
+forward_local(tasks, training_mode, seq_idx, with_loss)[source]
+

To be overloaded.

+
+ +
+
+get_data(keys, seq_idx=None)[source]
+
+ +
+
+get_request_cpm()[source]
+
+ +
+
+get_response_cpm()[source]
+
+ +
+
+has_request()[source]
+
+ +
+
+loss(tasks, training_mode, seq_idx, with_loss)[source]
+

To be overloaded.

+
+ +
+
+post_update_memory(seq_idx, **kwargs)[source]
+

Update memory after each forward run of a single frame.

+
+ +
+
+pre_update_memory(seq_idx, **kwargs)[source]
+

Update memory before each forward run of a single frame.

+
+ +
+
+prepare_data(seq_idx)[source]
+
+ +
+
+receive_request(request)[source]
+
+ +
+
+receive_response(response, seq_idx)[source]
+
+ +
+
+reset_data(*args, **kwargs)[source]
+
+ +
+
+task_id(seq_idx)[source]
+
+ +
+
+update(lidar_pose)[source]
+
+ +
+ +
+
+class cosense3d.agents.cav_prototype.base_cav.DairV2XCAV(*args, **kwargs)[source]
+

Bases: BaseCAV

+
+ +
+
+class cosense3d.agents.cav_prototype.base_cav.OPV2VtCAV(*args, **kwargs)[source]
+

Bases: BaseCAV

+
+ +
+
+class cosense3d.agents.cav_prototype.base_cav.OPV2VtCAV_v2(*args, **kwargs)[source]
+

Bases: BaseCAV

+
+ +
+
+

cosense3d.agents.cav_prototype.streamLTS_collection module

+
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr(*args, **kwargs)[source]
+

Bases: StreamLidarCAV

+
+
+apply_transform()[source]
+
+ +
+
+forward_fusion(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_head(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_local(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_localization(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+get_response_cpm()[source]
+
+ +
+
+prepare_data()[source]
+
+ +
+
+transform_data()[source]
+
+ +
+ +
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.LTSDairV2X(*args, **kwargs)[source]
+

Bases: StreamLidarCAV

+
+
+forward_fusion(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_head(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_local(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+loss(tasks, **kwargs)[source]
+

To be overloaded.

+
+ +
+ +
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV(*args, **kwargs)[source]
+

Bases: BaseCAV

+
+
+apply_transform()[source]
+
+ +
+
+forward_fusion(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_head(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_local(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+get_response_cpm()[source]
+
+ +
+
+loss(tasks, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+post_update_memory()[source]
+

Update memory after each forward run of a single frame.

+
+ +
+
+pre_update_memory()[source]
+

Update memory before each forward run of a single frame.

+
+ +
+
+prepare_data()[source]
+
+ +
+
+prepare_time_scale()[source]
+
+ +
+
+refresh_memory(prev_exists)[source]
+
+ +
+
+property timestamp
+
+ +
+
+transform_data()[source]
+
+ +
+
+transform_ref_pts(reference_points, matrix)[source]
+
+ +
+
+update_memory_timestamps(ref_pts)[source]
+
+ +
+
+vis_local_detection()[source]
+
+ +
+
+vis_local_pred()[source]
+
+ +
+
+vis_poses(ax=None, label=None, his_len=1, **kwargs)[source]
+
+ +
+
+vis_ref_pts(ax=None, label=None, his_len=1, **kwargs)[source]
+
+ +
+ +
+
+cosense3d.agents.cav_prototype.streamLTS_collection.slcAttnFusion
+

alias of slcDenseToSparse

+
+ +
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD(*args, **kwargs)[source]
+

Bases: StreamLidarCAV

+
+
+apply_transform()[source]
+
+ +
+
+forward_fusion(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_head(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+forward_local(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+get_response_cpm()[source]
+
+ +
+
+loss(tasks, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+post_update_memory()[source]
+

Update memory after each forward run of a single frame.

+
+ +
+
+pre_update_memory()[source]
+

Update memory before each forward run of a single frame.

+
+ +
+
+prepare_data()[source]
+
+ +
+ +
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.slcDenseToSparse(*args, **kwargs)[source]
+

Bases: StreamLidarCAV

+
+
+forward_local(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+prepare_data()[source]
+
+ +
+ +
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.slcFPVRCNN(*args, **kwargs)[source]
+

Bases: StreamLidarCAV

+
+
+forward_local(tasks, training_mode, **kwargs)[source]
+

To be overloaded.

+
+ +
+
+prepare_data()[source]
+
+ +
+ +
+
+cosense3d.agents.cav_prototype.streamLTS_collection.slcFcooper
+

alias of slcDenseToSparse

+
+ +
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTime(*args, **kwargs)[source]
+

Bases: StreamLidarCAV

+
+
+prepare_data()[source]
+
+ +
+
+update_memory_timestamps(ref_pts)[source]
+
+ +
+ +
+
+class cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTimeDairV2X(*args, **kwargs)[source]
+

Bases: LTSDairV2X

+
+
+prepare_data()[source]
+
+ +
+
+update_memory_timestamps(ref_pts)[source]
+
+ +
+ +
+
+

Module contents

+
+
+cosense3d.agents.cav_prototype.get_prototype(module_full_path: str)[source]
+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.agents.core.html b/docs/_build/html/cosense3d.agents.core.html new file mode 100644 index 00000000..e9a07aef --- /dev/null +++ b/docs/_build/html/cosense3d.agents.core.html @@ -0,0 +1,883 @@ + + + + + + + cosense3d.agents.core package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.agents.core package

+
+

Submodules

+
+
+

cosense3d.agents.core.base_runner module

+
+
+class cosense3d.agents.core.base_runner.BaseRunner(dataloader, controller, gpus=0, log_every=10, hooks=None, **kwargs)[source]
+

Bases: object

+
+
+init()[source]
+
+ +
+
+property logdir
+
+ +
+
+next_batch()[source]
+
+ +
+
+run()[source]
+
+ +
+
+set_logdir(logdir)[source]
+
+ +
+
+setup_logger(*args, **kwargs)[source]
+
+ +
+
+vis_data(keys=None, **kwargs)[source]
+
+ +
+ +
+
+

cosense3d.agents.core.cav_manager module

+
+
+class cosense3d.agents.core.cav_manager.CAVManager(lidar_range, prototype=None, memory_len=1, all_grad=False, num_grad_cav=1, seq_len=0, cpm_statistic=False, **kwargs)[source]
+

Bases: object

+
+
+apply_cav_function(func_name)[source]
+
+ +
+
+forward(with_loss, training_mode, **kwargs)[source]
+
+ +
+
+get_cav_with_id(id)[source]
+
+ +
+
+has_cav(cav_id)[source]
+
+ +
+
+receive_request(request)[source]
+
+ +
+
+receive_response(response)[source]
+
+ +
+
+reset()[source]
+
+ +
+
+send_request()[source]
+
+ +
+
+send_response()[source]
+
+ +
+
+update_cav_info(valid_agent_ids=None, lidar_poses=None, **data)[source]
+
+ +
+
+update_cpm_statistic(response)[source]
+
+ +
+ +
+
+

cosense3d.agents.core.data_manager module

+
+
+class cosense3d.agents.core.data_manager.DataManager(cav_manager, lidar_range, voxel_size=None, aug=None, pre_process=[], loc_err=None)[source]
+

Bases: object

+
+
+add_loc_err(batch_dict, seq_len)[source]
+
+ +
+
+apply_preprocess()[source]
+
+ +
+
+boxes_to_vis_format(boxes, labels, id_appendix=0)[source]
+
+ +
+
+distribute_to_cav(valid_agent_ids=None, **data)[source]
+
+ +
+
+distribute_to_seq_cav(data)[source]
+
+ +
+
+distribute_to_seq_list(batch_dict, seq_len)[source]
+
+ +
+
+gather(cav_list, data_keys)[source]
+
+ +
+
+gather_batch(batch_idx, key, to_numpy=False)[source]
+
+ +
+
+gather_cav_data(key)[source]
+
+ +
+
+gather_ego_data(key)[source]
+
+ +
+
+gather_vis_data(batch_idx=0, keys=['points'])[source]
+
+ +
+
+generate_augment_params(batch_dict, seq_len)[source]
+
+ +
+
+generate_global_non_empty_mask()[source]
+
+ +
+
+generate_local_non_empty_mask(ego_only=False)[source]
+
+ +
+
+get_gt_boxes_as_vis_format(batch_idx, coor='global', successors=False)[source]
+
+ +
+
+get_vis_data_bev(batch_idx=0, keys='bev')[source]
+
+ +
+
+get_vis_data_detection(batch_idx=0, keys='detection')[source]
+
+

Parameters

+

batch_idx: batch index
key: the default key for detection is ‘detection’; a customized key can also be used, depending on which key is used for saving the detection result in the CAV data pool.

+
+
+

Returns

+
+

detection: result with boxes and labels converted to the visualizing format.

+
+
+
+ +
+
+get_vis_data_input(batch_idx=0, keys=None)[source]
+
+

Parameters

+

batch_idx: batch index
key: additional gt keys that are not standardized in the cosense3d data API

+
+
+

Returns

+
+
+ +
+
+get_vis_data_meta(batch_idx=0, keys=None)[source]
+
+ +
+
+remove_global_empty_boxes()[source]
+
+ +
+
+remove_local_empty_boxes(ego_only=False)[source]
+
+ +
+
+sample_global_bev_tgt_pts(sam_res=0.4, map_res=0.2, range=50, max_num_pts=5000, discrete=False)[source]
+
+ +
+
+scatter(cav_list, data_dict)[source]
+
+ +
+
+update(cav_id, data_key, data)[source]
+
+ +
+
+vis_global_data_plt(vis_funcs, seq_len=1)[source]
+
+ +
+ +
+
+

cosense3d.agents.core.forward_runner module

+
+
+class cosense3d.agents.core.forward_runner.ForwardRunner(shared_modules, data_manager, dist=False, chunk_size=24, **kwargs)[source]
+

Bases: Module

+
+
+forward(tasks, with_grad=True, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+frame_loss(tasks, **kwargs)[source]
+
+ +
+
+gather_cav_ids(tasks)[source]
+
+ +
+
+loss(tasks, **kwargs)[source]
+
+ +
+
+to_gpu(gpu_id)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.agents.core.gui module

+
+
+class cosense3d.agents.core.gui.GUI(mode, cfg)[source]
+

Bases: QMainWindow

+
+
+change_color_mode()[source]
+
+ +
+
+change_glcolor()[source]
+
+ +
+
+change_visible(name)[source]
+
+ +
+
+connect_events_to_funcs()[source]
+
+ +
+
+get_toolbar()[source]
+
+ +
+
+initGUI()[source]
+
+ +
+
+refresh()[source]
+
+ +
+
+setRunner(runner)[source]
+
+ +
+
+setupUI(cfg)[source]
+
+ +
+
+start()[source]
+
+ +
+
+step()[source]
+
+ +
+
+stop()[source]
+
+ +
+ +
+
+

cosense3d.agents.core.hooks module

+
+
+class cosense3d.agents.core.hooks.BaseHook(**kwargs)[source]
+

Bases: object

+
+
+post_epoch(runner, **kwargs)[source]
+
+ +
+
+post_iter(runner, **kwargs)[source]
+
+ +
+
+pre_epoch(runner, **kwargs)[source]
+
+ +
+
+pre_iter(runner, **kwargs)[source]
+
+ +
+
+set_logger(logger)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.CPMStatisticHook(device='cuda:0', **kwargs)[source]
+

Bases: BaseHook

+
+
+post_epoch(runner, **kwargs)[source]
+
+ +
+
+set_logger(logger)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.CheckPointsHook(max_ckpt=3, epoch_every=None, iter_every=None, **kwargs)[source]
+

Bases: BaseHook

+
+
+post_epoch(runner, **kwargs)[source]
+
+ +
+
+post_iter(runner, **kwargs)[source]
+
+ +
+
+static save(runner, name)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.DetectionNMSHook(nms_thr, pre_max_size, det_key='detection', **kwargs)[source]
+

Bases: BaseHook

+
+
+post_iter(runner, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.EvalBEVSemsegHook(test_range, test_res=0.4, save_result=False, eval_static=True, bev_semseg_key='bev_semseg', gt_bev_key='bevmap', gt_boxes_key='global_bboxes_3d', **kwargs)[source]
+

Bases: BaseHook

+
+
+cal_ious(preds, gt_map, tag, token=None)[source]
+
+ +
+
+crop_map(bevmap)[source]
+
+ +
+
+gt_dynamic_map(boxes)[source]
+
+ +
+
+gt_static_map(bevmap)[source]
+
+ +
+
+iou(conf, unc, gt, obs_mask=None)[source]
+
+ +
+
+post_epoch(runner, **kwargs)[source]
+
+ +
+
+post_iter(runner, **kwargs)[source]
+
+ +
+
+set_logger(logger)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.EvalDetectionBEVHook(pc_range, iou_thr=[0.5, 0.7], save_result=False, det_key='detection', gt_key='global_bboxes_3d', **kwargs)[source]
+

Bases: BaseHook

+
+
+filter_box_ranges(boxes, scores=None, labels=None, indices=None, times=None)[source]
+
+ +
+
+format_final_result(out_dict)[source]
+
+ +
+
+post_epoch(runner, **kwargs)[source]
+
+ +
+
+post_iter(runner, **kwargs)[source]
+
+ +
+
+set_logger(logger)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.EvalDetectionHook(pc_range, iou_thr=[0.5, 0.7], metrics=['CoSense3D'], save_result=False, det_key='detection', gt_key='global_bboxes_3d', **kwargs)[source]
+

Bases: BaseHook

+
+
+eval_cosense3d_final()[source]
+
+ +
+
+filter_box_ranges(boxes, scores=None, labels=None, indices=None, times=None)[source]
+
+ +
+
+format_final_result(out_dict)[source]
+
+ +
+
+post_epoch(runner, **kwargs)[source]
+
+ +
+
+post_iter(runner, **kwargs)[source]
+
+ +
+
+set_logger(logger)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.Hooks(cfg)[source]
+

Bases: object

+
+
+set_logger(logger)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.MemoryUsageHook(device='cuda:0', **kwargs)[source]
+

Bases: BaseHook

+
+
+post_iter(runner, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.agents.core.hooks.TrainTimerHook(**kwargs)[source]
+

Bases: BaseHook

+
+
+post_iter(runner, **kwargs)[source]
+
+ +
+
+pre_epoch(runner, **kwargs)[source]
+
+ +
+ +
+
+

cosense3d.agents.core.task_manager module

+
+
+class cosense3d.agents.core.task_manager.TaskManager[source]
+

Bases: object

+
+
+reformat_tasks(task_list)[source]
+
+ +
+
+summarize_loss_tasks(tasks)[source]
+
+ +
+
+summarize_tasks(tasks)[source]
+
+ +
+
+task_to_ordered_dict(tasks)[source]
+
+ +
+ +
+
+

cosense3d.agents.core.test_runner module

+
+
+class cosense3d.agents.core.test_runner.TestRunner(load_from=None, logdir=None, **kwargs)[source]
+

Bases: BaseRunner

+
+
+load(load_from)[source]
+
+ +
+
+run()[source]
+
+ +
+
+run_itr(data)[source]
+
+ +
+
+setup_logger(ckpt, logdir)[source]
+
+ +
+
+step()[source]
+
+ +
+ +
+
+

cosense3d.agents.core.train_runner module

+
+
+class cosense3d.agents.core.train_runner.TrainRunner(max_epoch, optimizer, lr_scheduler, gpus=0, resume_from=None, load_from=None, run_name='default', log_dir='work_dir', use_wandb=False, debug=False, **kwargs)[source]
+

Bases: BaseRunner

+
+
+resume(resume_from, load_from)[source]
+
+ +
+
+run()[source]
+
+ +
+
+run_epoch()[source]
+
+ +
+
+run_itr(**kwargs)
+
+ +
+
+setup_logger(resume_from, run_name, log_dir, use_wandb)[source]
+
+ +
+
+step()[source]
+
+ +
+ +
+
+

cosense3d.agents.core.vis_runner module

+
+
+class cosense3d.agents.core.vis_runner.VisRunner(**kwargs)[source]
+

Bases: BaseRunner

+
+
+load(load_from)[source]
+
+ +
+
+run()[source]
+
+ +
+
+run_itr(data)[source]
+
+ +
+
+step()[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.agents.html b/docs/_build/html/cosense3d.agents.html new file mode 100644 index 00000000..e8ef9283 --- /dev/null +++ b/docs/_build/html/cosense3d.agents.html @@ -0,0 +1,675 @@ + + + + + + + cosense3d.agents package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.agents package

+
+

Subpackages

+
+ +
+
+
+

Submodules

+
+
+

cosense3d.agents.center_controller module

+
+
+class cosense3d.agents.center_controller.CenterController(cfg, data_loader, dist=False)[source]
+

Bases: object

+
+
+property model
+
+ +
+
+property modules
+
+ +
+
+property parameters
+
+ +
+
+run_frame(frame_data, with_loss, training_mode, **kwargs)[source]
+
+ +
+
+run_seq(seq_data, training_mode, **kwargs)[source]
+
+ +
+
+setup_core(cfg)[source]
+
+ +
+
+test_forward(batch_dict, **kwargs)[source]
+
+ +
+
+train_forward(batch_dict, **kwargs)[source]
+
+ +
+
+update_cfg(cfg, *args)[source]
+
+ +
+
+vis_forward(batch_dict, **kwargs)[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.agents.utils.html b/docs/_build/html/cosense3d.agents.utils.html new file mode 100644 index 00000000..34a4669c --- /dev/null +++ b/docs/_build/html/cosense3d.agents.utils.html @@ -0,0 +1,267 @@ + + + + + + + cosense3d.agents.utils package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.agents.utils package

+
+

Submodules

+
+
+

cosense3d.agents.utils.deco module

+
+
+cosense3d.agents.utils.deco.save_ckpt_on_error(func)[source]
+
+ +
+
+

cosense3d.agents.utils.transform module

+
+
+class cosense3d.agents.utils.transform.DataOnlineProcessor[source]
+

Bases: object

+
+
+static adaptive_free_space_augmentation(data: dict, min_h: float = -1.5, steps: int = 20, alpha: float = 0.05, res: float = 0.5, time_idx: int = None)[source]
+

Add free space points according to the distance of points to the origin.

+
+lidar origin ->  *
+              *  *
+           *     * h
+        *  ele   *
+      ************
+             d
+
+

Assume \(\theta = \frac{\pi}{2} - \text{ele}\) (elevation angle), \(\alpha\) is the average angle between two lidar rings, \(d_n\) is the ground distance of the \(n_{th}\) lidar ring to the lidar origin, \(n=1,...,N\), and \(\delta_d\) is the distance between two neighboring lidar rings; then

+
+\[\begin{split}d &= h \tan(\theta) \\ \delta_d &= d_n - d_{n-1} = d_n - h\tan(\arctan(\frac{h}{d_n}) - \alpha)\end{split}\]
+

we sample free space points in the ground distance of \(\delta_d\) relative to each ring with the given ‘step’ distance.

+
+
Parameters:
+
    +
  • data – input data dict containing ‘points’.

  • +
  • min_h – minimum sample height relative to lidar origin. Default is -1.5.

  • +
  • steps – number of points to be sampled for each lidar ray. Default is 20.

  • +
  • alpha – average angle offset between two neighboring lidar casting rays. Default is 0.05.

  • +
  • res – resolution for down-sampling the free space points. Default is 0.5.

  • +
  • time_idx – if provided, time will be copied from the original points to free space points.

  • +
+
+
Returns:
+

updated data.

+
+
+
+ +
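The ring-spacing relation above can be checked numerically. The helper below is a minimal sketch (not part of the cosense3d API) that evaluates \(\delta_d\) for given ring distances, assuming a lidar mounted 1.5 m above the ground and \(\alpha\) given in radians:

```python
import numpy as np

def ring_spacing(d_n: np.ndarray, h: float = 1.5, alpha: float = 0.05) -> np.ndarray:
    """Ground-distance gap between a lidar ring at distance d_n and its inner neighbor.

    d_n   : ground distance of each ring to the lidar origin (meters)
    h     : lidar height above ground (meters), assumed here to be -min_h = 1.5
    alpha : average angular offset between neighboring rings (radians)
    """
    # delta_d = d_n - h * tan(arctan(h / d_n) - alpha), as in the formula above
    return d_n - h * np.tan(np.arctan(h / d_n) - alpha)

# rings hitting the ground at 5 m, 10 m and 20 m produce increasingly large gaps
print(ring_spacing(np.array([5.0, 10.0, 20.0])))
```

Free-space points would then be sampled within each gap at the configured ‘step’ distance.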
+
+static apply_transform(data, transform, apply_to=['points'])[source]
+
+ +
+
+static cav_aug_transform(data, transform, aug_params, apply_to=['points', 'imgs', 'annos_global'])[source]
+
+ +
+
+static filter_range(data, lidar_range, apply_to: list)[source]
+
+ +
+
+static free_space_augmentation(data, d: float = 10.0, h: float = -1.5, step: float = 1.5, res=0.25)[source]
+
+ +
+
+static generate_sparse_target_bev_points(data: dict, transform=None, sam_res=0.4, map_res=0.2, range=50, max_num_pts=3000, discrete=False)[source]
+
+ +
+
+static generate_sparse_target_roadline_points(data: dict, transform=None, map_res=0.2, range=50, kernel=3, max_num_pts=3000)[source]
+
+ +
+
+static update_transform_with_aug(transform, aug_params)[source]
+
+ +
+ +
+
+cosense3d.agents.utils.transform.add_flip(tf, flip_idx, flip_axis='xy')[source]
+
+ +
+
+cosense3d.agents.utils.transform.add_rotate(tf, rot)[source]
+
+ +
+
+cosense3d.agents.utils.transform.add_scale(tf, scale_ratio)[source]
+
+ +
+
+cosense3d.agents.utils.transform.apply_transform(data, transform, key)[source]
+
+ +
+
+cosense3d.agents.utils.transform.filter_range(data, lidar_range, key)[source]
+
+ +
+
+cosense3d.agents.utils.transform.filter_range_mask(points, lidar_range, eps=0.0001)[source]
+
+ +
+
+cosense3d.agents.utils.transform.generate_bev_tgt_pts(points, data, transform=None, sam_res=0.4, map_res=0.2, range=50, max_num_pts=5000, discrete=False)[source]
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.agents.viewer.html b/docs/_build/html/cosense3d.agents.viewer.html new file mode 100644 index 00000000..45e41128 --- /dev/null +++ b/docs/_build/html/cosense3d.agents.viewer.html @@ -0,0 +1,459 @@ + + + + + + + cosense3d.agents.viewer package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.agents.viewer package

+
+

Subpackages

+ +
+
+

Submodules

+
+
+

cosense3d.agents.viewer.gl_viewer module

+
+
+class cosense3d.agents.viewer.gl_viewer.GLViewer(name: str, parent=None)[source]
+

Bases: GLViewWidget

+
+
+addBox()[source]
+
+ +
+
+box()[source]
+
+ +
+
+change_visibility(key, visible)[source]
+
+ +
+
+drawRectangle()[source]
+
+ +
+
+draw_axes()[source]
+
+ +
+
+draw_depth_buffer()[source]
+
+ +
+
+evt_pos_to_world(pos1, pos2=None)[source]
+
+
Args:

pos1: center pos if pos2 is None, else start pos of a region
pos2: end pos of a region

+
+
+
+ +
+
+get_point_depth(x, y)[source]
+
+ +
+
+get_region_depth(p1: QPoint, p2: QPoint) → ndarray[source]
+
+
Args:

p1: start point of region.
p2: end point of region

+
+
+
+ +
+
+highlightBox(pos)[source]
+
+ +
+
+initializeGL()[source]
+

Initialize items that were not initialized during addItem().

+
+ +
+
+keyPressEvent(self, a0: QKeyEvent | None)[source]
+
+ +
+
+keyReleaseEvent(self, a0: QKeyEvent | None)[source]
+
+ +
+
+model_pose_to_world(x, y, z)[source]
+
+ +
+
+mouseDoubleClickEvent(self, a0: QMouseEvent | None)[source]
+
+ +
+
+mouseMoveEvent(self, a0: QMouseEvent | None)[source]
+
+ +
+
+mousePressEvent(self, a0: QMouseEvent | None)[source]
+
+ +
+
+mouseReleaseEvent(self, a0: QMouseEvent | None)[source]
+
+ +
+
+paintGL(region=None, viewport=None, useItemNames=False)[source]
+

viewport specifies the arguments to glViewport. If None, then we use self.opts[‘viewport’].
region specifies the sub-region of self.opts[‘viewport’] that should be rendered.
Note that we may use viewport != self.opts[‘viewport’] when exporting.

+
+ +
+
+paintRect()[source]
+
+ +
+
+refresh(data_dict, visible_keys=['globalGT'], color_mode='united', **kwargs)[source]
+
+ +
+
+removeActivate()[source]
+
+ +
+
+removeHeilight()[source]
+
+ +
+
+removeRectangle()[source]
+
+ +
+
+selectHeilight()[source]
+
+ +
+
+updateFrameData(pcds, local_label=None, global_label=None, local_det=None, global_det=None, predecessor=None, successor=None, successor_gt=None, pcd_color='united')[source]
+
+ +
+
+updateLabel(local_labels, global_labels, local_det, global_det, successor=None, successor_gt=None, predecessor=None)[source]
+
+ +
+
+updatePCDs(pcds, color_mode='united', **kwargs)[source]
+
+ +
+ +
+
+

cosense3d.agents.viewer.img_anno3d_viewer module

+
+
+class cosense3d.agents.viewer.img_anno3d_viewer.ImgAnno3DViewer(dpi=50)[source]
+

Bases: FigureCanvasQTAgg

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+

cosense3d.agents.viewer.img_viewer module

+
+
+class cosense3d.agents.viewer.img_viewer.ImgViewer(dpi=100, mean=None, std=None)[source]
+

Bases: FigureCanvasQTAgg

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+

cosense3d.agents.viewer.output_viewer module

+
+
+class cosense3d.agents.viewer.output_viewer.BEVDenseCanvas(lidar_range=None, **kwargs)[source]
+

Bases: MplCanvas

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.agents.viewer.output_viewer.BEVSparseCanvas(lidar_range=None, s=4, **kwargs)[source]
+

Bases: MplCanvas

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.agents.viewer.output_viewer.DetectionCanvas(lidar_range=None, topk_ctr=0, **kwargs)[source]
+

Bases: MplCanvas

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.agents.viewer.output_viewer.DetectionScoreMap(lidar_range=None, s=4, **kwargs)[source]
+

Bases: MplCanvas

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.agents.viewer.output_viewer.MplCanvas(data_keys, width=5, height=4, dpi=100, title='plot', nrows=1, ncols=1)[source]
+

Bases: FigureCanvasQTAgg

+
+
+update_title(scenario, frame, cav_id)[source]
+
+ +
+ +
+
+class cosense3d.agents.viewer.output_viewer.OutputViewer(plots, parent=None)[source]
+

Bases: QWidget

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.agents.viewer.output_viewer.SparseDetectionCanvas(lidar_range=None, topk_ctr=0, **kwargs)[source]
+

Bases: MplCanvas

+
+
+refresh(data, **kwargs)[source]
+
+ +
+ +
+
+

cosense3d.agents.viewer.utils module

+
+
+cosense3d.agents.viewer.utils.circular_mask(arr_length, center, radius)[source]
+
+ +
+
+cosense3d.agents.viewer.utils.depth_min(depths, center, r=10) float[source]
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.agents.viewer.items.html b/docs/_build/html/cosense3d.agents.viewer.items.html new file mode 100644 index 00000000..903d63de --- /dev/null +++ b/docs/_build/html/cosense3d.agents.viewer.items.html @@ -0,0 +1,219 @@ + + + + + + + cosense3d.agents.viewer.items package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.agents.viewer.items package

+
+

Submodules

+
+
+

cosense3d.agents.viewer.items.graph_items module

+
+
+class cosense3d.agents.viewer.items.graph_items.LineBoxItem(box, status='inactive', show_direction=False, last_pose=None, line_width=1.0)[source]
+

Bases: GLLinePlotItem

+
+
+activate()[source]
+
+ +
+
+color(status)[source]
+
+ +
+
+deactivate()[source]
+
+ +
+
+highlight()[source]
+
+ +
+
+id_ptr = 0
+
+ +
+
+ids = {}
+
+ +
+
+property isActive
+
+ +
+
+to_center()[source]
+

Convert box to center format

+
+ +
+ +
+
+class cosense3d.agents.viewer.items.graph_items.LineItem(line, parent=None)[source]
+

Bases: QGraphicsLineItem

+
+
+hoverEvent(event)[source]
+
+ +
+ +
+
+class cosense3d.agents.viewer.items.graph_items.MeshBoxItem(size=(1, 1, 1), color=(0.0, 1.0, 0.0, 0.25))[source]
+

Bases: GLMeshItem

+
+ +
+
+class cosense3d.agents.viewer.items.graph_items.RectangleItem(rect)[source]
+

Bases: QGraphicsRectItem

+
+
+hoverEvent(event)[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.dataset.html b/docs/_build/html/cosense3d.dataset.html new file mode 100644 index 00000000..7c15a74b --- /dev/null +++ b/docs/_build/html/cosense3d.dataset.html @@ -0,0 +1,414 @@ + + + + + + + cosense3d.dataset package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.dataset package

+
+

Subpackages

+
+ +
+
+
+

Submodules

+
+
+

cosense3d.dataset.const module

+
+
+

cosense3d.dataset.cosense_dataset module

+
+
+class cosense3d.dataset.cosense_dataset.CosenseDataset(cfgs, mode)[source]
+

Bases: Dataset

+
+
+LABEL_COLORS = {}
+
+ +
+
+VALID_CLS = []
+
+ +
+
+static collate_batch(batch_list)[source]
+
+ +
+
+get_valid_agents(sample_info: dict, prev_agents: List | None = None) → List[source]
+
+
Return prev_agents if given; otherwise select the given number of agents within the communication range, including the ego agent.

+
+
+
+

Parameters

+

sample_info: meta info of the sample.
prev_agents: list of the agent ids loaded last time.

+
+
+

Returns

+

agents_ids: list of valid agents for the current sample

+
+
+ +
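For illustration, the selection logic described above can be sketched as follows; this is a hypothetical helper with simplified inputs (agent positions and a fixed communication range), not the dataset's actual implementation:

```python
import numpy as np

def select_valid_agents(agent_positions: dict, ego_id, com_range: float, num_agents: int) -> list:
    """Pick up to `num_agents` agents within `com_range` of the ego agent; ego is always included."""
    ego_pos = np.asarray(agent_positions[ego_id], dtype=float)
    dists = {aid: np.linalg.norm(np.asarray(pos, dtype=float) - ego_pos)
             for aid, pos in agent_positions.items()}
    # sort by distance so the closest cooperative agents are preferred
    in_range = [aid for aid, d in sorted(dists.items(), key=lambda kv: kv[1]) if d <= com_range]
    selected = [ego_id] + [aid for aid in in_range if aid != ego_id]
    return selected[:num_agents]
```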
+
+init_dataset()[source]
+

Load all necessary meta information

+
+ +
+
+load_frame_data(item: int, prev_agents: List | None = None, prev_item: int | None = None, omit_gt: bool | None = False, loc_err: list | None = None) → dict[source]
+

Load all data and annotations from one frame to standard CoSense format.

+
+
Parameters:
+
    +
  • item – sample index.

  • +
  • prev_agents – only load data of the previous agents if given; this is used for temporal data loading.

  • +
  • prev_item – the index of the previous loaded sample.

  • +
  • omit_gt – whether to omit loading the ground truth annotations.

  • +
  • loc_err – localization error.

  • +
+
+
Returns:
+

data_dict

+
+
+
+ +
+
+load_meta()[source]
+

Load meta data from CoSense json files

+
+ +
+
+load_sample_info(item: int, prev_agents: List | None = None, prev_item: int | None = None) → dict[source]
+

Load meta info of the `item`’th sample.

+
+
Parameters:
+
    +
  • item – sample index.

  • +
  • prev_agents – only load data of the previous agents if given; this is used for temporal data loading.

  • +
  • prev_item – the index of the previous loaded sample.

  • +
+
+
Returns:
+

batch_dict: dict(scenario: str, frame: str, sample_info: dict)

+
+
+
+ +
+
+parse_samples()[source]
+

List all frame-wise instances

+
+ +
+ +
+
+

cosense3d.dataset.temporal_cosense_dataset module

+
+
+class cosense3d.dataset.temporal_cosense_dataset.TemporalCosenseDataset(cfgs, mode)[source]
+

Bases: CosenseDataset

+

Sequential Cosense data loader.

+
+ +
+
+

Module contents

+
+
+cosense3d.dataset.get_dataloader(cfgs, mode='train', distributed=False)[source]
+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.dataset.pipeline.html b/docs/_build/html/cosense3d.dataset.pipeline.html new file mode 100644 index 00000000..207e7a43 --- /dev/null +++ b/docs/_build/html/cosense3d.dataset.pipeline.html @@ -0,0 +1,239 @@ + + + + + + + cosense3d.dataset.pipeline package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.dataset.pipeline package

+
+

Submodules

+
+
+

cosense3d.dataset.pipeline.loading module

+
+
+class cosense3d.dataset.pipeline.loading.LoadAnnotations(load2d=False, load_cam_param=False, load3d_local=False, load3d_global=False, load_global_time=False, load3d_pred=False, min_num_pts=0, with_velocity=False, class_agnostic_3d=True, time_offset=0, loc_err=None)[source]
+

Bases: object

+
+
+get_lidar2img_transform(lidar2cam, intrinsic)[source]
+
+ +
+ +
+
+class cosense3d.dataset.pipeline.loading.LoadCarlaRoadlineMaps(ego_only=True, range=75)[source]
+

Bases: object

+
+
+load_single(path, ai, data_dict)[source]
+
+ +
+ +
+
+class cosense3d.dataset.pipeline.loading.LoadLidarPoints(coop_mode=True, load_attributes=['xyz', 'intensity'], time_offset=0)[source]
+

Bases: object

+
+
+read_pcd(pts_filename)[source]
+
+ +
+ +
+
+class cosense3d.dataset.pipeline.loading.LoadMultiViewImg(bgr2rgb=False, to_float32=False, max_num_img=None, img_filter_keys=None)[source]
+

Bases: object

+
+ +
+
+class cosense3d.dataset.pipeline.loading.LoadOPV2VBevMaps(keys=None, use_global_map=True, ego_only=True, range=75)[source]
+

Bases: object

+
+
+crop_map_for_pose(data_dict, ai)[source]
+
+ +
+
+load_single(path, ai, data_dict)[source]
+
+ +
+ +
+
+class cosense3d.dataset.pipeline.loading.LoadSparseBevTargetPoints(num_points=3000, ego_only=False)[source]
+

Bases: object

+
+
+generate_sparse_bev_pts(pcd)[source]
+
+ +
+ +
+
+

cosense3d.dataset.pipeline.transform module

+
+
+class cosense3d.dataset.pipeline.transform.ResizeCropFlipRotImage(data_aug_conf=None, with_2d=True, filter_invisible=True, training=True)[source]
+

Bases: object

+

Augment images with random resize, crop, flip and rotation. Modified from StreamPETR.

+
+ +
+
+class cosense3d.dataset.pipeline.transform.ResizeImage(img_size)[source]
+

Bases: object

+

Resize images.

+
+ +
+
+

Module contents

+
+
+class cosense3d.dataset.pipeline.Pipeline(cfgs)[source]
+

Bases: object

+

Composes several processing modules together. Take care that these functions modify the input data directly.

+
+
+build_process(k, v)[source]
+
+ +
+ +
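Conceptually the pipeline is just an in-place chain of processing callables; the sketch below illustrates the idea and is not the actual implementation:

```python
class SimplePipeline:
    """Apply a sequence of processing callables to a data dict, in order and in place."""

    def __init__(self, processes):
        self.processes = processes  # each callable mutates the shared data dict

    def __call__(self, data_dict: dict) -> dict:
        for process in self.processes:
            process(data_dict)      # note: modules modify the input data directly
        return data_dict
```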
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.dataset.toolkit.html b/docs/_build/html/cosense3d.dataset.toolkit.html new file mode 100644 index 00000000..246b7774 --- /dev/null +++ b/docs/_build/html/cosense3d.dataset.toolkit.html @@ -0,0 +1,685 @@ + + + + + + + cosense3d.dataset.toolkit package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.dataset.toolkit package

+
+

Submodules

+
+
+

cosense3d.dataset.toolkit.cosense module

+
+
+class cosense3d.dataset.toolkit.cosense.CoSenseDataConverter(data_path, meta_path, mode='all')[source]
+

Bases: object

+
+
+OBJ_ID2NAME = {0: 'vehicle.car', 1: 'vehicle.van', 2: 'vehicle.truck', 3: 'vehicle.bus', 4: 'vehicle.tram', 5: 'vehicle.motorcycle', 6: 'vehicle.cyclist', 7: 'vehicle.scooter', 8: 'vehicle.other', 9: 'human.pedestrian', 10: 'human.wheelchair', 11: 'human.sitting', 12: 'static.trafficcone', 13: 'static.barrowlist', 14: 'vehicle.tricyclist', 15: 'unknown'}
+
+ +
+
+OBJ_LIST = ['vehicle.car', 'vehicle.van', 'vehicle.truck', 'vehicle.bus', 'vehicle.tram', 'vehicle.motorcycle', 'vehicle.cyclist', 'vehicle.scooter', 'vehicle.other', 'human.pedestrian', 'human.wheelchair', 'human.sitting', 'static.trafficcone', 'static.barrowlist', 'vehicle.tricyclist', 'unknown']
+
+ +
+
+OBJ_NAME2ID = {'human.pedestrian': 9, 'human.sitting': 11, 'human.wheelchair': 10, 'static.barrowlist': 13, 'static.trafficcone': 12, 'unknown': 15, 'vehicle.bus': 3, 'vehicle.car': 0, 'vehicle.cyclist': 6, 'vehicle.motorcycle': 5, 'vehicle.other': 8, 'vehicle.scooter': 7, 'vehicle.tram': 4, 'vehicle.tricyclist': 14, 'vehicle.truck': 2, 'vehicle.van': 1}
+
+ +
+
+static add_cam_to_fdict(fdict, agent_id, cam_id, filenames, intrinsic, extrinsic, **kwargs)[source]
+
+ +
+
+static cal_vbbx_mean_dim(meta)[source]
+

Calculate mean dimensions of four-wheel vehicles

+
+ +
+
+static draw_sample_distributions(meta_path)[source]
+

Draw distribution of the number of observation points for each sample category.

+
+
Parameters:
+

meta_path – path containing pickle files of object samples

+
+
Returns:
+

+
+
+
+ +
+
+static fdict_template()[source]
+
+ +
+
+static global_boxes_to_local(meta_dict, data_path, meta_path)[source]
+
+ +
+
+static load_meta(meta_path, mode)[source]
+
+ +
+
+obj_from_sustech(label_file)[source]
+
+ +
+
+obj_to_opv2v(bbxs, pose, out_file, timestamp=None)[source]
+
+ +
+
+obj_to_sustech(cosense_objs, sustech_file)[source]
+
+ +
+
+static parse_global_bbox_velo(meta_dict, data_path, meta_path)[source]
+
+ +
+
+static remove_lidar_info(fdict, agent_id)[source]
+
+ +
+
+static supervison_full_to_sparse(meta_dict, out_path, lidar_range=None, det_r=None, num_box_per_frame=None, num_box_total=None, label_ratio=None)[source]
+
+ +
+
+to_kitti(out_dir=None)[source]
+
+ +
+
+to_opv2v(out_dir=None)[source]
+
+ +
+
+to_sustech(out_dir=None)[source]
+
+ +
+
+static update_agent(fdict, agent_id, agent_type=None, agent_pose=None, agent_time=None, **kwargs)[source]
+
+ +
+
+static update_agent_gt_boxes(fdict, agent_id, gt_boxes)[source]
+
+ +
+
+static update_agent_lidar(fdict, agent_id, lidar_id, lidar_pose=None, lidar_time=None, lidar_file=None)[source]
+
+ +
+
+static update_frame_bbx(fdict, bbx)[source]
+
+ +
+
+update_from_sustech(sustech_path)[source]
+
+ +
+ +
+
+

cosense3d.dataset.toolkit.dairv2x module

+
+
+cosense3d.dataset.toolkit.dairv2x.calib_to_tf_matrix(calib_file)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.convert_v2x_c(root_dir, meta_out_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.convert_v2x_seq(root_dir, meta_out_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.load_info_to_dict(info_file)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.load_label(label_file)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.optimize_poses(meta_path)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.optimize_trajectory(seq, sdict, root_dir, out_meta_dir, ego_agent_id, idx, sub_idx)[source]
+

This function iterates over scenarios. For each scenario it performs the following steps:
1. Register the point clouds of each agent sequentially to obtain an accurate trajectory for that agent. Before registration, points belonging to labeled objects with high dynamics are removed; after registering each sequence pair, the merged point cloud is down-sampled to save space.
2. Match the registered point clouds of different agents to obtain optimized relative poses.
3. Recover the relative poses to world poses.

+
+

Parameters

+

meta_path: directory of meta files
root_dir: root dir of data

+
+
+

Returns

+

meta: meta information with updated poses of agents

+
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.parse_global_bboxes(sdict, frames, root_dir)[source]
+

Step three

+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.parse_static_pcd(adict, root_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.parse_timestamped_boxes(adict, root_dir, four_wheel_only=True)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.register_pcds_to_blocks(seq, sdict, root_dir, idx=0)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.register_sequence(sdict, frames, root_dir, ignore_ids=[], vis=False)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.register_step_one(mf)[source]
+

Find the vehicle that is closest to the infrastructure

+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.register_step_two(start_frame, mf, meta_out_dir)[source]
+

Register point clouds

+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.remove_ego_boxes(meta_in)[source]
+
+ +
+
+cosense3d.dataset.toolkit.dairv2x.select_sub_scenes(meta_in, root_dir, meta_out, split)[source]
+
+ +
+
+

cosense3d.dataset.toolkit.opv2v module

+
+
+cosense3d.dataset.toolkit.opv2v.boxes_3d_to_2d(boxes3d, num_pts, lidar2cam, I, img_size)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.convert_bev_semantic_map_to_road_height_map(map_dir, map_bounds_file, scenario_town_map_file, meta_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.corner_to_center(corner3d, order='lwh')[source]
+

Convert 8 corners to x, y, z, dx, dy, dz, yaw.

+
+

Parameters

+
+
corner3d : np.ndarray

(N, 8, 3)

+
+
order : str

‘lwh’ or ‘hwl’

+
+
+
+
+

Returns

+
+
box3d : np.ndarray

(N, 7)

+
+
+
+
+ +
+
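A minimal numpy sketch of the corner-to-center conversion is given below. It assumes a corner ordering in which corners 0–3 form the bottom face, corner 1 follows corner 0 along the length axis, and corner 4 sits above corner 0; the toolkit's actual ordering may differ:

```python
import numpy as np

def corners_to_center(corner3d: np.ndarray) -> np.ndarray:
    """corner3d: (N, 8, 3) box corners -> (N, 7) [x, y, z, dx, dy, dz, yaw]."""
    center = corner3d.mean(axis=1)                                # (N, 3)
    dx = np.linalg.norm(corner3d[:, 0] - corner3d[:, 1], axis=1)  # length
    dy = np.linalg.norm(corner3d[:, 1] - corner3d[:, 2], axis=1)  # width
    dz = np.linalg.norm(corner3d[:, 4] - corner3d[:, 0], axis=1)  # height
    edge = corner3d[:, 0] - corner3d[:, 1]                        # length-edge direction
    yaw = np.arctan2(edge[:, 1], edge[:, 0])
    return np.concatenate([center, np.stack([dx, dy, dz, yaw], axis=1)], axis=1)
```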
+cosense3d.dataset.toolkit.opv2v.create_bbx(extent)[source]
+

Create bounding box with 8 corners under obstacle vehicle reference.

+
+

Parameters

+
+
extent : list

Width, height, length of the bbx.

+
+
+
+
+

Returns

+
+
bbx : np.array

The bounding box with 8 corners, shape: (8, 3)

+
+
+
+
+ +
+
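For reference, a hedged sketch of building such a corner array from half extents; the corner ordering and the assumption that the extents are half sizes are illustrative only:

```python
import numpy as np

def make_corners(extent) -> np.ndarray:
    """extent: [half_length, half_width, half_height] -> (8, 3) corners in the object frame."""
    l, w, h = extent
    # bottom face (z = -h) first, then top face (z = +h); ordering is illustrative only
    x = np.array([ l,  l, -l, -l,  l,  l, -l, -l], dtype=float)
    y = np.array([ w, -w, -w,  w,  w, -w, -w,  w], dtype=float)
    z = np.array([-h, -h, -h, -h,  h,  h,  h,  h], dtype=float)
    return np.stack([x, y, z], axis=1)
```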
+cosense3d.dataset.toolkit.opv2v.generate_bevmaps(data_dir, meta_path)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.generate_roadline(map_dir, map_bounds_file)[source]
+

Convert global BEV semantic maps to 2d road line points.

+
+
Parameters:
+
    +
  • map_dir – directory for images of BEV semantic maps

  • +
  • map_bounds_file – json file that describes the world coordinates of the BEV map origin (image[0, 0])

  • +
+
+
Returns:
+

Nx2 array, 2d world coordinates of road line points in meters.

+
+
+
+ +
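The pixel-to-world mapping implied here can be sketched as below; the resolution value and the row/column-to-x/y convention are assumptions, not the toolkit's actual settings:

```python
import numpy as np

def map_pixels_to_world(pixel_rc: np.ndarray, origin_xy, resolution: float = 0.2) -> np.ndarray:
    """pixel_rc: (N, 2) row/col indices of road-line pixels in the BEV map.
    origin_xy: world coordinates of image[0, 0]; resolution: meters per pixel (assumed).
    Returns an (N, 2) array of world coordinates in meters."""
    origin = np.asarray(origin_xy, dtype=float)[None, :]
    # assume columns grow along world x and rows along world y
    return origin + pixel_rc[:, ::-1].astype(float) * resolution
```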
+
+cosense3d.dataset.toolkit.opv2v.opv2v_pose_to_cosense(pose)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.opv2v_to_cosense(path_in, path_out, isSim=True, correct_transf=False, pcd_ext='pcd')[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.pose_to_transformation(pose)[source]
+
+
Args:

pose: list, [x, y, z, roll, pitch, yaw]

+
+
Returns:

transformation: np.ndarray, (4, 4)

+
+
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.project_points(points, lidar2cam, I)[source]
+

Project 3d points to image planes

+
+ +
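The projection itself is the standard pinhole pipeline; a minimal sketch using a 4x4 lidar-to-camera transform and a 3x3 intrinsic matrix I (clipping of points behind the camera is omitted here):

```python
import numpy as np

def project_to_image(points: np.ndarray, lidar2cam: np.ndarray, I: np.ndarray) -> np.ndarray:
    """points: (N, 3) lidar-frame points -> (N, 2) pixel coordinates."""
    pts_h = np.concatenate([points, np.ones((len(points), 1))], axis=1)  # homogeneous (N, 4)
    pts_cam = (lidar2cam @ pts_h.T)[:3]                                  # camera frame (3, N)
    uvw = I @ pts_cam                                                    # (3, N)
    return (uvw[:2] / np.clip(uvw[2:], 1e-6, None)).T                    # perspective divide
```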
+
+cosense3d.dataset.toolkit.opv2v.project_world_objects(object_dict, output_dict, lidar_pose, order)[source]
+

Project the objects under world coordinates into another coordinate frame based on the provided extrinsic.

+
+

Parameters

+
+
object_dict : dict

The dictionary contains all objects surrounding a certain cav.

+
+
output_dict : dict

key: object id, value: object bbx (xyzlwhyaw).

+
+
lidar_pose : list

(6, ), lidar pose under world coordinate, [x, y, z, roll, yaw, pitch].

+
+
order : str

‘lwh’ or ‘hwl’

+
+
+
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.update_2d_bboxes(fdict, cav_id, lidar_pose, data_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.update_cam_params(opv2v_params, cosense_fdict, agent_id, scenario, frame)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.update_global_bboxes_num_pts(data_dir, meta_path)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.update_local_boxes3d(fdict, objects_dict, ref_pose, order, data_dir, cav_id)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.x1_to_x2(x1, x2)[source]
+

Transformation matrix from x1 to x2.

+
+

Parameters

+
+
x1 : list or np.ndarray

The pose of x1 under world coordinates, or the transformation matrix x1->world

+
+
x2 : list or np.ndarray
+
The pose of x2 under world coordinates, or the transformation matrix x2->world

+
+
+
+
+
+
+

Returns

+
+
transformation_matrix : np.ndarray

The transformation matrix.

+
+
+
+
+ +
+
+cosense3d.dataset.toolkit.opv2v.x_to_world(pose: list) → ndarray[source]
+

The transformation matrix from the x-coordinate system to the CARLA world system.

+
+
Parameters:
+

pose – [x, y, z, roll, yaw, pitch]

+
+
Returns:
+

The transformation matrix.

+
+
+
+ +
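For orientation, a pose-to-matrix conversion in this spirit might look as follows. The angle unit (degrees) and the z-y-x rotation composition are assumptions in this sketch; the toolkit follows the CARLA convention, which may differ in detail:

```python
import numpy as np

def pose_to_matrix(pose) -> np.ndarray:
    """pose: [x, y, z, roll, yaw, pitch] (angles assumed in degrees) -> (4, 4) transform."""
    x, y, z, roll, yaw, pitch = pose
    cr, sr = np.cos(np.radians(roll)), np.sin(np.radians(roll))
    cy, sy = np.cos(np.radians(yaw)), np.sin(np.radians(yaw))
    cp, sp = np.cos(np.radians(pitch)), np.sin(np.radians(pitch))
    Rz = np.array([[cy, -sy, 0.], [sy, cy, 0.], [0., 0., 1.]])   # yaw about z
    Ry = np.array([[cp, 0., sp], [0., 1., 0.], [-sp, 0., cp]])   # pitch about y
    Rx = np.array([[1., 0., 0.], [0., cr, -sr], [0., sr, cr]])   # roll about x
    T = np.eye(4)
    T[:3, :3] = Rz @ Ry @ Rx      # composition order is an assumption
    T[:3, 3] = [x, y, z]
    return T
```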
+
+

cosense3d.dataset.toolkit.opv2v_t module

+
+
+cosense3d.dataset.toolkit.opv2v_t.gen_time_offsets(data_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.generate_roadline_reference_points(root_dir, meta_file)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.get_box_velo(box, speeds, frame)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.get_local_boxes3d(objects_dict, ref_pose, order)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.get_velos(boxes, speeds, frame)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.load_frame_data(scene_dir, cavs, frame)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.load_vehicles_gframe(params)[source]
+

Load vehicles in global coordinate system.

+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.opv2vt_to_cosense(data_dir, split, data_out_dir, meta_out_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.pad_box_result(res, out_len)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.parse_speed_from_yamls(scene_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.parse_sub_frame(f)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.read_frame_plys_boxes(path, frame, prev_frame=None, time_offset=0, parse_boxes=True)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.read_ply(filename, properties=None)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.read_ply_to_dict(f)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.read_sub_frame(f)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.transform_boxes_global_to_ref(boxes, ref_pose)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.update_bev_map(root_dir, meta_in, meta_out, split)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.update_global_boxes(root_dir, meta_in, meta_out, split)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.update_velo(scenario_meta_file)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.vis_cosense_scenario(scenario_meta_file, data_dir)[source]
+
+ +
+
+cosense3d.dataset.toolkit.opv2v_t.vis_frame_data()[source]
+
+ +
+
+

Module contents

+
+
+cosense3d.dataset.toolkit.callback_registrations(source, target, source_points, target_points)[source]
+

Callback function for point picking. Registers two point clouds using selected corresponding points.

+
+ +
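A minimal sketch of estimating a rigid transform from manually picked correspondences, assuming Open3D point clouds and its point-to-point estimator (illustrative, not the toolkit's exact code):

```python
import numpy as np
import open3d as o3d

def transform_from_picked_points(source, target, source_idx, target_idx) -> np.ndarray:
    """source/target: open3d.geometry.PointCloud; source_idx/target_idx: picked point indices."""
    corres = o3d.utility.Vector2iVector(
        np.stack([np.asarray(source_idx), np.asarray(target_idx)], axis=1))
    estimator = o3d.pipelines.registration.TransformationEstimationPointToPoint()
    # returns a 4x4 matrix mapping the source cloud into the target frame
    return estimator.compute_transformation(source, target, corres)
```

The resulting transform can then serve as the initial guess for ICP refinement (cf. register_pcds below).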
+
+cosense3d.dataset.toolkit.click_register(source, target)[source]
+
+ +
+
+cosense3d.dataset.toolkit.register_pcds(source_cloud, target_cloud, initial_transf, thr=0.2, visualize=False, title='PCD')[source]
+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.html b/docs/_build/html/cosense3d.html new file mode 100644 index 00000000..ce1aadf2 --- /dev/null +++ b/docs/_build/html/cosense3d.html @@ -0,0 +1,581 @@ + + + + + + + cosense3d package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.backbone2d.html b/docs/_build/html/cosense3d.modules.backbone2d.html new file mode 100644 index 00000000..9fb3f66e --- /dev/null +++ b/docs/_build/html/cosense3d.modules.backbone2d.html @@ -0,0 +1,177 @@ + + + + + + + cosense3d.modules.backbone2d package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules.backbone2d package

+
+

Submodules

+
+
+

cosense3d.modules.backbone2d.resnet_encoder module

+
+
+class cosense3d.modules.backbone2d.resnet_encoder.ResnetEncoder(num_layers, feat_indices, out_index, img_size, neck=None, **kwargs)[source]
+

Bases: BaseModule

+

Resnet family to encode image.

+
+
+format_output(output, num_imgs)[source]
+
+ +
+
+forward(input_images, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.backbone3d.html b/docs/_build/html/cosense3d.modules.backbone3d.html new file mode 100644 index 00000000..54e384f9 --- /dev/null +++ b/docs/_build/html/cosense3d.modules.backbone3d.html @@ -0,0 +1,330 @@ + + + + + + + cosense3d.modules.backbone3d package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules.backbone3d package

+
+

Submodules

+
+
+

cosense3d.modules.backbone3d.mink_unet module

+
+
+class cosense3d.modules.backbone3d.mink_unet.MinkUnet(data_info, stride, in_dim, d=3, kernel_size_layer1=3, enc_dim=32, cache_strides=None, floor_height=0, height_compression=None, compression_kernel_size_xy=1, to_dense=False, dist=False, **kwargs)[source]
+

Bases: BaseModule

+
+
+QMODE = 1
+
+ +
+
+format_output(res, N)[source]
+
+ +
+
+forward(points: list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+forward_height_compression(res)[source]
+
+ +
+
+forward_unet(points, **kwargs)[source]
+
+ +
+
+grid_size(stride)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+stensor_to_dense(stensor)[source]
+
+ +
+
+to_gpu(gpu_id)[source]
+
+ +
+
+training: bool
+
+ +
+
+valid_coords(stensor)[source]
+
+ +
+ +
+
+

cosense3d.modules.backbone3d.pillar_bev module

+
+
+class cosense3d.modules.backbone3d.pillar_bev.PillarBEV(in_channels, layer_nums, layer_strides, downsample_channels, upsample_channels, upsample_strides, voxel_generator, pillar_encoder, bev_shrinker=None, bev_compressor=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(res, N)[source]
+
+ +
+
+forward(points: list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+to_dense_bev(coor, feat, N)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.backbone3d.spconv module

+
+
+class cosense3d.modules.backbone3d.spconv.Spconv(in_channels, out_channels, voxel_generator, voxel_encoder, bev_neck=None, bev_compressor=None, cache_coords=True, cache_strides=[1, 2, 4, 8], **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(out_dict, B)[source]
+
+ +
+
+forward(points: list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+to_dense(stensor)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.backbone3d.spconv.post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, conv_type='subm', norm_fn=None)[source]
+
+ +
+
+

cosense3d.modules.backbone3d.voxelnet module

+
+
+class cosense3d.modules.backbone3d.voxelnet.VoxelNet(voxel_generator, voxel_encoder, cml, neck=None, bev_compressor=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(points: list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+to_dense(coor, feat, N, filter_range=False)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.fusion.html b/docs/_build/html/cosense3d.modules.fusion.html new file mode 100644 index 00000000..5835b8b1 --- /dev/null +++ b/docs/_build/html/cosense3d.modules.fusion.html @@ -0,0 +1,907 @@ + + + + + + + cosense3d.modules.fusion package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules.fusion package

+
+

Submodules

+
+
+

cosense3d.modules.fusion.attn_fusion module

+
+
+class cosense3d.modules.fusion.attn_fusion.DenseAttentionFusion(feature_dim, neck=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(ego_feats, coop_feats=None, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.attn_fusion.SparseAttentionFusion(stride, in_channels, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(output)[source]
+
+ +
+
+forward(ego_feats, coop_feats=None, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+fuse_feature_at_stride(ego_feat, coop_feat, stride, fuse_key)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.fusion.box_fusion module

+
+
+class cosense3d.modules.fusion.box_fusion.BoxFusion(lidar_range, **kwargs)[source]
+

Bases: BaseModule

+
+
+cluster_fusion(clusters, scores, labels, times, global_time)[source]
+

Merge boxes in each cluster with scores as weights for merging

+
+ +
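As a rough sketch of score-weighted merging within a single cluster (boxes assumed as [x, y, z, dx, dy, dz, yaw]; the heading is averaged via its sine and cosine to avoid wrap-around; the actual fusion additionally handles labels and timestamps):

```python
import torch

def merge_cluster(boxes: torch.Tensor, scores: torch.Tensor) -> torch.Tensor:
    """boxes: (M, 7) boxes of one cluster, scores: (M,) -> a single merged (7,) box."""
    w = scores / scores.sum()
    center_dims = (boxes[:, :6] * w[:, None]).sum(dim=0)   # weighted centers and sizes
    sin_yaw = (torch.sin(boxes[:, 6]) * w).sum()
    cos_yaw = (torch.cos(boxes[:, 6]) * w).sum()
    yaw = torch.atan2(sin_yaw, cos_yaw)                     # circular weighted mean of headings
    return torch.cat([center_dims, yaw.view(1)])
```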
+
+clustering(boxes, scores, labels, times, global_time)[source]
+
+ +
+
+forward(ego_preds, coop_preds, memory, global_times, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+merge_sync_boxes(c, s)[source]
+
+ +
+
+temporal_cluster_fusion(clusters, scores, labels, times, global_time)[source]
+

Merge boxes in each cluster with scores as weights for merging

+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.fusion.box_fusion.limit_period(val, offset=0.5, period=6.283185306)[source]
+
+ +
+
+

cosense3d.modules.fusion.fax module

+

This module implements swap fusion applications

+
+
+class cosense3d.modules.fusion.fax.Attention(dim, dim_head=32, dropout=0.0, agent_size=6, window_size=7)[source]
+

Bases: Module

+

Unit Attention class. Todo: mask is not added yet.

+
+

Parameters

+
+
dim: int

Input feature dimension.

+
+
dim_head: int

The head dimension.

+
+
dropout: float

Dropout rate

+
+
agent_size: int

The agent can be different views, timestamps or vehicles.

+
+
+
+
+forward(x, mask=None)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+
+ +
+
+class cosense3d.modules.fusion.fax.FeedForward(dim, hidden_dim, dropout=0.0)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.fax.PreNormResidual(dim, fn)[source]
+

Bases: Module

+
+
+forward(x, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.fax.SwapFusionBlock(input_dim, mlp_dim, dim_head, window_size, agent_size, drop_out)[source]
+

Bases: Module

+

Swap Fusion Block contains window attention and grid attention.

+
+
+forward(x, mask=None)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.fax.SwapFusionBlockMask(input_dim, mlp_dim, dim_head, window_size, agent_size, drop_out)[source]
+

Bases: Module

+

Swap Fusion Block contains window attention and grid attention with +mask enabled for multi-vehicle cooperation.

+
+
+forward(x, mask)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.fax.SwapFusionEncoder(input_dim=128, mlp_dim=256, agent_size=5, window_size=8, dim_head=32, drop_out=0.1, depth=3, mask=False, decoder=None, **kwargs)[source]
+

Bases: BaseModule

+

Data rearrange -> swap block -> mlp_head

+
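The "data rearrange" step alternates between window attention and grid attention by re-partitioning the BEV map, in the style of MaxViT/CoBEVT swap attention. A hedged sketch of the two partitions using einops (tensor layout and sizes are illustrative assumptions, not the exact implementation):

    import torch
    from einops import rearrange

    x = torch.randn(1, 2, 128, 64, 64)   # assumed layout: (batch, agents, channels, H, W)
    w = 8                                 # window size
    # window attention: split H x W into non-overlapping w x w local windows
    windows = rearrange(x, 'b m d (x w1) (y w2) -> b m x y w1 w2 d', w1=w, w2=w)
    # grid attention: swap the roles of window index and intra-window position
    grid = rearrange(x, 'b m d (w1 x) (w2 y) -> b m x y w1 w2 d', w1=w, w2=w)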
+
+forward(ego_feat, coop_cpm, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.fusion.keypoints module

+
+
+class cosense3d.modules.fusion.keypoints.KeypointsFusion(lidar_range, train_from_epoch=0, **kwargs)[source]
+

Bases: BaseModule

+
+
+cluster_fusion(clusters, scores)[source]
+

Merge boxes in each cluster with scores as weights for merging

+
+ +
+
+clustering(boxes, scores)[source]
+
+ +
+
+forward(ego_feats, coop_feats, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.fusion.keypoints.limit_period(val, offset=0.5, period=6.283185306)[source]
+
+ +
+
+

cosense3d.modules.fusion.maxout_fusion module

+
+
+class cosense3d.modules.fusion.maxout_fusion.BEVMaxoutFusion(**kwargs)[source]
+

Bases: BaseModule

+
+
+forward(ego_feats, coop_feats, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.maxout_fusion.SparseBEVMaxoutFusion(pc_range, resolution, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(output)[source]
+
+ +
+
+forward(ego_feats, coop_feats, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.fusion.naive_fusion module

+
+
+class cosense3d.modules.fusion.naive_fusion.NaiveFusion(stride, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(output)[source]
+
+ +
+
+forward(ego_feats, coop_feats=None, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+fuse_feature_at_stride(ego_feat, coop_feat, stride, fuse_key)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.fusion.spatial_query_fusion module

+
+
+class cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryAlignFusionRL(in_channels, pc_range, resolution, num_pose_feat=64, **kwargs)[source]
+

Bases: BaseModule

+
+
+align_coordinates(ego_bctr, ego_rl, ego_rl_pred, ego_pose, cpfeat)[source]
+
+ +
+
+format_output(output, **kwargs)[source]
+
+ +
+
+forward(det_local, roadline, roadline_preds, ego_queries, ego_pose_corrected, ego_poses, ego_poses_aug, cpms, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryFusion(in_channels, pc_range, resolution, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(output)[source]
+
+ +
+
+forward(ego_feats, coop_feats, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.fusion.temporal_fusion module

+
+
+class cosense3d.modules.fusion.temporal_fusion.LocalNaiveFusion(in_channels, feature_stride, lidar_range, pos_dim=3, topk_ref_pts=1024, ref_pts_stride=2, transformer_itrs=1, global_ref_time=0, **kwargs)[source]
+

Bases: BaseModule

+

A naive replacement of LocalTemporalFusion that only selects the top-k points for the subsequent spatial fusion

+
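A hedged sketch of the top-k selection idea behind gather_topk: keep only the k BEV cells with the highest RoI scores and carry their features forward (shapes and the score source are illustrative assumptions):

    import torch

    scores = torch.rand(4096)        # per-cell roi scores
    feats = torch.randn(4096, 256)   # per-cell BEV features
    topk_scores, inds = scores.topk(1024)
    topk_feats = feats[inds]         # only these cells enter the later spatial fusion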
+
+forward(local_roi, global_roi, bev_feat, mem_dict, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+gather_topk(rois, bev_feats, stride, topk)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion(in_channels, transformer, feature_stride, lidar_range, pos_dim=3, num_pose_feat=128, topk_ref_pts=1024, topk_feat=512, num_propagated=256, memory_len=1024, ref_pts_stride=2, transformer_itrs=1, global_ref_time=0, norm_fusion=False, **kwargs)[source]
+

Bases: BaseModule

+

Modified from TemporalFusion to standardize input and output keys

+
+
+embed_pos(pos, dim=None)[source]
+
+ +
+
+forward(local_roi, global_roi, bev_feat, mem_dict, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+gather_topk(rois, bev_feats, stride, topk)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+temporal_alignment(query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV1(in_channels, transformer, feature_stride, lidar_range, pos_dim=3, num_pose_feat=128, topk_ref_pts=1024, topk_feat=512, num_propagated=256, memory_len=1024, ref_pts_stride=2, transformer_itrs=1, global_ref_time=0, norm_fusion=False, **kwargs)[source]
+

Bases: LocalTemporalFusion

+
+
+forward(rois, bev_feat, mem_dict, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV2(in_channels, transformer, feature_stride, lidar_range, pos_dim=3, num_pose_feat=128, topk_ref_pts=1024, topk_feat=512, num_propagated=256, memory_len=1024, ref_pts_stride=2, transformer_itrs=1, global_ref_time=0, norm_fusion=False, **kwargs)[source]
+

Bases: LocalTemporalFusion

+
+
+forward(local_roi, bev_feat, mem_dict, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3(in_channels, transformer, feature_stride, lidar_range, pos_dim=3, num_pose_feat=128, topk_ref_pts=1024, topk_feat=512, num_propagated=256, memory_len=1024, ref_pts_stride=2, transformer_itrs=1, global_ref_time=0, norm_fusion=False, **kwargs)[source]
+

Bases: BaseModule

+

TemporalFusion with feature flow

+
+
+embed_pos(pos, dim=None)[source]
+
+ +
+
+forward(local_roi, global_roi, bev_feat, mem_dict, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+gather_topk(rois, bev_feats, stride, topk)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+temporal_alignment(query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.temporal_fusion.TemporalFusion(in_channels, transformer, feature_stride, lidar_range, pos_dim=3, num_pose_feat=128, topk_ref_pts=1024, topk_feat=512, num_propagated=256, memory_len=1024, ref_pts_stride=2, transformer_itrs=1, global_ref_time=0, **kwargs)[source]
+

Bases: BaseModule

+
+
+embed_pos(pos, dim=None)[source]
+
+ +
+
+forward(rois, bev_feat, mem_dict, time_scale=None, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+gather_topk(rois, bev_feats, stride, topk)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+temporal_alignment(query_pos, tgt, ref_pts, ref_feat, mem_dict, ref_time=None)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion(in_channels, transformer, feature_stride, lidar_range, pos_dim=3, num_pose_feat=64, topk=2048, num_propagated=256, memory_len=1024, num_query=644, **kwargs)[source]
+

Bases: BaseModule

+
+
+embed_pos(pos, dim=None)[source]
+
+ +
+
+forward(rois, bev_feat, mem_dict, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+gather_topk(rois, bev_feats)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+temporal_alignment(query_pos, tgt, ref_pts, mem_dict)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.heads.html b/docs/_build/html/cosense3d.modules.heads.html new file mode 100644 index 00000000..3fdca0b4 --- /dev/null +++ b/docs/_build/html/cosense3d.modules.heads.html @@ -0,0 +1,999 @@ + + + + + + + cosense3d.modules.heads package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules.heads package

+
+

Submodules

+
+
+

cosense3d.modules.heads.bev module

+
+
+class cosense3d.modules.heads.bev.BEV(data_info, in_dim, stride, target_assigner, loss_cls, num_cls=1, class_names_each_head=None, down_sample_tgt=False, generate_roi_scr=True, **kwargs)[source]
+

Bases: BaseModule

+
+
+down_sample(coor, feat)[source]
+
+ +
+
+format_input(stensor_list)[source]
+
+ +
+
+format_output(output, B=None)[source]
+
+ +
+
+forward(stensor_list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(batch_list, gt_boxes, gt_labels, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.bev.BEVMultiResolution(strides, strides_for_loss, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(stensor_list, *args, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(batch_list, gt_boxes, gt_labels, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.bev.ContiAttnBEV(out_channels, data_info, in_dim, stride, context_decoder, target_assigner, loss_cls, class_names_each_head=None, **kwargs)[source]
+

Bases: ContinuousBEV

+
+
+get_evidence(ref_pts, coor, feat)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.bev.ContiGevBEV(out_channels, data_info, in_dim, stride, context_decoder, target_assigner, loss_cls, class_names_each_head=None, **kwargs)[source]
+

Bases: ContinuousBEV

+
+
+get_evidence(ref_pts, coor, feat)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.bev.ContinuousBEV(out_channels, data_info, in_dim, stride, context_decoder, target_assigner, loss_cls, class_names_each_head=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+down_sample(coor, feat)[source]
+
+ +
+
+format_input(stensor_list)[source]
+
+ +
+
+format_output(output, B=None)[source]
+
+ +
+
+forward(stensor_list, gt_boxes, gt_labels, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_evidence(ref_pts, coor, feat)[source]
+
+ +
+
+loss(batch_list, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+sample_reference_points(centers, gt_boxes, gt_labels)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.bev_dense module

+

Segmentation head for BEV understanding

+
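A minimal sketch of what a dense BEV segmentation head of this kind typically looks like: a light convolutional stack mapping BEV features (B, C, H, W) to per-cell class logits. Channel sizes are illustrative assumptions, not the exact configuration of BevRoIDenseHead or BevSegHead:

    import torch.nn as nn

    seg_head = nn.Sequential(
        nn.Conv2d(128, 128, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(128, 1, kernel_size=1),   # one output channel per segmentation class
    )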
+
+class cosense3d.modules.heads.bev_dense.BevRoIDenseHead(in_dim, stride, num_cls=1, loss_cls=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(input, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(bev_preds, bev_tgt, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.bev_dense.BevSegHead(target, input_dim, output_class, loss_cls, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(x, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(dynamic_bev_preds, dynamic_bev, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.det_anchor_dense module

+
+
+class cosense3d.modules.heads.det_anchor_dense.DetAnchorDense(in_channels, loss_cls, loss_box, num_classes=1, stride=None, target_assigner=None, get_boxes_when_training=False, box_stamper=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+static add_sin_difference(boxes1, boxes2, dim=6)[source]
+
+ +
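add_sin_difference presumably follows the SECOND-style trick for the yaw dimension: encode the pair so that the regression residual becomes sin(a1 - a2) = sin(a1)cos(a2) - cos(a1)sin(a2), which avoids the 2*pi discontinuity. A hedged sketch of that standard formulation:

    import torch

    def add_sin_difference_sketch(boxes1, boxes2, dim=6):
        # Replace the yaw entries so that (boxes1 - boxes2) at index `dim` equals sin(a1 - a2).
        sin_cos = torch.sin(boxes1[..., dim:dim + 1]) * torch.cos(boxes2[..., dim:dim + 1])
        cos_sin = torch.cos(boxes1[..., dim:dim + 1]) * torch.sin(boxes2[..., dim:dim + 1])
        b1 = torch.cat([boxes1[..., :dim], sin_cos, boxes1[..., dim + 1:]], dim=-1)
        b2 = torch.cat([boxes2[..., :dim], cos_sin, boxes2[..., dim + 1:]], dim=-1)
        return b1, b2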
+
+format_output(output, B)[source]
+
+ +
+
+forward(bev_feat_list, points=None, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+init_weights()[source]
+
+ +
+
+loss(preds, gt_boxes, gt_labels, **kwargs)[source]
+

The dense BEV maps should have the shape (b, c, h, w)

+
+ +
+
+predictions(preds)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.det_anchor_sparse module

+
+
+class cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse(in_channels, loss_cls, loss_box, num_classes=1, target_assigner=None, get_boxes_when_training=False, get_roi_scores=False, **kwargs)[source]
+

Bases: BaseModule

+
+
+static add_sin_difference(boxes1, boxes2, dim=6)[source]
+
+ +
+
+format(output, coor, B)[source]
+
+ +
+
+forward(stensor_list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+init_weights()[source]
+
+ +
+
+loss(preds, stensor_list, gt_boxes, gt_labels, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+predictions(coors, preds)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.det_center_sparse module

+
+
+class cosense3d.modules.heads.det_center_sparse.DetCenterSparse(data_info, stride, class_names_each_head, shared_conv_channel, cls_head_cfg, reg_head_cfg, reg_channels, cls_assigner, box_assigner, loss_cls, loss_box, center_threshold=0.5, generate_roi_scr=False, norm='BN', **kwargs)[source]
+

Bases: BaseModule

+
+
+format_input(stensor_list)[source]
+
+ +
+
+format_output(output, B=None)[source]
+
+ +
+
+forward(stensor_list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(batch_list, gt_boxes, gt_labels, gt_mask=None, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+predictions(preds)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse(nlvls, sparse, *args, **kwargs)[source]
+

Bases: DetCenterSparse

+
+
+format_input(feat_in)[source]
+
+ +
+
+format_output(output, B=None, reference_inds=None)[source]
+
+ +
+
+forward(feat_in, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(batch_list, gt_boxes, gt_labels, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+predictions(preds)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.det_center_sparse.SeparatedClsHead(class_names_each_head, in_channel, one_hot_encoding=True, use_bias=False, norm='BN', **kwargs)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.det_center_sparse.UnitedClsHead(class_names_each_head, in_channel, one_hot_encoding=True, use_bias=False, norm='BN', **kwargs)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.heads.det_center_sparse.UnitedRegHead(reg_channels, in_channel, combine_channels=True, sigmoid_keys=None, use_bias=False, norm='BN', **kwargs)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.det_roi_refine module

+
+
+class cosense3d.modules.heads.det_roi_refine.KeypointRoIHead(num_cls, in_channels, n_fc_channels, roi_grid_pool, target_assigner, dp_ratio=0.3, train_from_epoch=0, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(preds, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+static get_dense_grid_points(rois, batch_size_rcnn, grid_size)[source]
+

Get the local coordinates of each grid point of an RoI in the coordinate system of that RoI (the origin lies at the center of the RoI).

+
+ +
+
+get_global_grid_points_of_roi(rois)[source]
+
+ +
+
+loss(out, gt_boxes, epoch, **kwargs)[source]
+
+

Parameters

+

output_dict : dict +target_dict : dict

+
+
+ +
+
+roi_grid_pool(preds)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.img_focal module

+
+
+class cosense3d.modules.heads.img_focal.ImgFocal(in_channels, embed_dims, num_classes, center_assigner, box_assigner, loss_cls2d, loss_centerness, loss_bbox2d, loss_iou2d, loss_centers2d, with_depth=False, **kwargs)[source]
+

Bases: BaseModule

+
+
+static apply_center_offset(locations, center_offset)[source]
+
+
Parameters:
+
    +
  • locations – (1, H, W, 2)

  • +
  • center_offset – (N, H, W, 4)

  • +
+
+
+
+ +
+
+static apply_ltrb(locations, pred_ltrb)[source]
+
+
Parameters:
+
    +
  • locations – (1, H, W, 2)

  • +
  • pred_ltrb – (N, H, W, 4)

  • +
+
+
+
+ +
+
+format_output(out_dict, img_feat)[source]
+
+ +
+
+forward(img_feat, img_coor, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(batch_list, labels2d, centers2d, bboxes2d, img_size, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.lidar_petr_head module

+
+
+class cosense3d.modules.heads.lidar_petr_head.LidarPETRHead(in_channels, transformer, feature_stride, lidar_range, topk=2048, memory_len=256, num_query=644, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_input(input)[source]
+
+ +
+
+forward(rois, bev_feat, memory, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+gather_topk(rois, bev_feats)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.multitask_head module

+
+
+class cosense3d.modules.heads.multitask_head.MultiTaskHead(heads, strides, losses, formatting=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(tensor_list, *args, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+loss(*args, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.nbr_attn_bev module

+
+
+class cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV(data_info, in_dim, stride, annealing_step, sampling, target_assigner=None, class_names_each_head=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+downsample_tgt_pts(tgt_label, max_sam)[source]
+
+ +
+
+format_input(stensor_list)[source]
+
+ +
+
+format_output(output, B=None)[source]
+
+ +
+
+forward(stensor_list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+generate_reference_points(centers)[source]
+
+ +
+
+get_tgt(batch_list, gt_boxes, gt_labels, **kwargs)[source]
+
+ +
+
+loss(batch_list, gt_boxes, gt_labels, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.petr_head module

+
+
+class cosense3d.modules.heads.petr_head.PETRHead(embed_dims, pc_range, code_weights, num_classes, box_assigner, loss_cls, loss_bbox, loss_iou=None, num_reg_fcs=2, num_pred=3, use_logits=True, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(feat_in, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+init_weights()[source]
+
+ +
+
+loss(petr_out, gt_boxes, gt_labels, det, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.heads.query_guided_petr_head module

+
+
+class cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead(embed_dims, pc_range, code_weights, num_classes, cls_assigner, box_assigner, loss_cls, loss_box, num_reg_fcs=3, num_pred=3, use_logits=False, reg_channels=None, sparse=False, pred_while_training=False, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(feat_in, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_pred_boxes(bbox_preds, ref_pts)[source]
+
+ +
+
+get_predictions(cls_scores, det_boxes, pred_boxes, batch_inds=None)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+loss(petr_out, gt_boxes_global, gt_labels_global, *args, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+training: bool
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.html b/docs/_build/html/cosense3d.modules.html new file mode 100644 index 00000000..fd515999 --- /dev/null +++ b/docs/_build/html/cosense3d.modules.html @@ -0,0 +1,1386 @@ + + + + + + + cosense3d.modules package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+class cosense3d.modules.BaseModule(gather_keys, scatter_keys, gt_keys=[], freeze=False, **kwargs)[source]
+

Bases: Module

+
+
+cat_data_from_list(input, key=None, pad_idx=False)[source]
+
+ +
+
+cat_dict_list(d_list: List[Dict])[source]
+
+ +
+
+cat_list(x_list, recursive=False)[source]
+

Concatenate sub-lists into one list

+
+ +
+
+compose_imgs(img_list)[source]
+
+ +
+
+compose_result_list(res, N)[source]
+
+
Parameters:
+
    +
  • res – dict(k:list)

  • +
  • N

  • +
+
+
Returns:
+

+
+
+
+ +
+
+compose_stensor(stensor_list, stride)[source]
+
+ +
+
+decompose_stensor(res, N)[source]
+
+ +
+
+format_input(input: List)[source]
+
+ +
+
+format_output(output, B)[source]
+
+ +
+
+forward(*args, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+freeze_parameters()[source]
+
+ +
+
+loss(*args, **kwargs)[source]
+

This must be implemented in head module.

+
+ +
+
+prepare_vis_data()[source]
+
+ +
+
+stack_data_from_list(input, key=None)[source]
+
+ +
+
+stack_dict_list(d_list: List[Dict])[source]
+
+ +
+
+to_gpu(gpu_id)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.build_module(module_cfg)[source]
+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.losses.html b/docs/_build/html/cosense3d.modules.losses.html new file mode 100644 index 00000000..ed28c2ae --- /dev/null +++ b/docs/_build/html/cosense3d.modules.losses.html @@ -0,0 +1,636 @@ + + + + + + + cosense3d.modules.losses package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules.losses package

+
+

Submodules

+
+
+

cosense3d.modules.losses.base_loss module

+
+
+class cosense3d.modules.losses.base_loss.BaseLoss(reduction: str = 'mean', activation: str = 'none', loss_weight: float = 1.0)[source]
+

Bases: Module

+
+
+forward(preds: Tensor, targets: Tensor, weight: Tensor | None = None, avg_factor: int | None = None, reduction_override: str | None = None, *args, **kwargs) Tensor[source]
+
+
Parameters:
+
    +
  • preds – prediction tensor.

  • +
  • targets – target tensor.

  • +
  • weight – The weight of loss for each +prediction. Defaults to None.

  • +
  • avg_factor – Average factor that is used to average +the loss. Defaults to None.

  • +
  • reduction_override – The reduction method used to +override the original reduction method of the loss. +Defaults to None.

  • +
  • args – additional arguments.

  • +
  • kwargs

  • +
+
+
Returns:
+

weighted loss.

+
+
+
+ +
+
+loss(*args, **kwargs)[source]
+
+ +
+
+property name
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.losses.common module

+
+
+cosense3d.modules.losses.common.cross_entroy_with_logits(preds, tgts, n_cls, weights=None, reduction='none')[source]
+
+ +
+
+cosense3d.modules.losses.common.focal_loss(preds, tgts, weights=None, reduction='none', gamma=2.0, alpha=0.25, use_sigmoid=True)[source]
+
+

Parameters

+

preds: FloatTensor(…, n_cls)
tgts: FloatTensor(…, n_cls) or LongTensor(…,) or LongTensor(…, 1), largest label is background
weights: same as preds or tgts

+
+
+ +
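For reference, a minimal sketch of the standard sigmoid focal loss that this function corresponds to (one-hot targets assumed; the real implementation additionally handles integer labels and per-element weights):

    import torch
    import torch.nn.functional as F

    def sigmoid_focal_loss_sketch(preds, tgts, gamma=2.0, alpha=0.25):
        # preds: (..., n_cls) logits, tgts: (..., n_cls) one-hot targets
        p = torch.sigmoid(preds)
        ce = F.binary_cross_entropy_with_logits(preds, tgts, reduction='none')
        p_t = p * tgts + (1 - p) * (1 - tgts)
        alpha_t = alpha * tgts + (1 - alpha) * (1 - tgts)
        return alpha_t * (1 - p_t) ** gamma * ce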
+
+cosense3d.modules.losses.common.indices_to_dense_vector(indices: Tensor, size: int, indices_value: float = 1.0, default_value: float = 0.0) Tensor[source]
+

Creates dense vector with indices set to specific value and rest to zeros.

+

This function exists because it is unclear if it is safe to use +tf.sparse_to_dense(indices, [size], 1, validate_indices=False) +with indices which are not ordered. +This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

+
+
Parameters:
+
    +
  • indices – 1d Tensor with integer indices which are to be set to indices_values.

  • +
  • size – size of output Tensor.

  • +
  • indices_value – values of elements specified by indices in the output vector.

  • +
  • default_value – values of other elements in the output vector.

  • +
+
+
Returns:
+

dense 1D Tensor of shape [size] with indices set to indices_values and the +rest set to default_value.

+
+
+
+ +
+
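An equivalent behaviour of indices_to_dense_vector sketched from the docstring (the real implementation may differ in dtype and device handling):

    import torch

    def indices_to_dense_vector_sketch(indices, size, indices_value=1.0, default_value=0.0):
        # Dense vector of length `size`: `indices` set to indices_value, the rest to default_value.
        dense = torch.full((size,), default_value)
        dense[indices] = indices_value
        return dense

    # indices_to_dense_vector_sketch(torch.tensor([1, 3]), 5) -> tensor([0., 1., 0., 1., 0.])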
+cosense3d.modules.losses.common.sigmoid_binary_cross_entropy(preds, tgts, weights=None, reduction='none')[source]
+
+

Parameters

+

preds: Tensor(d1, …, dn)
tgts: Tensor(d1, …, dn)
weights: Tensor(d1, …, dn)
reduction: str(‘none’ | ‘mean’ | ‘sum’)

+
+
+ +
+
+cosense3d.modules.losses.common.weighted_l1_loss(preds, targets, sigma=3.0, weights=None)[source]
+
+ +
+
+cosense3d.modules.losses.common.weighted_sigmoid_binary_cross_entropy(preds, tgts, weights=None, class_indices=None)[source]
+
+ +
+
+cosense3d.modules.losses.common.weighted_smooth_l1_loss(preds, targets, sigma=3.0, weights=None)[source]
+
+ +
+
+

cosense3d.modules.losses.edl module

+
+
+class cosense3d.modules.losses.edl.EDLLoss(n_cls: int, annealing_step: int, **kwargs)[source]
+

Bases: BaseLoss

+
+
+loss(preds, tgt, temp, n_cls_override=None)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.losses.edl.edl_mse_loss(preds, tgt, n_cls, temp, annealing_step, model_label='edl')[source]
+

Calculate evidential loss.
:param model_label: (str) a name to distinguish edl loss of different modules
:param preds: (N, n_cls) the logits of each class
:param tgt: (N,) labels with values from 0…(n_cls - 1) or (N, n_cls)
:param n_cls: (int) number of classes, including background
:param temp: current temperature for annealing of the KL divergence term of the loss
:param annealing_step: maximum annealing step
:return:

+
+ +
+
+cosense3d.modules.losses.edl.evidence_to_conf_unc(evidence, edl=True)[source]
+
+ +
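The EDL utilities above treat network outputs as Dirichlet evidence. A hedged sketch of the standard evidential-deep-learning conversion that evidence_to_conf_unc presumably performs (evidence to Dirichlet parameters, then to class confidence and a vacuity-style uncertainty):

    import torch

    def evidence_to_conf_unc_sketch(evidence):
        # evidence: (..., K) non-negative evidence per class
        alpha = evidence + 1                      # Dirichlet concentration parameters
        S = alpha.sum(dim=-1, keepdim=True)       # Dirichlet strength
        conf = alpha / S                          # expected class probabilities
        unc = evidence.shape[-1] / S              # uncertainty = K / S
        return conf, unc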
+
+cosense3d.modules.losses.edl.exp_evidence(y)[source]
+
+ +
+
+cosense3d.modules.losses.edl.kl_divergence(alpha, num_classes)[source]
+
+ +
+
+cosense3d.modules.losses.edl.loglikelihood_loss(y, alpha)[source]
+
+ +
+
+cosense3d.modules.losses.edl.mse_loss(y, alpha, epoch_num, num_classes, annealing_step)[source]
+
+ +
+
+cosense3d.modules.losses.edl.pred_to_conf_unc(preds, activation='relu', edl=True)[source]
+
+ +
+
+cosense3d.modules.losses.edl.relu_evidence(y)[source]
+
+ +
+
+cosense3d.modules.losses.edl.softplus_evidence(y)[source]
+
+ +
+
+

cosense3d.modules.losses.focal_loss module

+
+
+class cosense3d.modules.losses.focal_loss.FocalLoss(use_sigmoid: bool = True, gamma: float = 2.0, alpha: float = 0.25, activated: bool = False, bg_idx: int | None = None, **kwargs)[source]
+

Bases: BaseLoss

+
+
+loss(pred: Tensor, target: Tensor, *args, **kwargs)[source]
+
+
Parameters:
+
    +
  • pred – prediction.

  • +
  • target – ground truth targets.

  • +
  • args

  • +
  • kwargs

  • +
+
+
Returns:
+

+
+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.losses.focal_loss.GaussianFocalLoss(alpha: float = 2.0, gamma: float = 4.0, reduction: str = 'mean', loss_weight: float = 1.0)[source]
+

Bases: BaseLoss

+

GaussianFocalLoss is a variant of focal loss.

+

More details can be found in the paper. The code is modified from kp_utils.py. Please note that the target in GaussianFocalLoss is a Gaussian heatmap, not a 0/1 binary target.

+
+
+loss(pred: Tensor, target: Tensor)[source]
+

Focal Loss for targets in gaussian +distribution.

+
+
Parameters:
+
    +
  • pred – The prediction.

  • +
  • target – The learning target of the prediction +in gaussian distribution.

  • +
+
+
Returns:
+

loss result.

+
+
+
+ +
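A minimal sketch of the CornerNet-style Gaussian focal loss referenced above, where the heatmap value itself down-weights negatives near a ground-truth peak (the class above may differ in details such as reduction and weighting):

    import torch

    def gaussian_focal_loss_sketch(pred, target, alpha=2.0, gamma=4.0, eps=1e-12):
        # pred: predicted heatmap in (0, 1); target: Gaussian heatmap in [0, 1]
        pos_w = target.eq(1).float()
        neg_w = (1 - target).pow(gamma)            # soft down-weighting of negatives near peaks
        pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_w
        neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_w
        return pos_loss + neg_loss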
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.losses.focal_loss.QualityFocalLoss(use_sigmoid: bool = True, beta: float = 2.0, activated: bool = False, **kwargs)[source]
+

Bases: BaseLoss

+
+
+loss(pred: Tensor, target: Tensor)[source]
+

Forward function.

+
+
Parameters:
+
    +
  • pred – Predicted joint representation of +classification and quality (IoU) estimation with shape (N, C), +C is the number of classes.

  • +
  • target – Target category label with shape +(N,) and target quality label with shape (N,).

  • +
+
+
Returns:
+

loss result.

+
+
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.losses.focal_loss.py_focal_loss_with_prob(pred: Tensor, target: Tensor, gamma: float = 2.0, alpha: float = 0.25)[source]
+

PyTorch version of Focal Loss. +Different from py_sigmoid_focal_loss, this function accepts probability +as input.

+
+
Parameters:
+
    +
  • pred – The prediction probability with shape (N, C), +C is the number of classes.

  • +
  • target – The learning label of the prediction.

  • +
  • gamma – The gamma for calculating the modulating +factor. Defaults to 2.0.

  • +
  • alpha – A balanced form for Focal Loss. +Defaults to 0.25.

  • +
+
+
Returns:
+

loss result.

+
+
+
+ +
+
+cosense3d.modules.losses.focal_loss.py_sigmoid_focal_loss(pred: Tensor, target: Tensor, gamma: float = 2.0, alpha: float = 0.25)[source]
+

PyTorch version of sigmoid Focal Loss (cf. py_focal_loss_with_prob, which accepts probabilities as input).

+
+
Parameters:
+
    +
  • pred – The prediction probability with shape (N, C), +C is the number of classes.

  • +
  • target – The learning label of the prediction.

  • +
  • gamma – The gamma for calculating the modulating +factor. Defaults to 2.0.

  • +
  • alpha – A balanced form for Focal Loss. +Defaults to 0.25.

  • +
+
+
Returns:
+

loss result.

+
+
+
+ +
+
+cosense3d.modules.losses.focal_loss.quality_focal_loss(pred: Tensor, target: Tensor, beta: float = 2.0) Tensor[source]
+

Quality Focal Loss (QFL) is from Generalized Focal Loss: Learning +Qualified and Distributed Bounding Boxes for Dense Object Detection.

+
+
Parameters:
+
    +
  • pred – Predicted joint representation of classification +and quality (IoU) estimation with shape (N, C), C is the number of +classes.

  • +
  • target – Target category label with shape (N,) +and target quality label with shape (N,).

  • +
  • beta – The beta parameter for calculating the modulating factor. +Defaults to 2.0.

  • +
+
+
Returns:
+

Loss tensor with shape (N,).

+
+
+
+ +
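The core idea of QFL is that a soft quality score (e.g. IoU) replaces the hard 0/1 label and modulates the cross-entropy term. A hedged per-slot sketch (the real function handles the full multi-class layout with background negatives):

    import torch
    import torch.nn.functional as F

    def qfl_sketch(pred_logit, quality_target, beta=2.0):
        sigma = torch.sigmoid(pred_logit)
        modulating = (quality_target - sigma).abs().pow(beta)
        bce = F.binary_cross_entropy_with_logits(pred_logit, quality_target, reduction='none')
        return modulating * bce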
+
+cosense3d.modules.losses.focal_loss.quality_focal_loss_with_prob(pred: Tensor, target: Tensor, beta: float = 2.0) Tensor[source]
+

Quality Focal Loss (QFL) is from Generalized Focal Loss: Learning +Qualified and Distributed Bounding Boxes for Dense Object Detection.

+
+
Parameters:
+
    +
  • pred – Predicted joint representation of classification +and quality (IoU) estimation with shape (N, C), C is the number of +classes.

  • +
  • target – Target category label with shape (N,) +and target quality label with shape (N,).

  • +
  • beta – The beta parameter for calculating the modulating factor. +Defaults to 2.0.

  • +
+
+
Returns:
+

Loss tensor with shape (N,).

+
+
+
+ +
+
+

cosense3d.modules.losses.iou_loss module

+
+
+class cosense3d.modules.losses.iou_loss.GIoULoss(eps: float = 1e-07, **kwargs)[source]
+

Bases: BaseLoss

+
+
+loss(pred, target)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.losses.iou_loss.IoULoss(mode: str = 'log', eps: float = 1e-06, **kwargs)[source]
+

Bases: BaseLoss

+
+
+loss(pred, target)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.losses.l1_loss module

+
+
+class cosense3d.modules.losses.l1_loss.L1Loss(reduction: str = 'mean', activation: str = 'none', loss_weight: float = 1.0)[source]
+

Bases: BaseLoss

+
+
+loss(pred, target)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.losses.l1_loss.SmoothL1Loss(beta: float = 1.0, **kwargs)[source]
+

Bases: BaseLoss

+
+
+loss(pred, target)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.losses.vanilla_seg_loss module

+
+
+class cosense3d.modules.losses.vanilla_seg_loss.VanillaSegLoss(d_weights, s_weights, d_coe, s_coe, l_weights=50, **kwargs)[source]
+

Bases: Module

+
+
+forward(static_pred=None, dynamic_pred=None, static_gt=None, dynamic_gt=None)[source]
+

Perform loss function on the prediction.

+
+

Parameters

+
+
output_dict : dict

The dictionary contains the prediction.

+
+
gt_dict : dict

The dictionary contains the groundtruth.

+
+
+
+
+

Returns

+

Loss dictionary.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

Module contents

+
+
+cosense3d.modules.losses.build_loss(type, **kwargs)[source]
+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.necks.html b/docs/_build/html/cosense3d.modules.necks.html new file mode 100644 index 00000000..f430d87e --- /dev/null +++ b/docs/_build/html/cosense3d.modules.necks.html @@ -0,0 +1,347 @@ + + + + + + + cosense3d.modules.necks package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules.necks package

+
+

Submodules

+
+
+

cosense3d.modules.necks.cpm_composer module

+
+
+class cosense3d.modules.necks.cpm_composer.KeypointComposer(vsa, train_from_epoch=0, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(preds, bev_feat, voxel_feat, points, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.necks.dilation_spconv module

+
+
+class cosense3d.modules.necks.dilation_spconv.DilationSpconv(data_info, convs, d=2, n_layers=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(out_dict, B)[source]
+
+ +
+
+forward(stensor_list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_conv_layer(args)[source]
+
+ +
+
+to_gpu(gpu_id)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation(data_info, convs, d=2, n_layers=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_output(out_dict, B)[source]
+
+ +
+
+forward(stensor_list, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_conv_layer(args)[source]
+
+ +
+
+to_gpu(gpu_id)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.necks.formatting module

+
+
+class cosense3d.modules.necks.formatting.DenseToSparse(data_info, strides=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(*args, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_centers(stride, device)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.necks.formatting.DetDenseToSparse(data_info, stride, **kwargs)[source]
+

Bases: Module

+
+
+forward(input)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_centers()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.necks.formatting.FPVRCNNToLTS(data_info, strides=None, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(*args, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_centers(stride, device)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.modules.plugin.html b/docs/_build/html/cosense3d.modules.plugin.html new file mode 100644 index 00000000..c465fe6e --- /dev/null +++ b/docs/_build/html/cosense3d.modules.plugin.html @@ -0,0 +1,1885 @@ + + + + + + + cosense3d.modules.plugin package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.modules.plugin package

+
+

Submodules

+
+
+

cosense3d.modules.plugin.attn module

+
+
+class cosense3d.modules.plugin.attn.NeighborhoodAttention(data_info, stride, emb_dim=128)[source]
+

Bases: Module

+
+
+coor_to_indices(coor)[source]
+
+ +
+
+forward(ref_pts, ctr_coor, ctr_feat)[source]
+
+

Parameters

+

ref_pts LongTensor(Q, 3): 2d coordinates in metric units (batch_idx, x, y)
ctr_coor LongTensor(V, 3): 2d coordinates in indices (batch_idx, x, y)
ctr_feat FloatTensor(V, d): BEV grid center point features

+
+
+

Returns

+
+

out_features FloatTensor(Q, d): attended features

+
+
+
+ +
+
+get_nbr_mapping(query_pos, value_pos)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.attn.ScaledDotProductAttention(dim: int)[source]
+

Bases: Module

+

Scaled Dot-Product Attention proposed in “Attention Is All You Need”. Computes the dot products of the query with all keys, divides each by sqrt(dim), and applies a softmax function to obtain the weights on the values.

+
+
+forward(query, key, value)[source]
+
+
Parameters:
+
    +
  • query – (batch, q_len, d_model) tensor containing projection vector for decoder.

  • +
  • key – (batch, k_len, d_model) tensor containing projection vector for encoder.

  • +
  • value – (batch, v_len, d_model) tensor containing features of the encoded input sequence.

  • +
+
+
Returns:
+

context, attn
- context: tensor containing the context vector from attention mechanism.
- attn: tensor containing the attention (alignment) from the encoder outputs.

+
+
+
+ +
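A minimal sketch of scaled dot-product attention as described above (no masking; shapes follow the docstring):

    import math
    import torch

    def scaled_dot_product_attention_sketch(query, key, value):
        d = query.size(-1)
        scores = torch.bmm(query, key.transpose(1, 2)) / math.sqrt(d)  # (batch, q_len, k_len)
        attn = torch.softmax(scores, dim=-1)
        context = torch.bmm(attn, value)                               # (batch, q_len, d_model)
        return context, attn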
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.bev_rpn module

+
+
+class cosense3d.modules.plugin.bev_rpn.Conv2d(in_channels, out_channels, k, s, p, activation=True, batch_norm=True)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.bev_rpn.CustomRPN(strides=[2, 2, 2], down_sample=2, num_layers=3, in_channels=128, out_channels=256)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.bev_rpn.RPN(anchor_num=2)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.downsample_conv module

+

Classes used to downsample features with 3x3 convolutions

+
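A hedged sketch of the downsampling idea: a strided 3x3 convolution (with norm and activation) halves the spatial resolution of a BEV feature map. Channel sizes are illustrative assumptions, not the module defaults:

    import torch
    import torch.nn as nn

    downsample = nn.Sequential(
        nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
        nn.BatchNorm2d(256),
        nn.ReLU(inplace=True),
    )
    x = torch.randn(1, 256, 128, 128)
    y = downsample(x)   # -> (1, 256, 64, 64)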
+
+class cosense3d.modules.plugin.downsample_conv.DoubleConv(in_channels: int, out_channels: int, kernel_size: int, stride: int, padding: bool)[source]
+

Bases: Module

+

Double convolution

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.downsample_conv.DownsampleConv(in_channels, kernel_sizes=[1], dims=[256], strides=[1], paddings=[0])[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.flash_attn module

+
+
+class cosense3d.modules.plugin.flash_attn.FlashAttention(softmax_scale: float | None = None, attention_dropout: float = 0.0, return_attn_weights: float = False, device: str | None = None, dtype: type | None = None)[source]
+

Bases: Module

+

Implement the scaled dot product attention with softmax.

+
+
+forward(q: Tensor, kv: Tensor, causal: bool = False, key_padding_mask: Tensor | None = None)[source]
+

Implements the multihead softmax attention.

+
+
Parameters:
+
    +
  • q – The tensor containing the query. (B, T, H, D)

  • +
  • kv – The tensor containing the key, and value. (B, S, 2, H, D)

  • +
  • causal

  • +
  • key_padding_mask – a bool tensor of shape (B, S)

  • +
+
+
Returns:
+

+
+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.flash_attn.FlashMHA(embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0, causal=False, device=None, dtype=None, **kwargs)[source]
+

Bases: Module

+
+
+forward(q, k, v, key_padding_mask=None)[source]
+

x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) +key_padding_mask: bool tensor of shape (batch, seqlen)

+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.plugin.flash_attn.flash_attn_unpadded_kvpacked_test(q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk, dropout_p, softmax_scale, causal, batch_size)[source]
+
+ +
+
+cosense3d.modules.plugin.flash_attn.index_first_axis()
+
+ +
+
+

cosense3d.modules.plugin.fpn module

+
+
+class cosense3d.modules.plugin.fpn.FPN(in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg={'mode': 'nearest'}, init_cfg={'distribution': 'uniform', 'layer': 'Conv2d', 'type': 'Xavier'})[source]
+

Bases: Module

+
+
+forward(inputs)[source]
+

Forward function.

+
+ +
+
+init_weights()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.gevbev_decoder module

+
+
+class cosense3d.modules.plugin.gevbev_decoder.GevBEVDecoder(data_info, stride, kernel=3, var0=0.1)[source]
+

Bases: Module

+
+
+coor_to_indices(coor)[source]
+
+ +
+
+forward(ref_pts, ctr_coor, ctr_reg)[source]
+
+
Parameters:
+
    +
  • ref_pts – LongTensor(Q, 3) 2d coordinates in metric units (batch_idx, x, y)

  • +
  • ctr_coor – LongTensor(V, 3) 2d coordinates in indices (batch_idx, x, y)

  • +
  • ctr_reg – FloatTensor(V, d) bev grid center point regression result

  • +
+
+
Returns:
+

out_evidence FloatTensor(Q, d): attended features

+
+
+
+ +
+
+get_nbr_mapping(query_pos, value_pos)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.mink_spconv module

+
+
+class cosense3d.modules.plugin.mink_spconv.Spconv(data_info, convs, d=2, dilation=False, **kwargs)[source]
+

Bases: Module

+
+
+forward(stensor_dict, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_2d_stensor(stensor_dict, stride)[source]
+
+ +
+
+get_conv_layer(args)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.naive_compressor module

+
+
+class cosense3d.modules.plugin.naive_compressor.NaiveCompressor(input_dim, compress_ratio)[source]
+

Bases: Module

+

A very naive compression module that only compresses along the channel dimension.

+
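A hedged sketch of channel-only compression: squeeze the channel dimension by a compress ratio before transmission and expand it back on the receiver side (layer sizes and names are illustrative assumptions):

    import torch.nn as nn

    compress_ratio = 4
    encoder = nn.Sequential(
        nn.Conv2d(256, 256 // compress_ratio, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
    )
    decoder = nn.Sequential(
        nn.Conv2d(256 // compress_ratio, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
    )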
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.pillar_encoder module

+
+
+class cosense3d.modules.plugin.pillar_encoder.PFNLayer(in_channels, out_channels, use_norm=True, last_layer=False)[source]
+

Bases: Module

+
+
+forward(inputs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.pillar_encoder.PillarEncoder(features, voxel_size, lidar_range, channels, use_norm=True)[source]
+

Bases: Module

+
+
+property absolute_xyz_dim
+
+ +
+
+compose_voxel_feature(voxel_features)[source]
+
+ +
+
+property distance_dim
+
+ +
+
+forward(voxel_features, coords, voxel_num_points)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+static get_paddings_indicator(actual_num, max_num, axis=0)[source]
+
+ +
+
+property intensity_dim
+
+ +
+
+training: bool
+
+ +
+
+property xyz_dim
+
+ +
+ +
+
+

cosense3d.modules.plugin.ssfa module

+
+
+class cosense3d.modules.plugin.ssfa.SSFA(in_channels, out_channels=128, shrink_strides=None, shrink_channels=None)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+init_weights()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.plugin.ssfa.get_conv_layers(conv_name, in_channels, out_channels, n_layers, kernel_size, stride, padding, relu_last=True, sequential=True, **kwargs)[source]
+

Build convolutional layers. kernel_size, stride and padding should each be a list whose length matches n_layers.

+
+ +
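A standalone sketch of the same contract in plain torch (not the library helper itself): the three per-layer lists must all have length n_layers.

import torch.nn as nn

def conv_stack(in_c, out_c, n_layers, kernel_size, stride, padding, relu_last=True):
    # the three lists must match n_layers, as documented above
    assert len(kernel_size) == len(stride) == len(padding) == n_layers
    layers, c_in = [], in_c
    for i in range(n_layers):
        layers.append(nn.Conv2d(c_in, out_c, kernel_size[i], stride[i], padding[i]))
        if i < n_layers - 1 or relu_last:
            layers.append(nn.ReLU(inplace=True))
        c_in = out_c
    return nn.Sequential(*layers)

print(conv_stack(64, 128, 3, [3, 3, 3], [1, 1, 1], [1, 1, 1]))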
+
+

cosense3d.modules.plugin.target_assigners module

+
+
+class cosense3d.modules.plugin.target_assigners.BEVBoxAssigner(n_cls, pos_neg_ratio=5, mining_thr=0, max_mining_ratio=3, mining_start_epoch=5, merge_all_classes=False)[source]
+

Bases: BaseAssigner

+

Assign center points in the BEV maps as positive if the point lies within ‘min_radius’ of any gt box center.

+
+
+assign(centers, gt_boxes, gt_labels, pred_scores=None, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+
+get_labels_single_head(centers, gt_boxes, pred_scores=None, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.BEVCenternessAssigner(n_cls, min_radius=1.0, pos_neg_ratio=5, mining_thr=0, max_mining_ratio=3, mining_start_epoch=5, merge_all_classes=False, use_gaussian=False, sigma=1.0)[source]
+

Bases: BaseAssigner

+

Assign center points in the BEV maps as positive if the point lies within ‘min_radius’ of any gt box center.

+
+
+assign(centers, gt_boxes, gt_labels, pred_scores=None, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+
+get_labels_single_head(centers, gt_boxes, pred_scores=None, **kwargs)[source]
+
+ +
+ +
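A conceptual sketch of the radius-based positive assignment with illustrative values (it omits class heads, mining and the optional Gaussian weighting):

import torch

centers = torch.tensor([[0.0, 0.0], [5.0, 5.0], [10.0, 0.0]])   # (N, 2) BEV center points
gt_centers = torch.tensor([[0.5, -0.2], [9.8, 0.3]])            # (M, 2) gt box centres
min_radius = 1.0

dists = torch.cdist(centers, gt_centers)                        # (N, M) pairwise distances
labels = (dists.min(dim=1).values < min_radius).long()          # positive if within min_radius
print(labels)                                                   # tensor([1, 0, 1])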
+
+class cosense3d.modules.plugin.target_assigners.BEVPointAssigner(down_sample=True, sample_mining_thr=0.0, max_mining_ratio=3, annealing_step=None, topk_sampling=False, annealing_sampling=False)[source]
+

Bases: BaseAssigner

+

Assign target points to BEV boxes and down-sample the target points with a buffer-based method.

+
+
+assign(tgt_pts, gt_boxes, B, conf=None, down_sample=True, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+
+downsample_tgt_pts(tgt_label, max_sam)[source]
+
+ +
+
+get_predictions(x, edl=True, activation='none')[source]
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner(data_info, stride, tgt_range=None, down_sample=False, annealing_step=None)[source]
+

Bases: BaseAssigner

+
+
+assign(ctr_pts, samples, B, gt_boxes=None, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+
+static down_sample_pred_pts(ctr_pts)[source]
+
+ +
+
+downsample_tgt_pts(tgt_label, max_sam)[source]
+
+ +
+
+filter_range(ctr_pts, samples)[source]
+
+ +
+
+get_obs_mask(inds, B)[source]
+
+ +
+
+get_predictions(data_dict, B, edl=True, activation='none', **kwargs)[source]
+
+ +
+
+pts_to_inds(pts)[source]
+

Calculate indices of samples in the bev map

+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.BaseAssigner[source]
+

Bases: object

+

Base assigner.

+
+
+abstract assign(*args, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner(box_size, dirs, voxel_size, lidar_range, stride, box_coder, pos_threshold=0.6, neg_threshold=0.45, score_thrshold=0.25)[source]
+

Bases: BaseAssigner, Module

+
+
+assign(gt_boxes)[source]
+
+

Parameters

+

gt_boxes Tensor(N, 7): [x, y, z, l, w, h, r, …]

+
+
+

Returns

+

reg Tensor(H, W, num_anchors, code_size): box regression targets

+
+
+ +
+
+box_overlaps(boxes1, boxes2)[source]
+
+ +
+
+get_anchor_template(box_size, dirs)[source]
+
+ +
+
+get_predictions(preds)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.BoxCenterAssigner(voxel_size, lidar_range, stride, detection_benchmark, class_names_each_head, center_threshold, box_coder, activation='relu', edl=True)[source]
+

Bases: BaseAssigner, Module

+
+
+assign(centers, gt_boxes, gt_labels, gt_preds=None, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+
+get_predictions(preds)[source]
+

Decode the center and regression maps into BBoxes.

+
+
Parameters:
+

preds

  • cls: list[Tensor], each tensor is the result from a cls head with shape (B or N, Ncls, …).

  • reg:
      • box: list[Tensor], one tensor per reg head with shape (B or N, 6, …).
      • dir: list[Tensor], one tensor per reg head with shape (B or N, 8, …).
      • scr: list[Tensor], one tensor per reg head with shape (B or N, 4, …).
+

+
+
Returns:
+

roi:
  • box: list[Tensor], one tensor per head with shape (N, 8).
  • scr: list[Tensor], one tensor per head with shape (N,).
  • lbl: list[Tensor], one tensor per head with shape (N,).
  • idx: list[Tensor], one tensor per head with shape (3, N), center map indices of the boxes.

+
+
+
+ +
+
+pts_to_indices(bev_pts: Tensor)[source]
+
+
Parameters:
+

bev_pts – (N, 3+), BEV points, 1st column should be batch index.

+
+
Returns:
+

+
+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner(box_size, dirs, voxel_size, lidar_range, stride, box_coder, me_coor=True, pos_threshold=0.6, neg_threshold=0.45, score_thrshold=0.25)[source]
+

Bases: BaseAssigner, Module

+
+
+assign(coors: Tensor, gt_boxes: Tensor)[source]
+
+
Parameters:
+
    +
  • coors – (N, 2) 2D mink coor [x, y]

  • +
  • gt_boxes – (M, 7) [x, y, z, l, w, h, r]

  • +
+
+
Returns:
+

    +
  • labels Tensor(N, num_anchors): box regression targets

  • +
  • reg_tgt Tensor(N, num_anchors, code_size): box regression targets

  • +
  • ir_score Tensor(N, num_anchors, 4) or None: direction score target

  • +
+

+
+
+
+ +
+
+box_overlaps(boxes1, boxes2)[source]
+
+ +
+
+get_anchor_template(box_size, dirs)[source]
+
+ +
+
+get_predictions(coors, preds)[source]
+
+
Parameters:
+
    +
  • coors – Tensor(N, 3) mink coor [batch_idx, x, y]

  • +
  • preds

  • +
+
+
Returns:
+

+
+
+
+ +
+
+me_coor_to_grid_indices(coor)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.ContiBEVAssigner(distr_r=2.0, var0=0.1, **kwargs)[source]
+

Bases: BEVSemsegAssigner

+
+
+assign(ctr_pts, samples, B, gt_boxes=None, **kwargs) dict[source]
+

Assign target.

+
+
Parameters:
+
    +
  • ctr_pts – center points of bev maps, including indices, metric centers and regression results.

  • +
  • samples – BEV target point samples.

  • +
  • B – batch size.

  • +
  • gt_boxes – ground truth BBoxes.

  • +
  • kwargs – keyword arguments.

  • +
+
+
Returns:
+

target_dict that contains the static or/and dynamic target points and their corresponding labels.

+
+
+
+ +
+
+get_predictions(ctr_pts, B, tag, **kwargs)[source]
+

Given center points and its corresponding regressions, generate the dense bev semseg maps +and its uncertainty and observation mask.

+
+
Parameters:
+
    +
  • ctr_pts – center points of bev maps, including indices, metric centers and regression results.

  • +
  • B – batch size.

  • +
  • tag – tag for regression key “static | dynamic”.

  • +
  • kwargs – keyword arguments

  • +
+
+
Returns:
+

    +
  • conf: confidence bev map.

  • +
  • unc: uncertainty bev map.

  • +
  • obs_mask: observation mask of the bev map.

  • +
+

+
+
+
+ +
+
+sample_dynamic_tgt_pts(ctr_pts: dict, gt_boxes: Tensor, B: int) Tuple[Tensor, Tensor, Tensor][source]
+

Given the input coordinates of the center points and the ground truth BBoxes, +sample the BEV target points for BEV semantic segmentation following the buffer-based sampling as illustrated +in the following image:

[Figure: Buffer-based sampling of the BEV target]
+
Parameters:
+
    +
  • ctr_pts – center points of bev maps, including indices, metric centers and regression results.

  • +
  • gt_boxes – ground truth BBoxes.

  • +
  • B – batch size.

  • +
+
+
Returns:
+

    +
  • tgt_pts: sampled target points.

  • +
  • tgt_lbl: labels of the sampled target points.

  • +
  • inds: map indices of the sampled target points.

  • +
+

+
+
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.DiscreteBEVAssigner(data_info, stride, down_sample=False, annealing_step=None)[source]
+

Bases: BaseAssigner

+
+
+assign(ctr_pts, samples, B, gt_boxes=None, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+
+get_obs_mask(inds, B)[source]
+
+ +
+
+get_predictions(data_dict, B, edl=True, activation='none', **kwargs)[source]
+
+ +
+
+pts_to_inds(samples)[source]
+

Calculate indices of samples in the bev map

+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.HeatmapAssigner[source]
+

Bases: BaseAssigner

+
+
+assign(obj_centers2d, obj_bboxes, img_shape, stride)[source]
+

Assign preds to targets.

+
+ +
+
+static draw_heatmap_gaussian(heatmap, center, radius, k=1)[source]
+

Get gaussian masked heatmap.

+
+
Args:

heatmap (torch.Tensor): Heatmap to be masked.
center (torch.Tensor): Center coord of the heatmap.
radius (int): Radius of gaussian.
k (int, optional): Multiple of masked_gaussian. Defaults to 1.

+
+
Returns:

torch.Tensor: Masked heatmap.

+
+
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.HungarianAssigner2D(cls_cost={'type': 'classification', 'weight': 1.0}, reg_cost={'type': 'bboxl1', 'weight': 1.0}, iou_cost={'type': 'giou', 'weight': 1.0}, centers2d_cost={'type': 'l1', 'weight': 1.0})[source]
+

Bases: BaseAssigner

+

Computes one-to-one matching between predictions and ground truth.

+

This class computes an assignment between the targets and the predictions based on the costs. The costs are a weighted sum of four components: classification cost, regression L1 cost, regression IoU cost, and center-2d L1 cost. The assignment is done in the following steps and the order matters; a minimal sketch of the matching step follows the list below.

+
  1. Assign every prediction to -1.

  2. Compute the weighted costs.

  3. Do Hungarian matching on CPU based on the costs.

  4. Assign all to 0 (background) first; then for each matched pair between predictions and gts, treat the prediction as foreground and assign the corresponding gt index (plus 1) to it.

+
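A minimal sketch of steps 3 and 4, assuming the weighted cost matrix has already been computed; SciPy's linear_sum_assignment is the CPU Hungarian solver referred to above.

import torch
from scipy.optimize import linear_sum_assignment

num_query, num_gt = 5, 3
cost = torch.rand(num_query, num_gt)              # weighted sum of cls / reg / iou / center-2d costs

row, col = linear_sum_assignment(cost.numpy())    # step 3: Hungarian matching on CPU
assigned_gt_inds = torch.zeros(num_query, dtype=torch.long)          # step 4: all background (0)
assigned_gt_inds[torch.as_tensor(row)] = torch.as_tensor(col) + 1    # matched: 1-based gt index
print(assigned_gt_inds)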
+
+assign(bbox_pred, cls_pred, pred_centers2d, gt_bboxes, gt_labels, centers2d, img_size, eps: float = 1e-07)[source]
+

Computes one-to-one matching based on the weighted costs.

+

This method assigns each query prediction to a ground truth or background. In assigned_gt_inds, -1 means don’t care, 0 means negative sample, and a positive number is the 1-based index of the assigned gt. The assignment is done in the following steps and the order matters.

+
  1. Assign every prediction to -1.

  2. Compute the weighted costs.

  3. Do Hungarian matching on CPU based on the costs.

  4. Assign all to 0 (background) first; then for each matched pair between predictions and gts, treat the prediction as foreground and assign the corresponding gt index (plus 1) to it.

+
+
Parameters:
+
    +
  • bbox_pred – Predicted boxes with normalized coordinates +(cx, cy, w, h), which are all in range [0, 1]. Shape +[num_query, 4].

  • +
  • cls_pred – Predicted classification logits, shape +[num_query, num_class].

  • +
  • pred_centers2d – prediction 2d center points.

  • +
  • gt_bboxes – ground truth bboxes.

  • +
  • gt_labels – Label of gt_bboxes, shape (num_gt,).

  • +
  • centers2d – 2d center points.

  • +
  • img_size – input image size.

  • +
  • eps – A value added to the denominator for +numerical stability. Default 1e-7.

  • +
+
+
Returns:
+

+
+
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.HungarianAssigner3D(cls_cost={'type': 'focal_loss', 'weight': 1.0}, reg_cost={'type': 'l1', 'weight': 1.0}, iou_cost={'type': 'iou', 'weight': 1.0})[source]
+

Bases: BaseAssigner

+
+
+assign(bbox_pred, cls_pred, gt_bboxes, gt_labels, code_weights=None, eps=1e-07)[source]
+

Assign preds to targets.

+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.MatchCost[source]
+

Bases: object

+

This class is modified from mmdet.

+
+
+static bboxl1(bbox_pred: Tensor, gt_bboxes: Tensor, weight: float = 1.0, box_format: str = 'xyxy') Tensor[source]
+
+
Parameters:
+
    +
  • bbox_pred – Predicted boxes with normalized coordinates +(cx, cy, w, h), which are all in range [0, 1]. Shape +(num_query, 4).

  • +
  • gt_bboxes – Ground truth boxes with normalized +coordinates (x1, y1, x2, y2). Shape (num_gt, 4).

  • +
  • weight – loss_weight.

  • +
  • box_format – ‘xyxy’ for DETR, ‘xywh’ for Sparse_RCNN.

  • +
+
+
Returns:
+

bbox_cost value with weight

+
+
+
+ +
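A sketch of the cost computation under the shapes documented above. torch.cdist gives the pairwise L1 distances; converting the gt boxes to cxcywh is one possible normalization step, not necessarily the library's exact code path.

import torch

bbox_pred = torch.rand(100, 4)   # normalized (cx, cy, w, h), shape (num_query, 4)
gt_xyxy   = torch.rand(10, 4)    # normalized (x1, y1, x2, y2), shape (num_gt, 4)

# bring gt boxes into the same (cx, cy, w, h) format as the predictions
cx = (gt_xyxy[:, 0] + gt_xyxy[:, 2]) / 2
cy = (gt_xyxy[:, 1] + gt_xyxy[:, 3]) / 2
w = gt_xyxy[:, 2] - gt_xyxy[:, 0]
h = gt_xyxy[:, 3] - gt_xyxy[:, 1]
gt_cxcywh = torch.stack([cx, cy, w, h], dim=-1)

weight = 1.0
bbox_cost = torch.cdist(bbox_pred, gt_cxcywh, p=1) * weight   # (num_query, num_gt)
print(bbox_cost.shape)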
+
+static binary_focal_loss(cls_pred, gt_labels, weight=1.0, alpha=0.25, gamma=2, eps=1e-12)[source]
+
+ +
+
+build(type, **kwargs)[source]
+
+ +
+
+static classification(cls_pred: Tensor, gt_labels: Tensor, weight: float = 1.0) Tensor[source]
+
+
Parameters:
+
    +
  • cls_pred – Predicted classification logits, shape +(num_query, num_class).

  • +
  • gt_labels – Label of gt_bboxes, shape (num_gt,).

  • +
  • weight – loss_weight.

  • +
+
+
Returns:
+

cls_cost value with weight

+
+
+
+ +
+
+static focal_loss(cls_pred, gt_labels, weight=1.0, alpha=0.25, gamma=2, eps=1e-12)[source]
+
+ +
+
+static giou(bboxes: Tensor, gt_bboxes: Tensor, weight: float = 1.0)[source]
+
+
Parameters:
+
    +
  • bboxes – Predicted boxes with unnormalized coordinates +(x1, y1, x2, y2). Shape (num_query, 4).

  • +
  • gt_bboxes – Ground truth boxes with unnormalized +coordinates (x1, y1, x2, y2). Shape (num_gt, 4).

  • +
  • weight – loss weight.

  • +
+
+
Returns:
+

giou_cost value with weight

+
+
+
+ +
+
+static iou(bboxes, gt_bboxes, weight=1.0)[source]
+

See giou

+
+ +
+
+static l1(pred, gt, weight=1.0)[source]
+

L1 distance between pred and gt Tensors

+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.RoIBox3DAssigner(box_coder)[source]
+

Bases: BaseAssigner

+
+
+assign(pred_boxes, gt_boxes, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+
+get_predictions(rcnn_cls, rcnn_iou, rcnn_reg, rois)[source]
+
+ +
+ +
+
+class cosense3d.modules.plugin.target_assigners.RoadLineAssigner(res, range, pos_neg_ratio=2)[source]
+

Bases: BaseAssigner

+
+
+assign(coor, tgt_pts, B, **kwargs)[source]
+

Assign preds to targets.

+
+ +
+ +
+
+cosense3d.modules.plugin.target_assigners.pos_neg_sampling(labels: Tensor, pos_neg_ratio: float) Tensor[source]
+

Downsample negative targets.

+
+
Parameters:
+
    +
  • labels – class labels.

  • +
  • pos_neg_ratio – ratio = num_neg_samples / num_pos_samples.

  • +
+
+
Returns:
+

class labels with -1 labels to be ignored during training.

+
+
+
+ +
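A minimal sketch of the documented contract: keep all positives, keep at most pos_neg_ratio x n_pos negatives, and mark the rest with -1 so they are ignored during training.

import torch

def pos_neg_sample(labels: torch.Tensor, pos_neg_ratio: float) -> torch.Tensor:
    labels = labels.clone()
    pos = torch.where(labels > 0)[0]
    neg = torch.where(labels == 0)[0]
    n_keep = int(max(1, len(pos)) * pos_neg_ratio)
    if len(neg) > n_keep:
        drop = neg[torch.randperm(len(neg))[n_keep:]]
        labels[drop] = -1          # ignored during training
    return labels

print(pos_neg_sample(torch.tensor([1, 0, 0, 0, 0, 0, 2, 0]), pos_neg_ratio=2.0))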
+
+cosense3d.modules.plugin.target_assigners.sample_mining(scores: Tensor, labels: Tensor, dists=None, sample_mining_thr=0.5, max_sample_ratio=5, max_num_sample=None)[source]
+

When only limited numbers of negative targets are sampled for training and the majority of the negative samples are ignored, there is a high probability that hard negative targets are also ignored. This weakens the model's ability to learn from these hard negatives and produces many false positives. Therefore, this function mines samples with high predictive scores as additional training targets. It should be used after ‘pos_neg_sampling’.

+
+
Parameters:
+
    +
  • scores – (N1, …Nk) classification scores/confidences that the +sample belong to foreground.

  • +
  • labels – (N1…, Nk) class labels, -1 indicates ignore, 0 indicates negative, +positive numbers indicates classes.

  • +
  • dists – distances.

  • +
  • sample_mining_thr – score threshold for sampling

  • +
  • max_sample_ratio – ratio n_sample / n_pos_sample.

  • +
  • max_num_sample – maximum number of samples.

  • +
+
+
Returns:
+

+
+
+
+ +
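A conceptual sketch of the mining step, simplified: the distance-based and top-k options in the signature are omitted, and only ignored samples with high foreground scores are re-activated as negatives.

import torch

def mine_hard_negatives(scores, labels, sample_mining_thr=0.5, max_sample_ratio=5):
    labels = labels.clone()
    n_pos = int((labels > 0).sum())
    # ignored samples (-1) whose foreground score exceeds the threshold are "hard negatives"
    cand = torch.where((labels == -1) & (scores > sample_mining_thr))[0]
    order = scores[cand].argsort(descending=True)
    keep = cand[order][: max_sample_ratio * max(n_pos, 1)]
    labels[keep] = 0               # train them as explicit negatives
    return labels

scores = torch.tensor([0.9, 0.1, 0.8, 0.2, 0.7])
labels = torch.tensor([1, -1, -1, -1, -1])
print(mine_hard_negatives(scores, labels))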
+
+

cosense3d.modules.plugin.transformer module

+
+
+class cosense3d.modules.plugin.transformer.FFN(embed_dims: int, feedforward_channels: int, num_fcs: int = 2, act_cfg: dict = {'inplace': True, 'type': 'ReLU'}, dropout: float = 0.0, add_residual: bool = True)[source]
+

Bases: Module

+

Implements feed-forward networks (FFNs) with residual connection.

+
+
+forward(x, residual=None)[source]
+

Forward function for FFN.

+
+ +
+
+training: bool
+
+ +
+ +
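An equivalent minimal sketch of the residual FFN described above; default hyper-parameters follow the signature, and the activation config handling is simplified to a plain ReLU.

import torch
import torch.nn as nn

class SimpleFFN(nn.Module):
    def __init__(self, embed_dims=256, feedforward_channels=1024, dropout=0.0, add_residual=True):
        super().__init__()
        self.add_residual = add_residual
        self.layers = nn.Sequential(
            nn.Linear(embed_dims, feedforward_channels), nn.ReLU(inplace=True), nn.Dropout(dropout),
            nn.Linear(feedforward_channels, embed_dims), nn.Dropout(dropout))

    def forward(self, x, residual=None):
        out = self.layers(x)
        if self.add_residual:
            # identity connection: fall back to the input when no residual is given
            out = out + (x if residual is None else residual)
        return out

print(SimpleFFN()(torch.randn(10, 2, 256)).shape)   # torch.Size([10, 2, 256])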
+
+class cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper(*args, **kwargs)[source]
+

Bases: MultiheadAttention

+
+
+bias_k: Tensor | None
+
+ +
+
+bias_v: Tensor | None
+
+ +
+
+forward(*args, **kwargs)[source]
+
+
Args:
+
query: Query embeddings of shape \((L, E_q)\) for unbatched input, \((L, N, E_q)\) when batch_first=False

or \((N, L, E_q)\) when batch_first=True, where \(L\) is the target sequence length, +\(N\) is the batch size, and \(E_q\) is the query embedding dimension embed_dim. +Queries are compared against key-value pairs to produce the output. +See “Attention Is All You Need” for more details.

+
+
key: Key embeddings of shape \((S, E_k)\) for unbatched input, \((S, N, E_k)\) when batch_first=False

or \((N, S, E_k)\) when batch_first=True, where \(S\) is the source sequence length, +\(N\) is the batch size, and \(E_k\) is the key embedding dimension kdim. +See “Attention Is All You Need” for more details.

+
+
value: Value embeddings of shape \((S, E_v)\) for unbatched input, \((S, N, E_v)\) when

batch_first=False or \((N, S, E_v)\) when batch_first=True, where \(S\) is the source +sequence length, \(N\) is the batch size, and \(E_v\) is the value embedding dimension vdim. +See “Attention Is All You Need” for more details.

+
+
key_padding_mask: If specified, a mask of shape \((N, S)\) indicating which elements within key

to ignore for the purpose of attention (i.e. treat as “padding”). For unbatched query, shape should be \((S)\). +Binary and byte masks are supported. +For a binary mask, a True value indicates that the corresponding key value will be ignored for +the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding key +value will be ignored.

+
+
need_weights: If specified, returns attn_output_weights in addition to attn_outputs.

Default: True.

+
+
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape

\((L, S)\) or \((N\cdot\text{num\_heads}, L, S)\), where \(N\) is the batch size, +\(L\) is the target sequence length, and \(S\) is the source sequence length. A 2D mask will be +broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch. +Binary, byte, and float masks are supported. For a binary mask, a True value indicates that the +corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the +corresponding position is not allowed to attend. For a float mask, the mask values will be added to +the attention weight.

+
+
average_attn_weights: If true, indicates that the returned attn_weights should be averaged across

heads. Otherwise, attn_weights are provided separately per head. Note that this flag only has an +effect when need_weights=True. Default: True (i.e. average weights across heads)

+
+
+
+
Outputs:
    +
  • attn_output - Attention outputs of shape \((L, E)\) when input is unbatched, +\((L, N, E)\) when batch_first=False or \((N, L, E)\) when batch_first=True, +where \(L\) is the target sequence length, \(N\) is the batch size, and \(E\) is the +embedding dimension embed_dim.

  • +
  • attn_output_weights - Only returned when need_weights=True. If average_attn_weights=True, returns attention weights averaged across heads of shape \((L, S)\) when input is unbatched or \((N, L, S)\), where \(N\) is the batch size, \(L\) is the target sequence length, and \(S\) is the source sequence length. If average_attn_weights=False, returns attention weights per head of shape \((\text{num\_heads}, L, S)\) when input is unbatched or \((N, \text{num\_heads}, L, S)\).

  • +
+
+

Note

+

batch_first argument is ignored for unbatched inputs.

+
+
+
+
+ +
+
+forward_fp16(*args, **kwargs)[source]
+
+ +
+
+forward_fp32(*args, **kwargs)[source]
+
+ +
+ +
+
+class cosense3d.modules.plugin.transformer.MultiheadAttention(embed_dims: int, num_heads: int, dropout: float = 0.1, batch_first: bool = False, cache_attn_weights: bool = False, fp16: bool = False, **kwargs)[source]
+

Bases: Module

+

A wrapper for torch.nn.MultiheadAttention. +This module implements MultiheadAttention with identity connection, +and positional encoding is also passed as input.

+
+
+forward(query, key=None, value=None, identity=None, query_pos=None, key_pos=None, attn_mask=None, key_padding_mask=None, **kwargs)[source]
+

Forward function for MultiheadAttention.

+
+
Parameters:
+
    +
  • query – The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, +else [bs, num_queries embed_dims].

  • +
  • key – The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, +else [bs, num_keys, embed_dims]. If None, the query will be used. Defaults to None.

  • +
  • value – The value tensor with same shape as key. Same in nn.MultiheadAttention.forward. +Defaults to None. If None, the key will be used.

  • +
  • identity – This tensor, with the same shape as x, will be used for the identity link. +If None, x will be used. Defaults to None.

  • +
  • query_pos – The positional encoding for query, with the same shape as x. +If not None, it will be added to x before forward function. Defaults to None.

  • +
  • key_pos – The positional encoding for key, with the same shape as key. Defaults to None. If not None, it will be added to key before the forward function. If None and query_pos has the same shape as key, then query_pos will be used for key_pos.

  • +
  • attn_mask – ByteTensor mask with shape [num_queries, num_keys]. +Same in nn.MultiheadAttention.forward. Defaults to None.

  • +
  • key_padding_mask – ByteTensor with shape [bs, num_keys]. Defaults to None.

  • +
  • kwargs – allow passing a more general data flow when combining with other operations in transformerlayer.

  • +
+
+
Returns:
+

forwarded results with shape [num_queries, bs, embed_dims] if self.batch_first is False, +else[bs, num_queries embed_dims].

+
+
+
+ +
+
+training: bool
+
+ +
+ +
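Conceptually the wrapper adds the positional encodings to query and key, runs standard multi-head attention, and adds the identity connection. A plain-torch sketch of that behaviour (not the wrapper's exact code):

import torch
import torch.nn as nn

embed_dims, num_heads, num_q, num_k, bs = 256, 8, 50, 100, 2
attn = nn.MultiheadAttention(embed_dims, num_heads)   # default layout: (seq, batch, embed)

query = torch.randn(num_q, bs, embed_dims)
key = torch.randn(num_k, bs, embed_dims)
query_pos = torch.randn(num_q, bs, embed_dims)
key_pos = torch.randn(num_k, bs, embed_dims)

out, _ = attn(query + query_pos, key + key_pos, key)   # positional encodings added before attention
out = out + query                                      # identity connection on the query
print(out.shape)                                       # torch.Size([50, 2, 256])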
+
+class cosense3d.modules.plugin.transformer.MultiheadFlashAttention(embed_dims: int, num_heads: int, attn_drop: float = 0.0, proj_drop: float = 0.0, dropout: float | None = None, batch_first: bool = True, cache_attn_weights: bool = False, **kwargs)[source]
+

Bases: Module

+

A wrapper for torch.nn.MultiheadAttention. +This module implements MultiheadAttention with identity connection, +and positional encoding is also passed as input.

+
+
+forward(query, key=None, value=None, identity=None, query_pos=None, key_pos=None, attn_mask=None, key_padding_mask=None, **kwargs)[source]
+

Forward function for MultiheadAttention.

+
+
Parameters:
+
    +
  • query – The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, +else [bs, num_queries embed_dims].

  • +
  • key – The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, else +[bs, num_keys, embed_dims]. If None, the query will be used. Defaults to None.

  • +
  • value – The value tensor with same shape as key. Same in nn.MultiheadAttention.forward. +Defaults to None. If None, the key will be used.

  • +
  • identity – This tensor, with the same shape as x, will be used for the identity link. +If None, x will be used. Defaults to None.

  • +
  • query_pos – The positional encoding for query, with the same shape as x. If not None, it will +be added to x before forward function. Defaults to None.

  • +
  • key_pos – The positional encoding for key, with the same shape as key. Defaults to None. +If not None, it will be added to key before forward function. If None, and query_pos has the same +shape as key, then query_pos will be used for key_pos. Defaults to None.

  • +
  • attn_mask – ByteTensor mask with shape [num_queries, num_keys]. +Same in nn.MultiheadAttention.forward. Defaults to None.

  • +
  • key_padding_mask – ByteTensor with shape [bs, num_keys]. Defaults to None.

  • +
  • kwargs – allow passing a more general data flow when combining with +other operations in transformerlayer.

  • +
+
+
Returns:
+

forwarded results with shape [num_queries, bs, embed_dims] if self.batch_first is False, else +[bs, num_queries embed_dims].

+
+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.transformer.PETRTemporalTransformer(encoder=None, decoder=None, cross=False)[source]
+

Bases: Module

+

Implements the DETR transformer.
Following the official DETR implementation, this module is copy-pasted from torch.nn.Transformer with modifications:
* positional encodings are passed in MultiheadAttention
* the extra LN at the end of the encoder is removed
* the decoder returns a stack of activations from all decoding layers
See the paper End-to-End Object Detection with Transformers for details.

+
+
+forward(memory, tgt, query_pos, pos_embed, attn_masks, temp_memory=None, temp_pos=None, mask=None, query_mask=None, reg_branch=None)[source]
+

Forward function for Transformer.

+
+ +
+
+init_weights()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.transformer.PETRTransformer(encoder=None, decoder=None, cross=False)[source]
+

Bases: Module

+

Implements the DETR transformer.
Following the official DETR implementation, this module is copy-pasted from torch.nn.Transformer with modifications:
* positional encodings are passed in MultiheadAttention
* the extra LN at the end of the encoder is removed
* the decoder returns a stack of activations from all decoding layers
See the paper End-to-End Object Detection with Transformers for details.

+
+
+forward(memory, tgt, query_pos, pos_embed, attn_masks=None, mask=None, query_mask=None)[source]
+

Forward function for Transformer.

+
+ +
+
+init_weights()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.transformer.TransformerDecoder(*args, post_norm_cfg={'type': 'LN'}, return_intermediate=False, **kwargs)[source]
+

Bases: TransformerLayerSequence

+

Implements the decoder in DETR transformer.

+
+
+forward(query, *args, **kwargs)[source]
+

Forward function for TransformerDecoder.

+
+
Parameters:
+

query – (Tensor) Input query with shape (num_query, bs, embed_dims).

+
+
Return:Tensor:
+

Results with shape [1, num_query, bs, embed_dims] when +return_intermediate is False, otherwise it has shape [num_layers, num_query, bs, embed_dims].

+
+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.transformer.TransformerDecoderLayer(attn_cfgs=None, ffn_cfgs=None, operation_order=None, norm_cfg={'type': 'LN'}, batch_first=False, with_cp=True, **kwargs)[source]
+

Bases: Module

+
+
+forward(query, key=None, value=None, query_pos=None, key_pos=None, temp_memory=None, temp_pos=None, attn_masks=None, query_key_padding_mask=None, key_padding_mask=None, **kwargs)[source]
+

Forward function for TransformerDecoderLayer.
Returns: Tensor, forwarded results with shape [num_query, bs, embed_dims].

+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.transformer.TransformerLayerSequence(transformerlayers=None, num_layers=None)[source]
+

Bases: Module

+

Base class for TransformerEncoder and TransformerDecoder in vision +transformer.

+

As base-class of Encoder and Decoder in vision transformer. +Support customization such as specifying different kind +of transformer_layer in transformer_coder.

+
+
+forward(query, key, value, query_pos=None, key_pos=None, attn_masks=None, query_key_padding_mask=None, key_padding_mask=None, **kwargs)[source]
+

Forward function for TransformerCoder.

+
+
Parameters:
+
    +
  • query – (Tensor) Input query with shape (num_queries, bs, embed_dims).

  • +
  • key – (Tensor) The key tensor with shape (num_keys, bs, embed_dims).

  • +
  • value – (Tensor) The value tensor with shape (num_keys, bs, embed_dims).

  • +
  • query_pos – (Tensor) The positional encoding for query. Default: None.

  • +
  • key_pos – (Tensor) The positional encoding for key. Default: None.

  • +
  • attn_masks – (List[Tensor], optional) Each element is 2D Tensor which is +used in calculation of corresponding attention in operation_order. Default: None.

  • +
  • query_key_padding_mask – (Tensor) ByteTensor for query, with shape [bs, num_queries]. +Only used in self-attention Default: None.

  • +
  • key_padding_mask – (Tensor) ByteTensor for key, with shape [bs, num_keys]. Default: None.

  • +
+
+
Returns:
+

results with shape [num_queries, bs, embed_dims].

+
+
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.plugin.transformer.build_module(cfg)[source]
+
+ +
+
+

cosense3d.modules.plugin.voxel_encoder module

+
+
+class cosense3d.modules.plugin.voxel_encoder.MeanVFE(num_point_features, **kwargs)[source]
+

Bases: Module

+
+
+forward(voxel_features, voxel_num_points)[source]
+
+
Args:

voxels: (num_voxels, max_points_per_voxel, C)
voxel_num_points: optional (num_voxels)

+
+
Returns:

vfe_features: (num_voxels, C)

+
+
+
+ +
+
+get_output_feature_dim()[source]
+
+ +
+
+training: bool
+
+ +
+ +
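A sketch of mean voxel feature encoding: average only the valid points in each voxel, following the shapes in the Args above.

import torch

num_voxels, max_pts, C = 4, 32, 4
voxels = torch.randn(num_voxels, max_pts, C)
voxel_num_points = torch.tensor([32, 5, 17, 1])

# mask out the padded point slots before averaging
mask = (torch.arange(max_pts)[None, :] < voxel_num_points[:, None]).unsqueeze(-1)
vfe_features = (voxels * mask).sum(dim=1) / voxel_num_points.clamp(min=1).unsqueeze(-1).float()
print(vfe_features.shape)   # torch.Size([4, 4])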
+
+

cosense3d.modules.plugin.voxel_generator module

+
+
+class cosense3d.modules.plugin.voxel_generator.VoxelGenerator(voxel_size, lidar_range, max_points_per_voxel, empty_mean=True, mode='train', device='cuda', **kwargs)[source]
+

Bases: object

+
+ +
+
+

cosense3d.modules.plugin.voxnet_utils module

+
+
+class cosense3d.modules.plugin.voxnet_utils.CML(in_channels)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.voxnet_utils.CMLSparse(in_channels)[source]
+

Bases: Module

+
+
+forward(feats, coords)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.plugin.voxnet_utils.Conv3d(in_channels, out_channels, k, s, p, batch_norm=True)[source]
+

Bases: Module

+
+
+forward(x)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.plugin.vsa module

+
+
+class cosense3d.modules.plugin.vsa.VoxelSetAbstraction(voxel_size, point_cloud_range, num_keypoints=4096, num_out_features=32, point_source='raw_points', features_source=None, num_bev_features=128, bev_stride=8, num_rawpoint_features=3, enlarge_selection_boxes=True, sa_layer=None, min_selected_kpts=128, **kwargs)[source]
+

Bases: Module

+
+
+forward(det_out, bev_feat, voxel_feat, points)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+get_sampled_points(points, voxel_coords)[source]
+
+ +
+
+interpolate_from_bev_features(keypoints_list, bev_features)[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.plugin.vsa.bilinear_interpolate_torch(im, x, y)[source]
+
+
Args:

im: (H, W, C) [y, x]
x: (N)
y: (N)

+
+
+

Returns:

+
+ +
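A conceptual sketch of bilinear interpolation on an (H, W, C) map at continuous (x, y) locations; boundary handling is simplified, and this is not necessarily the library's exact implementation.

import torch

def bilinear_interp(im, x, y):
    # corner indices around each query location
    x0, y0 = torch.floor(x).long(), torch.floor(y).long()
    x1 = (x0 + 1).clamp(max=im.shape[1] - 1)
    y1 = (y0 + 1).clamp(max=im.shape[0] - 1)
    x0, y0 = x0.clamp(min=0), y0.clamp(min=0)
    # bilinear weights
    wa = (x1.float() - x) * (y1.float() - y)
    wb = (x1.float() - x) * (y - y0.float())
    wc = (x - x0.float()) * (y1.float() - y)
    wd = (x - x0.float()) * (y - y0.float())
    return (im[y0, x0] * wa[:, None] + im[y1, x0] * wb[:, None]
            + im[y0, x1] * wc[:, None] + im[y1, x1] * wd[:, None])

feat = torch.randn(64, 64, 8)
print(bilinear_interp(feat, torch.tensor([3.4]), torch.tensor([10.7])).shape)   # torch.Size([1, 8])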
+
+

Module contents

+
+
+cosense3d.modules.plugin.build_plugin_layer(cfg: Dict, postfix: int | str = '', **kwargs) Tuple[str, Module][source]
+

Build plugin layer.

+
+
Parameters:
+
    +
  • cfg

    cfg should contain:

    +
      +
    • type (str): identify plugin layer type.

    • +
    • layer args: args needed to instantiate a plugin layer.

    • +
    +

  • +
  • postfix – appended into norm abbreviation to +create named layer. Default: ‘’.

  • +
  • kwargs

  • +
+
+
Returns:
+

The first one is the concatenation of +abbreviation and postfix. The second is the created plugin layer.

+
+
+
+ +
+
+cosense3d.modules.plugin.build_plugin_module(cfg: Dict)[source]
+
+ +
+
+cosense3d.modules.plugin.infer_abbr(class_type: type) str[source]
+

Infer abbreviation from the class name.

+

This method will infer the abbreviation to map class types to +abbreviations.

+

Rule 1: If the class has the property “abbr”, return the property.
Rule 2: Otherwise, the abbreviation falls back to the snake case of the class name, e.g. the abbreviation of FancyBlock will be fancy_block.

+
+
Parameters:
+

class_type – The norm layer type.

+
+
Returns:
+

The inferred abbreviation.

+
+
+
+ +
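For example, Rule 2 corresponds to a plain snake-case conversion; the helper below is hypothetical and shown only to illustrate the rule.

import re

def snake_case(name: str) -> str:
    # insert "_" before every capital letter except the first, then lowercase
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

print(snake_case('FancyBlock'))   # fancy_block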
+
\ No newline at end of file
diff --git a/docs/_build/html/cosense3d.modules.projection.html b/docs/_build/html/cosense3d.modules.projection.html
new file mode 100644
index 00000000..c6810cc5
--- /dev/null
+++ b/docs/_build/html/cosense3d.modules.projection.html
@@ -0,0 +1,245 @@
+cosense3d.modules.projection package — Cosense3D 1.0.0 documentation

cosense3d.modules.projection package

+
+

Submodules

+
+
+

cosense3d.modules.projection.fax module

+
+
+class cosense3d.modules.projection.fax.FAXModule(middle, dim, img_size, strides, feat_dims, cross_view, cross_view_swap, bev_embedding, self_attn, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(img_feat, intrinsic, extrinsic, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.projection.fax.ResNetBottleNeck(c)
+
+ +
+
+

cosense3d.modules.projection.petr module

+
+
+class cosense3d.modules.projection.petr.PETR(in_channels, transformer, position_range, num_reg_fcs=2, num_pred=3, topk=2048, num_query=644, depth_num=64, LID=True, depth_start=1, **kwargs)[source]
+

Bases: BaseModule

+
+
+format_input(input: List)[source]
+
+ +
+
+forward(img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img, **kwargs)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+gather_topk(img_feat, img_roi, img_coor, img_size, intrinsics, lidar2img)[source]
+
+ +
+
+img_position_embeding(img_memory, img_pos, Is, img2lidars)[source]
+
+ +
+
+init_weights()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.projection.spatial_transform module

+
+
+class cosense3d.modules.projection.spatial_transform.STTF(resolution, downsample_rate, use_roi_mask=True, **kwargs)[source]
+

Bases: BaseModule

+
+
+forward(bev_feat, requests, coop_poses, **kwargs)[source]
+

Transform the bev features to ego space.

+
+ +
+
+training: bool
+
+ +
+ +
+
+

Module contents

+

Modules for projecting image features to BEV space

+
+
\ No newline at end of file
diff --git a/docs/_build/html/cosense3d.modules.utils.html b/docs/_build/html/cosense3d.modules.utils.html
new file mode 100644
index 00000000..315fd872
--- /dev/null
+++ b/docs/_build/html/cosense3d.modules.utils.html
@@ -0,0 +1,1037 @@
+cosense3d.modules.utils package — Cosense3D 1.0.0 documentation

cosense3d.modules.utils package

+
+

Submodules

+
+
+

cosense3d.modules.utils.box_coder module

+
+
+class cosense3d.modules.utils.box_coder.BoxPredCoder(with_velo=False)[source]
+

Bases: object

+
+
+decode(centers, reg)[source]
+
+
Parameters:
+
    +
  • centers – Tensor (N, 3) or (B, N, 2+).

  • +
  • reg – dict with entries:
      box - (N, 6) or (B, N, 6)
      dir - (N, 8) or (B, N, 8)
      scr - (N, 4) or (B, N, 4)
      vel - (N, 2) or (B, N, 2), optional
      pred - (N, 5) or (B, N, 5), optional

  • +
+
+
Returns:
+

decoded bboxes.

+
+
+
+ +
+
+encode(centers, gt_boxes, meter_per_pixel, gt_preds)[source]
+
+
Parameters:
+
    +
  • centers – (N, 3)

  • +
  • gt_boxes – (N, 8) [batch_idx, x, y, z, l, w, h, r]

  • +
  • meter_per_pixel – tuple with 2 elements

  • +
  • gt_preds – (N, 8) [batch_idx, x, y, z, l, w, h, r], gt boxes to be predicted

  • +
+
+
Returns:
+

encoded bbox targets.

+
+
+
+ +
+ +
+
+class cosense3d.modules.utils.box_coder.CenterBoxCoder(with_velo=False, with_pred=False, reg_radius=1.6, z_offset=1.0)[source]
+

Bases: object

+
+
+decode(centers, reg)[source]
+
+
Parameters:
+
    +
  • centers – Tensor (N, 3) or (B, N, 2+).

  • +
  • reg – dict with entries:
      box - (N, 6) or (B, N, 6)
      dir - (N, 8) or (B, N, 8)
      scr - (N, 4) or (B, N, 4)
      vel - (N, 2) or (B, N, 2), optional
      pred - (N, 5) or (B, N, 5), optional

  • +
+
+
Returns:
+

decoded bboxes.

+
+
+
+ +
+
+encode(centers, gt_boxes, meter_per_pixel, gt_preds=None)[source]
+
+
Parameters:
+
    +
  • centers – (N, 3)

  • +
  • gt_boxes – (N, 8) [batch_idx, x, y, z, l, w, h, r]

  • +
  • meter_per_pixel – tuple with 2 elements

  • +
  • gt_preds

  • +
+
+
Returns:
+

+
+
+
+ +
+ +
+
+class cosense3d.modules.utils.box_coder.ResidualBoxCoder(mode: str = 'simple_dist')[source]
+

Bases: object

+
+
+decode(anchors, boxes_enc, dir_scores=None)[source]
+
+ +
+
+decode_direction(ra, vt, dir_scores=None)[source]
+
+ +
+
+encode(anchors, boxes)[source]
+
+ +
+
+encode_direction(ra, rg)[source]
+
+ +
+ +
+
+cosense3d.modules.utils.box_coder.build_box_coder(type, **kwargs)[source]
+
+ +
+
+

cosense3d.modules.utils.common module

+
+
+cosense3d.modules.utils.common.bias_init_with_prob(prior_prob: float) float[source]
+

initialize conv/fc bias value according to a given probability value.

+
+ +
+
+cosense3d.modules.utils.common.cat_coor_with_idx(tensor_list)[source]
+
+ +
+
+cosense3d.modules.utils.common.cat_name_str(module_name)[source]
+
+
Parameters:
+

module_name – str, format in xxx_yyy_zzz

+
+
Returns:
+

class_name: str, format in XxxYyyZzz

+
+
+
+ +
+
+cosense3d.modules.utils.common.clip_sigmoid(x: Tensor, eps: float = 0.0001) Tensor[source]
+

Sigmoid function for input feature.

+
+
Parameters:
+
    +
  • x – Input feature map with the shape of [B, N, H, W].

  • +
  • eps – Lower bound of the range to be clamped to. +Defaults to 1e-4.

  • +
+
+
Returns:
+

Feature map after sigmoid.

+
+
+
+ +
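A minimal sketch of the described behaviour, assuming a plain clamp of the sigmoid output to [eps, 1 - eps] so later log terms stay finite:

import torch

def clip_sigmoid(x: torch.Tensor, eps: float = 1e-4) -> torch.Tensor:
    # clamp keeps the output strictly inside (0, 1)
    return torch.clamp(x.sigmoid(), min=eps, max=1 - eps)

print(clip_sigmoid(torch.tensor([-20.0, 0.0, 20.0])))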
+
+cosense3d.modules.utils.common.draw_sample_prob(centers, reg, samples, res, distr_r, det_r, batch_size, var0)[source]
+
+ +
+
+cosense3d.modules.utils.common.fuse_batch_indices(coords, num_cav)[source]
+

Fusing voxels of CAVs from the same frame.
:param stensor: ME sparse tensor
:param num_cav: list of number of CAVs for each frame
:return: fused coordinates and features of stensor

+
+ +
+
+cosense3d.modules.utils.common.get_conv2d_layers(conv_name, in_channels, out_channels, n_layers, kernel_size, stride, padding, relu_last=True, sequential=True, **kwargs)[source]
+

Build convolutional layers. kernel_size, stride and padding should each be a list whose length matches n_layers.

+
+ +
+
+cosense3d.modules.utils.common.get_norm_layer(channels, norm)[source]
+
+ +
+
+cosense3d.modules.utils.common.get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range)[source]
+

Get centers of spconv voxels.

+
+
Parameters:
+
    +
  • voxel_coords – (N, 3)

  • +
  • downsample_times

  • +
  • voxel_size

  • +
  • point_cloud_range

  • +
+
+
Returns:
+

+
+
+
+ +
+
+cosense3d.modules.utils.common.instantiate(module_name, cls_name=None, module_cfg=None, **kwargs)[source]
+
+ +
+
+cosense3d.modules.utils.common.inverse_sigmoid(x, eps=1e-05)[source]
+

Inverse function of sigmoid.

+
+
Parameters:
+
    +
  • x – (Tensor) The tensor to do the +inverse.

  • +
  • eps – (float) EPS avoid numerical +overflow. Defaults 1e-5.

  • +
+
+
Returns:
+

Tensor: the input x after applying the inverse sigmoid, with the same shape as the input.

+
+
+
+ +
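A sketch of the inverse (logit) function with eps clamping for numerical stability, mirroring the description above:

import torch

def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)            # avoid log(0)
    x2 = (1 - x).clamp(min=eps)      # avoid division by 0
    return torch.log(x1 / x2)

p = torch.tensor([0.0, 0.25, 0.5, 0.99])
print(torch.sigmoid(inverse_sigmoid(p)))   # approximately recovers p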
+
+cosense3d.modules.utils.common.limit_period(val, offset=0.5, period=6.283185306)[source]
+
+ +
+
+cosense3d.modules.utils.common.linear_last(in_channels, mid_channels, out_channels, bias=False, norm='BN')[source]
+
+ +
+
+cosense3d.modules.utils.common.linear_layers(in_out, activations=None, norm='BN')[source]
+
+ +
+
+cosense3d.modules.utils.common.meshgrid(xmin, xmax, ymin=None, ymax=None, dim=2, n_steps=None, step=None)[source]
+
+ +
+
+cosense3d.modules.utils.common.meshgrid_cross(xmins, xmaxs, n_steps=None, steps=None)[source]
+
+ +
+
+cosense3d.modules.utils.common.pad_l(tensor, value=0)[source]
+
+ +
+
+cosense3d.modules.utils.common.pad_r(tensor, value=0)[source]
+
+ +
+
+cosense3d.modules.utils.common.topk_gather(feat, topk_indexes)[source]
+
+ +
+
+cosense3d.modules.utils.common.weighted_mahalanobis_dists(reg_evi, reg_var, dists, var0)[source]
+
+ +
+
+cosense3d.modules.utils.common.xavier_init(module: Module, gain: float = 1, bias: float = 0, distribution: str = 'normal') None[source]
+
+ +
+
+

cosense3d.modules.utils.conv module

+
+
+class cosense3d.modules.utils.conv.ConvModule(in_channels: int, out_channels: int, kernel_size: int | Tuple[int, int], stride: int | Tuple[int, int] = 1, padding: int | Tuple[int, int] = 0, dilation: int | Tuple[int, int] = 1, groups: int = 1, bias: bool | str = 'auto', conv_cfg: Dict | None = None, norm_cfg: Dict | None = None, act_cfg: Dict | None = {'type': 'ReLU'}, inplace: bool = True, with_spectral_norm: bool = False, padding_mode: str = 'zeros', order: tuple = ('conv', 'norm', 'act'))[source]
+

Bases: Module

+

A conv block that bundles conv/norm/activation layers.

+

This block simplifies the usage of convolution layers, which are commonly +used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). +It is based upon three build methods: build_conv_layer(), +build_norm_layer() and build_activation_layer().

+

Besides, we add some additional features in this module:
1. Automatically set bias of the conv layer.
2. Spectral norm is supported.
3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only supports zero and circular padding, and we add “reflect” padding mode.

+
+
Args:
+
in_channels (int): Number of channels in the input feature map.

Same as that in nn._ConvNd.

+
+
out_channels (int): Number of channels produced by the convolution.

Same as that in nn._ConvNd.

+
+
kernel_size (int | tuple[int]): Size of the convolving kernel.

Same as that in nn._ConvNd.

+
+
stride (int | tuple[int]): Stride of the convolution.

Same as that in nn._ConvNd.

+
+
padding (int | tuple[int]): Zero-padding added to both sides of

the input. Same as that in nn._ConvNd.

+
+
dilation (int | tuple[int]): Spacing between kernel elements.

Same as that in nn._ConvNd.

+
+
groups (int): Number of blocked connections from input channels to

output channels. Same as that in nn._ConvNd.

+
+
bias (bool | str): If specified as auto, it will be decided by the

norm_cfg. Bias will be set as True if norm_cfg is None, otherwise +False. Default: “auto”.

+
+
conv_cfg (dict): Config dict for convolution layer. Default: None,

which means using conv2d.

+
+
+

norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (dict): Config dict for activation layer.

+
+

Default: dict(type=’ReLU’).

+
+
+
inplace (bool): Whether to use inplace mode for activation.

Default: True.

+
+
with_spectral_norm (bool): Whether use spectral norm in conv module.

Default: False.

+
+
padding_mode (str): If the padding_mode has not been supported by

current Conv2d in PyTorch, we will use our own padding layer +instead. Currently, we support [‘zeros’, ‘circular’] with official +implementation and [‘reflect’] with our own implementation. +Default: ‘zeros’.

+
+
order (tuple[str]): The order of conv/norm/activation layers. It is a

sequence of “conv”, “norm” and “act”. Common examples are +(“conv”, “norm”, “act”) and (“act”, “conv”, “norm”). +Default: (‘conv’, ‘norm’, ‘act’).

+
+
+
+
+
+
+forward(x: Tensor, activate: bool = True, norm: bool = True) Tensor[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+init_weights()[source]
+
+ +
+
+property norm
+
+ +
+
+training: bool
+
+ +
+ +
+
+cosense3d.modules.utils.conv.build_conv_layer(cfg: Dict | None, *args, **kwargs) Module[source]
+

Build convolution layer. Modified from openmmlab.

+
+
Args:
+
cfg (None or dict): The conv layer config, which should contain:
    +
  • type (str): Layer type.

  • +
  • layer args: Args needed to instantiate a conv layer.

  • +
+
+
args (argument list): Arguments passed to the __init__

method of the corresponding conv layer.

+
+
kwargs (keyword arguments): Keyword arguments passed to the __init__

method of the corresponding conv layer.

+
+
+
+
Returns:

nn.Module: Created conv layer.

+
+
+
+ +
+
+cosense3d.modules.utils.conv.build_padding_layer(cfg: Dict, *args, **kwargs) Module[source]
+

Build padding layer.

+
+
Args:
+
cfg (dict): The padding layer config, which should contain:
    +
  • type (str): Layer type.

  • +
  • layer args: Args needed to instantiate a padding layer.

  • +
+
+
+
+
Returns:

nn.Module: Created padding layer.

+
+
+
+ +
+
+

cosense3d.modules.utils.edl_utils module

+
+
+cosense3d.modules.utils.edl_utils.logit_to_edl(logits)[source]
+
+

Parameters

+

logits: Tensor, (…, C),

+
+
+

Returns

+
+
+ +
+
+

cosense3d.modules.utils.gaussian_utils module

+
+
+cosense3d.modules.utils.gaussian_utils.center_to_img_coor(center_in, lidar_range, pixel_sz)[source]
+
+ +
+
+cosense3d.modules.utils.gaussian_utils.cornernet_gaussian_radius(height, width, min_overlap=0.5)[source]
+
+ +
+
+cosense3d.modules.utils.gaussian_utils.draw_gaussian_map(boxes, lidar_range, pixel_sz, batch_size, radius=None, sigma=1, min_radius=2)[source]
+
+ +
+
+cosense3d.modules.utils.gaussian_utils.gaussian_2d(shape: List[int], sigma: float = 1.0) ndarray[source]
+

Generate gaussian map.

+
+
Parameters:
+
    +
  • shape – Shape of the map.

  • +
  • sigma – Sigma to generate gaussian map. +Defaults to 1.

  • +
+
+
Returns:
+

Generated gaussian map.

+
+
+
+ +
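A NumPy sketch of the described map (a CornerNet-style Gaussian patch); the cutoff of tiny values is an implementation-detail assumption.

import numpy as np

def gaussian_2d(shape, sigma=1.0):
    m, n = [(s - 1) / 2 for s in shape]
    y, x = np.ogrid[-m:m + 1, -n:n + 1]
    h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
    h[h < np.finfo(h.dtype).eps * h.max()] = 0   # drop numerically negligible values
    return h

print(gaussian_2d((5, 5), sigma=1.0).round(2))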
+
+cosense3d.modules.utils.gaussian_utils.gaussian_radius(box_dims, pixel_sz, overlap, min_radius=2)[source]
+
+ +
+
+cosense3d.modules.utils.gaussian_utils.mahalanobis_dists_2d(sigmas, dists)[source]
+

Compute the squared mahalanobis distances.

+
+
Parameters:
+
    +
  • sigmas – (N, 2), standard deviation of Gaussian distribution

  • +
  • dists – (N, 2), distances to gaussian center

  • +
+
+
Returns:
+

(N), squared mahalanobis

+
+
+
+ +
+
+cosense3d.modules.utils.gaussian_utils.weighted_mahalanobis_dists(vars, dists, weights=None)[source]
+

Compute the squared mahalanobis distances.

+
+
Parameters:
+
    +
  • vars – (N, 2), variances of Gaussian distribution.

  • +
  • dists – (N, 2), distances to gaussian center at each axis.

  • +
  • weights – weights to be applied to the output probability.

  • +
+
+
Returns:
+

(N), squared mahalanobis

+
+
+
+ +
+
+

cosense3d.modules.utils.init module

+
+
+cosense3d.modules.utils.init.bias_init_with_prob(prior_prob: float) float[source]
+

initialize conv/fc bias value according to a given probability value.

+
+ +
+
+cosense3d.modules.utils.init.constant_init(module: Module, val: float, bias: float = 0) None[source]
+
+ +
+
+cosense3d.modules.utils.init.kaiming_init(module: Module, a: float = 0, mode: str = 'fan_out', nonlinearity: str = 'relu', bias: float = 0, distribution: str = 'normal') None[source]
+
+ +
+
+cosense3d.modules.utils.init.normal_init(module: Module, mean: float = 0, std: float = 1, bias: float = 0) None[source]
+
+ +
+
+cosense3d.modules.utils.init.trunc_normal_init(module: Module, mean: float = 0, std: float = 1, a: float = -2, b: float = 2, bias: float = 0) None[source]
+
+ +
+
+cosense3d.modules.utils.init.uniform_init(module: Module, a: float = 0, b: float = 1, bias: float = 0) None[source]
+
+ +
+
+cosense3d.modules.utils.init.xavier_init(module: Module, gain: float = 1, bias: float = 0, distribution: str = 'normal') None[source]
+
+ +
+
+

cosense3d.modules.utils.me_utils module

+
+
+cosense3d.modules.utils.me_utils.bev_sparse_to_dense(self, preds)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.devoxelize_with_centroids(out: SparseTensor, x: TensorField, h_embs)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.downsample_embeddings(embeddings, inverse_map, size, mode='avg')[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.downsample_points(points, tensor_map, field_map, size)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.get_conv_block(nc, k=3, d=3, tr=False, bn_momentum=0.1, distributed=False)[source]
+

Create sparse convolution block.
:param nc: number of channels in each layer in [in_layer, mid_layer, out_layer]
:param k: kernel size
:param tr: transposed convolution
:return: conv block

+
+ +
+
+cosense3d.modules.utils.me_utils.get_kernel_map_and_out_key(stensor, stensor_out=None, kernel_size=3, stride=1, dilation=1, kernel_type='cube', kernel_generator=None)[source]
+

Generate kernel maps for the input stensor.
The hybrid and custom kernel is not implemented in ME v0.5.x; this function uses a kernel mask to select the kernel maps for the customized kernel shapes.
:param stensor: ME.SparseTensor, NxC
:param kernel_type: ‘cube’ (default) | ‘hybrid’
:return: masked kernel maps

+
+ +
+
+cosense3d.modules.utils.me_utils.indices2metric(indices, voxel_size)[source]
+

Voxel indices to voxel center in meter

+
+ +
+
+cosense3d.modules.utils.me_utils.me_coor_to_grid_indices(lr, voxel_size, stride, coor)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.metric2indices(coor, voxel_size)[source]
+

Round towards floor.

+
+ +
+
+cosense3d.modules.utils.me_utils.mink_coor_limit(lidar_range, voxel_size, stride)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.minkconv_conv_block(in_dim, out_dim, kernel, stride, d=3, bn_momentum=0.1, activation='LeakyReLU', tr=False, expand_coordinates=False, norm_before=False, distributed=False)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.minkconv_layer(in_dim, out_dim, kernel, stride, d, tr=False)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.normalize_centroids(down_points, coordinates, tensor_stride)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.normalize_points(points, centroids, tensor_map)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.prepare_input_data(points_list, voxel_size, QMODE, floor_height, coor_dim=3, feat_dim=3)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.sparse_to_dense(stensor, voxel_size, det_r)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.stride_centroids(points, counts, rows, cols, size)[source]
+
+ +
+
+cosense3d.modules.utils.me_utils.update_me_essentials(self: object, data_info: dict, stride: int | None = None)[source]
+

Update essential variables for ME-based models

+
+
Parameters:
+
    +
  • self – instance of a python class

  • +
  • data_info

      +
    • det_r: float

    • +
    • lidar_range: [xmin, ymin, zmin, xmax, ymax, zmax]

    • +
    • voxel_size: [vx, vy, vz]

    • +
    +

  • +
  • stride

  • +
+
+
Returns:
+

+
+
+
+ +
+
+cosense3d.modules.utils.me_utils.voxelize_with_centroids(x: TensorField, enc_mlp, pc_range)[source]
+
+ +
+
+

cosense3d.modules.utils.misc module

+
+
+class cosense3d.modules.utils.misc.MLN(c_dim, f_dim=256)[source]
+

Bases: Module

+
+
Args:

c_dim (int): dimension of latent code c
f_dim (int): feature dimension

+
+
+
+
+forward(x, c)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+reset_parameters()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.utils.misc.MLN2(c_dim, f_dim=256)[source]
+

Bases: Module

+
+
Args:

c_dim (int): dimension of latent code c
f_dim (int): feature dimension

+
+
+
+
+forward(x, c)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+reset_parameters()[source]
+
+ +
+
+training: bool
+
+ +
+ +
+
+class cosense3d.modules.utils.misc.SELayer_Linear(channels, act_layer=<class 'torch.nn.modules.activation.ReLU'>, gate_layer=<class 'torch.nn.modules.activation.Sigmoid'>, norm=False)[source]
+

Bases: Module

+
+
+forward(x, x_se)[source]
+

Defines the computation performed at every call.

+

Should be overridden by all subclasses.

+
+

Note

+

Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

+
+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.utils.nbr_attn module

+
+
+class cosense3d.modules.utils.nbr_attn.NeighborhoodAttention(emb_dim, n_nbr=16, num_pose_feat=64, **kwargs)[source]
+

Bases: Module

+

Generate reference points and attend neighborhood features.

+
+
+forward(memory, mem_coor, q_coor, B)[source]
+
+
Args:

q: (S, D)
kv: (L, D)
q_coor: (S, 3), [idx, x, y]
kv_coor: (L, 3)

+
+
+

Returns:

+
+ +
+
+training: bool
+
+ +
+ +
+
+

cosense3d.modules.utils.norm module

+
+
+cosense3d.modules.utils.norm.build_norm_layer(cfg: Dict, num_features: int, postfix: int | str = '') Tuple[str, Module][source]
+

Build normalization layer. Modified from openmmlab.

+
+
Parameters:
+
    +
  • cfg – (dict) The norm layer config, which should contain:
      - type (str): Layer type.
      - layer args: Args needed to instantiate a norm layer.
      - requires_grad (bool, optional): Whether to stop gradient updates.

  • +
  • num_features – (int) Number of input channels.

  • +
  • postfix – (int | str) The postfix to be appended into norm abbreviation +to create named layer.

  • +
+
+
Returns:
+

tuple[str, nn.Module]: The first element is the layer name consisting +of abbreviation and postfix, e.g., bn1, gn. The second element is the +created norm layer.

+
+
+
+ +
+
+

cosense3d.modules.utils.positional_encoding module

+
+
+cosense3d.modules.utils.positional_encoding.coor2ratio(coor, lidar_range)[source]
+
+ +
+
+cosense3d.modules.utils.positional_encoding.img_locations(img_size, feat_size=None, stride=None)[source]
+
+ +
+
+cosense3d.modules.utils.positional_encoding.nerf_positional_encoding(tensor: Tensor, num_encoding_functions: int = 6, include_input: bool = False, log_sampling: bool = True) Tensor[source]
+

Apply positional encoding to the input.

+
+
Parameters:
+
    +
  • tensor – Input tensor to be positionally encoded.

  • +
  • num_encoding_functions – Number of encoding functions used to compute +a positional encoding (default: 6).

  • +
  • include_input – Whether or not to include the input in the positional encoding (default: False).

  • +
  • log_sampling

  • +
+
+
Returns:
+

Positional encoding of the input tensor.

+
+
+
+ +
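For orientation, the classic NeRF-style encoding maps each coordinate to sine/cosine features at exponentially spaced frequencies. The following is a minimal illustrative sketch (assuming log-sampled frequencies 2^0 … 2^(num_encoding_functions-1)); it is not necessarily identical to the implementation above.

import torch

def nerf_pe_sketch(x: torch.Tensor, num_funcs: int = 6, include_input: bool = False) -> torch.Tensor:
    # frequencies 2^0 ... 2^(num_funcs - 1), log-sampled as in the NeRF paper
    freqs = 2.0 ** torch.arange(num_funcs, dtype=x.dtype, device=x.device)
    feats = [x] if include_input else []
    for f in freqs:
        feats.append(torch.sin(x * f))
        feats.append(torch.cos(x * f))
    return torch.cat(feats, dim=-1)

pts = torch.rand(100, 3)            # e.g. normalized reference points
emb = nerf_pe_sketch(pts)           # -> shape (100, 3 * 2 * 6) = (100, 36)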
+
+cosense3d.modules.utils.positional_encoding.pos2posemb1d(pos, num_pos_feats=256, temperature=10000)[source]
+
+ +
+
+cosense3d.modules.utils.positional_encoding.pos2posemb2d(pos, num_pos_feats=128, temperature=10000)[source]
+
+ +
+
+cosense3d.modules.utils.positional_encoding.pos2posemb3d(pos, num_pos_feats=128, temperature=10000)[source]
+
+ +
+
+cosense3d.modules.utils.positional_encoding.ratio2coord(ratio, lidar_range)[source]
+
+ +
+
+

Module contents

+
+
+cosense3d.modules.utils.build_torch_module(cfg)[source]
+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cosense3d.utils.html b/docs/_build/html/cosense3d.utils.html new file mode 100644 index 00000000..6318f885 --- /dev/null +++ b/docs/_build/html/cosense3d.utils.html @@ -0,0 +1,1662 @@ + + + + + + + cosense3d.utils package — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

cosense3d.utils package

+
+

Submodules

+
+
+

cosense3d.utils.box_utils module

+
+
+cosense3d.utils.box_utils.bbox_cxcywh_to_xyxy(bbox)[source]
+

Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).

+
+
Parameters:
+

bbox (Tensor) – Shape (n, 4) for bboxes.

+
+
Returns:
+

Tensor: Converted bboxes.

+
+
+
+ +
+
+cosense3d.utils.box_utils.bbox_xyxy_to_cxcywh(bbox)[source]
+

Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).

+
+
Parameters:
+

bbox (Tensor) – Shape (n, 4) for bboxes.

+
+
Returns:
+

Tensor, Converted bboxes.

+
+
+
+ +
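The two conversions above are inverses of each other; the underlying arithmetic is the standard one sketched below (an illustration, not necessarily the exact implementation).

import torch

def cxcywh_to_xyxy_sketch(b: torch.Tensor) -> torch.Tensor:
    cx, cy, w, h = b.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)

def xyxy_to_cxcywh_sketch(b: torch.Tensor) -> torch.Tensor:
    x1, y1, x2, y2 = b.unbind(-1)
    return torch.stack([(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1], dim=-1)

boxes = torch.tensor([[10., 20., 4., 6.]])      # (cx, cy, w, h)
assert torch.allclose(xyxy_to_cxcywh_sketch(cxcywh_to_xyxy_sketch(boxes)), boxes)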
+
+cosense3d.utils.box_utils.boxes3d_to_standup_bboxes(boxes)[source]
+
+
Parameters:
+

boxes – Tensor(N, 7)

+
+
Returns:
+

Tensor(N, 4): [x_min, y_min, x_max, y_max]

+
+
+
+ +
+
+cosense3d.utils.box_utils.boxes_to_corners_2d(boxes_np)[source]
+

Convert boxes to 4 corners in the xy plane.
:param boxes_np: np.ndarray [N, 7], cols - (x, y, z, dx, dy, dz, det_r)
:return: corners: np.ndarray [N, 4, 2]; corner order is back left, front left, front right, back right

+
+ +
+
+cosense3d.utils.box_utils.boxes_to_corners_3d(boxes3d: ndarray | Tensor, order: str = 'lwh') ndarray | Tensor[source]
+
+
+
+

    4 -------- 5            ^ z
   /|         /|            |
  7 -------- 6 .            |
  | |        | |            | . x
  . 0 -------- 1            |/
  |/         |/             +------> y
  3 -------- 2

+
+
+
Parameters:
+

boxes3d – (N, 7 + (2: optional)) [x, y, z, dx, dy, dz, yaw] or [x, y, z, dx, dy, dz, roll, pitch, yaw], where (x, y, z) is the box center.
:param order: ‘lwh’ or ‘hwl’.
:return: (N, 8, 3), the 8 corners of the bounding box.

+
+ +
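For the common yaw-only case, the corner computation can be sketched as follows. This is a simplified illustration that assumes a (N, 7) input and the 0-7 corner numbering of the diagram above (the exact per-corner ordering and the roll/pitch handling of the real function may differ).

import numpy as np

def boxes_to_corners_3d_sketch(boxes: np.ndarray) -> np.ndarray:
    """boxes: (N, 7) [x, y, z, dx, dy, dz, yaw] -> corners: (N, 8, 3)."""
    # unit-box corner template in the box frame: bottom face first, then top face
    template = np.array([
        [ 0.5, -0.5, -0.5], [ 0.5,  0.5, -0.5], [-0.5,  0.5, -0.5], [-0.5, -0.5, -0.5],
        [ 0.5, -0.5,  0.5], [ 0.5,  0.5,  0.5], [-0.5,  0.5,  0.5], [-0.5, -0.5,  0.5],
    ])
    corners = boxes[:, None, 3:6] * template[None]                       # scale by (dx, dy, dz)
    c, s = np.cos(boxes[:, 6]), np.sin(boxes[:, 6])
    rot = np.stack([np.stack([c, -s], -1), np.stack([s, c], -1)], -2)    # (N, 2, 2) yaw rotation
    corners[..., :2] = np.einsum('nij,nkj->nki', rot, corners[..., :2])  # rotate in the xy plane
    return corners + boxes[:, None, :3]                                  # translate to the box center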
+
+cosense3d.utils.box_utils.compute_iou(box, boxes)[source]
+

Compute iou between box and boxes list

+
+
Parameters:
+
    +
  • box – shapely.geometry.Polygon +Bounding box Polygon.

  • +
  • boxes – list +List of shapely.geometry.Polygon.

  • +
+
+
Returns:
+

iou : np.ndarray +Array of iou between box and boxes.

+
+
+
+ +
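Conceptually this is a polygon IoU. A minimal sketch of the same computation with shapely (which the docstring above already assumes as the box representation):

from shapely.geometry import Polygon

box = Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])
boxes = [Polygon([(2, 0), (6, 0), (6, 2), (2, 2)]),
         Polygon([(10, 10), (12, 10), (12, 12), (10, 12)])]

ious = [box.intersection(b).area / box.union(b).area for b in boxes]
print(ious)        # approximately [0.3333, 0.0]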
+
+cosense3d.utils.box_utils.convert_box_to_polygon(boxes_array)[source]
+

Convert boxes array to shapely.geometry.Polygon format.

+
+
:param boxes_array: np.ndarray, (N, 4, 2) or (N, 8, 3).

+
+
+
+
Returns:
+

list of converted shapely.geometry.Polygon object.

+
+
+
+ +
+
+cosense3d.utils.box_utils.corners_to_boxes_3d(corners: ndarray | Tensor, mode: int = 9) ndarray | Tensor[source]
+
+
+
+

    4 -------- 5            ^ z
   /|         /|            |
  7 -------- 6 .            |
  | |        | |            | . x
  . 0 -------- 1            |/
  |/         |/             +------> y
  3 -------- 2

+
+
+
Parameters:
+
    +
  • corners – (N, 8, 3)

  • +
  • mode – 9 | 7

  • +
+
+
Returns:
+

boxes, (N, 9 | 7)

+
+
+
+ +
+
+cosense3d.utils.box_utils.decode_boxes(reg, points, lwh_mean)[source]
+
+ +
+
+cosense3d.utils.box_utils.denormalize_bbox(normalized_bboxes)[source]
+
+ +
+
+cosense3d.utils.box_utils.enlarge_box3d(boxes3d, extra_width=(0, 0, 0))[source]
+
+
Parameters:
+
    +
  • boxes3d – [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

  • +
  • extra_width – [extra_x, extra_y, extra_z]

  • +
+
+
+

Returns:

+
+ +
+
+cosense3d.utils.box_utils.find_rigid_alignment(A, B)[source]
+

Find rotation and translation from A to B.

+
+
Parameters:
+
    +
  • A – (B, N, 3)

  • +
  • B – (B, N, 3)

  • +
+
+
Returns:
+

+
+
+
+ +
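This corresponds to the standard Kabsch/SVD alignment. A batched sketch of the idea (illustrative; the actual implementation may differ in details such as reflection handling):

import torch

def rigid_alignment_sketch(A: torch.Tensor, B: torch.Tensor):
    """A, B: (B, N, 3) corresponding points. Returns R: (B, 3, 3), t: (B, 3) so that R @ a + t ~= b."""
    a_mean = A.mean(dim=1, keepdim=True)
    b_mean = B.mean(dim=1, keepdim=True)
    H = (A - a_mean).transpose(1, 2) @ (B - b_mean)          # (B, 3, 3) cross-covariance
    U, S, Vh = torch.linalg.svd(H)
    # correct a possible reflection so that det(R) = +1
    d = torch.sign(torch.linalg.det(Vh.transpose(1, 2) @ U.transpose(1, 2)))
    Vh = torch.cat([Vh[:, :2], Vh[:, 2:] * d[:, None, None]], dim=1)
    R = Vh.transpose(1, 2) @ U.transpose(1, 2)
    t = (b_mean - (R @ a_mean.transpose(1, 2)).transpose(1, 2)).squeeze(1)
    return R, t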
+
+cosense3d.utils.box_utils.limit_period(val, offset=0.5, period=6.283185307179586)[source]
+
+ +
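With the default offset and period, this wraps an angle into [-pi, pi). The conventional formula is sketched below (illustrative).

import numpy as np

def limit_period_sketch(val, offset=0.5, period=2 * np.pi):
    # maps val into the half-open interval [-offset * period, (1 - offset) * period)
    return val - np.floor(val / period + offset) * period

print(limit_period_sketch(3.5 * np.pi))     # -> -0.5 * pi (about -1.5708)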
+
+cosense3d.utils.box_utils.mask_boxes_outside_range_numpy(boxes: ndarray, limit_range: list, order: str, min_num_corners: int = 2) ndarray[source]
+
+
Parameters:
+
    +
  • boxes – (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

  • +
  • limit_range – [minx, miny, minz, maxx, maxy, maxz]

  • +
  • order – ‘lwh’ or ‘hwl’

  • +
  • min_num_corners – The required minimum number of corners to be considered as in range.

  • +
+
+
Returns:
+

The filtered boxes.

+
+
+
+ +
+
+cosense3d.utils.box_utils.mask_boxes_outside_range_torch(boxes, lidar_range)[source]
+
+ +
+
+cosense3d.utils.box_utils.normalize_bbox(bboxes)[source]
+
+ +
+
+cosense3d.utils.box_utils.remove_points_in_boxes3d(points, boxes3d, x_idx=0)[source]
+
+
Parameters:
+
    +
  • points – (num_points, x_idx + 3 + C)

  • +
  • boxes3d – (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps

  • +
+
+
Returns:
+

+
+
+
+ +
+
+cosense3d.utils.box_utils.transform_boxes_3d(boxes_in, transform, mode=7)[source]
+
+
Parameters:
+
    +
  • boxes_in – (N, 7)

  • +
  • transform – (4, 4)

  • +
  • mode – 7 | 9

  • +
+
+
+
+ +
+
+

cosense3d.utils.eval_detection_utils module

+
+
+cosense3d.utils.eval_detection_utils.cal_ap_all_point(scores, tps, n_pred, n_gt)[source]
+

source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L292

+
+ +
+
+cosense3d.utils.eval_detection_utils.cal_precision_recall(scores, tps, n_pred, n_gt)[source]
+
+ +
+
+cosense3d.utils.eval_detection_utils.calculate_ap(result_stat, iou, global_sort_detections)[source]
+

Calculate the average precision and recall, and save them into a txt.

+
+

Parameters

+
+
result_stat : dict
    A dictionary that contains the fp, tp and gt numbers.

iou : float

global_sort_detections : bool
    Whether to sort the detection results globally.

+
+
+
+
+ +
+
+cosense3d.utils.eval_detection_utils.caluclate_tp_fp(det_boxes, det_score, gt_boxes, result_stat, iou_thresh, det_range=None)[source]
+

Calculate the true positive and false positive numbers of the current +frames.

+
+

Parameters

+
+
det_boxes : torch.Tensor
    The detection bounding boxes, shape (N, 8, 3), (N, 4, 2) or (N, 7).

det_score : torch.Tensor
    The confidence score for each predicted bounding box.

gt_boxes : torch.Tensor
    The ground-truth bounding boxes.

result_stat : dict
    A dictionary that contains the fp, tp and gt numbers.

iou_thresh : float
    The IoU threshold.

det_range : list, [left_range, right_range]
    The left and right bounds of the evaluation range.

+
+
+
+
+ +
+
+cosense3d.utils.eval_detection_utils.eval_final_results(result_stat, iou_thrs, global_sort_detections=False)[source]
+
+ +
+
+cosense3d.utils.eval_detection_utils.ops_cal_tp(pred_boxes, gt_boxes, iou_mode='3d', IoU_thr=0.7)[source]
+
+ +
+
+cosense3d.utils.eval_detection_utils.voc_ap(rec, prec)[source]
+

VOC 2010 Average Precision.

+
+ +
+
+

cosense3d.utils.iou2d_calculator module

+
+
+cosense3d.utils.iou2d_calculator.bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06)[source]
+

Calculate overlap between two set of bboxes.

+

FP16 contributed by https://github.com/open-mmlab/mmdetection/pull/4889.

Note: assume bboxes1 is M x 4 and bboxes2 is N x 4. When mode is ‘iou’, the following intermediate variables are generated when calculating IoU with the bbox_overlaps function:

1) is_aligned is False

   area1: M x 1, area2: N x 1, lt: M x N x 2, rb: M x N x 2, wh: M x N x 2, overlap: M x N x 1, union: M x N x 1, ious: M x N x 1

   Total memory: S = (9 x N x M + N + M) * 4 Byte.

   When using FP16, we can reduce: R = (9 x N x M + N + M) * 4 / 2 Byte. R larger than (N + M) * 4 * 2 is always true when N and M >= 1: obviously N + M <= N * M < 3 * N * M when N >= 2 and M >= 2, and N + 1 < 3 * N when N or M is 1.

   Given M = 40 (ground truths) and N = 400000 (three anchor boxes per grid, FPN, R-CNNs), R = 275 MB. In a special dense-detection case with M = 512 ground truths, R = 3516 MB = 3.43 GB. When the batch size is B, the reduction becomes B x R; without it, CUDA memory runs out frequently.

   Experiments on GeForce RTX 2080Ti (11019 MiB): dtype | M | N | Use | Real | Ideal | (table rows not preserved in this extract).

2) is_aligned is True

   area1: N x 1, area2: N x 1, lt: N x 2, rb: N x 2, wh: N x 2, overlap: N x 1, union: N x 1, ious: N x 1

   Total memory: S = 11 x N * 4 Byte. When using FP16, we can reduce: R = 11 x N * 4 / 2 Byte.

The same holds for ‘giou’ (which uses more memory than ‘iou’).

Time-wise, FP16 is generally faster than FP32.

When gpu_assign_thr is not -1, it takes more time on CPU but does not reduce memory. Thereby, we can reduce half the memory and keep the speed.

+

If is_aligned is False, then calculate the overlaps between each +bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned +pair of bboxes1 and bboxes2.

+
+
Parameters:
+
    +
  • bboxes1 – (Tensor) shape (B, m, 4) in <x1, y1, x2, y2> format or empty.

  • +
  • bboxes2 – (Tensor) shape (B, n, 4) in <x1, y1, x2, y2> format or empty.

  • +
+
+
+

B indicates the batch dim, in shape (B1, B2, …, Bn). If is_aligned is True, then m and n must be equal.
:param mode: (str) “iou” (intersection over union), “iof” (intersection over foreground) or “giou” (generalized intersection over union). Default “iou”.
:param is_aligned: (bool, optional) If True, then m and n must be equal. Default False.
:param eps: (float, optional) A value added to the denominator for numerical stability. Default 1e-6.

+
+
+
Returns:
+

Tensor: shape (m, n) if is_aligned is False else shape (m,)

+
+
+
+
Example:
>>> bboxes1 = torch.FloatTensor([
+>>>     [0, 0, 10, 10],
+>>>     [10, 10, 20, 20],
+>>>     [32, 32, 38, 42],
+>>> ])
+>>> bboxes2 = torch.FloatTensor([
+>>>     [0, 0, 10, 20],
+>>>     [0, 10, 10, 19],
+>>>     [10, 10, 20, 20],
+>>> ])
+>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
+>>> assert overlaps.shape == (3, 3)
+>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
+>>> assert overlaps.shape == (3, )
+
+
+
+
Example:
>>> empty = torch.empty(0, 4)
+>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
+>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
+>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
+>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
+
+
+
+
+
+ +
+
+cosense3d.utils.iou2d_calculator.cast_tensor_type(x, scale=1.0, dtype=None)[source]
+
+ +
+
+cosense3d.utils.iou2d_calculator.fp16_clamp(x, min=None, max=None)[source]
+
+ +
+
+

cosense3d.utils.logger module

+
+
+class cosense3d.utils.logger.LogMeter(total_iter, logdir, delimiter='\t', log_every=20, wandb_project=None)[source]
+

Bases: object

+
+
+add_meter(name, meter)[source]
+
+ +
+
+log(epoch, iteration, lr, **kwargs)[source]
+
+ +
+
+update(**kwargs)[source]
+
+ +
+ +
+
+class cosense3d.utils.logger.SmoothedValue(window_size=20, fmt=None)[source]
+

Bases: object

+
+
+property avg
+
+ +
+
+property global_avg
+
+ +
+
+property max
+
+ +
+
+property median
+
+ +
+
+update(value, n=1)[source]
+
+ +
+
+property value
+
+ +
+ +
+
+class cosense3d.utils.logger.TestLogger(logdir)[source]
+

Bases: object

+
+
+log(msg)[source]
+
+ +
+ +
+
+cosense3d.utils.logger.setup_logger(exp_name, debug)[source]
+
+ +
+
+

cosense3d.utils.lr_scheduler module

+
+
+class cosense3d.utils.lr_scheduler.LRUpdater(optimizer, total_iter, policy, **kwargs)[source]
+

Bases: object

+

Unified API for updating LR with different LR schedulers.

+
+
+get_last_lr()[source]
+
+ +
+
+load_state_dict(state_dict)[source]
+
+ +
+
+state_dict()[source]
+
+ +
+
+step_epoch(epoch)[source]
+
+ +
+
+step_itr(itr)[source]
+
+ +
+ +
+
+class cosense3d.utils.lr_scheduler.TransformerAdaptiveScheduler(optimizer: Optimizer, dim_embed: int, warmup_steps: int, itrs_per_epoch: int, last_epoch: int = -1, global_fade_ratio: float = 1, verbose: bool = False)[source]
+

Bases: _LRScheduler

+
+
+calc_lr(step, dim_embed, warmup_steps)[source]
+
+ +
+
+get_lr() float[source]
+
+ +
+ +
+
+cosense3d.utils.lr_scheduler.build_lr_scheduler(optimizer, cfg, total_iter)[source]
+
+ +
+
+

cosense3d.utils.metrics module

+
+
+class cosense3d.utils.metrics.Metric(cfg, log_dir)[source]
+

Bases: object

+
+
+add_samples(data_dict)[source]
+
+ +
+
+save_detections(filename)[source]
+
+ +
+
+summary()[source]
+
+ +
+ +
+
+class cosense3d.utils.metrics.MetricBev(cfg, run_path, logger, name='test')[source]
+

Bases: Metric

+
+
+add_samples(out_dict)[source]
+
+
Args:
+
out_dict:
+
bev:

conf: Tensor, (B, H, W, C) or (N, C); unc: Tensor (optional), (B, H, W, C) or (N, C); gt: Tensor, (B, H, W, C) or (N, C)

+
+
+
+
+
+
+
+ +
+
+format_str(result_dict)[source]
+
+ +
+
+iou(conf, gt, unc=None)[source]
+

Compare the thresholded pred BEV map with the full gt BEV map (including non +observable area)

+
+ +
+
+summary()[source]
+
+ +
+
+summary_hook()[source]
+
+ +
+ +
+
+class cosense3d.utils.metrics.MetricMOT(cfg, log_dir)[source]
+

Bases: Metric

+
+
+add_samples(data_dict)[source]
+
+ +
+ +
+
+class cosense3d.utils.metrics.MetricObjDet(cfg, log_dir, logger, bev=False)[source]
+

Bases: Metric

+
+
+add_sample(name, pred_boxes, gt_boxes, confidences, ids=None)[source]
+
+ +
+
+add_samples(out_dict)[source]
+
+ +
+
+cal_ap_11_point(IoU_thr=0.5)[source]
+

source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L315

+
+ +
+
+cal_ap_all_point(IoU_thr=0.5)[source]
+

source: https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c/lib/Evaluator.py#L292

+
+ +
+
+cal_precision_recall(IoU_thr=0.5)[source]
+
+ +
+
+save_detections(filename)[source]
+
+ +
+
+summary()[source]
+
+ +
+ +
+
+class cosense3d.utils.metrics.MetricSemSeg(cfg, run_path, name='test')[source]
+

Bases: Metric

+
+
+add_samples(data_dict)[source]
+
+ +
+
+cal_ious_and_accs()[source]
+
+ +
+
+save_detections(filename)[source]
+
+ +
+ +
+
+

cosense3d.utils.misc module

+
+
+cosense3d.utils.misc.check_numpy_to_torch(x)[source]
+
+ +
+
+cosense3d.utils.misc.ensure_dir(path)[source]
+
+ +
+
+cosense3d.utils.misc.list_dirs(path)[source]
+
+ +
+
+cosense3d.utils.misc.load_from_pl_state_dict(model, pl_state_dict)[source]
+
+ +
+
+cosense3d.utils.misc.load_json(filename)[source]
+
+ +
+
+cosense3d.utils.misc.load_yaml(filename, cloader=False)[source]
+

Load yaml file into dictionary.

+
+

Parameters

+
+
filename : str

Full path of yaml file.

+
+
+
+
+

Returns

+
+
params : dict

A dictionary that contains defined parameters.

+
+
+
+
+ +
+
+cosense3d.utils.misc.multi_apply(func, *args, **kwargs)[source]
+

Apply function to a list of arguments.

+
+
Note:

This function applies the func to multiple inputs and maps the multiple outputs of the func into different lists. Each list contains the same type of outputs corresponding to different inputs.

+
+
Args:
+
func (Function): A function that will be applied to a list of arguments.

+
+
+
+
Returns:

tuple(list): A tuple containing multiple lists; each list contains one kind of result returned by the function.

+
+
+
+ +
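A typical usage pattern is shown below; the sketch assumes the helper follows the common open-mmlab convention of applying func element-wise and zipping the per-element outputs into separate lists.

from cosense3d.utils.misc import multi_apply

def square_and_cube(x, scale=1):
    return x ** 2 * scale, x ** 3 * scale

squares, cubes = multi_apply(square_and_cube, [1, 2, 3], scale=2)
print(squares)      # expected: [2, 8, 18]
print(cubes)        # expected: [2, 16, 54]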
+
+cosense3d.utils.misc.pad_list_to_array_np(data)[source]
+

Pad list of numpy data to one single numpy array +:param data: list of np.ndarray +:return: np.ndarray

+
+ +
+
+cosense3d.utils.misc.save_json(data, filename)[source]
+
+ +
+
+cosense3d.utils.misc.save_yaml(data, filename, cdumper=False)[source]
+
+ +
+
+cosense3d.utils.misc.setup_logger(exp_name, debug)[source]
+
+ +
+
+cosense3d.utils.misc.torch_tensor_to_numpy(torch_tensor)[source]
+

Convert a torch tensor to numpy.

+
+

Parameters

+

torch_tensor : torch.Tensor

+
+
+

Returns

+

A numpy array.

+
+
+ +
+
+cosense3d.utils.misc.update_dict(dict_out, dict_add)[source]
+

Merge dict_add into dict_out. Existing values in dict_out will be overwritten by those in dict_add.

+
+

Parameters

+

dict_out: dict +dict_add: dict

+
+
+

Returns

+
+
dict_out : dict

The updated dict_out.

+
+
+
+
+ +
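A small usage sketch; whether the merge happens in place or only via the return value is left open here, so the returned dictionary is used.

from cosense3d.utils.misc import update_dict

cfg = {'optimizer': {'lr': 1e-3}, 'epochs': 20}
cfg_add = {'epochs': 40, 'batch_size': 4}
cfg = update_dict(cfg, cfg_add)
print(cfg['epochs'])        # expected: 40  (overwritten by dict_add)
print(cfg['batch_size'])    # expected: 4   (newly added)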
+
+

cosense3d.utils.module_utils module

+
+
+cosense3d.utils.module_utils.build_dropout(cfgs)[source]
+
+ +
+
+cosense3d.utils.module_utils.build_norm_layer(cfgs, shape)[source]
+
+ +
+
+cosense3d.utils.module_utils.digit_version(version_str: str, length: int = 4)[source]
+

Convert a version string into a tuple of integers.

+

This method is usually used for comparing two versions. For pre-release +versions: alpha < beta < rc.

+
+
Args:

version_str (str): The version string. +length (int): The maximum number of version levels. Default: 4.

+
+
Returns:

tuple[int]: The version info in digits (integers).

+
+
+
+ +
+
+cosense3d.utils.module_utils.get_target_module(target)[source]
+
+ +
+
+cosense3d.utils.module_utils.instantiate_target_module(target, cfg=None, **kwargs)[source]
+
+ +
+
+

cosense3d.utils.pclib module

+
+
+cosense3d.utils.pclib.cart2cyl(input_xyz)[source]
+
+ +
+
+cosense3d.utils.pclib.cyl2cart(input_xyz_polar)[source]
+
+ +
+
+cosense3d.utils.pclib.get_tf_matrix_torch(vectors, inv=False)[source]
+
+ +
+
+cosense3d.utils.pclib.header(points)[source]
+
+ +
+
+cosense3d.utils.pclib.lidar_bin2bin(bin_file, out_file)[source]
+
+ +
+
+cosense3d.utils.pclib.lidar_bin2pcd(bin_file, out_file, replace=False)[source]
+
+ +
+
+cosense3d.utils.pclib.lidar_bin2pcd_o3d(bin_file, out_file, replace=False)[source]
+
+ +
+
+cosense3d.utils.pclib.lidar_ply2bin(ply_file, bin_file, fields=['x', 'y', 'z', 'intensity'], replace=False)[source]
+

Read ply and save to the cosense3d binary format.

+
+
Parameters:
+
    +
  • ply_file – str, input file name

  • +
  • bin_file – str, output file name

  • +
  • fields – list of str, names that indicates ‘x’, ‘y’, ‘z’ and ‘intensity’

  • +
  • replace – replace the existing file if True

  • +
+
+
+
+ +
+
+cosense3d.utils.pclib.load_pcd(pcd_file: str, return_o3d: bool = False)[source]
+

Read pcd and return numpy array.

+
+
Parameters:
+
    +
  • pcd_file – The pcd file that contains the point cloud.

  • +
  • return_o3d – Default returns numpy array, set True to return pcd as o3d PointCloud object

  • +
+
+
Returns:
+

lidar_dict, +xyz: (pcd_np | pcd : np.ndarray | o3d.geometry.PointCloud) the lidar xyz coordinates in numpy format, shape:(n, 3); +intensity: (optional) np.ndarray, (n,). +label: (optional) np.ndarray, (n,). +time: (optional) np.ndarray, (n,). +ray: (optional) np.ndarray, (n,).

+
+
+
+ +
+
+cosense3d.utils.pclib.mask_points_in_box(points, pc_range)[source]
+
+ +
+
+cosense3d.utils.pclib.mask_points_in_range(points: array, dist: float) array[source]
+
+
Return type:
+

np.array

+
+
+
+ +
+
+cosense3d.utils.pclib.mask_values_in_range(values, min, max)[source]
+
+ +
+
+cosense3d.utils.pclib.mat_pitch(cosa, sina, zeros=0, ones=1)[source]
+
+ +
+
+cosense3d.utils.pclib.mat_roll(cosa, sina, zeros=0, ones=1)[source]
+
+ +
+
+cosense3d.utils.pclib.mat_yaw(cosa, sina, zeros=0, ones=1)[source]
+
+ +
+
+cosense3d.utils.pclib.pose2tf(pose)[source]
+
+ +
+
+cosense3d.utils.pclib.pose_err_global2relative_torch(poses, errs)[source]
+

Calculate the relative pose transformation based on the erroneous global positioning.
:param poses: Nx2 or Nx3, first row is the ego pose, other rows are the coop poses
:param errs: Nx3, first row is the ego pose error, other rows are the coop pose errors
:return: (N-1)x3, relative localization errors between ego and coop vehicles

+
+ +
+
+cosense3d.utils.pclib.pose_to_transformation(pose)[source]
+
+
Parameters:
+

pose – list, [x, y, z, roll, pitch, yaw]

+
+
Returns:
+

transformation: np.ndarray, (4, 4)

+
+
+
+ +
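The usual construction of such a matrix is sketched below with SciPy. The Euler convention (intrinsic vs. extrinsic, degrees vs. radians) is an assumption here and may differ from the actual implementation.

import numpy as np
from scipy.spatial.transform import Rotation

def pose_to_tf_sketch(pose, degrees=True):
    """pose: [x, y, z, roll, pitch, yaw] -> (4, 4) homogeneous transform (illustrative)."""
    x, y, z, roll, pitch, yaw = pose
    tf = np.eye(4)
    # extrinsic x-y-z rotations, i.e. R = Rz(yaw) @ Ry(pitch) @ Rx(roll)
    tf[:3, :3] = Rotation.from_euler('xyz', [roll, pitch, yaw], degrees=degrees).as_matrix()
    tf[:3, 3] = [x, y, z]
    return tf

tf = pose_to_tf_sketch([1.0, 2.0, 0.5, 0.0, 0.0, 90.0])   # translation plus a 90 degree yaw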
+
+cosense3d.utils.pclib.project_points_by_matrix_torch(points, transformation_matrix)[source]
+

Project the points to another coordinate system based on the +transformation matrix.

+
+
Parameters:
+
    +
  • points – torch.Tensor, 3D points, (N, 3)

  • +
  • transformation_matrix – torch.Tensor, Transformation matrix, (4, 4)

  • +
+
+
Returns:
+

projected_points : torch.Tensor, The projected points, (N, 3)

+
+
+
+ +
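The operation is the usual homogeneous transformation; an equivalent minimal sketch:

import torch

def project_points_sketch(points: torch.Tensor, tf: torch.Tensor) -> torch.Tensor:
    """points: (N, 3), tf: (4, 4) -> transformed points (N, 3)."""
    ones = torch.ones(len(points), 1, dtype=points.dtype, device=points.device)
    homo = torch.cat([points, ones], dim=1)       # (N, 4) homogeneous coordinates
    return (homo @ tf.T)[:, :3]                   # apply the transform, drop the last coordinate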
+
+cosense3d.utils.pclib.read_ply(filename)[source]
+
+ +
+
+cosense3d.utils.pclib.rotate3d(points, euler)[source]
+

Rotate point cloud with the euler angles given in pose.

+
+
Parameters:
+
    +
  • points – np.ndarray, N x (3 + C) +each point in the row has the format [x, y, z, …]

  • +
  • euler – list or np.ndarray +[roll, pitch, yaw]

  • +
+
+
Returns:
+

points: np.ndarray +rotated point cloud

+
+
+
+ +
+
+cosense3d.utils.pclib.rotate_box_corners_with_tf_np(corners: ndarray, tf_np: ndarray) ndarray[source]
+

Rotate points with transformation matrix +:param corners: Nx8X3 points array +:param tf_np: 4x4 transformation matrix +:return: corners, Nx8X3 points array

+
+ +
+
+cosense3d.utils.pclib.rotate_points_along_z_np(points, angle)[source]
+
+
Parameters:
+
    +
  • points – (N, 3 + C or 2 + C)

  • +
  • angle – float, angle along z-axis, angle increases x ==> y

  • +
+
+
+
+ +
+
+cosense3d.utils.pclib.rotate_points_along_z_torch(points, angle)[source]
+
+
Parameters:
+
    +
  • points – (N, 2 + C) or (B, 2 + C)

  • +
  • angle – float or tensor of shape (B), angle along z-axis, angle increases x ==> y

  • +
+
+
+
+ +
+
+cosense3d.utils.pclib.rotate_points_batch(points, angles, order='xyz')[source]
+
+
Parameters:
+
    +
  • points – (B, N, 3 + C)

  • +
  • angles – (B, 1|3), radians +rotation = R(3)R(2)R(1) if angles shape in (B, 3)

  • +
+
+
Returns:
+

points_rot: (B, N, 3 + C)

+
+
+
+ +
+
+cosense3d.utils.pclib.rotate_points_with_tf_np(points: ndarray, tf_np: ndarray) ndarray[source]
+

Rotate points with transformation matrix.

+
+
Parameters:
+
    +
  • points (np.ndarray) – Nx3 points array

  • tf_np (np.ndarray) – 4x4 transformation matrix

  • +
+
+
Returns:
+

points (np.ndarray): Nx3 points array

+
+
+
+ +
+
+cosense3d.utils.pclib.rotation_mat2euler_torch(mat)[source]
+
+ +
+
+cosense3d.utils.pclib.rotation_matrix(euler, degrees=True)[source]
+

Construct rotation matrix with the given pose.

+
+
Parameters:
+

euler – list or np.ndarray +[roll, pitch, yaw]

+
+
Returns:
+

rot: np.ndarray, 3x3 +rotation matrix

+
+
+
+ +
+
+cosense3d.utils.pclib.save_cosense_ply(data, output_file_name)[source]
+
+ +
+
+cosense3d.utils.pclib.tf2pose(tf_matrix)[source]
+
+ +
+
+

cosense3d.utils.tensor_utils module

+
+
+cosense3d.utils.tensor_utils.check_numpy_to_torch(x)[source]
+
+ +
+
+cosense3d.utils.tensor_utils.pad_list_to_array_torch(data)[source]
+

Pad list of numpy data to one single numpy array +:param data: list of np.ndarray +:return: np.ndarray

+
+ +
+
+

cosense3d.utils.train_utils module

+
+
+cosense3d.utils.train_utils.build_lr_scheduler(optimizer, cfg, steps_per_epoch)[source]
+
+ +
+
+cosense3d.utils.train_utils.build_optimizer(model, cfg)[source]
+
+ +
+
+cosense3d.utils.train_utils.clip_grads(params, max_norm=35, norm_type=2)[source]
+
+ +
+
+cosense3d.utils.train_utils.get_gpu_architecture()[source]
+
+ +
+
+cosense3d.utils.train_utils.is_tensor_to_cuda(data, device=0)[source]
+
+ +
+
+cosense3d.utils.train_utils.load_model_dict(model, pretrained_dict)[source]
+
+ +
+
+cosense3d.utils.train_utils.load_tensors_to_gpu(batch_dict, device=0)[source]
+

Load all tensors in batch_dict to gpu

+
+ +
+
+cosense3d.utils.train_utils.seed_everything(seed)[source]
+
+ +
+
+

cosense3d.utils.vislib module

+
+
+cosense3d.utils.vislib.bbx2linset(bbx, color=(0, 1, 0))[source]
+

Convert the bounding box to o3d lineset for visualization.

+
+
:param bbx: np.ndarray
    shape: (n, 7) or (n, 11) or (n, 8, 3).
:param color: tuple
    The bounding box color.

+
+
+
+
Returns:
+

line_set : open3d.LineSet

+
+
+
+ +
+
+cosense3d.utils.vislib.draw_2d_bboxes_on_img(img, boxes2d, ax_in=None)[source]
+
+
Parameters:
+
    +
  • img – np.ndarray

  • +
  • boxes2d – np.ndarray, (N, 4, 2) for 4 corners or (N, 2, 2) for left-top and right-bottom corners in pixel metric

  • +
+
+
+
+ +
+
+cosense3d.utils.vislib.draw_3d_points_boxes_on_img(ax, img, lidar2img, points=None, boxes=None)[source]
+
+
+
+

    1 -------- 6            ^ z
   /|         /|            |
  2 -------- 5 .            |
  | |        | |            | . x
  . 0 -------- 7            |/
  |/         |/             +------> y
  3 -------- 4

+
+
+
Parameters:
+
    +
  • ax – plt plot axis

  • +
  • img – np.ndarray, (H, W, 3)

  • +
  • lidar2img – np.ndarray, (4, 4), transformation matrix from lidar to camera coordinates

  • +
  • points – np.ndarray, (N, 3+C)

  • +
  • boxes – np.ndarray, (N, 8, 3) or (N, 7), in lidar coordinates

  • +
+
+
+
+ +
+
+cosense3d.utils.vislib.draw_box_plt(boxes_dec, ax, color=None, linewidth_scale=2.0, linestyle='solid')[source]
+

draw boxes in a given plt ax +:param boxes_dec: (N, 5) or (N, 7) in metric +:param ax: +:return: ax with drawn boxes

+
+ +
+
+cosense3d.utils.vislib.draw_matched_boxes(boxes1, boxes2, match, out_file=None)[source]
+
+ +
+
+cosense3d.utils.vislib.draw_points_boxes_plt(pc_range=None, points=None, boxes_pred=None, boxes_gt=None, wandb_name=None, points_c='gray', bbox_gt_c='green', bbox_pred_c='red', linewidth_scale=0.75, bbox_pred_label=None, bbox_gt_label=None, return_ax=False, ax=None, marker_size=2.0, filename=None)[source]
+
+ +
+
+cosense3d.utils.vislib.get_palette_colors(palette)[source]
+
+ +
+
+cosense3d.utils.vislib.o3d_draw_agent_data(agent_dict, data_path)[source]
+
+ +
+
+cosense3d.utils.vislib.o3d_draw_frame_data(frame_dict, data_path)[source]
+
+ +
+
+cosense3d.utils.vislib.o3d_draw_pcds_bbxs(pcds: list, bbxs: list, bbxs_colors: list | None = None, pcds_colors: list | None = None)[source]
+
+
Parameters:
+
    +
  • pcds – list of np array

  • +
  • bbxs – list of np array, +bounding boxes in corner format

  • +
  • bbxs_colors – list of tuples

  • +
  • pcds_colors – list of np array, shape same as pcds

  • +
+
+
+
+ +
+
+cosense3d.utils.vislib.o3d_play_sequence(meta_dict, data_path)[source]
+
+ +
+
+cosense3d.utils.vislib.plot_cavs_points(cavs, points_key='points')[source]
+
+ +
+
+cosense3d.utils.vislib.plt_draw_frame_data(frame_dict, data_path)[source]
+
+ +
+
+cosense3d.utils.vislib.update_axis_linset(line_set, axis_len=5)[source]
+
+ +
+
+cosense3d.utils.vislib.update_lineset_vbo(vbo, bbx, color=None)[source]
+
+ +
+
+cosense3d.utils.vislib.visualization(func_list, batch_data)[source]
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/genindex.html b/docs/_build/html/genindex.html new file mode 100644 index 00000000..adca929f --- /dev/null +++ b/docs/_build/html/genindex.html @@ -0,0 +1,4114 @@ + + + + + + Index — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + +

Index

+ +
+ A + | B + | C + | D + | E + | F + | G + | H + | I + | K + | L + | M + | N + | O + | P + | Q + | R + | S + | T + | U + | V + | W + | X + +
+

A

+ + + +
+ +

B

+ + + +
+ +

C

+ + + +
+ +

D

+ + + +
+ +

E

+ + + +
+ +

F

+ + + +
+ +

G

+ + + +
+ +

H

+ + + +
+ +

I

+ + + +
+ +

K

+ + + +
+ +

L

+ + + +
+ +

M

+ + + +
+ +

N

+ + + +
+ +

O

+ + + +
+ +

P

+ + + +
+ +

Q

+ + + +
+ +

R

+ + + +
+ +

S

+ + + +
+ +

T

+ + + +
+ +

U

+ + + +
+ +

V

+ + + +
+ +

W

+ + + +
+ +

X

+ + + +
+ + + +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/index.html b/docs/_build/html/index.html new file mode 100644 index 00000000..35272311 --- /dev/null +++ b/docs/_build/html/index.html @@ -0,0 +1,150 @@ + + + + + + + Welcome to OpenCosense3D’s documentation! — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/md/installation.html b/docs/_build/html/md/installation.html new file mode 100644 index 00000000..c1e16104 --- /dev/null +++ b/docs/_build/html/md/installation.html @@ -0,0 +1,190 @@ + + + + + + + Installation — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Installation

+
+

Requirements

+
    +
  • Ubuntu LTS 20.04

  • +
  • GPU: tested on Nvidia RTX 3090 Ti and Nvidia RTX 4090

  • +
  • Python: >= 3.8

  • +
+
+
+

Installation options

+
+

Via bash script

+

You can install the environment with the provided bash scripts using the following commands:

+
conda create -n cosense3d python=3.8
+conda activate cosense3d
+cd OpenCosense3D 
+# for Nvidia RTX 3090
+bash setup_env_3090.sh
+# for Nvidia RTX 4090
+bash setup_env_4090.sh
+
+
+
+
+

Step-by-step

+

If you encounter any errors during the script installation, please try the step-by-step installation.

+

1.Create conda environment and install dependencies.

+
conda create -n cosense3d python=3.8
+conda activate cosense3d
+conda install openblas-devel -c anaconda -y
+conda install -c conda-forge libstdcxx-ng libffi -y
+sudo apt install build-essential python3-dev libopenblas-dev -y
+
+
+

2.Install PyTorch and compile the local PyTorch extensions (CUDA nvcc compiler needed).

+
# For 3090
+pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 \
+--extra-index-url https://download.pytorch.org/whl/cu113
+# For 4090
+pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
+# Install extensions
+cd cosense3d/ops
+pip install . && cd ..
+
+
+

3.Install python packages.

+
# for 3090
+pip install -r reququirements_cosense_3090.txt
+# for 4090
+pip install -r reququirements_cosense_4090.txt
+# for Graphical Interface
+pip install -r requirements_ui.txt
+
+
+

4.Install MinkowskiEngine.

+
pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
+    --global-option="--blas_include_dirs=${CONDA_PREFIX}/include" \
+    --global-option="--blas=openblas"
+export OMP_NUM_THREADS=16
+
+
+

5.Check Installation.

+
python -c "import torch; print(torch.__version__)" 
+python  -W ignore -c "import MinkowskiEngine as ME; print(ME.__version__)"
+
+
+
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/md/prepare_data.html b/docs/_build/html/md/prepare_data.html new file mode 100644 index 00000000..38e4a1cb --- /dev/null +++ b/docs/_build/html/md/prepare_data.html @@ -0,0 +1,153 @@ + + + + + + + Prepare Datasets — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Prepare Datasets

+
+

Check the dataset page for download links, or use the download script with the following commands.

+
+
+

OPV2Vt

+
cd CoSense3D
+bash cosense3d/tools/download.sh OPV2Vt path/to/output_dir
+
+
+
+
+

DairV2Xt

+

Download DAIR-V2X-C dataset and extract it to the following structure.

+
├── dair-v2x
+│   ├── cooperative-vehicle-infrastructure
+|      |── 2021_08_16_22_26_54
+|      |── ...
+│   ├── cooperative-vehicle-infrastructure-infrastructure-side-image
+│   ├── cooperative-vehicle-infrastructure-infrastructure-side-velodyne
+│   ├── cooperative-vehicle-infrastructure-vehicle-side-image
+│   ├── cooperative-vehicle-infrastructure-vehicle-side-velodyne
+
+
+

Then download the meta files with

+
bash cosense3d/tools/download.sh DairV2xt /path/to/dair-v2x
+
+
+
+
+

OPV2V

+
bash cosense3d/tools/download.sh OPV2V path/to/output_dir
+
+
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/md/structure.html b/docs/_build/html/md/structure.html new file mode 100644 index 00000000..f57da284 --- /dev/null +++ b/docs/_build/html/md/structure.html @@ -0,0 +1,184 @@ + + + + + + + The Structure of the framework — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

The Structure of the framework

+

(Figure: overall framework.)

+

The overall framework contains four main modules, namely Dataloader, +Graphical user interface (GUI), Runner and Central Controller. +The Central Controller is the core module of the framework which contains four sub-modules: +CAV manager, Data manager, Task manager and Forward runner. Black arrows indicate the instruction flow, +green arrows show the data flow. The framework can run either with or without visualization in the GUI.

+
+

Dataloader

+

The framework standardizes the data loading API for collective perception with a predefined dictionary format to store the meta information in JSON files. With this API, a new dataset can be easily converted to a standardized format without rewriting the PyTorch Dataloader or copying the large media files, such as point clouds and images, to a new data structure. Only the meta information such as scenarios, frames, timestamps, sensor parameters and annotations is parsed and saved to the CoSense3D format in JSON files. This standardized Dataloader is able to load images, point cloud data, 2D annotations for images, 3D local annotations for perception without CAV cooperation and 3D global annotations for collective perception.
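As a purely illustrative sketch, a per-frame meta entry in such a JSON file could look roughly like the Python dictionary below; the concrete key names here are hypothetical and only meant to convey the idea of storing meta information and file references instead of copying the media files.

# Hypothetical sketch of a CoSense3D-style meta entry; the real key names may differ.
meta_entry = {
    "scenario_name": {
        "000068": {                                                    # frame id
            "timestamp": 0.05,
            "agents": {
                "cav_1": {
                    "lidar_pose": [12.3, -4.5, 1.9, 0.0, 0.0, 90.0],   # x, y, z, roll, pitch, yaw
                    "lidar_file": "scenario_name/cav_1/000068.bin",    # reference to raw media, not copied
                    "camera_params": {"intrinsic": "...", "extrinsic": "..."},
                    "annotations_local": ["..."],                      # 3D labels without cooperation
                }
            },
            "annotations_global": ["..."],                             # collective-perception labels
        }
    }
}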

+
+
+

GUI

+

The graphical user interface can visualize the training and test data and check the training and test outcomes with one click. This is helpful for loading new datasets and developing new models. Before training on a new dataset, it is necessary to check whether the data is converted and loaded correctly. During and after training, visualizing the model output is also helpful for identifying the drawbacks and problems of the model and then refining or modifying the model accordingly.

+

The GUI can send commands to the runner to start, stop or step the runner process. After each runner step, it updates the visualization modules: 3D GLViewer, ImgViewer, ImgAnno3DViewer and OutputViewer. GLViewer is an OpenGL-based visualizer for 3D data, annotations (green boxes) and predictions (red boxes). ImgViewer shows image data and the corresponding 2D bounding boxes. ImgAnno3DViewer is used to visualize whether the transformations and augmentations of images and 3D annotations are correctly loaded and processed. Each row in ImgViewer and ImgAnno3DViewer shows the images of a single CAV. After training the model, the OutputViewer can be used to visualize the test result. The OutputViewer can contain multiple canvases which can be customized by the user. An example shows the BEV segmentation (top) and object detection (bottom) results.
(Figures: GLViewer, ImgViewer, ImgAnno3DViewer and OutputViewer screenshots.)

+
+
+

Runner

+

In this framework, three types of Runners are available, namely TrainRunner, TestRunner and VisRunner. The user can launch these runners with or without the GUI. They are used for training, testing and input data visualization, respectively. Runners manage the frame-wise data and order dispatching to the Central Controller, which then processes the orders with the provided frame data accordingly.

+
+
+

Central Controller

+

(Figure: Central Controller overview.)
The Central Controller is the core module of this framework; it communicates with the order dispatcher (Runner) and the CAVs through its CAV manager. The Data manager is responsible for data gathering and scattering between the central controller and the CAVs. Similarly, the Task manager gathers the pseudo tasks generated by the CAVs, batches these tasks and dispatches them to the Forward runner, which contains all shared deep learning modules, for execution. In this framework, a standardized CAV prototyping API is provided to allow the user to define a customized workflow for collective perception, including the data augmentations, CAV coordinate transformations, CPM sharing strategies, the forwarding order of the shared neural network modules and the gradient computation strategies of these modules.

+

Based on the CAV prototype, the central controller then implements a standardized pipeline based on the tasks generated by the CAV prototypes. Once the Central Controller receives the order and frame data from the Runner (step 0), the CAV manager updates the CAVs according to the meta information in the frame data and the provided CAV prototype (step 1). Then the Data manager distributes the input frame data to the updated CAVs (step 2). Upon receiving the input data, the CAVs pre-process it, generate tasks and send them back to the Central Controller for processing (step 3). To increase the efficiency of the forward process, the Task manager first summarizes the tasks from all CAVs and batches them into two forward steps, one that requires gradients and one without gradient computation, for parallel processing in the Forward Runner (steps 4 and 5). After finishing these tasks, the generated results are distributed back to the individual CAVs.
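To make the step numbering concrete, the control flow for one frame can be sketched in pseudo-Python as below; the method names are illustrative and do not reflect the actual API.

# Illustrative pseudo-code of one Central Controller step; names are hypothetical.
def run_frame(order, frame_data, controller):
    # step 0: the Runner hands the order and frame data to the Central Controller
    controller.cav_manager.update_cavs(frame_data)                  # step 1: update CAVs from the meta info
    controller.data_manager.distribute_to_cavs(frame_data)          # step 2: scatter the input data
    tasks = [cav.generate_tasks(order) for cav in controller.cav_manager.cavs]   # step 3: per-CAV pseudo tasks
    with_grad, no_grad = controller.task_manager.batch_tasks(tasks)              # step 4: summarize and batch
    results = controller.forward_runner(with_grad, no_grad)                      # step 5: shared NN modules
    controller.data_manager.scatter(results)                        # distribute results back to the CAVs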

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/modules.html b/docs/_build/html/modules.html new file mode 100644 index 00000000..caa49c34 --- /dev/null +++ b/docs/_build/html/modules.html @@ -0,0 +1,586 @@ + + + + + + + CoSense3D — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

CoSense3D

+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/objects.inv b/docs/_build/html/objects.inv new file mode 100644 index 00000000..1ab48205 Binary files /dev/null and b/docs/_build/html/objects.inv differ diff --git a/docs/_build/html/py-modindex.html b/docs/_build/html/py-modindex.html new file mode 100644 index 00000000..1cd9be25 --- /dev/null +++ b/docs/_build/html/py-modindex.html @@ -0,0 +1,760 @@ + + + + + + Python Module Index — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + +

Python Module Index

+ +
+ c +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 
+ c
+ cosense3d +
    + cosense3d.agents +
    + cosense3d.agents.cav_prototype +
    + cosense3d.agents.cav_prototype.base_cav +
    + cosense3d.agents.cav_prototype.streamLTS_collection +
    + cosense3d.agents.center_controller +
    + cosense3d.agents.core +
    + cosense3d.agents.core.base_runner +
    + cosense3d.agents.core.cav_manager +
    + cosense3d.agents.core.data_manager +
    + cosense3d.agents.core.forward_runner +
    + cosense3d.agents.core.gui +
    + cosense3d.agents.core.hooks +
    + cosense3d.agents.core.task_manager +
    + cosense3d.agents.core.test_runner +
    + cosense3d.agents.core.train_runner +
    + cosense3d.agents.core.vis_runner +
    + cosense3d.agents.utils +
    + cosense3d.agents.utils.deco +
    + cosense3d.agents.utils.transform +
    + cosense3d.agents.viewer +
    + cosense3d.agents.viewer.gl_viewer +
    + cosense3d.agents.viewer.img_anno3d_viewer +
    + cosense3d.agents.viewer.img_viewer +
    + cosense3d.agents.viewer.items +
    + cosense3d.agents.viewer.items.graph_items +
    + cosense3d.agents.viewer.output_viewer +
    + cosense3d.agents.viewer.utils +
    + cosense3d.dataset +
    + cosense3d.dataset.const +
    + cosense3d.dataset.cosense_dataset +
    + cosense3d.dataset.pipeline +
    + cosense3d.dataset.pipeline.loading +
    + cosense3d.dataset.pipeline.transform +
    + cosense3d.dataset.temporal_cosense_dataset +
    + cosense3d.dataset.toolkit +
    + cosense3d.dataset.toolkit.cosense +
    + cosense3d.dataset.toolkit.dairv2x +
    + cosense3d.dataset.toolkit.opv2v +
    + cosense3d.dataset.toolkit.opv2v_t +
    + cosense3d.modules +
    + cosense3d.modules.backbone2d +
    + cosense3d.modules.backbone2d.resnet_encoder +
    + cosense3d.modules.backbone3d +
    + cosense3d.modules.backbone3d.mink_unet +
    + cosense3d.modules.backbone3d.pillar_bev +
    + cosense3d.modules.backbone3d.spconv +
    + cosense3d.modules.backbone3d.voxelnet +
    + cosense3d.modules.fusion +
    + cosense3d.modules.fusion.attn_fusion +
    + cosense3d.modules.fusion.box_fusion +
    + cosense3d.modules.fusion.fax +
    + cosense3d.modules.fusion.keypoints +
    + cosense3d.modules.fusion.maxout_fusion +
    + cosense3d.modules.fusion.naive_fusion +
    + cosense3d.modules.fusion.spatial_query_fusion +
    + cosense3d.modules.fusion.temporal_fusion +
    + cosense3d.modules.heads +
    + cosense3d.modules.heads.bev +
    + cosense3d.modules.heads.bev_dense +
    + cosense3d.modules.heads.det_anchor_dense +
    + cosense3d.modules.heads.det_anchor_sparse +
    + cosense3d.modules.heads.det_center_sparse +
    + cosense3d.modules.heads.det_roi_refine +
    + cosense3d.modules.heads.img_focal +
    + cosense3d.modules.heads.lidar_petr_head +
    + cosense3d.modules.heads.multitask_head +
    + cosense3d.modules.heads.nbr_attn_bev +
    + cosense3d.modules.heads.petr_head +
    + cosense3d.modules.heads.query_guided_petr_head +
    + cosense3d.modules.losses +
    + cosense3d.modules.losses.base_loss +
    + cosense3d.modules.losses.common +
    + cosense3d.modules.losses.edl +
    + cosense3d.modules.losses.focal_loss +
    + cosense3d.modules.losses.iou_loss +
    + cosense3d.modules.losses.l1_loss +
    + cosense3d.modules.losses.vanilla_seg_loss +
    + cosense3d.modules.necks +
    + cosense3d.modules.necks.cpm_composer +
    + cosense3d.modules.necks.dilation_spconv +
    + cosense3d.modules.necks.formatting +
    + cosense3d.modules.plugin +
    + cosense3d.modules.plugin.attn +
    + cosense3d.modules.plugin.bev_rpn +
    + cosense3d.modules.plugin.downsample_conv +
    + cosense3d.modules.plugin.flash_attn +
    + cosense3d.modules.plugin.fpn +
    + cosense3d.modules.plugin.gevbev_decoder +
    + cosense3d.modules.plugin.mink_spconv +
    + cosense3d.modules.plugin.naive_compressor +
    + cosense3d.modules.plugin.pillar_encoder +
    + cosense3d.modules.plugin.ssfa +
    + cosense3d.modules.plugin.target_assigners +
    + cosense3d.modules.plugin.transformer +
    + cosense3d.modules.plugin.voxel_encoder +
    + cosense3d.modules.plugin.voxel_generator +
    + cosense3d.modules.plugin.voxnet_utils +
    + cosense3d.modules.plugin.vsa +
    + cosense3d.modules.projection +
    + cosense3d.modules.projection.fax +
    + cosense3d.modules.projection.petr +
    + cosense3d.modules.projection.spatial_transform +
    + cosense3d.modules.utils +
    + cosense3d.modules.utils.box_coder +
    + cosense3d.modules.utils.common +
    + cosense3d.modules.utils.conv +
    + cosense3d.modules.utils.edl_utils +
    + cosense3d.modules.utils.gaussian_utils +
    + cosense3d.modules.utils.init +
    + cosense3d.modules.utils.me_utils +
    + cosense3d.modules.utils.misc +
    + cosense3d.modules.utils.nbr_attn +
    + cosense3d.modules.utils.norm +
    + cosense3d.modules.utils.positional_encoding +
    + cosense3d.utils +
    + cosense3d.utils.box_utils +
    + cosense3d.utils.eval_detection_utils +
    + cosense3d.utils.iou2d_calculator +
    + cosense3d.utils.logger +
    + cosense3d.utils.lr_scheduler +
    + cosense3d.utils.metrics +
    + cosense3d.utils.misc +
    + cosense3d.utils.module_utils +
    + cosense3d.utils.pclib +
    + cosense3d.utils.tensor_utils +
    + cosense3d.utils.train_utils +
    + cosense3d.utils.vislib +
+ + +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/search.html b/docs/_build/html/search.html new file mode 100644 index 00000000..5d2682a1 --- /dev/null +++ b/docs/_build/html/search.html @@ -0,0 +1,125 @@ + + + + + + Search — Cosense3D 1.0.0 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + + + +
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Yunshuang Yuan.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/docs/_build/html/searchindex.js b/docs/_build/html/searchindex.js new file mode 100644 index 00000000..b42add49 --- /dev/null +++ b/docs/_build/html/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["cosense3d", "cosense3d.agents", "cosense3d.agents.cav_prototype", "cosense3d.agents.core", "cosense3d.agents.utils", "cosense3d.agents.viewer", "cosense3d.agents.viewer.items", "cosense3d.dataset", "cosense3d.dataset.pipeline", "cosense3d.dataset.toolkit", "cosense3d.modules", "cosense3d.modules.backbone2d", "cosense3d.modules.backbone3d", "cosense3d.modules.fusion", "cosense3d.modules.heads", "cosense3d.modules.losses", "cosense3d.modules.necks", "cosense3d.modules.plugin", "cosense3d.modules.projection", "cosense3d.modules.utils", "cosense3d.utils", "index", "md/installation", "md/prepare_data", "md/structure", "modules"], "filenames": ["cosense3d.rst", "cosense3d.agents.rst", "cosense3d.agents.cav_prototype.rst", "cosense3d.agents.core.rst", "cosense3d.agents.utils.rst", "cosense3d.agents.viewer.rst", "cosense3d.agents.viewer.items.rst", "cosense3d.dataset.rst", "cosense3d.dataset.pipeline.rst", "cosense3d.dataset.toolkit.rst", "cosense3d.modules.rst", "cosense3d.modules.backbone2d.rst", "cosense3d.modules.backbone3d.rst", "cosense3d.modules.fusion.rst", "cosense3d.modules.heads.rst", "cosense3d.modules.losses.rst", "cosense3d.modules.necks.rst", "cosense3d.modules.plugin.rst", "cosense3d.modules.projection.rst", "cosense3d.modules.utils.rst", "cosense3d.utils.rst", "index.rst", "md/installation.md", "md/prepare_data.md", "md/structure.md", "modules.rst"], "titles": ["cosense3d package", "cosense3d.agents package", "cosense3d.agents.cav_prototype package", "cosense3d.agents.core package", "cosense3d.agents.utils package", "cosense3d.agents.viewer package", "cosense3d.agents.viewer.items package", "cosense3d.dataset package", "cosense3d.dataset.pipeline package", "cosense3d.dataset.toolkit package", "cosense3d.modules package", "cosense3d.modules.backbone2d package", "cosense3d.modules.backbone3d package", "cosense3d.modules.fusion package", "cosense3d.modules.heads package", "cosense3d.modules.losses package", "cosense3d.modules.necks package", "cosense3d.modules.plugin package", "cosense3d.modules.projection package", "cosense3d.modules.utils package", "cosense3d.utils package", "Welcome to OpenCosense3D\u2019s documentation!", "Installation", "Prepare Datasets", "The Structure of the framework", "CoSense3D"], "terms": {"agent": [0, 7, 9, 13, 21, 25], "cav_prototyp": [0, 1, 25], "submodul": [0, 10, 25], "base_cav": [0, 1, 25], "streamlts_collect": [0, 1, 25], "core": [0, 1, 24, 25], "base_runn": [0, 1, 25], "cav_manag": [0, 1, 25], "data_manag": [0, 1, 25], "forward_runn": [0, 1, 25], "gui": [0, 1, 21, 25], "hook": [0, 1, 10, 11, 12, 13, 14, 16, 17, 18, 19, 25], "task_manag": [0, 1, 25], "test_runn": [0, 1, 25], "train_runn": [0, 1, 25], "vis_runn": [0, 1, 25], "util": [0, 1, 10, 21, 25], "deco": [0, 1, 25], "transform": [0, 1, 7, 9, 10, 13, 14, 18, 20, 24, 25], "viewer": [0, 1, 25], "gl_viewer": [0, 1, 25], "img_anno3d_view": [0, 1, 25], "img_view": [0, 1, 25], "output_view": [0, 1, 25], "center_control": [0, 25], "centercontrol": [0, 1, 25], "model": [0, 1, 17, 19, 20, 24, 25], "paramet": [0, 1, 4, 10, 24, 25], "run_fram": [0, 1, 25], "run_seq": [0, 1, 25], "setup_cor": [0, 1, 25], "test_forward": [0, 1, 25], "train_forward": [0, 1, 25], "update_cfg": [0, 1, 25], "vis_forward": [0, 1, 25], "dataset": [0, 21, 24, 25], 
"pipelin": [0, 7, 24, 25], "load": [0, 1, 3, 7, 9, 20, 24, 25], "toolkit": [0, 7, 25], "cosens": [0, 7, 25], "dairv2x": [0, 7, 25], "opv2v": [0, 7, 21, 25], "opv2v_t": [0, 7, 25], "const": [0, 25], "cosense_dataset": [0, 25], "cosensedataset": [0, 7, 25], "label_color": [0, 7, 25], "valid_cl": [0, 7, 25], "collate_batch": [0, 7, 25], "get_valid_ag": [0, 7, 25], "init_dataset": [0, 7, 25], "load_frame_data": [0, 7, 9, 25], "load_meta": [0, 7, 9, 25], "load_sample_info": [0, 7, 25], "parse_sampl": [0, 7, 25], "temporal_cosense_dataset": [0, 25], "temporalcosensedataset": [0, 7, 25], "get_dataload": [0, 7, 25], "backbone2d": [0, 10, 25], "resnet_encod": [0, 10, 25], "backbone3d": [0, 10, 25], "mink_unet": [0, 10, 25], "pillar_bev": [0, 10, 25], "spconv": [0, 10, 17, 19, 25], "voxelnet": [0, 10, 25], "fusion": [0, 10, 25], "attn_fus": [0, 10, 25], "box_fus": [0, 10, 25], "fax": [0, 10, 25], "keypoint": [0, 10, 25], "maxout_fus": [0, 10, 25], "naive_fus": [0, 10, 25], "spatial_query_fus": [0, 10, 25], "temporal_fus": [0, 10, 25], "head": [0, 10, 13, 17, 20, 25], "bev": [0, 3, 9, 10, 17, 18, 20, 24, 25], "bev_dens": [0, 10, 25], "det_anchor_dens": [0, 10, 25], "det_anchor_spars": [0, 10, 25], "det_center_spars": [0, 10, 25], "det_roi_refin": [0, 10, 25], "img_foc": [0, 10, 25], "lidar_petr_head": [0, 10, 25], "multitask_head": [0, 10, 25], "nbr_attn_bev": [0, 10, 25], "petr_head": [0, 10, 25], "query_guided_petr_head": [0, 10, 25], "loss": [0, 1, 2, 3, 10, 14, 17, 25], "base_loss": [0, 10, 25], "common": [0, 10, 25], "edl": [0, 10, 17, 25], "focal_loss": [0, 10, 17, 25], "iou_loss": [0, 10, 25], "l1_loss": [0, 10, 25], "vanilla_seg_loss": [0, 10, 25], "neck": [0, 10, 11, 12, 13, 25], "cpm_compos": [0, 10, 25], "dilation_spconv": [0, 10, 25], "format": [0, 3, 6, 7, 10, 14, 19, 20, 24, 25], "plugin": [0, 10, 25], "attn": [0, 10, 25], "bev_rpn": [0, 10, 25], "downsample_conv": [0, 10, 25], "flash_attn": [0, 10, 25], "fpn": [0, 10, 20, 25], "gevbev_decod": [0, 10, 25], "mink_spconv": [0, 10, 25], "naive_compressor": [0, 10, 25], "pillar_encod": [0, 10, 12, 25], "ssfa": [0, 10, 25], "target_assign": [0, 10, 14, 25], "voxel_encod": [0, 10, 12, 25], "voxel_gener": [0, 10, 12, 25], "voxnet_util": [0, 10, 25], "vsa": [0, 10, 16, 25], "project": [0, 9, 10, 17, 20, 25], "petr": [0, 10, 25], "spatial_transform": [0, 10, 25], "box_cod": [0, 10, 17, 25], "conv": [0, 10, 16, 17, 25], "edl_util": [0, 10, 25], "gaussian_util": [0, 10, 25], "init": [0, 1, 3, 10, 25], "me_util": [0, 10, 25], "misc": [0, 10, 25], "nbr_attn": [0, 10, 25], "norm": [0, 10, 14, 17, 25], "positional_encod": [0, 10, 25], "basemodul": [0, 10, 11, 12, 13, 14, 16, 18, 25], "cat_data_from_list": [0, 10, 25], "cat_dict_list": [0, 10, 25], "cat_list": [0, 10, 25], "compose_img": [0, 10, 25], "compose_result_list": [0, 10, 25], "compose_stensor": [0, 10, 25], "decompose_stensor": [0, 10, 25], "format_input": [0, 10, 14, 18, 25], "format_output": [0, 10, 11, 12, 13, 14, 16, 25], "forward": [0, 1, 2, 3, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25], "freeze_paramet": [0, 10, 25], "prepare_vis_data": [0, 10, 25], "stack_data_from_list": [0, 10, 25], "stack_dict_list": [0, 10, 25], "to_gpu": [0, 1, 3, 10, 12, 16, 25], "train": [0, 1, 3, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25], "build_modul": [0, 10, 17, 25], "box_util": [0, 25], "bbox_cxcywh_to_xyxi": [0, 20, 25], "bbox_xyxy_to_cxcywh": [0, 20, 25], "boxes3d_to_standup_bbox": [0, 20, 25], "boxes_to_corners_2d": [0, 20, 25], "boxes_to_corners_3d": [0, 20, 25], "compute_i": [0, 20, 
25], "convert_box_to_polygon": [0, 20, 25], "corners_to_boxes_3d": [0, 20, 25], "decode_box": [0, 20, 25], "denormalize_bbox": [0, 20, 25], "enlarge_box3d": [0, 20, 25], "find_rigid_align": [0, 20, 25], "limit_period": [0, 10, 13, 19, 20, 25], "mask_boxes_outside_range_numpi": [0, 20, 25], "mask_boxes_outside_range_torch": [0, 20, 25], "normalize_bbox": [0, 20, 25], "remove_points_in_boxes3d": [0, 20, 25], "transform_boxes_3d": [0, 20, 25], "eval_detection_util": [0, 25], "cal_ap_all_point": [0, 20, 25], "cal_precision_recal": [0, 20, 25], "calculate_ap": [0, 20, 25], "caluclate_tp_fp": [0, 20, 25], "eval_final_result": [0, 20, 25], "ops_cal_tp": [0, 20, 25], "voc_ap": [0, 20, 25], "iou2d_calcul": [0, 25], "bbox_overlap": [0, 20, 25], "cast_tensor_typ": [0, 20, 25], "fp16_clamp": [0, 20, 25], "logger": [0, 3, 25], "logmet": [0, 20, 25], "add_met": [0, 20, 25], "log": [0, 15, 20, 25], "updat": [0, 1, 2, 3, 4, 9, 19, 20, 24, 25], "smoothedvalu": [0, 20, 25], "avg": [0, 19, 20, 25], "global_avg": [0, 20, 25], "max": [0, 20, 25], "median": [0, 20, 25], "valu": [0, 9, 15, 17, 19, 20, 25], "testlogg": [0, 20, 25], "setup_logg": [0, 1, 3, 20, 25], "lr_schedul": [0, 3, 25], "lrupdat": [0, 20, 25], "get_last_lr": [0, 20, 25], "load_state_dict": [0, 20, 25], "state_dict": [0, 20, 25], "step_epoch": [0, 20, 25], "step_itr": [0, 20, 25], "transformeradaptiveschedul": [0, 20, 25], "calc_lr": [0, 20, 25], "get_lr": [0, 20, 25], "build_lr_schedul": [0, 20, 25], "metric": [0, 3, 17, 25], "add_sampl": [0, 20, 25], "save_detect": [0, 20, 25], "summari": [0, 20, 25], "metricbev": [0, 20, 25], "format_str": [0, 20, 25], "iou": [0, 1, 3, 10, 15, 17, 20, 25], "summary_hook": [0, 20, 25], "metricmot": [0, 20, 25], "metricobjdet": [0, 20, 25], "cal_ap_11_point": [0, 20, 25], "metricsemseg": [0, 20, 25], "cal_ious_and_acc": [0, 20, 25], "check_numpy_to_torch": [0, 20, 25], "ensure_dir": [0, 20, 25], "list_dir": [0, 20, 25], "load_from_pl_state_dict": [0, 20, 25], "load_json": [0, 20, 25], "load_yaml": [0, 20, 25], "multi_appli": [0, 20, 25], "pad_list_to_array_np": [0, 20, 25], "save_json": [0, 20, 25], "save_yaml": [0, 20, 25], "torch_tensor_to_numpi": [0, 20, 25], "update_dict": [0, 20, 25], "module_util": [0, 25], "build_dropout": [0, 20, 25], "build_norm_lay": [0, 10, 19, 20, 25], "digit_vers": [0, 20, 25], "get_target_modul": [0, 20, 25], "instantiate_target_modul": [0, 20, 25], "pclib": [0, 25], "cart2cyl": [0, 20, 25], "cyl2cart": [0, 20, 25], "get_tf_matrix_torch": [0, 20, 25], "header": [0, 20, 25], "lidar_bin2bin": [0, 20, 25], "lidar_bin2pcd": [0, 20, 25], "lidar_bin2pcd_o3d": [0, 20, 25], "lidar_ply2bin": [0, 20, 25], "load_pcd": [0, 20, 25], "mask_points_in_box": [0, 20, 25], "mask_points_in_rang": [0, 20, 25], "mask_values_in_rang": [0, 20, 25], "mat_pitch": [0, 20, 25], "mat_rol": [0, 20, 25], "mat_yaw": [0, 20, 25], "pose2tf": [0, 20, 25], "pose_err_global2relative_torch": [0, 20, 25], "pose_to_transform": [0, 7, 9, 20, 25], "project_points_by_matrix_torch": [0, 20, 25], "read_pli": [0, 7, 9, 20, 25], "rotate3d": [0, 20, 25], "rotate_box_corners_with_tf_np": [0, 20, 25], "rotate_points_along_z_np": [0, 20, 25], "rotate_points_along_z_torch": [0, 20, 25], "rotate_points_batch": [0, 20, 25], "rotate_points_with_tf_np": [0, 20, 25], "rotation_mat2euler_torch": [0, 20, 25], "rotation_matrix": [0, 20, 25], "save_cosense_pli": [0, 20, 25], "tf2pose": [0, 20, 25], "tensor_util": [0, 25], "pad_list_to_array_torch": [0, 20, 25], "train_util": [0, 25], "build_optim": [0, 20, 25], "clip_grad": [0, 20, 25], 
"get_gpu_architectur": [0, 20, 25], "is_tensor_to_cuda": [0, 20, 25], "load_model_dict": [0, 20, 25], "load_tensors_to_gpu": [0, 20, 25], "seed_everyth": [0, 20, 25], "vislib": [0, 25], "bbx2linset": [0, 20, 25], "draw_2d_bboxes_on_img": [0, 20, 25], "draw_3d_points_boxes_on_img": [0, 20, 25], "draw_box_plt": [0, 20, 25], "draw_matched_box": [0, 20, 25], "draw_points_boxes_plt": [0, 20, 25], "get_palette_color": [0, 20, 25], "o3d_draw_agent_data": [0, 20, 25], "o3d_draw_frame_data": [0, 20, 25], "o3d_draw_pcds_bbx": [0, 20, 25], "o3d_play_sequ": [0, 20, 25], "plot_cavs_point": [0, 20, 25], "plt_draw_frame_data": [0, 20, 25], "update_axis_linset": [0, 20, 25], "update_lineset_vbo": [0, 20, 25], "visual": [0, 3, 9, 20, 24, 25], "basecav": [1, 2], "apply_transform": [1, 2, 4], "forward_fus": [1, 2], "forward_head": [1, 2], "forward_loc": [1, 2], "get_request_cpm": [1, 2], "get_response_cpm": [1, 2], "has_request": [1, 2], "post_update_memori": [1, 2], "pre_update_memori": [1, 2], "prepare_data": [1, 2], "receive_request": [1, 2, 3], "receive_respons": [1, 2, 3], "reset_data": [1, 2], "transform_data": [1, 2], "baseseqcav": [1, 2], "get_data": [1, 2], "task_id": [1, 2], "dairv2xcav": [1, 2], "opv2vtcav": [1, 2], "opv2vtcav_v2": [1, 2], "ltscavloccorr": [1, 2], "ltsdairv2x": [1, 2], "streamlidarcav": [1, 2], "prepare_time_scal": [1, 2], "refresh_memori": [1, 2], "timestamp": [1, 2, 9, 13, 24], "transform_ref_pt": [1, 2], "update_memory_timestamp": [1, 2], "vis_local_detect": [1, 2], "vis_local_pr": [1, 2], "vis_pos": [1, 2], "vis_ref_pt": [1, 2], "slcattnfus": [1, 2], "slcciassd": [1, 2], "slcdensetospars": [1, 2], "slcfpvrcnn": [1, 2], "slcfcooper": [1, 2], "slcnoboxtim": [1, 2], "slcnoboxtimedairv2x": [1, 2], "get_prototyp": [1, 2], "baserunn": [1, 3], "logdir": [1, 3, 20], "next_batch": [1, 3], "run": [1, 2, 3, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 24], "set_logdir": [1, 3], "vis_data": [1, 3], "cavmanag": [1, 3], "apply_cav_funct": [1, 3], "get_cav_with_id": [1, 3], "has_cav": [1, 3], "reset": [1, 3], "send_request": [1, 3], "send_respons": [1, 3], "update_cav_info": [1, 3], "update_cpm_statist": [1, 3], "datamanag": [1, 3], "add_loc_err": [1, 3], "apply_preprocess": [1, 3], "boxes_to_vis_format": [1, 3], "distribute_to_cav": [1, 3], "distribute_to_seq_cav": [1, 3], "distribute_to_seq_list": [1, 3], "gather": [1, 3, 24], "gather_batch": [1, 3], "gather_cav_data": [1, 3], "gather_ego_data": [1, 3], "gather_vis_data": [1, 3], "generate_augment_param": [1, 3], "generate_global_non_empty_mask": [1, 3], "generate_local_non_empty_mask": [1, 3], "get_gt_boxes_as_vis_format": [1, 3], "get_vis_data_bev": [1, 3], "get_vis_data_detect": [1, 3], "get_vis_data_input": [1, 3], "get_vis_data_meta": [1, 3], "remove_global_empty_box": [1, 3], "remove_local_empty_box": [1, 3], "sample_global_bev_tgt_pt": [1, 3], "scatter": [1, 3, 24], "vis_global_data_plt": [1, 3], "forwardrunn": [1, 3], "frame_loss": [1, 3], "gather_cav_id": [1, 3], "change_color_mod": [1, 3], "change_glcolor": [1, 3], "change_vis": [1, 3, 5], "connect_events_to_func": [1, 3], "get_toolbar": [1, 3], "initgui": [1, 3], "refresh": [1, 3, 5], "setrunn": [1, 3], "setupui": [1, 3], "start": [1, 3, 5, 24], "step": [1, 3, 4, 9, 15, 17, 19, 20, 24], "stop": [1, 3, 19, 24], "basehook": [1, 3], "post_epoch": [1, 3], "post_it": [1, 3], "pre_epoch": [1, 3], "pre_it": [1, 3], "set_logg": [1, 3], "cpmstatistichook": [1, 3], "checkpointshook": [1, 3], "save": [1, 3, 9, 20, 24], "detectionnmshook": [1, 3], "evalbevsemseghook": [1, 3], "cal_iou": [1, 3], 
"crop_map": [1, 3], "gt_dynamic_map": [1, 3], "gt_static_map": [1, 3], "evaldetectionbevhook": [1, 3], "filter_box_rang": [1, 3], "format_final_result": [1, 3], "evaldetectionhook": [1, 3], "eval_cosense3d_fin": [1, 3], "memoryusagehook": [1, 3], "traintimerhook": [1, 3], "taskmanag": [1, 3], "reformat_task": [1, 3], "summarize_loss_task": [1, 3], "summarize_task": [1, 3], "task_to_ordered_dict": [1, 3], "testrunn": [1, 3, 24], "run_itr": [1, 3], "trainrunn": [1, 3, 24], "resum": [1, 3], "run_epoch": [1, 3], "visrunn": [1, 3, 24], "save_ckpt_on_error": [1, 4], "dataonlineprocessor": [1, 4], "adaptive_free_space_augment": [1, 4], "cav_aug_transform": [1, 4], "filter_rang": [1, 4, 10, 12, 17], "free_space_augment": [1, 4], "generate_sparse_target_bev_point": [1, 4], "generate_sparse_target_roadline_point": [1, 4], "update_transform_with_aug": [1, 4], "add_flip": [1, 4], "add_rot": [1, 4], "add_scal": [1, 4], "filter_range_mask": [1, 4], "generate_bev_tgt_pt": [1, 4], "item": [1, 5, 7], "graph_item": [1, 5], "glviewer": [1, 5, 24], "addbox": [1, 5], "box": [1, 3, 5, 6, 9, 13, 15, 17, 19, 20, 24], "drawrectangl": [1, 5], "draw_ax": [1, 5], "draw_depth_buff": [1, 5], "evt_pos_to_world": [1, 5], "get_point_depth": [1, 5], "get_region_depth": [1, 5], "highlightbox": [1, 5], "initializegl": [1, 5], "keypressev": [1, 5], "keyreleaseev": [1, 5], "model_pose_to_world": [1, 5], "mousedoubleclickev": [1, 5], "mousemoveev": [1, 5], "mousepressev": [1, 5], "mousereleaseev": [1, 5], "paintgl": [1, 5], "paintrect": [1, 5], "removeactiv": [1, 5], "removeheilight": [1, 5], "removerectangl": [1, 5], "selectheilight": [1, 5], "updateframedata": [1, 5], "updatelabel": [1, 5], "updatepcd": [1, 5], "imganno3dview": [1, 5, 24], "imgview": [1, 5, 24], "bevdensecanva": [1, 5], "bevsparsecanva": [1, 5], "detectioncanva": [1, 5], "detectionscoremap": [1, 5], "mplcanva": [1, 5], "update_titl": [1, 5], "outputview": [1, 5, 24], "sparsedetectioncanva": [1, 5], "circular_mask": [1, 5], "depth_min": [1, 5], "class": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "cfg": [1, 3, 7, 8, 17, 19, 20], "data_load": 1, "dist": [1, 3, 12, 17, 19, 20], "fals": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 17, 19, 20], "sourc": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "base": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 24], "object": [1, 2, 3, 4, 8, 9, 15, 17, 19, 20, 24], "properti": [1, 2, 3, 6, 9, 15, 17, 19, 20], "frame_data": 1, "with_loss": [1, 2, 3], "training_mod": [1, 2, 3], "kwarg": [1, 2, 3, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "seq_data": 1, "batch_dict": [1, 3, 7, 20], "arg": [1, 2, 3, 5, 9, 10, 14, 15, 16, 17, 19, 20], "id": [2, 3, 5, 6, 7, 9, 20], "str": [2, 5, 7, 9, 15, 17, 19, 20], "mapped_id": 2, "int": [2, 4, 7, 13, 15, 17, 19, 20], "is_ego": 2, "bool": [2, 3, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "lidar_rang": [2, 3, 4, 5, 9, 13, 14, 17, 19, 20], "tensor": [2, 15, 17, 19, 20], "memory_len": [2, 3, 13, 14], "lidar_pos": [2, 3, 9], "none": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20], "require_grad": 2, "seq_len": [2, 3], "1": [2, 3, 4, 5, 6, 9, 12, 13, 14, 15, 17, 18, 19, 20, 22, 24], "task": [2, 3, 24], "To": [2, 24], "overload": 2, "memori": [2, 13, 14, 17, 19, 20], "after": [2, 9, 17, 19, 24], "each": [2, 4, 9, 13, 14, 15, 17, 19, 20, 24], "singl": [2, 20, 24], "frame": [2, 5, 7, 9, 19, 20, 24], "befor": [2, 9, 17, 19, 24], "request": [2, 3, 18], "respons": [2, 3, 24], "seq_idx": 2, 
"kei": [2, 3, 4, 5, 8, 9, 10, 13, 17], "prev_exist": 2, "reference_point": 2, "matrix": [2, 9, 20], "ref_pt": [2, 13, 14, 17], "ax": [2, 20], "label": [2, 3, 9, 13, 15, 17, 20], "his_len": 2, "alia": 2, "module_full_path": 2, "dataload": [3, 21], "control": [3, 21], "gpu": [3, 20, 22], "0": [3, 4, 5, 6, 8, 9, 12, 13, 14, 15, 16, 17, 19, 20, 22, 24], "log_everi": [3, 20], "10": [3, 4, 5, 9, 20], "prototyp": [3, 24], "all_grad": 3, "num_grad_cav": 3, "cpm_statist": 3, "func_nam": 3, "cav_id": [3, 5, 9], "valid_agent_id": 3, "data": [3, 4, 5, 7, 8, 9, 13, 17, 20, 24], "voxel_s": [3, 17, 19], "aug": 3, "pre_process": 3, "loc_err": [3, 7, 8], "id_appendix": 3, "cav_list": 3, "data_kei": [3, 5], "batch_idx": [3, 17, 19], "to_numpi": 3, "point": [3, 4, 5, 9, 12, 13, 14, 16, 17, 19, 20, 24], "ego_onli": [3, 8], "coor": [3, 12, 14, 17, 19], "global": [3, 9, 20, 22, 24], "successor": [3, 5], "detect": [3, 15, 17, 20, 24], "batch": [3, 17, 20, 22, 24], "index": [3, 7, 17, 21, 22], "default": [3, 4, 15, 17, 19, 20], "i": [3, 4, 5, 7, 9, 13, 15, 17, 18, 19, 20, 24], "custom": [3, 17, 19, 24], "can": [3, 13, 15, 20, 22, 24], "also": [3, 17, 24], "us": [3, 5, 7, 9, 15, 17, 19, 20, 23, 24], "depend": [3, 22], "which": [3, 7, 15, 17, 19, 24], "result": [3, 15, 17, 20, 24], "cav": [3, 9, 19, 20, 24], "pool": 3, "convert": [3, 6, 9, 20, 24], "addit": [3, 15, 17, 19], "gt": [3, 17, 19, 20], "ar": [3, 9, 15, 17, 19, 20, 24], "standarl": 3, "consense3d": [3, 22], "api": [3, 20, 24], "sam_r": [3, 4], "4": [3, 4, 5, 9, 12, 14, 15, 17, 19, 20, 22, 24], "map_r": [3, 4], "2": [3, 4, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22], "rang": [3, 4, 7, 8, 17, 19, 20], "50": [3, 4, 5, 15], "max_num_pt": [3, 4], "5000": [3, 4], "discret": [3, 4], "data_dict": [3, 5, 7, 8, 17, 20], "vis_func": 3, "shared_modul": 3, "chunk_siz": 3, "24": 3, "with_grad": 3, "true": [3, 8, 9, 12, 14, 15, 17, 18, 19, 20], "defin": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 24], "comput": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 24], "perform": [3, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], "everi": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "call": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "should": [3, 5, 10, 11, 12, 13, 14, 16, 17, 18, 19], "overridden": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "all": [3, 7, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 24], "subclass": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "although": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "recip": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "pass": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "need": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19, 22], "within": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "thi": [3, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 24], "function": [3, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "one": [3, 7, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 24], "instanc": [3, 7, 10, 11, 12, 13, 14, 16, 17, 18, 19], "afterward": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "instead": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "sinc": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "former": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "take": [3, 8, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20], "care": [3, 8, 10, 11, 12, 13, 14, 16, 17, 18, 19], "regist": [3, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19], "while": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "latter": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "silent": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19], "ignor": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19, 22], "them": [3, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 24], "gpu_id": [3, 10, 12, 16], 
"mode": [3, 7, 9, 15, 17, 19, 20], "qmainwindow": 3, "name": [3, 5, 10, 15, 17, 19, 20, 24], "runner": [3, 21], "devic": [3, 16, 17, 20], "cuda": [3, 17, 20, 22], "max_ckpt": 3, "3": [3, 4, 9, 12, 13, 14, 15, 17, 18, 19, 20, 22], "epoch_everi": 3, "iter_everi": 3, "static": [3, 4, 7, 9, 14, 17], "nms_thr": 3, "pre_max_s": 3, "det_kei": 3, "test_rang": 3, "test_r": 3, "save_result": 3, "eval_stat": 3, "bev_semseg_kei": 3, "bev_semseg": 3, "gt_bev_kei": 3, "bevmap": 3, "gt_boxes_kei": 3, "global_bboxes_3d": 3, "pred": [3, 14, 15, 16, 17, 19, 20], "gt_map": 3, "tag": [3, 17], "token": 3, "conf": [3, 17, 20], "unc": [3, 17, 20], "obs_mask": [3, 17], "pc_rang": [3, 13, 14, 19, 20], "iou_thr": [3, 20], "5": [3, 4, 5, 9, 13, 14, 17, 19, 20, 22, 24], "7": [3, 9, 13, 17, 20], "gt_kei": [3, 10], "score": [3, 13, 17, 20], "indic": [3, 15, 17, 19, 20, 24], "time": [3, 4, 7, 13, 20], "out_dict": [3, 12, 14, 16, 20], "task_list": 3, "load_from": 3, "ckpt": 3, "max_epoch": 3, "optim": [3, 9, 20], "resume_from": 3, "run_nam": 3, "log_dir": [3, 20], "work_dir": 3, "use_wandb": 3, "debug": [3, 20], "func": [4, 20], "dict": [4, 7, 9, 10, 14, 15, 17, 19, 20], "min_h": 4, "float": [4, 5, 13, 15, 17, 19, 20], "20": [4, 20, 22], "alpha": [4, 15, 17, 20], "05": [4, 19], "re": [4, 9, 10, 12, 17, 19], "time_idx": 4, "add": [4, 19], "free": 4, "space": [4, 9, 18, 19], "accord": [4, 19, 24], "distanc": [4, 17, 19], "origin": [4, 9, 14, 15], "lidar": [4, 9, 20], "h": [4, 14, 17, 19, 20], "el": 4, "d": [4, 12, 16, 17, 19], "assum": [4, 20], "theta": 4, "frac": 4, "pi": 4, "text": [4, 17], "elev": 4, "angl": [4, 20], "averag": [4, 15, 17, 20], "between": [4, 17, 19, 20, 24], "two": [4, 9, 20, 24], "ring": 4, "d_k": 4, "ground": [4, 7, 15, 17, 20], "n_": 4, "th": [4, 7], "k": [4, 8, 10, 17, 19], "n": [4, 9, 10, 12, 14, 15, 17, 19, 20, 22], "delta_d": 4, "neighbor": 4, "tan": 4, "d_n": 4, "d_": 4, "arctan": 4, "we": [4, 5, 19, 20], "sampl": [4, 7, 9, 14, 17, 19], "rel": [4, 9, 20], "given": [4, 7, 17, 19, 20], "input": [4, 8, 10, 13, 14, 15, 16, 17, 18, 19, 20, 24], "contain": [4, 9, 13, 15, 17, 19, 20, 24], "minimum": [4, 20], "height": [4, 5, 9, 19], "number": [4, 7, 9, 15, 17, 19, 20], "rai": [4, 20], "offset": [4, 13, 19, 20], "cast": 4, "resolut": [4, 13, 18], "down": [4, 9, 17], "provid": [4, 9, 17, 22, 24], "copi": [4, 17], "from": [4, 7, 8, 9, 13, 15, 17, 19, 20, 24], "return": [4, 10], "apply_to": 4, "aug_param": 4, "img": [4, 20], "annos_glob": 4, "list": [4, 7, 9, 10, 12, 17, 18, 19, 20], "25": [4, 6, 15, 17], "3000": [4, 8], "kernel": [4, 17, 19], "tf": [4, 15], "flip_idx": 4, "flip_axi": 4, "xy": [4, 20], "rot": [4, 20], "scale_ratio": 4, "ep": [4, 15, 17, 19, 20], "0001": [4, 19], "lineboxitem": [5, 6], "activ": [5, 6, 15, 17, 19, 22], "color": [5, 6, 20], "deactiv": [5, 6], "highlight": [5, 6], "id_ptr": [5, 6], "isact": [5, 6], "to_cent": [5, 6], "lineitem": [5, 6], "hoverev": [5, 6], "meshboxitem": [5, 6], "rectangleitem": [5, 6], "parent": [5, 6], "glviewwidget": 5, "visibl": 5, "pos1": 5, "pos2": 5, "center": [5, 6, 14, 17, 19, 20], "po": [5, 13, 19], "els": [5, 7, 17, 20], "post": 5, "region": 5, "end": [5, 17], "x": [5, 9, 13, 14, 17, 19, 20], "y": [5, 9, 15, 17, 19, 20, 22], "p1": 5, "qpoint": 5, "p2": 5, "ndarrai": [5, 9, 19, 20], "initi": [5, 19], "were": 5, "dure": [5, 17, 24], "additem": 5, "self": [5, 17, 19], "a0": 5, "qkeyev": 5, "z": [5, 9, 17, 19, 20], "qmouseev": 5, "viewport": 5, "useitemnam": 5, "specifi": [5, 15, 17, 19], "argument": [5, 15, 17, 19, 20], "glviewport": 5, "If": [5, 17, 
19, 20, 22], "opt": 5, "sub": [5, 24], "render": 5, "note": [5, 17, 20], "mai": 5, "when": [5, 17, 20], "export": [5, 22], "visible_kei": 5, "globalgt": 5, "color_mod": 5, "unit": [5, 13], "pcd": [5, 8, 9, 20], "local_label": 5, "global_label": 5, "local_det": 5, "global_det": 5, "predecessor": 5, "successor_gt": 5, "pcd_color": 5, "dpi": 5, "figurecanvasqtagg": 5, "100": 5, "mean": [5, 9, 15, 17, 19], "std": [5, 19], "": [5, 13, 17, 19, 20], "topk_ctr": 5, "width": [5, 9, 19], "titl": [5, 9], "plot": [5, 20], "nrow": 5, "ncol": 5, "scenario": [5, 7, 9, 24], "qwidget": 5, "arr_length": 5, "radiu": [5, 17, 19], "depth": [5, 13], "r": [5, 17, 19, 20, 22], "statu": 6, "inact": 6, "show_direct": 6, "last_pos": 6, "line_width": 6, "gllineplotitem": 6, "line": [6, 9], "qgraphicslineitem": 6, "event": 6, "size": [6, 15, 17, 19, 20], "glmeshitem": 6, "rect": 6, "qgraphicsrectitem": 6, "loadannot": [7, 8], "get_lidar2img_transform": [7, 8], "loadcarlaroadlinemap": [7, 8], "load_singl": [7, 8], "loadlidarpoint": [7, 8], "read_pcd": [7, 8], "loadmultiviewimg": [7, 8], "loadopv2vbevmap": [7, 8], "crop_map_for_pos": [7, 8], "loadsparsebevtargetpoint": [7, 8], "generate_sparse_bev_pt": [7, 8], "resizecropfliprotimag": [7, 8], "resizeimag": [7, 8], "build_process": [7, 8], "cosensedataconvert": [7, 9], "obj_id2nam": [7, 9], "obj_list": [7, 9], "obj_name2id": [7, 9], "add_cam_to_fdict": [7, 9], "cal_vbbx_mean_dim": [7, 9], "draw_sample_distribut": [7, 9], "fdict_templ": [7, 9], "global_boxes_to_loc": [7, 9], "obj_from_sustech": [7, 9], "obj_to_opv2v": [7, 9], "obj_to_sustech": [7, 9], "parse_global_bbox_velo": [7, 9], "remove_lidar_info": [7, 9], "supervison_full_to_spars": [7, 9], "to_kitti": [7, 9], "to_opv2v": [7, 9], "to_sustech": [7, 9], "update_ag": [7, 9], "update_agent_gt_box": [7, 9], "update_agent_lidar": [7, 9], "update_frame_bbx": [7, 9], "update_from_sustech": [7, 9], "calib_to_tf_matrix": [7, 9], "convert_v2x_c": [7, 9], "convert_v2x_seq": [7, 9], "load_info_to_dict": [7, 9], "load_label": [7, 9], "optimize_pos": [7, 9], "optimize_trajectori": [7, 9], "parse_global_bbox": [7, 9], "parse_static_pcd": [7, 9], "parse_timestamped_box": [7, 9], "register_pcds_to_block": [7, 9], "register_sequ": [7, 9], "register_step_on": [7, 9], "register_step_two": [7, 9], "remove_ego_box": [7, 9], "select_sub_scen": [7, 9], "boxes_3d_to_2d": [7, 9], "convert_bev_semantic_map_to_road_height_map": [7, 9], "corner_to_cent": [7, 9], "create_bbx": [7, 9], "generate_bevmap": [7, 9], "generate_roadlin": [7, 9], "opv2v_pose_to_cosens": [7, 9], "opv2v_to_cosens": [7, 9], "project_point": [7, 9], "project_world_object": [7, 9], "update_2d_bbox": [7, 9], "update_cam_param": [7, 9], "update_global_bboxes_num_pt": [7, 9], "update_local_boxes3d": [7, 9], "x1_to_x2": [7, 9], "x_to_world": [7, 9], "gen_time_offset": [7, 9], "generate_roadline_reference_point": [7, 9], "get_box_velo": [7, 9], "get_local_boxes3d": [7, 9], "get_velo": [7, 9], "load_vehicles_gfram": [7, 9], "opv2vt_to_cosens": [7, 9], "pad_box_result": [7, 9], "parse_speed_from_yaml": [7, 9], "parse_sub_fram": [7, 9], "read_frame_plys_box": [7, 9], "read_ply_to_dict": [7, 9], "read_sub_fram": [7, 9], "transform_boxes_global_to_ref": [7, 9], "update_bev_map": [7, 9], "update_global_box": [7, 9], "update_velo": [7, 9], "vis_cosense_scenario": [7, 9], "vis_frame_data": [7, 9], "callback_registr": [7, 9], "click_regist": [7, 9], "register_pcd": [7, 9], "batch_list": [7, 14], "sample_info": 7, "prev_ag": 7, "select": [7, 9, 13, 19], "commun": [7, 24], "includ": [7, 
15, 17, 19, 20, 22, 24], "ego": [7, 18, 20], "meta": [7, 9, 23, 24], "info": [7, 20], "loader": 7, "last": 7, "agents_id": 7, "valid": 7, "current": [7, 15, 19, 20], "necessari": [7, 24], "inform": [7, 9, 24], "prev_item": 7, "omit_gt": 7, "annot": [7, 24], "standard": [7, 13, 19, 24], "onli": [7, 13, 17, 19, 24], "previou": 7, "tempor": 7, "whether": [7, 19, 20], "omit": 7, "truth": [7, 15, 17, 20], "local": [7, 14, 20, 22, 24], "error": [7, 20, 22], "json": [7, 9, 24], "file": [7, 9, 20, 23, 24], "wise": [7, 20, 24], "sequenti": [7, 9, 17, 19], "distribut": [7, 9, 15, 17, 19, 24], "load2d": 8, "load_cam_param": 8, "load3d_loc": 8, "load3d_glob": 8, "load_global_tim": 8, "load3d_pr": 8, "min_num_pt": 8, "with_veloc": 8, "class_agnostic_3d": 8, "time_offset": [8, 9], "lidar2cam": [8, 9], "intrins": [8, 9, 18], "75": [8, 20], "path": [8, 9, 20, 23], "ai": 8, "coop_mod": 8, "load_attribut": 8, "xyz": [8, 20], "intens": [8, 20], "pts_filenam": 8, "bgr2rgb": 8, "to_float32": 8, "max_num_img": 8, "img_filter_kei": 8, "use_global_map": 8, "num_point": [8, 20], "data_aug_conf": 8, "with_2d": 8, "filter_invis": 8, "augment": [8, 24], "imag": [8, 9, 11, 17, 18, 23, 24], "random": 8, "resiz": 8, "crop": 8, "flip": 8, "rotat": [8, 20], "modifi": [8, 13, 15, 17, 19, 24], "streampetr": 8, "img_siz": [8, 9, 11, 14, 17, 18, 19], "compos": 8, "sever": 8, "process": [8, 24], "togeth": 8, "directli": 8, "v": [8, 17, 22], "data_path": [9, 20], "meta_path": 9, "vehicl": [9, 13, 20, 23], "car": 9, "van": 9, "truck": 9, "bu": 9, "tram": 9, "motorcycl": 9, "6": [9, 13, 14, 17, 19, 20], "cyclist": 9, "scooter": 9, "8": [9, 12, 13, 17, 19, 20, 22], "other": [9, 15, 17, 20], "9": [9, 20], "human": 9, "pedestrian": 9, "wheelchair": 9, "11": [9, 20], "sit": 9, "12": [9, 17, 22], "trafficcon": 9, "13": [9, 22], "barrowlist": 9, "14": 9, "tricyclist": 9, "15": 9, "unknown": 9, "fdict": 9, "agent_id": 9, "cam_id": 9, "filenam": [9, 20], "extrins": [9, 18], "calcul": [9, 15, 17, 20], "dimens": [9, 13, 17, 19], "four": [9, 24], "wheel": 9, "draw": [9, 20], "observ": [9, 17, 20], "categori": [9, 15], "pickl": 9, "meta_dict": [9, 20], "label_fil": 9, "bbx": [9, 20], "pose": [9, 20], "out_fil": [9, 20], "cosense_obj": 9, "sustech_fil": 9, "out_path": 9, "det_r": [9, 19, 20], "num_box_per_fram": 9, "num_box_tot": 9, "label_ratio": 9, "out_dir": 9, "agent_typ": 9, "agent_pos": 9, "agent_tim": 9, "gt_box": [9, 14, 17, 19, 20], "lidar_id": 9, "lidar_tim": 9, "lidar_fil": 9, "sustech_path": 9, "calib_fil": 9, "root_dir": 9, "meta_out_dir": 9, "info_fil": 9, "seq": 9, "sdict": 9, "out_meta_dir": 9, "ego_agent_id": 9, "idx": [9, 17, 19], "sub_idx": 9, "iter": [9, 20], "over": [9, 20], "doe": 9, "follow": [9, 17, 22, 23], "cloud": [9, 20, 24], "get": [9, 14, 17, 19], "accur": 9, "trajectori": 9, "registr": 9, "belong": [9, 17], "objet": 9, "high": [9, 17], "dynam": [9, 15, 17], "remov": [9, 17], "sequenc": [9, 17, 19], "pair": [9, 17, 20], "merg": [9, 13, 20], "match": [9, 17, 19, 20], "differ": [9, 13, 15, 17, 20], "recov": 9, "world": 9, "directori": 9, "root": 9, "dir": [9, 17, 19], "three": [9, 17, 19, 20, 24], "adict": 9, "four_wheel_onli": 9, "ignore_id": 9, "vi": 9, "mf": 9, "find": [9, 20], "most": 9, "close": 9, "infra": 9, "start_fram": 9, "meta_in": 9, "meta_out": 9, "split": 9, "boxes3d": [9, 20], "num_pt": 9, "map_dir": 9, "map_bounds_fil": 9, "scenario_town_map_fil": 9, "meta_dir": 9, "corner3d": 9, "order": [9, 15, 17, 19, 20, 24], "lwh": [9, 20], "corner": [9, 20], "dx": [9, 20], "dy": [9, 20], "dz": [9, 20], "yaw": 
[9, 20], "np": [9, 20], "hwl": [9, 20], "box3d": 9, "extent": [9, 22], "creat": [9, 15, 17, 19, 22], "bound": [9, 15, 19, 20, 24], "under": 9, "obstacl": 9, "refer": [9, 19], "length": [9, 17, 19, 20], "arrai": [9, 20], "The": [9, 13, 14, 15, 17, 19, 20, 21], "shape": [9, 14, 15, 17, 19, 20], "data_dir": 9, "semant": [9, 17], "map": [9, 14, 17, 19, 20], "2d": [9, 17, 24], "road": 9, "describ": 9, "coordin": [9, 14, 17, 19, 20, 24], "nx2": [9, 20], "meter": [9, 19, 20], "path_in": 9, "path_out": 9, "issim": 9, "correct_transf": 9, "pcd_ext": 9, "roll": [9, 20], "pitch": [9, 20], "3d": [9, 17, 20, 24], "plane": [9, 20], "object_dict": 9, "output_dict": [9, 14, 15], "anoth": [9, 20], "dictionari": [9, 15, 20, 24], "surround": 9, "certain": [9, 17], "xyzlwhyaw": 9, "opv2v_param": 9, "cosense_fdict": 9, "objects_dict": 9, "ref_pos": 9, "x1": [9, 17, 20], "x2": [9, 17, 20], "transformation_matrix": [9, 20], "system": [9, 14, 20], "carla": 9, "meta_fil": 9, "speed": [9, 20], "scene_dir": 9, "param": [9, 15, 19, 20], "data_out_dir": 9, "out_len": 9, "f": 9, "prev_fram": 9, "parse_box": 9, "scenario_meta_fil": 9, "target": [9, 14, 15, 17, 19, 20], "source_point": 9, "target_point": 9, "callback": 9, "pick": 9, "correspond": [9, 17, 19, 20, 24], "source_cloud": 9, "target_cloud": 9, "initial_transf": 9, "thr": 9, "resnetencod": [10, 11], "minkunet": [10, 12], "qmode": [10, 12, 19], "forward_height_compress": [10, 12], "forward_unet": [10, 12], "grid_siz": [10, 12, 14], "init_weight": [10, 12, 13, 14, 17, 18, 19], "stensor_to_dens": [10, 12], "valid_coord": [10, 12], "pillarbev": [10, 12], "to_dense_bev": [10, 12], "to_dens": [10, 12], "post_act_block": [10, 12], "denseattentionfus": [10, 13], "sparseattentionfus": [10, 13], "fuse_feature_at_strid": [10, 13], "boxfus": [10, 13], "cluster_fus": [10, 13], "cluster": [10, 13], "merge_sync_box": [10, 13], "temporal_cluster_fus": [10, 13], "attent": [10, 13, 17], "feedforward": [10, 13], "prenormresidu": [10, 13], "swapfusionblock": [10, 13], "swapfusionblockmask": [10, 13], "swapfusionencod": [10, 13], "keypointsfus": [10, 13], "bevmaxoutfus": [10, 13], "sparsebevmaxoutfus": [10, 13], "naivefus": [10, 13], "spatialqueryalignfusionrl": [10, 13], "align_coordin": [10, 13], "spatialqueryfus": [10, 13], "localnaivefus": [10, 13], "gather_topk": [10, 13, 14, 18], "localtemporalfus": [10, 13], "embed_po": [10, 13], "temporal_align": [10, 13], "localtemporalfusionv1": [10, 13], "localtemporalfusionv2": [10, 13], "localtemporalfusionv3": [10, 13], "temporalfus": [10, 13], "temporallidarfus": [10, 13], "down_sampl": [10, 14, 17], "bevmultiresolut": [10, 14], "contiattnbev": [10, 14], "get_evid": [10, 14], "contigevbev": [10, 14], "continuousbev": [10, 14], "sample_reference_point": [10, 14], "bevroidensehead": [10, 14], "bevseghead": [10, 14], "detanchordens": [10, 14], "add_sin_differ": [10, 14], "predict": [10, 14, 15, 17, 19, 24], "detanchorspars": [10, 14], "detcenterspars": [10, 14], "multilvldetcenterspars": [10, 14], "separatedclshead": [10, 14], "unitedclshead": [10, 14], "unitedreghead": [10, 14], "keypointroihead": [10, 14], "get_dense_grid_point": [10, 14], "get_global_grid_points_of_roi": [10, 14], "roi_grid_pool": [10, 14], "imgfoc": [10, 14], "apply_center_offset": [10, 14], "apply_ltrb": [10, 14], "lidarpetrhead": [10, 14], "multitaskhead": [10, 14], "nbrattentionbev": [10, 14], "downsample_tgt_pt": [10, 14, 17], "generate_reference_point": [10, 14], "get_tgt": [10, 14], "petrhead": [10, 14], "queryguidedpetrhead": [10, 14], "get_pred_box": [10, 
14], "get_predict": [10, 14, 17], "baseloss": [10, 15], "cross_entroy_with_logit": [10, 15], "indices_to_dense_vector": [10, 15], "sigmoid_binary_cross_entropi": [10, 15], "weighted_l1_loss": [10, 15], "weighted_sigmoid_binary_cross_entropi": [10, 15], "weighted_smooth_l1_loss": [10, 15], "edlloss": [10, 15], "edl_mse_loss": [10, 15], "evidence_to_conf_unc": [10, 15], "exp_evid": [10, 15], "kl_diverg": [10, 15], "loglikelihood_loss": [10, 15], "mse_loss": [10, 15], "pred_to_conf_unc": [10, 15], "relu_evid": [10, 15], "softplus_evid": [10, 15], "focalloss": [10, 15], "gaussianfocalloss": [10, 15], "qualityfocalloss": [10, 15], "py_focal_loss_with_prob": [10, 15], "py_sigmoid_focal_loss": [10, 15], "quality_focal_loss": [10, 15], "quality_focal_loss_with_prob": [10, 15], "giouloss": [10, 15], "iouloss": [10, 15], "l1loss": [10, 15], "smoothl1loss": [10, 15], "vanillasegloss": [10, 15], "build_loss": [10, 15], "keypointcompos": [10, 16], "dilationspconv": [10, 16], "get_conv_lay": [10, 16, 17], "dilationspconvabl": [10, 16], "densetospars": [10, 16], "get_cent": [10, 16], "detdensetospars": [10, 16], "fpvrcnntolt": [10, 16], "neighborhoodattent": [10, 17, 19], "coor_to_indic": [10, 17], "get_nbr_map": [10, 17], "scaleddotproductattent": [10, 17], "conv2d": [10, 17, 19], "customrpn": [10, 17], "rpn": [10, 17], "doubleconv": [10, 17], "downsampleconv": [10, 17], "flashattent": [10, 17], "flashmha": [10, 17], "flash_attn_unpadded_kvpacked_test": [10, 17], "index_first_axi": [10, 17], "gevbevdecod": [10, 17], "get_2d_stensor": [10, 17], "naivecompressor": [10, 17], "pfnlayer": [10, 17], "pillarencod": [10, 17], "absolute_xyz_dim": [10, 17], "compose_voxel_featur": [10, 17], "distance_dim": [10, 17], "get_paddings_ind": [10, 17], "intensity_dim": [10, 17], "xyz_dim": [10, 17], "bevboxassign": [10, 17], "assign": [10, 17], "get_labels_single_head": [10, 17], "bevcenternessassign": [10, 17], "bevpointassign": [10, 17], "bevsemsegassign": [10, 17], "down_sample_pred_pt": [10, 17], "get_obs_mask": [10, 17], "pts_to_ind": [10, 17], "baseassign": [10, 17], "boxanchorassign": [10, 17], "box_overlap": [10, 17], "get_anchor_templ": [10, 17], "boxcenterassign": [10, 17], "pts_to_indic": [10, 17], "boxsparseanchorassign": [10, 17], "me_coor_to_grid_indic": [10, 17, 19], "contibevassign": [10, 17], "sample_dynamic_tgt_pt": [10, 17], "discretebevassign": [10, 17], "heatmapassign": [10, 17], "draw_heatmap_gaussian": [10, 17], "hungarianassigner2d": [10, 17], "hungarianassigner3d": [10, 17], "matchcost": [10, 17], "bboxl1": [10, 17], "binary_focal_loss": [10, 17], "build": [10, 17, 19, 22], "classif": [10, 15, 17], "giou": [10, 17, 20], "l1": [10, 17], "roibox3dassign": [10, 17], "roadlineassign": [10, 17], "pos_neg_sampl": [10, 17], "sample_min": [10, 17], "ffn": [10, 17], "multiheadattentionwrapp": [10, 17], "bias_k": [10, 17], "bias_v": [10, 17], "forward_fp16": [10, 17], "forward_fp32": [10, 17], "multiheadattent": [10, 17], "multiheadflashattent": [10, 17], "petrtemporaltransform": [10, 17], "petrtransform": [10, 17], "transformerdecod": [10, 17], "transformerdecoderlay": [10, 17], "transformerlayersequ": [10, 17], "meanvf": [10, 17], "get_output_feature_dim": [10, 17], "voxelgener": [10, 17], "cml": [10, 12, 17], "cmlspars": [10, 17], "conv3d": [10, 17], "voxelsetabstract": [10, 17], "get_sampled_point": [10, 17], "interpolate_from_bev_featur": [10, 17], "bilinear_interpolate_torch": [10, 17], "build_plugin_lay": [10, 17], "build_plugin_modul": [10, 17], "infer_abbr": [10, 17], "faxmodul": [10, 18], 
"resnetbottleneck": [10, 18], "img_position_embed": [10, 18], "sttf": [10, 18], "boxpredcod": [10, 19], "decod": [10, 13, 17, 19], "encod": [10, 11, 17, 19], "centerboxcod": [10, 19], "residualboxcod": [10, 19], "decode_direct": [10, 19], "encode_direct": [10, 19], "build_box_cod": [10, 19], "bias_init_with_prob": [10, 19], "cat_coor_with_idx": [10, 19], "cat_name_str": [10, 19], "clip_sigmoid": [10, 19], "draw_sample_prob": [10, 19], "fuse_batch_indic": [10, 19], "get_conv2d_lay": [10, 19], "get_norm_lay": [10, 19], "get_voxel_cent": [10, 19], "instanti": [10, 17, 19], "inverse_sigmoid": [10, 19], "linear_last": [10, 19], "linear_lay": [10, 19], "meshgrid": [10, 19], "meshgrid_cross": [10, 19], "pad_l": [10, 19], "pad_r": [10, 19], "topk_gath": [10, 19], "weighted_mahalanobis_dist": [10, 19], "xavier_init": [10, 19], "convmodul": [10, 19], "build_conv_lay": [10, 19], "build_padding_lay": [10, 19], "logit_to_edl": [10, 19], "center_to_img_coor": [10, 19], "cornernet_gaussian_radiu": [10, 19], "draw_gaussian_map": [10, 19], "gaussian_2d": [10, 19], "gaussian_radiu": [10, 19], "mahalanobis_dists_2d": [10, 19], "constant_init": [10, 19], "kaiming_init": [10, 19], "normal_init": [10, 19], "trunc_normal_init": [10, 19], "uniform_init": [10, 19], "bev_sparse_to_dens": [10, 19], "devoxelize_with_centroid": [10, 19], "downsample_embed": [10, 19], "downsample_point": [10, 19], "get_conv_block": [10, 19], "get_kernel_map_and_out_kei": [10, 19], "indices2metr": [10, 19], "metric2indic": [10, 19], "mink_coor_limit": [10, 19], "minkconv_conv_block": [10, 19], "minkconv_lay": [10, 19], "normalize_centroid": [10, 19], "normalize_point": [10, 19], "prepare_input_data": [10, 19], "sparse_to_dens": [10, 15, 19], "stride_centroid": [10, 19], "update_me_essenti": [10, 19], "voxelize_with_centroid": [10, 19], "mln": [10, 19], "reset_paramet": [10, 19], "mln2": [10, 19], "selayer_linear": [10, 19], "coor2ratio": [10, 19], "img_loc": [10, 19], "nerf_positional_encod": [10, 19], "pos2posemb1d": [10, 19], "pos2posemb2d": [10, 19], "pos2posemb3d": [10, 19], "ratio2coord": [10, 19], "build_torch_modul": [10, 19], "gather_kei": 10, "scatter_kei": 10, "freez": 10, "pad_idx": 10, "d_list": 10, "x_list": 10, "recurs": 10, "concaten": [10, 17], "sub_list": 10, "img_list": 10, "stensor_list": [10, 14, 16], "stride": [10, 12, 13, 14, 16, 17, 18, 19], "output": [10, 11, 13, 14, 15, 17, 19, 20, 24], "b": [10, 12, 14, 16, 17, 19, 20], "must": [10, 14, 17, 20], "implement": [10, 14, 17, 19, 24], "module_cfg": [10, 19], "num_lay": [11, 17], "feat_indic": 11, "out_index": 11, "resnet": 11, "famili": 11, "num_img": 11, "input_imag": 11, "data_info": [12, 14, 16, 17, 19], "in_dim": [12, 14, 19], "kernel_size_layer1": 12, "enc_dim": 12, "32": [12, 13, 17, 20], "cache_strid": 12, "floor_height": [12, 19], "height_compress": 12, "compression_kernel_size_xi": 12, "stensor": [12, 19], "in_channel": [12, 13, 14, 17, 18, 19], "layer_num": 12, "layer_strid": 12, "downsample_channel": 12, "upsample_channel": 12, "upsample_strid": 12, "bev_shrink": 12, "bev_compressor": 12, "feat": [12, 14, 17, 19], "out_channel": [12, 14, 17, 19], "bev_neck": 12, "cache_coord": 12, "kernel_s": [12, 17, 19], "indice_kei": 12, "pad": [12, 17, 19, 20], "conv_typ": 12, "subm": 12, "norm_fn": 12, "feature_dim": 13, "ego_feat": 13, "coop_feat": 13, "fuse_kei": 13, "global_tim": 13, "weight": [13, 15, 17, 19], "ego_pr": 13, "coop_pr": 13, "c": [13, 14, 15, 17, 18, 19, 20, 22, 23], "val": [13, 19, 20], "period": [13, 19, 20], "283185306": [13, 19], "about": 13, 
"swap": 13, "applic": 13, "dim": [13, 14, 17, 18, 19, 20], "dim_head": 13, "dropout": [13, 17], "agent_s": 13, "window_s": [13, 20], "todo": 13, "mask": [13, 17, 19], "ad": [13, 17, 19, 20], "yet": 13, "featur": [13, 17, 18, 19], "rate": 13, "view": 13, "hidden_dim": [13, 17], "fn": 13, "input_dim": [13, 14, 17], "mlp_dim": 13, "drop_out": 13, "block": [13, 19], "window": 13, "grid": [13, 14, 17, 20], "enabl": 13, "multi": 13, "cooper": [13, 23, 24], "128": [13, 17, 19], "256": [13, 14, 17, 19], "rearrang": 13, "mlp_head": 13, "coop_cpm": 13, "train_from_epoch": [13, 14, 16], "num_pose_feat": [13, 19], "64": [13, 18, 19], "ego_bctr": 13, "ego_rl": 13, "ego_rl_pr": 13, "ego_pos": 13, "cpfeat": 13, "det_loc": 13, "roadlin": 13, "roadline_pr": 13, "ego_queri": 13, "ego_pose_correct": 13, "ego_poses_aug": 13, "cpm": [13, 24], "feature_strid": [13, 14], "pos_dim": 13, "topk_ref_pt": 13, "1024": 13, "ref_pts_strid": 13, "transformer_itr": 13, "global_ref_tim": 13, "naiv": [13, 17], "replac": [13, 20], "topk": [13, 14, 18], "later": 13, "spatial": 13, "local_roi": 13, "global_roi": 13, "bev_feat": [13, 14, 16, 17, 18], "mem_dict": 13, "roi": [13, 14, 17], "topk_feat": 13, "512": [13, 20], "num_propag": 13, "norm_fus": 13, "query_po": [13, 17], "tgt": [13, 15, 17], "ref_feat": 13, "ref_tim": 13, "flow": [13, 17, 24], "time_scal": 13, "2048": [13, 14, 18], "num_queri": [13, 14, 17, 18], "644": [13, 14, 18], "loss_cl": 14, "num_cl": 14, "class_names_each_head": [14, 17], "down_sample_tgt": 14, "generate_roi_scr": 14, "gt_label": [14, 17], "strides_for_loss": 14, "context_decod": 14, "seg": 14, "understand": 14, "bev_pr": 14, "bev_tgt": 14, "output_class": 14, "dynamic_bev_pr": 14, "dynamic_bev": 14, "loss_box": 14, "num_class": [14, 15, 17], "get_boxes_when_train": 14, "box_stamp": 14, "boxes1": [14, 17, 20], "boxes2": [14, 17, 20], "bev_feat_list": 14, "dens": [14, 15, 17, 20], "show": [14, 24], "have": [14, 17], "w": [14, 17, 19, 20, 22], "get_roi_scor": 14, "shared_conv_channel": 14, "cls_head_cfg": 14, "reg_head_cfg": 14, "reg_channel": 14, "cls_assign": 14, "box_assign": 14, "center_threshold": [14, 17], "bn": [14, 19, 20], "gt_mask": 14, "nlvl": 14, "spars": [14, 19], "feat_in": 14, "reference_ind": 14, "one_hot_encod": 14, "use_bia": 14, "combine_channel": 14, "sigmoid_kei": 14, "n_fc_channel": 14, "dp_ratio": 14, "batch_size_rcnn": 14, "li": 14, "out": [14, 19, 20], "epoch": [14, 20], "target_dict": [14, 17], "embed_dim": [14, 17], "center_assign": 14, "loss_cls2d": 14, "loss_cent": 14, "loss_bbox2d": 14, "loss_iou2d": 14, "loss_centers2d": 14, "with_depth": 14, "locat": 14, "center_offset": 14, "pred_ltrb": 14, "img_feat": [14, 18], "img_coor": [14, 18], "labels2d": 14, "centers2d": [14, 17], "bboxes2d": 14, "tensor_list": [14, 19], "annealing_step": [14, 15, 17], "tgt_label": [14, 17], "max_sam": [14, 17], "code_weight": [14, 17], "loss_bbox": 14, "loss_iou": 14, "num_reg_fc": [14, 18], "num_pr": [14, 18], "use_logit": 14, "petr_out": 14, "det": 14, "pred_while_train": 14, "bbox_pr": [14, 17], "cls_score": 14, "det_box": [14, 20], "pred_box": [14, 17, 20], "batch_ind": 14, "gt_boxes_glob": 14, "gt_labels_glob": 14, "reduct": 15, "loss_weight": [15, 17], "avg_factor": 15, "reduction_overrid": 15, "factor": 15, "method": [15, 17, 19, 20], "overrid": 15, "n_cl": [15, 17], "gamma": [15, 17], "use_sigmoid": 15, "floattensor": [15, 17, 20], "longtensor": [15, 17], "largest": 15, "background": [15, 17], "same": [15, 17, 19, 20], "indices_valu": 15, "default_valu": 15, "vector": [15, 17, 20], 
"set": [15, 19, 20], "specif": 15, "rest": 15, "zero": [15, 17, 19, 20], "exist": [15, 20], "becaus": 15, "unclear": 15, "safe": 15, "validate_indic": 15, "accept": 15, "e": [15, 17, 19], "g": [15, 17, 19], "1d": 15, "integ": [15, 20], "element": [15, 17, 19], "d1": 15, "dn": 15, "sum": [15, 17], "sigma": [15, 17, 19], "class_indic": 15, "temp": 15, "n_cls_overrid": 15, "model_label": 15, "evidenti": 15, "distinguish": 15, "logit": [15, 17, 19], "temperatur": [15, 19], "anneal": 15, "kl": 15, "diverg": 15, "term": 15, "maximum": [15, 17, 20], "evid": 15, "epoch_num": 15, "relu": [15, 17, 19], "bg_idx": 15, "variant": 15, "focal": 15, "more": [15, 17, 19, 20], "detail": [15, 17], "found": 15, "paper": [15, 17], "code": [15, 19], "kp_util": 15, "py": [15, 20], "noqa": 15, "e501": 15, "pleas": [15, 22], "notic": 15, "gaussian": [15, 17, 19], "heatmap": [15, 17], "binari": [15, 17, 20], "learn": [15, 17, 24], "beta": [15, 20], "joint": 15, "represent": 15, "qualiti": 15, "estim": 15, "pytorch": [15, 19, 22, 24], "version": [15, 20], "probabl": [15, 17, 19], "A": [15, 17, 19, 20], "balanc": 15, "form": 15, "torch": [15, 17, 19, 20, 22], "qfl": 15, "gener": [15, 17, 19, 20, 24], "qualifi": 15, "1e": [15, 17, 19, 20], "07": [15, 17], "06": [15, 20], "d_weight": 15, "s_weight": 15, "d_coe": 15, "s_coe": 15, "l_weight": 15, "static_pr": 15, "dynamic_pr": 15, "static_gt": 15, "dynamic_gt": 15, "gt_dict": 15, "groundtruth": [15, 20], "type": [15, 17, 19, 20, 24], "voxel_feat": [16, 17], "n_layer": [16, 17, 19], "emb_dim": [17, 19], "ctr_coor": 17, "ctr_feat": 17, "q": [17, 19], "out_featur": 17, "attend": [17, 19], "value_po": 17, "scale": [17, 20], "dot": 17, "product": 17, "propos": 17, "you": [17, 22], "queri": 17, "divid": 17, "sqrt": 17, "appli": [17, 19, 20], "softmax": 17, "obtain": 17, "q_len": 17, "d_model": 17, "k_len": 17, "v_len": 17, "context": 17, "mechan": 17, "align": [17, 20], "p": 17, "batch_norm": 17, "anchor_num": 17, "downsampl": 17, "doubl": 17, "convoltuion": 17, "softmax_scal": 17, "attention_dropout": 17, "return_attn_weight": 17, "dtype": [17, 20], "kv": [17, 19], "causal": 17, "key_padding_mask": 17, "multihead": 17, "t": [17, 20], "num_head": 17, "bia": [17, 19], "batch_first": 17, "seqlen": 17, "where": 17, "num": 17, "cu_seqlens_q": 17, "cu_seqlens_k": 17, "max_sq": 17, "max_sk": 17, "dropout_p": 17, "batch_siz": [17, 19], "num_out": 17, "start_level": 17, "end_level": 17, "add_extra_conv": 17, "relu_before_extra_conv": 17, "no_norm_on_later": 17, "conv_cfg": [17, 19], "norm_cfg": [17, 19], "act_cfg": [17, 19], "upsample_cfg": 17, "nearest": 17, "init_cfg": 17, "uniform": 17, "layer": [17, 19], "xavier": 17, "var0": [17, 19], "ctr_reg": 17, "regress": 17, "out_evid": 17, "dilat": [17, 19], "stensor_dict": 17, "compress_ratio": 17, "veri": 17, "compress": 17, "channel": [17, 19], "use_norm": 17, "last_lay": 17, "voxel_featur": 17, "coord": [17, 19], "voxel_num_point": 17, "actual_num": 17, "max_num": 17, "axi": [17, 19, 20], "shrink_strid": 17, "shrink_channel": 17, "conv_nam": [17, 19], "relu_last": [17, 19], "convolut": [17, 19], "pos_neg_ratio": 17, "mining_thr": 17, "max_mining_ratio": 17, "mining_start_epoch": 17, "merge_all_class": 17, "positv": 17, "min_radiu": [17, 19], "ani": [17, 22], "pred_scor": 17, "use_gaussian": 17, "sample_mining_thr": 17, "topk_sampl": 17, "annealing_sampl": 17, "buffer": 17, "tgt_pt": 17, "tgt_rang": 17, "ctr_pt": 17, "ind": 17, "pt": 17, "abstract": 17, "box_siz": 17, "pos_threshold": 17, "neg_threshold": 17, "45": 17, "score_thrshold": 
17, "l": [17, 19], "reg": [17, 19, 20], "num_anchor": 17, "code_s": 17, "detection_benchmark": 17, "gt_pred": [17, 19], "bbox": [17, 19, 20], "cl": 17, "ncl": 17, "per": [17, 20], "scr": [17, 19], "lbl": 17, "bev_pt": 17, "1st": 17, "column": 17, "me_coor": 17, "mink": 17, "m": [17, 20], "reg_tgt": 17, "ir_scor": 17, "direct": 17, "distr_r": [17, 19], "keyword": [17, 19], "its": [17, 24], "semseg": 17, "uncertainti": 17, "confid": [17, 20], "tupl": [17, 19, 20], "segment": [17, 24], "illustr": 17, "tgt_lbl": 17, "obj_centers2d": 17, "obj_bbox": 17, "img_shap": 17, "option": [17, 19, 20, 21], "multipl": [17, 20, 24], "masked_gaussian": 17, "cls_cost": 17, "reg_cost": 17, "iou_cost": 17, "centers2d_cost": 17, "an": [17, 19, 24], "cost": 17, "compon": 17, "center2d": 17, "done": 17, "matter": 17, "do": [17, 19, 20], "hungarian": 17, "cpu": [17, 20], "first": [17, 19, 20, 24], "treat": 17, "foreground": [17, 20], "plu": 17, "cls_pred": 17, "pred_centers2d": 17, "gt_bbox": 17, "assigned_gt_ind": 17, "don": 17, "neg": 17, "posit": [17, 19, 20], "normal": [17, 19], "cx": [17, 20], "cy": [17, 20], "num_gt": 17, "denomin": [17, 20], "numer": [17, 19, 20], "stabil": [17, 20], "mmdet": 17, "box_format": 17, "xyxi": 17, "y1": [17, 20], "y2": [17, 20], "detr": 17, "xywh": 17, "sparse_rcnn": 17, "bbox_cost": 17, "unnorm": 17, "giou_cost": 17, "see": 17, "rcnn_cl": 17, "rcnn_iou": 17, "rcnn_reg": 17, "ratio": [17, 19], "num_neg_sampl": 17, "num_pos_sampl": 17, "max_sample_ratio": 17, "max_num_sampl": 17, "limit": 17, "major": 17, "hard": 17, "weaken": 17, "lot": 17, "therefor": [17, 20], "mine": 17, "n1": 17, "nk": 17, "threshold": [17, 20], "n_sampl": 17, "n_pos_sampl": 17, "feedforward_channel": 17, "num_fc": 17, "inplac": [17, 19], "add_residu": 17, "feed": 17, "network": [17, 24], "residu": 17, "connect": [17, 19], "embed": [17, 19], "e_q": 17, "unbatch": 17, "compar": [17, 20], "against": 17, "produc": [17, 19], "e_k": 17, "kdim": 17, "e_v": 17, "vdim": 17, "purpos": 17, "For": [17, 20, 22], "byte": [17, 20], "support": [17, 19], "non": [17, 20], "need_weight": 17, "attn_output_weight": 17, "attn_output": 17, "attn_mask": 17, "prevent": 17, "cdot": 17, "_head": 17, "broadcast": 17, "across": 17, "allow": [17, 24], "entri": 17, "average_attn_weight": 17, "attn_weight": 17, "otherwis": [17, 19, 20], "separ": 17, "flag": 17, "ha": [17, 19, 20], "effect": 17, "average_weight": 17, "cache_attn_weight": 17, "fp16": [17, 20], "wrapper": 17, "nn": [17, 19], "ident": 17, "key_po": 17, "num_kei": 17, "link": [17, 23], "bytetensor": 17, "combin": 17, "oper": 17, "transformerlay": 17, "attn_drop": 17, "proj_drop": 17, "cross": 17, "offici": [17, 19], "past": 17, "modif": 17, "extra": [17, 22], "ln": 17, "stack": 17, "pos_emb": 17, "temp_memori": 17, "temp_po": 17, "query_mask": 17, "reg_branch": 17, "post_norm_cfg": 17, "return_intermedi": 17, "attn_cfg": 17, "ffn_cfg": 17, "operation_ord": 17, "with_cp": 17, "query_key_padding_mask": 17, "transformercod": 17, "transformerencod": 17, "vision": 17, "As": 17, "kind": [17, 20], "transformer_lay": 17, "transformer_cod": 17, "num_point_featur": 17, "voxel": [17, 19], "num_voxel": 17, "max_points_per_voxel": 17, "vfe_featur": 17, "empty_mean": 17, "point_cloud_rang": [17, 19], "num_keypoint": 17, "4096": 17, "num_out_featur": 17, "point_sourc": 17, "raw_point": 17, "features_sourc": 17, "num_bev_featur": 17, "bev_strid": 17, "num_rawpoint_featur": 17, "enlarge_selection_box": 17, "sa_lay": 17, "min_selected_kpt": 17, "det_out": 17, "voxel_coord": [17, 19], 
"keypoints_list": 17, "bev_featur": 17, "im": 17, "postfix": [17, 19], "identifi": [17, 24], "append": [17, 19], "abbrevi": [17, 19], "second": [17, 19], "class_typ": 17, "infer": 17, "rule": 17, "abbr": 17, "fall": 17, "back": [17, 20, 24], "snake": 17, "case": [17, 20], "fancyblock": 17, "fancy_block": 17, "middl": 18, "feat_dim": [18, 19], "cross_view": 18, "cross_view_swap": 18, "bev_embed": 18, "self_attn": 18, "position_rang": 18, "depth_num": 18, "lid": 18, "depth_start": 18, "img_roi": 18, "lidar2img": [18, 20], "img_memori": 18, "img_po": 18, "img2lidar": 18, "downsample_r": 18, "use_roi_mask": 18, "coop_pos": 18, "with_velo": 19, "vel": 19, "meter_per_pixel": 19, "with_pr": 19, "reg_radiu": 19, "z_offset": 19, "simple_dist": 19, "anchor": [19, 20], "boxes_enc": 19, "dir_scor": 19, "ra": 19, "vt": 19, "rg": 19, "prior_prob": 19, "fc": 19, "module_nam": 19, "xxx_yyy_zzz": 19, "class_nam": 19, "xxxyyyzzz": 19, "sigmoid": 19, "lower": 19, "clamp": 19, "num_cav": 19, "fuse": 19, "me": [19, 22], "downsample_tim": 19, "cls_name": 19, "invers": 19, "avoid": 19, "overflow": 19, "mid_channel": 19, "in_out": 19, "xmin": 19, "xmax": 19, "ymin": 19, "ymax": 19, "n_step": 19, "topk_index": 19, "reg_evi": 19, "reg_var": 19, "gain": 19, "group": 19, "auto": 19, "with_spectral_norm": 19, "padding_mod": 19, "act": 19, "bundl": 19, "simplifi": 19, "usag": 19, "commonli": 19, "batchnorm": 19, "It": 19, "upon": [19, 24], "build_activation_lay": 19, "besid": 19, "some": [19, 20], "automat": 19, "spectral": 19, "circular": 19, "reflect": 19, "_convnd": 19, "convolv": 19, "both": 19, "side": [19, 23], "decid": 19, "config": 19, "been": 19, "our": [19, 22], "own": 19, "exampl": [19, 20, 24], "openmmlab": 19, "__init__": 19, "center_in": 19, "pixel_sz": 19, "min_overlap": 19, "box_dim": 19, "overlap": [19, 20], "squar": 19, "mahalanobi": 19, "deviat": 19, "var": 19, "varianc": 19, "fan_out": 19, "nonlinear": 19, "sparsetensor": 19, "tensorfield": 19, "h_emb": 19, "inverse_map": 19, "tensor_map": 19, "field_map": 19, "nc": 19, "tr": 19, "bn_momentum": 19, "in_lay": 19, "mid_lay": 19, "out_lay": 19, "transpos": 19, "stensor_out": 19, "kernel_typ": 19, "cube": 19, "kernel_gener": 19, "hybrid": 19, "v0": 19, "nxc": 19, "lr": [19, 20], "round": 19, "toward": 19, "floor": 19, "out_dim": 19, "leakyrelu": 19, "expand_coordin": 19, "norm_befor": 19, "down_point": 19, "tensor_strid": 19, "centroid": 19, "points_list": 19, "coor_dim": 19, "count": 19, "row": [19, 20, 24], "col": [19, 20], "essenti": [19, 22], "variabl": [19, 20], "python": [19, 22], "zmin": 19, "zmax": 19, "vx": 19, "vy": 19, "vz": 19, "enc_mlp": 19, "c_dim": 19, "f_dim": 19, "latent": 19, "act_lay": 19, "gate_lay": 19, "x_se": 19, "n_nbr": 19, "16": [19, 22], "neighborhood": 19, "mem_coor": 19, "q_coor": 19, "kv_coor": 19, "num_featur": 19, "requires_grad": 19, "gradient": [19, 24], "consist": 19, "bn1": 19, "gn": 19, "feat_siz": 19, "num_encoding_funct": 19, "include_input": 19, "log_sampl": 19, "position": 19, "num_pos_feat": 19, "10000": 19, "tenosr": 20, "x_min": 20, "y_min": 20, "x_max": 20, "y_max": 20, "boxes_np": 20, "left": 20, "front": 20, "geometri": 20, "polygon": 20, "boxes_arrai": 20, "lwh_mean": 20, "normalized_bbox": 20, "extra_width": 20, "extra_x": 20, "extra_i": 20, "extra_z": 20, "translat": 20, "283185307179586": 20, "limit_rang": 20, "min_num_corn": 20, "minx": 20, "mini": 20, "minz": 20, "maxx": 20, "maxi": 20, "maxz": 20, "requir": [20, 21, 24], "consid": 20, "filter": 20, "x_idx": 20, "NOT": 20, "boxes_in": 20, "tp": 20, 
"n_pred": 20, "n_gt": 20, "http": [20, 22], "github": [20, 22], "com": [20, 22], "rafaelpadilla": 20, "blob": 20, "7c0bd0489e3fd4ae71fc0bc8f2a67dbab5dbdc9c": 20, "lib": 20, "evalu": 20, "l292": 20, "result_stat": 20, "global_sort_detect": 20, "precis": 20, "recal": 20, "txt": [20, 22], "fp": 20, "sort": 20, "det_scor": 20, "iou_thresh": 20, "det_rang": 20, "preditect": 20, "thresh": 20, "left_rang": 20, "right_rang": 20, "iou_mod": 20, "rec": 20, "prec": 20, "voc": 20, "2010": 20, "bboxes1": 20, "bboxes2": 20, "is_align": 20, "contribut": 20, "open": 20, "mmlab": 20, "mmdetect": 20, "pull": 20, "4889": 20, "new": [20, 24], "area1": 20, "area2": 20, "lt": [20, 22], "rb": 20, "wh": 20, "union": 20, "total": 20, "reduc": 20, "larg": [20, 24], "than": 20, "alwai": 20, "obvious": 20, "40": 20, "400000": 20, "cnn": 20, "275": 20, "mb": 20, "special": 20, "3516": 20, "43": 20, "gb": 20, "frequent": 20, "experi": 20, "geforc": 20, "rtx": [20, 22], "2080ti": 20, "11019": 20, "mib": 20, "real": 20, "ideal": 20, "so": 20, "faster": 20, "fp32": 20, "gpu_assign_thr": 20, "There": 20, "half": 20, "keep": 20, "empti": 20, "b1": 20, "b2": 20, "equal": 20, "intersect": 20, "iof": 20, "38": 20, "42": 20, "19": 20, "assert": 20, "nonempti": 20, "min": 20, "total_it": 20, "delimit": 20, "wandb_project": 20, "fmt": 20, "msg": 20, "exp_nam": 20, "polici": 20, "unifi": 20, "schedul": 20, "itr": 20, "dim_emb": 20, "warmup_step": 20, "itrs_per_epoch": 20, "last_epoch": 20, "global_fade_ratio": 20, "verbos": 20, "_lrschedul": 20, "run_path": 20, "test": [20, 22, 24], "result_dict": 20, "full": 20, "area": 20, "l315": 20, "pl_state_dict": 20, "cloader": 20, "yaml": 20, "numpi": 20, "cdumper": 20, "torch_tensor": 20, "dict_out": 20, "dict_add": 20, "config_add": 20, "config_out": 20, "overwritten": 20, "version_str": 20, "string": 20, "usual": 20, "pre": [20, 24], "releas": 20, "rc": 20, "level": 20, "digit": 20, "input_xyz": 20, "input_xyz_polar": 20, "inv": 20, "bin_fil": 20, "ply_fil": 20, "field": 20, "read": 20, "ply": 20, "exisit": 20, "pcd_file": 20, "return_o3d": 20, "o3d": 20, "pointcloud": 20, "lidar_dict": 20, "pcd_np": 20, "cosa": 20, "sina": 20, "ones": 20, "err": 20, "errorn": 20, "nx3": 20, "coop": 20, "x3": 20, "projected_point": 20, "euler": 20, "tf_np": 20, "nx8x3": 20, "4x4": 20, "along": 20, "increas": [20, 24], "radian": 20, "points_rot": 20, "mat": 20, "degre": 20, "construct": 20, "3x3": 20, "output_file_nam": 20, "tf_matrix": 20, "steps_per_epoch": 20, "max_norm": 20, "35": 20, "norm_typ": 20, "pretrained_dict": 20, "seed": 20, "lineset": 20, "line_set": 20, "open3d": 20, "boxes2d": 20, "ax_in": 20, "top": [20, 24], "right": 20, "bottom": [20, 24], "inn": 20, "pixel": 20, "plt": 20, "camera": 20, "boxes_dec": 20, "linewidth_scal": 20, "linestyl": 20, "solid": 20, "drawn": 20, "boxes_pr": 20, "boxes_gt": 20, "wandb_nam": 20, "points_c": 20, "grai": 20, "bbox_gt_c": 20, "green": [20, 24], "bbox_pred_c": 20, "red": [20, 24], "bbox_pred_label": 20, "bbox_gt_label": 20, "return_ax": 20, "marker_s": 20, "palett": 20, "agent_dict": 20, "frame_dict": 20, "bbxs_color": 20, "pcds_color": 20, "points_kei": 20, "axis_len": 20, "vbo": 20, "func_list": 20, "batch_data": 20, "instal": 21, "prepar": 21, "opv2vt": 21, "dairv2xt": 21, "structur": [21, 23], "framework": 21, "central": 21, "cosense3d": [21, 22, 23, 24], "packag": [21, 22, 25], "modul": [21, 24, 25], "search": 21, "page": [21, 23], "ubuntu": 22, "04": 22, "nvidia": 22, "3090": 22, "ti": 22, "4090": 22, "environ": 22, "command": [22, 23, 24], 
"conda": 22, "cd": [22, 23], "opencosense3d": 22, "setup_env_3090": 22, "sh": [22, 23], "setup_env_4090": 22, "confront": 22, "try": 22, "openbla": 22, "devel": 22, "anaconda": 22, "forg": 22, "libstdcxx": 22, "ng": 22, "libffi": 22, "sudo": 22, "apt": 22, "python3": 22, "dev": 22, "libopenbla": 22, "compil": 22, "extens": 22, "nvcc": 22, "pip": 22, "cu113": 22, "torchvis": 22, "torchaudio": 22, "url": 22, "download": [22, 23], "org": 22, "whl": 22, "pip3": 22, "cu118": 22, "op": 22, "reququirements_cosense_3090": 22, "reququirements_cosense_4090": 22, "graphic": [22, 24], "interfac": [22, 24], "requirements_ui": 22, "minkovskiengin": 22, "u": 22, "git": 22, "minkowskiengin": 22, "dep": 22, "blas_include_dir": 22, "conda_prefix": 22, "bla": 22, "omp_num_thread": 22, "check": [22, 23, 24], "import": 22, "print": 22, "__version__": 22, "script": 23, "bash": 23, "tool": 23, "output_dir": 23, "dair": 23, "v2x": 23, "extract": 23, "infrastructur": 23, "2021_08_16_22_26_54": 23, "velodyn": 23, "Then": [23, 24], "overal": 24, "main": 24, "user": 24, "manag": 24, "black": 24, "arrow": 24, "instruct": 24, "either": 24, "without": 24, "collect": 24, "percept": 24, "predefin": 24, "store": 24, "With": 24, "easili": 24, "rewrit": 24, "cope": 24, "media": 24, "sensor": 24, "pars": 24, "abl": 24, "outcom": 24, "click": 24, "help": 24, "develop": 24, "correctli": 24, "drawback": 24, "problem": 24, "refin": 24, "accordingli": 24, "send": 24, "opengl": 24, "canvas": 24, "In": 24, "avail": 24, "launch": 24, "thei": 24, "respect": 24, "dispatch": 24, "through": 24, "similarli": 24, "pseudo": 24, "share": 24, "deep": 24, "workflow": 24, "strategi": 24, "neuron": 24, "onc": 24, "receiv": 24, "step2": 24, "step3": 24, "effici": 24, "summar": 24, "parallel": 24, "finish": 24, "individu": 24, "subpackag": 25, "content": 25}, "objects": {"": [[0, 0, 0, "-", "cosense3d"]], "cosense3d": [[1, 0, 0, "-", "agents"], [7, 0, 0, "-", "dataset"], [10, 0, 0, "-", "modules"], [20, 0, 0, "-", "utils"]], "cosense3d.agents": [[2, 0, 0, "-", "cav_prototype"], [1, 0, 0, "-", "center_controller"], [3, 0, 0, "-", "core"], [4, 0, 0, "-", "utils"], [5, 0, 0, "-", "viewer"]], "cosense3d.agents.cav_prototype": [[2, 0, 0, "-", "base_cav"], [2, 3, 1, "", "get_prototype"], [2, 0, 0, "-", "streamLTS_collection"]], "cosense3d.agents.cav_prototype.base_cav": [[2, 1, 1, "", "BaseCAV"], [2, 1, 1, "", "BaseSeqCAV"], [2, 1, 1, "", "DairV2XCAV"], [2, 1, 1, "", "OPV2VtCAV"], [2, 1, 1, "", "OPV2VtCAV_v2"]], "cosense3d.agents.cav_prototype.base_cav.BaseCAV": [[2, 2, 1, "", "apply_transform"], [2, 2, 1, "", "forward"], [2, 2, 1, "", "forward_fusion"], [2, 2, 1, "", "forward_head"], [2, 2, 1, "", "forward_local"], [2, 2, 1, "", "forward_localization"], [2, 2, 1, "", "get_request_cpm"], [2, 2, 1, "", "get_response_cpm"], [2, 2, 1, "", "has_request"], [2, 2, 1, "", "loss"], [2, 2, 1, "", "post_update_memory"], [2, 2, 1, "", "pre_update_memory"], [2, 2, 1, "", "prepare_data"], [2, 2, 1, "", "receive_request"], [2, 2, 1, "", "receive_response"], [2, 2, 1, "", "reset_data"], [2, 2, 1, "", "transform_data"], [2, 2, 1, "", "update"]], "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV": [[2, 2, 1, "", "apply_transform"], [2, 2, 1, "", "forward"], [2, 2, 1, "", "forward_fusion"], [2, 2, 1, "", "forward_head"], [2, 2, 1, "", "forward_local"], [2, 2, 1, "", "get_data"], [2, 2, 1, "", "get_request_cpm"], [2, 2, 1, "", "get_response_cpm"], [2, 2, 1, "", "has_request"], [2, 2, 1, "", "loss"], [2, 2, 1, "", "post_update_memory"], [2, 2, 1, "", 
"pre_update_memory"], [2, 2, 1, "", "prepare_data"], [2, 2, 1, "", "receive_request"], [2, 2, 1, "", "receive_response"], [2, 2, 1, "", "reset_data"], [2, 2, 1, "", "task_id"], [2, 2, 1, "", "update"]], "cosense3d.agents.cav_prototype.streamLTS_collection": [[2, 1, 1, "", "LTSCAVLocCorr"], [2, 1, 1, "", "LTSDairV2X"], [2, 1, 1, "", "StreamLidarCAV"], [2, 5, 1, "", "slcAttnFusion"], [2, 1, 1, "", "slcCIASSD"], [2, 1, 1, "", "slcDenseToSparse"], [2, 1, 1, "", "slcFPVRCNN"], [2, 5, 1, "", "slcFcooper"], [2, 1, 1, "", "slcNoBoxTime"], [2, 1, 1, "", "slcNoBoxTimeDairV2X"]], "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr": [[2, 2, 1, "", "apply_transform"], [2, 2, 1, "", "forward_fusion"], [2, 2, 1, "", "forward_head"], [2, 2, 1, "", "forward_local"], [2, 2, 1, "", "forward_localization"], [2, 2, 1, "", "get_response_cpm"], [2, 2, 1, "", "prepare_data"], [2, 2, 1, "", "transform_data"]], "cosense3d.agents.cav_prototype.streamLTS_collection.LTSDairV2X": [[2, 2, 1, "", "forward_fusion"], [2, 2, 1, "", "forward_head"], [2, 2, 1, "", "forward_local"], [2, 2, 1, "", "loss"]], "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV": [[2, 2, 1, "", "apply_transform"], [2, 2, 1, "", "forward_fusion"], [2, 2, 1, "", "forward_head"], [2, 2, 1, "", "forward_local"], [2, 2, 1, "", "get_response_cpm"], [2, 2, 1, "", "loss"], [2, 2, 1, "", "post_update_memory"], [2, 2, 1, "", "pre_update_memory"], [2, 2, 1, "", "prepare_data"], [2, 2, 1, "", "prepare_time_scale"], [2, 2, 1, "", "refresh_memory"], [2, 4, 1, "", "timestamp"], [2, 2, 1, "", "transform_data"], [2, 2, 1, "", "transform_ref_pts"], [2, 2, 1, "", "update_memory_timestamps"], [2, 2, 1, "", "vis_local_detection"], [2, 2, 1, "", "vis_local_pred"], [2, 2, 1, "", "vis_poses"], [2, 2, 1, "", "vis_ref_pts"]], "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD": [[2, 2, 1, "", "apply_transform"], [2, 2, 1, "", "forward_fusion"], [2, 2, 1, "", "forward_head"], [2, 2, 1, "", "forward_local"], [2, 2, 1, "", "get_response_cpm"], [2, 2, 1, "", "loss"], [2, 2, 1, "", "post_update_memory"], [2, 2, 1, "", "pre_update_memory"], [2, 2, 1, "", "prepare_data"]], "cosense3d.agents.cav_prototype.streamLTS_collection.slcDenseToSparse": [[2, 2, 1, "", "forward_local"], [2, 2, 1, "", "prepare_data"]], "cosense3d.agents.cav_prototype.streamLTS_collection.slcFPVRCNN": [[2, 2, 1, "", "forward_local"], [2, 2, 1, "", "prepare_data"]], "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTime": [[2, 2, 1, "", "prepare_data"], [2, 2, 1, "", "update_memory_timestamps"]], "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTimeDairV2X": [[2, 2, 1, "", "prepare_data"], [2, 2, 1, "", "update_memory_timestamps"]], "cosense3d.agents.center_controller": [[1, 1, 1, "", "CenterController"]], "cosense3d.agents.center_controller.CenterController": [[1, 4, 1, "", "model"], [1, 4, 1, "", "modules"], [1, 4, 1, "", "parameters"], [1, 2, 1, "", "run_frame"], [1, 2, 1, "", "run_seq"], [1, 2, 1, "", "setup_core"], [1, 2, 1, "", "test_forward"], [1, 2, 1, "", "train_forward"], [1, 2, 1, "", "update_cfg"], [1, 2, 1, "", "vis_forward"]], "cosense3d.agents.core": [[3, 0, 0, "-", "base_runner"], [3, 0, 0, "-", "cav_manager"], [3, 0, 0, "-", "data_manager"], [3, 0, 0, "-", "forward_runner"], [3, 0, 0, "-", "gui"], [3, 0, 0, "-", "hooks"], [3, 0, 0, "-", "task_manager"], [3, 0, 0, "-", "test_runner"], [3, 0, 0, "-", "train_runner"], [3, 0, 0, "-", "vis_runner"]], "cosense3d.agents.core.base_runner": [[3, 1, 1, "", "BaseRunner"]], 
"cosense3d.agents.core.base_runner.BaseRunner": [[3, 2, 1, "", "init"], [3, 4, 1, "", "logdir"], [3, 2, 1, "", "next_batch"], [3, 2, 1, "", "run"], [3, 2, 1, "", "set_logdir"], [3, 2, 1, "", "setup_logger"], [3, 2, 1, "", "vis_data"]], "cosense3d.agents.core.cav_manager": [[3, 1, 1, "", "CAVManager"]], "cosense3d.agents.core.cav_manager.CAVManager": [[3, 2, 1, "", "apply_cav_function"], [3, 2, 1, "", "forward"], [3, 2, 1, "", "get_cav_with_id"], [3, 2, 1, "", "has_cav"], [3, 2, 1, "", "receive_request"], [3, 2, 1, "", "receive_response"], [3, 2, 1, "", "reset"], [3, 2, 1, "", "send_request"], [3, 2, 1, "", "send_response"], [3, 2, 1, "", "update_cav_info"], [3, 2, 1, "", "update_cpm_statistic"]], "cosense3d.agents.core.data_manager": [[3, 1, 1, "", "DataManager"]], "cosense3d.agents.core.data_manager.DataManager": [[3, 2, 1, "", "add_loc_err"], [3, 2, 1, "", "apply_preprocess"], [3, 2, 1, "", "boxes_to_vis_format"], [3, 2, 1, "", "distribute_to_cav"], [3, 2, 1, "", "distribute_to_seq_cav"], [3, 2, 1, "", "distribute_to_seq_list"], [3, 2, 1, "", "gather"], [3, 2, 1, "", "gather_batch"], [3, 2, 1, "", "gather_cav_data"], [3, 2, 1, "", "gather_ego_data"], [3, 2, 1, "", "gather_vis_data"], [3, 2, 1, "", "generate_augment_params"], [3, 2, 1, "", "generate_global_non_empty_mask"], [3, 2, 1, "", "generate_local_non_empty_mask"], [3, 2, 1, "", "get_gt_boxes_as_vis_format"], [3, 2, 1, "", "get_vis_data_bev"], [3, 2, 1, "", "get_vis_data_detection"], [3, 2, 1, "", "get_vis_data_input"], [3, 2, 1, "", "get_vis_data_meta"], [3, 2, 1, "", "remove_global_empty_boxes"], [3, 2, 1, "", "remove_local_empty_boxes"], [3, 2, 1, "", "sample_global_bev_tgt_pts"], [3, 2, 1, "", "scatter"], [3, 2, 1, "", "update"], [3, 2, 1, "", "vis_global_data_plt"]], "cosense3d.agents.core.forward_runner": [[3, 1, 1, "", "ForwardRunner"]], "cosense3d.agents.core.forward_runner.ForwardRunner": [[3, 2, 1, "", "forward"], [3, 2, 1, "", "frame_loss"], [3, 2, 1, "", "gather_cav_ids"], [3, 2, 1, "", "loss"], [3, 2, 1, "", "to_gpu"], [3, 5, 1, "", "training"]], "cosense3d.agents.core.gui": [[3, 1, 1, "", "GUI"]], "cosense3d.agents.core.gui.GUI": [[3, 2, 1, "", "change_color_mode"], [3, 2, 1, "", "change_glcolor"], [3, 2, 1, "", "change_visible"], [3, 2, 1, "", "connect_events_to_funcs"], [3, 2, 1, "", "get_toolbar"], [3, 2, 1, "", "initGUI"], [3, 2, 1, "", "refresh"], [3, 2, 1, "", "setRunner"], [3, 2, 1, "", "setupUI"], [3, 2, 1, "", "start"], [3, 2, 1, "", "step"], [3, 2, 1, "", "stop"]], "cosense3d.agents.core.hooks": [[3, 1, 1, "", "BaseHook"], [3, 1, 1, "", "CPMStatisticHook"], [3, 1, 1, "", "CheckPointsHook"], [3, 1, 1, "", "DetectionNMSHook"], [3, 1, 1, "", "EvalBEVSemsegHook"], [3, 1, 1, "", "EvalDetectionBEVHook"], [3, 1, 1, "", "EvalDetectionHook"], [3, 1, 1, "", "Hooks"], [3, 1, 1, "", "MemoryUsageHook"], [3, 1, 1, "", "TrainTimerHook"]], "cosense3d.agents.core.hooks.BaseHook": [[3, 2, 1, "", "post_epoch"], [3, 2, 1, "", "post_iter"], [3, 2, 1, "", "pre_epoch"], [3, 2, 1, "", "pre_iter"], [3, 2, 1, "", "set_logger"]], "cosense3d.agents.core.hooks.CPMStatisticHook": [[3, 2, 1, "", "post_epoch"], [3, 2, 1, "", "set_logger"]], "cosense3d.agents.core.hooks.CheckPointsHook": [[3, 2, 1, "", "post_epoch"], [3, 2, 1, "", "post_iter"], [3, 2, 1, "", "save"]], "cosense3d.agents.core.hooks.DetectionNMSHook": [[3, 2, 1, "", "post_iter"]], "cosense3d.agents.core.hooks.EvalBEVSemsegHook": [[3, 2, 1, "", "cal_ious"], [3, 2, 1, "", "crop_map"], [3, 2, 1, "", "gt_dynamic_map"], [3, 2, 1, "", "gt_static_map"], [3, 2, 1, "", "iou"], [3, 2, 1, 
"", "post_epoch"], [3, 2, 1, "", "post_iter"], [3, 2, 1, "", "set_logger"]], "cosense3d.agents.core.hooks.EvalDetectionBEVHook": [[3, 2, 1, "", "filter_box_ranges"], [3, 2, 1, "", "format_final_result"], [3, 2, 1, "", "post_epoch"], [3, 2, 1, "", "post_iter"], [3, 2, 1, "", "set_logger"]], "cosense3d.agents.core.hooks.EvalDetectionHook": [[3, 2, 1, "", "eval_cosense3d_final"], [3, 2, 1, "", "filter_box_ranges"], [3, 2, 1, "", "format_final_result"], [3, 2, 1, "", "post_epoch"], [3, 2, 1, "", "post_iter"], [3, 2, 1, "", "set_logger"]], "cosense3d.agents.core.hooks.Hooks": [[3, 2, 1, "", "set_logger"]], "cosense3d.agents.core.hooks.MemoryUsageHook": [[3, 2, 1, "", "post_iter"]], "cosense3d.agents.core.hooks.TrainTimerHook": [[3, 2, 1, "", "post_iter"], [3, 2, 1, "", "pre_epoch"]], "cosense3d.agents.core.task_manager": [[3, 1, 1, "", "TaskManager"]], "cosense3d.agents.core.task_manager.TaskManager": [[3, 2, 1, "", "reformat_tasks"], [3, 2, 1, "", "summarize_loss_tasks"], [3, 2, 1, "", "summarize_tasks"], [3, 2, 1, "", "task_to_ordered_dict"]], "cosense3d.agents.core.test_runner": [[3, 1, 1, "", "TestRunner"]], "cosense3d.agents.core.test_runner.TestRunner": [[3, 2, 1, "", "load"], [3, 2, 1, "", "run"], [3, 2, 1, "", "run_itr"], [3, 2, 1, "", "setup_logger"], [3, 2, 1, "", "step"]], "cosense3d.agents.core.train_runner": [[3, 1, 1, "", "TrainRunner"]], "cosense3d.agents.core.train_runner.TrainRunner": [[3, 2, 1, "", "resume"], [3, 2, 1, "", "run"], [3, 2, 1, "", "run_epoch"], [3, 2, 1, "", "run_itr"], [3, 2, 1, "", "setup_logger"], [3, 2, 1, "", "step"]], "cosense3d.agents.core.vis_runner": [[3, 1, 1, "", "VisRunner"]], "cosense3d.agents.core.vis_runner.VisRunner": [[3, 2, 1, "", "load"], [3, 2, 1, "", "run"], [3, 2, 1, "", "run_itr"], [3, 2, 1, "", "step"]], "cosense3d.agents.utils": [[4, 0, 0, "-", "deco"], [4, 0, 0, "-", "transform"]], "cosense3d.agents.utils.deco": [[4, 3, 1, "", "save_ckpt_on_error"]], "cosense3d.agents.utils.transform": [[4, 1, 1, "", "DataOnlineProcessor"], [4, 3, 1, "", "add_flip"], [4, 3, 1, "", "add_rotate"], [4, 3, 1, "", "add_scale"], [4, 3, 1, "", "apply_transform"], [4, 3, 1, "", "filter_range"], [4, 3, 1, "", "filter_range_mask"], [4, 3, 1, "", "generate_bev_tgt_pts"]], "cosense3d.agents.utils.transform.DataOnlineProcessor": [[4, 2, 1, "", "adaptive_free_space_augmentation"], [4, 2, 1, "", "apply_transform"], [4, 2, 1, "", "cav_aug_transform"], [4, 2, 1, "", "filter_range"], [4, 2, 1, "", "free_space_augmentation"], [4, 2, 1, "", "generate_sparse_target_bev_points"], [4, 2, 1, "", "generate_sparse_target_roadline_points"], [4, 2, 1, "", "update_transform_with_aug"]], "cosense3d.agents.viewer": [[5, 0, 0, "-", "gl_viewer"], [5, 0, 0, "-", "img_anno3d_viewer"], [5, 0, 0, "-", "img_viewer"], [6, 0, 0, "-", "items"], [5, 0, 0, "-", "output_viewer"], [5, 0, 0, "-", "utils"]], "cosense3d.agents.viewer.gl_viewer": [[5, 1, 1, "", "GLViewer"]], "cosense3d.agents.viewer.gl_viewer.GLViewer": [[5, 2, 1, "", "addBox"], [5, 2, 1, "", "box"], [5, 2, 1, "", "change_visibility"], [5, 2, 1, "", "drawRectangle"], [5, 2, 1, "", "draw_axes"], [5, 2, 1, "", "draw_depth_buffer"], [5, 2, 1, "", "evt_pos_to_world"], [5, 2, 1, "", "get_point_depth"], [5, 2, 1, "", "get_region_depth"], [5, 2, 1, "", "highlightBox"], [5, 2, 1, "", "initializeGL"], [5, 2, 1, "", "keyPressEvent"], [5, 2, 1, "", "keyReleaseEvent"], [5, 2, 1, "", "model_pose_to_world"], [5, 2, 1, "", "mouseDoubleClickEvent"], [5, 2, 1, "", "mouseMoveEvent"], [5, 2, 1, "", "mousePressEvent"], [5, 2, 1, "", "mouseReleaseEvent"], 
[5, 2, 1, "", "paintGL"], [5, 2, 1, "", "paintRect"], [5, 2, 1, "", "refresh"], [5, 2, 1, "", "removeActivate"], [5, 2, 1, "", "removeHeilight"], [5, 2, 1, "", "removeRectangle"], [5, 2, 1, "", "selectHeilight"], [5, 2, 1, "", "updateFrameData"], [5, 2, 1, "", "updateLabel"], [5, 2, 1, "", "updatePCDs"]], "cosense3d.agents.viewer.img_anno3d_viewer": [[5, 1, 1, "", "ImgAnno3DViewer"]], "cosense3d.agents.viewer.img_anno3d_viewer.ImgAnno3DViewer": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.img_viewer": [[5, 1, 1, "", "ImgViewer"]], "cosense3d.agents.viewer.img_viewer.ImgViewer": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.items": [[6, 0, 0, "-", "graph_items"]], "cosense3d.agents.viewer.items.graph_items": [[6, 1, 1, "", "LineBoxItem"], [6, 1, 1, "", "LineItem"], [6, 1, 1, "", "MeshBoxItem"], [6, 1, 1, "", "RectangleItem"]], "cosense3d.agents.viewer.items.graph_items.LineBoxItem": [[6, 2, 1, "", "activate"], [6, 2, 1, "", "color"], [6, 2, 1, "", "deactivate"], [6, 2, 1, "", "highlight"], [6, 5, 1, "", "id_ptr"], [6, 5, 1, "", "ids"], [6, 4, 1, "", "isActive"], [6, 2, 1, "", "to_center"]], "cosense3d.agents.viewer.items.graph_items.LineItem": [[6, 2, 1, "", "hoverEvent"]], "cosense3d.agents.viewer.items.graph_items.RectangleItem": [[6, 2, 1, "", "hoverEvent"]], "cosense3d.agents.viewer.output_viewer": [[5, 1, 1, "", "BEVDenseCanvas"], [5, 1, 1, "", "BEVSparseCanvas"], [5, 1, 1, "", "DetectionCanvas"], [5, 1, 1, "", "DetectionScoreMap"], [5, 1, 1, "", "MplCanvas"], [5, 1, 1, "", "OutputViewer"], [5, 1, 1, "", "SparseDetectionCanvas"]], "cosense3d.agents.viewer.output_viewer.BEVDenseCanvas": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.output_viewer.BEVSparseCanvas": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.output_viewer.DetectionCanvas": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.output_viewer.DetectionScoreMap": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.output_viewer.MplCanvas": [[5, 2, 1, "", "update_title"]], "cosense3d.agents.viewer.output_viewer.OutputViewer": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.output_viewer.SparseDetectionCanvas": [[5, 2, 1, "", "refresh"]], "cosense3d.agents.viewer.utils": [[5, 3, 1, "", "circular_mask"], [5, 3, 1, "", "depth_min"]], "cosense3d.dataset": [[7, 0, 0, "-", "const"], [7, 0, 0, "-", "cosense_dataset"], [7, 3, 1, "", "get_dataloader"], [8, 0, 0, "-", "pipeline"], [7, 0, 0, "-", "temporal_cosense_dataset"], [9, 0, 0, "-", "toolkit"]], "cosense3d.dataset.cosense_dataset": [[7, 1, 1, "", "CosenseDataset"]], "cosense3d.dataset.cosense_dataset.CosenseDataset": [[7, 5, 1, "", "LABEL_COLORS"], [7, 5, 1, "", "VALID_CLS"], [7, 2, 1, "", "collate_batch"], [7, 2, 1, "", "get_valid_agents"], [7, 2, 1, "", "init_dataset"], [7, 2, 1, "", "load_frame_data"], [7, 2, 1, "", "load_meta"], [7, 2, 1, "", "load_sample_info"], [7, 2, 1, "", "parse_samples"]], "cosense3d.dataset.pipeline": [[8, 1, 1, "", "Pipeline"], [8, 0, 0, "-", "loading"], [8, 0, 0, "-", "transform"]], "cosense3d.dataset.pipeline.Pipeline": [[8, 2, 1, "", "build_process"]], "cosense3d.dataset.pipeline.loading": [[8, 1, 1, "", "LoadAnnotations"], [8, 1, 1, "", "LoadCarlaRoadlineMaps"], [8, 1, 1, "", "LoadLidarPoints"], [8, 1, 1, "", "LoadMultiViewImg"], [8, 1, 1, "", "LoadOPV2VBevMaps"], [8, 1, 1, "", "LoadSparseBevTargetPoints"]], "cosense3d.dataset.pipeline.loading.LoadAnnotations": [[8, 2, 1, "", "get_lidar2img_transform"]], "cosense3d.dataset.pipeline.loading.LoadCarlaRoadlineMaps": [[8, 2, 1, "", "load_single"]], 
"cosense3d.dataset.pipeline.loading.LoadLidarPoints": [[8, 2, 1, "", "read_pcd"]], "cosense3d.dataset.pipeline.loading.LoadOPV2VBevMaps": [[8, 2, 1, "", "crop_map_for_pose"], [8, 2, 1, "", "load_single"]], "cosense3d.dataset.pipeline.loading.LoadSparseBevTargetPoints": [[8, 2, 1, "", "generate_sparse_bev_pts"]], "cosense3d.dataset.pipeline.transform": [[8, 1, 1, "", "ResizeCropFlipRotImage"], [8, 1, 1, "", "ResizeImage"]], "cosense3d.dataset.temporal_cosense_dataset": [[7, 1, 1, "", "TemporalCosenseDataset"]], "cosense3d.dataset.toolkit": [[9, 3, 1, "", "callback_registrations"], [9, 3, 1, "", "click_register"], [9, 0, 0, "-", "cosense"], [9, 0, 0, "-", "dairv2x"], [9, 0, 0, "-", "opv2v"], [9, 0, 0, "-", "opv2v_t"], [9, 3, 1, "", "register_pcds"]], "cosense3d.dataset.toolkit.cosense": [[9, 1, 1, "", "CoSenseDataConverter"]], "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter": [[9, 5, 1, "", "OBJ_ID2NAME"], [9, 5, 1, "", "OBJ_LIST"], [9, 5, 1, "", "OBJ_NAME2ID"], [9, 2, 1, "", "add_cam_to_fdict"], [9, 2, 1, "", "cal_vbbx_mean_dim"], [9, 2, 1, "", "draw_sample_distributions"], [9, 2, 1, "", "fdict_template"], [9, 2, 1, "", "global_boxes_to_local"], [9, 2, 1, "", "load_meta"], [9, 2, 1, "", "obj_from_sustech"], [9, 2, 1, "", "obj_to_opv2v"], [9, 2, 1, "", "obj_to_sustech"], [9, 2, 1, "", "parse_global_bbox_velo"], [9, 2, 1, "", "remove_lidar_info"], [9, 2, 1, "", "supervison_full_to_sparse"], [9, 2, 1, "", "to_kitti"], [9, 2, 1, "", "to_opv2v"], [9, 2, 1, "", "to_sustech"], [9, 2, 1, "", "update_agent"], [9, 2, 1, "", "update_agent_gt_boxes"], [9, 2, 1, "", "update_agent_lidar"], [9, 2, 1, "", "update_frame_bbx"], [9, 2, 1, "", "update_from_sustech"]], "cosense3d.dataset.toolkit.dairv2x": [[9, 3, 1, "", "calib_to_tf_matrix"], [9, 3, 1, "", "convert_v2x_c"], [9, 3, 1, "", "convert_v2x_seq"], [9, 3, 1, "", "load_info_to_dict"], [9, 3, 1, "", "load_label"], [9, 3, 1, "", "optimize_poses"], [9, 3, 1, "", "optimize_trajectory"], [9, 3, 1, "", "parse_global_bboxes"], [9, 3, 1, "", "parse_static_pcd"], [9, 3, 1, "", "parse_timestamped_boxes"], [9, 3, 1, "", "register_pcds_to_blocks"], [9, 3, 1, "", "register_sequence"], [9, 3, 1, "", "register_step_one"], [9, 3, 1, "", "register_step_two"], [9, 3, 1, "", "remove_ego_boxes"], [9, 3, 1, "", "select_sub_scenes"]], "cosense3d.dataset.toolkit.opv2v": [[9, 3, 1, "", "boxes_3d_to_2d"], [9, 3, 1, "", "convert_bev_semantic_map_to_road_height_map"], [9, 3, 1, "", "corner_to_center"], [9, 3, 1, "", "create_bbx"], [9, 3, 1, "", "generate_bevmaps"], [9, 3, 1, "", "generate_roadline"], [9, 3, 1, "", "opv2v_pose_to_cosense"], [9, 3, 1, "", "opv2v_to_cosense"], [9, 3, 1, "", "pose_to_transformation"], [9, 3, 1, "", "project_points"], [9, 3, 1, "", "project_world_objects"], [9, 3, 1, "", "update_2d_bboxes"], [9, 3, 1, "", "update_cam_params"], [9, 3, 1, "", "update_global_bboxes_num_pts"], [9, 3, 1, "", "update_local_boxes3d"], [9, 3, 1, "", "x1_to_x2"], [9, 3, 1, "", "x_to_world"]], "cosense3d.dataset.toolkit.opv2v_t": [[9, 3, 1, "", "gen_time_offsets"], [9, 3, 1, "", "generate_roadline_reference_points"], [9, 3, 1, "", "get_box_velo"], [9, 3, 1, "", "get_local_boxes3d"], [9, 3, 1, "", "get_velos"], [9, 3, 1, "", "load_frame_data"], [9, 3, 1, "", "load_vehicles_gframe"], [9, 3, 1, "", "opv2vt_to_cosense"], [9, 3, 1, "", "pad_box_result"], [9, 3, 1, "", "parse_speed_from_yamls"], [9, 3, 1, "", "parse_sub_frame"], [9, 3, 1, "", "read_frame_plys_boxes"], [9, 3, 1, "", "read_ply"], [9, 3, 1, "", "read_ply_to_dict"], [9, 3, 1, "", "read_sub_frame"], [9, 3, 1, "", 
"transform_boxes_global_to_ref"], [9, 3, 1, "", "update_bev_map"], [9, 3, 1, "", "update_global_boxes"], [9, 3, 1, "", "update_velo"], [9, 3, 1, "", "vis_cosense_scenario"], [9, 3, 1, "", "vis_frame_data"]], "cosense3d.modules": [[10, 1, 1, "", "BaseModule"], [11, 0, 0, "-", "backbone2d"], [12, 0, 0, "-", "backbone3d"], [10, 3, 1, "", "build_module"], [13, 0, 0, "-", "fusion"], [14, 0, 0, "-", "heads"], [15, 0, 0, "-", "losses"], [16, 0, 0, "-", "necks"], [17, 0, 0, "-", "plugin"], [18, 0, 0, "-", "projection"], [19, 0, 0, "-", "utils"]], "cosense3d.modules.BaseModule": [[10, 2, 1, "", "cat_data_from_list"], [10, 2, 1, "", "cat_dict_list"], [10, 2, 1, "", "cat_list"], [10, 2, 1, "", "compose_imgs"], [10, 2, 1, "", "compose_result_list"], [10, 2, 1, "", "compose_stensor"], [10, 2, 1, "", "decompose_stensor"], [10, 2, 1, "", "format_input"], [10, 2, 1, "", "format_output"], [10, 2, 1, "", "forward"], [10, 2, 1, "", "freeze_parameters"], [10, 2, 1, "", "loss"], [10, 2, 1, "", "prepare_vis_data"], [10, 2, 1, "", "stack_data_from_list"], [10, 2, 1, "", "stack_dict_list"], [10, 2, 1, "", "to_gpu"], [10, 5, 1, "", "training"]], "cosense3d.modules.backbone2d": [[11, 0, 0, "-", "resnet_encoder"]], "cosense3d.modules.backbone2d.resnet_encoder": [[11, 1, 1, "", "ResnetEncoder"]], "cosense3d.modules.backbone2d.resnet_encoder.ResnetEncoder": [[11, 2, 1, "", "format_output"], [11, 2, 1, "", "forward"], [11, 5, 1, "", "training"]], "cosense3d.modules.backbone3d": [[12, 0, 0, "-", "mink_unet"], [12, 0, 0, "-", "pillar_bev"], [12, 0, 0, "-", "spconv"], [12, 0, 0, "-", "voxelnet"]], "cosense3d.modules.backbone3d.mink_unet": [[12, 1, 1, "", "MinkUnet"]], "cosense3d.modules.backbone3d.mink_unet.MinkUnet": [[12, 5, 1, "", "QMODE"], [12, 2, 1, "", "format_output"], [12, 2, 1, "", "forward"], [12, 2, 1, "", "forward_height_compression"], [12, 2, 1, "", "forward_unet"], [12, 2, 1, "", "grid_size"], [12, 2, 1, "", "init_weights"], [12, 2, 1, "", "stensor_to_dense"], [12, 2, 1, "", "to_gpu"], [12, 5, 1, "", "training"], [12, 2, 1, "", "valid_coords"]], "cosense3d.modules.backbone3d.pillar_bev": [[12, 1, 1, "", "PillarBEV"]], "cosense3d.modules.backbone3d.pillar_bev.PillarBEV": [[12, 2, 1, "", "format_output"], [12, 2, 1, "", "forward"], [12, 2, 1, "", "to_dense_bev"], [12, 5, 1, "", "training"]], "cosense3d.modules.backbone3d.spconv": [[12, 1, 1, "", "Spconv"], [12, 3, 1, "", "post_act_block"]], "cosense3d.modules.backbone3d.spconv.Spconv": [[12, 2, 1, "", "format_output"], [12, 2, 1, "", "forward"], [12, 2, 1, "", "to_dense"], [12, 5, 1, "", "training"]], "cosense3d.modules.backbone3d.voxelnet": [[12, 1, 1, "", "VoxelNet"]], "cosense3d.modules.backbone3d.voxelnet.VoxelNet": [[12, 2, 1, "", "forward"], [12, 2, 1, "", "to_dense"], [12, 5, 1, "", "training"]], "cosense3d.modules.fusion": [[13, 0, 0, "-", "attn_fusion"], [13, 0, 0, "-", "box_fusion"], [13, 0, 0, "-", "fax"], [13, 0, 0, "-", "keypoints"], [13, 0, 0, "-", "maxout_fusion"], [13, 0, 0, "-", "naive_fusion"], [13, 0, 0, "-", "spatial_query_fusion"], [13, 0, 0, "-", "temporal_fusion"]], "cosense3d.modules.fusion.attn_fusion": [[13, 1, 1, "", "DenseAttentionFusion"], [13, 1, 1, "", "SparseAttentionFusion"]], "cosense3d.modules.fusion.attn_fusion.DenseAttentionFusion": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.attn_fusion.SparseAttentionFusion": [[13, 2, 1, "", "format_output"], [13, 2, 1, "", "forward"], [13, 2, 1, "", "fuse_feature_at_stride"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.box_fusion": [[13, 
1, 1, "", "BoxFusion"], [13, 3, 1, "", "limit_period"]], "cosense3d.modules.fusion.box_fusion.BoxFusion": [[13, 2, 1, "", "cluster_fusion"], [13, 2, 1, "", "clustering"], [13, 2, 1, "", "forward"], [13, 2, 1, "", "merge_sync_boxes"], [13, 2, 1, "", "temporal_cluster_fusion"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.fax": [[13, 1, 1, "", "Attention"], [13, 1, 1, "", "FeedForward"], [13, 1, 1, "", "PreNormResidual"], [13, 1, 1, "", "SwapFusionBlock"], [13, 1, 1, "", "SwapFusionBlockMask"], [13, 1, 1, "", "SwapFusionEncoder"]], "cosense3d.modules.fusion.fax.Attention": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.fax.FeedForward": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.fax.PreNormResidual": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.fax.SwapFusionBlock": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.fax.SwapFusionBlockMask": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.fax.SwapFusionEncoder": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.keypoints": [[13, 1, 1, "", "KeypointsFusion"], [13, 3, 1, "", "limit_period"]], "cosense3d.modules.fusion.keypoints.KeypointsFusion": [[13, 2, 1, "", "cluster_fusion"], [13, 2, 1, "", "clustering"], [13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.maxout_fusion": [[13, 1, 1, "", "BEVMaxoutFusion"], [13, 1, 1, "", "SparseBEVMaxoutFusion"]], "cosense3d.modules.fusion.maxout_fusion.BEVMaxoutFusion": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.maxout_fusion.SparseBEVMaxoutFusion": [[13, 2, 1, "", "format_output"], [13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.naive_fusion": [[13, 1, 1, "", "NaiveFusion"]], "cosense3d.modules.fusion.naive_fusion.NaiveFusion": [[13, 2, 1, "", "format_output"], [13, 2, 1, "", "forward"], [13, 2, 1, "", "fuse_feature_at_stride"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.spatial_query_fusion": [[13, 1, 1, "", "SpatialQueryAlignFusionRL"], [13, 1, 1, "", "SpatialQueryFusion"]], "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryAlignFusionRL": [[13, 2, 1, "", "align_coordinates"], [13, 2, 1, "", "format_output"], [13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryFusion": [[13, 2, 1, "", "format_output"], [13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.temporal_fusion": [[13, 1, 1, "", "LocalNaiveFusion"], [13, 1, 1, "", "LocalTemporalFusion"], [13, 1, 1, "", "LocalTemporalFusionV1"], [13, 1, 1, "", "LocalTemporalFusionV2"], [13, 1, 1, "", "LocalTemporalFusionV3"], [13, 1, 1, "", "TemporalFusion"], [13, 1, 1, "", "TemporalLidarFusion"]], "cosense3d.modules.fusion.temporal_fusion.LocalNaiveFusion": [[13, 2, 1, "", "forward"], [13, 2, 1, "", "gather_topk"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion": [[13, 2, 1, "", "embed_pos"], [13, 2, 1, "", "forward"], [13, 2, 1, "", "gather_topk"], [13, 2, 1, "", "init_weights"], [13, 2, 1, "", "temporal_alignment"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV1": [[13, 2, 1, "", "forward"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV2": [[13, 2, 1, "", "forward"], [13, 5, 1, "", 
"training"]], "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3": [[13, 2, 1, "", "embed_pos"], [13, 2, 1, "", "forward"], [13, 2, 1, "", "gather_topk"], [13, 2, 1, "", "init_weights"], [13, 2, 1, "", "temporal_alignment"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.temporal_fusion.TemporalFusion": [[13, 2, 1, "", "embed_pos"], [13, 2, 1, "", "forward"], [13, 2, 1, "", "gather_topk"], [13, 2, 1, "", "init_weights"], [13, 2, 1, "", "temporal_alignment"], [13, 5, 1, "", "training"]], "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion": [[13, 2, 1, "", "embed_pos"], [13, 2, 1, "", "forward"], [13, 2, 1, "", "gather_topk"], [13, 2, 1, "", "init_weights"], [13, 2, 1, "", "temporal_alignment"], [13, 5, 1, "", "training"]], "cosense3d.modules.heads": [[14, 0, 0, "-", "bev"], [14, 0, 0, "-", "bev_dense"], [14, 0, 0, "-", "det_anchor_dense"], [14, 0, 0, "-", "det_anchor_sparse"], [14, 0, 0, "-", "det_center_sparse"], [14, 0, 0, "-", "det_roi_refine"], [14, 0, 0, "-", "img_focal"], [14, 0, 0, "-", "lidar_petr_head"], [14, 0, 0, "-", "multitask_head"], [14, 0, 0, "-", "nbr_attn_bev"], [14, 0, 0, "-", "petr_head"], [14, 0, 0, "-", "query_guided_petr_head"]], "cosense3d.modules.heads.bev": [[14, 1, 1, "", "BEV"], [14, 1, 1, "", "BEVMultiResolution"], [14, 1, 1, "", "ContiAttnBEV"], [14, 1, 1, "", "ContiGevBEV"], [14, 1, 1, "", "ContinuousBEV"]], "cosense3d.modules.heads.bev.BEV": [[14, 2, 1, "", "down_sample"], [14, 2, 1, "", "format_input"], [14, 2, 1, "", "format_output"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.bev.BEVMultiResolution": [[14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.bev.ContiAttnBEV": [[14, 2, 1, "", "get_evidence"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.bev.ContiGevBEV": [[14, 2, 1, "", "get_evidence"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.bev.ContinuousBEV": [[14, 2, 1, "", "down_sample"], [14, 2, 1, "", "format_input"], [14, 2, 1, "", "format_output"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "get_evidence"], [14, 2, 1, "", "loss"], [14, 2, 1, "", "sample_reference_points"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.bev_dense": [[14, 1, 1, "", "BevRoIDenseHead"], [14, 1, 1, "", "BevSegHead"]], "cosense3d.modules.heads.bev_dense.BevRoIDenseHead": [[14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.bev_dense.BevSegHead": [[14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_anchor_dense": [[14, 1, 1, "", "DetAnchorDense"]], "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense": [[14, 2, 1, "", "add_sin_difference"], [14, 2, 1, "", "format_output"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "init_weights"], [14, 2, 1, "", "loss"], [14, 2, 1, "", "predictions"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_anchor_sparse": [[14, 1, 1, "", "DetAnchorSparse"]], "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse": [[14, 2, 1, "", "add_sin_difference"], [14, 2, 1, "", "format"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "init_weights"], [14, 2, 1, "", "loss"], [14, 2, 1, "", "predictions"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_center_sparse": [[14, 1, 1, "", "DetCenterSparse"], [14, 1, 1, "", "MultiLvlDetCenterSparse"], [14, 1, 1, "", "SeparatedClsHead"], [14, 1, 1, "", "UnitedClsHead"], [14, 1, 1, "", "UnitedRegHead"]], 
"cosense3d.modules.heads.det_center_sparse.DetCenterSparse": [[14, 2, 1, "", "format_input"], [14, 2, 1, "", "format_output"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 2, 1, "", "predictions"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse": [[14, 2, 1, "", "format_input"], [14, 2, 1, "", "format_output"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 2, 1, "", "predictions"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_center_sparse.SeparatedClsHead": [[14, 2, 1, "", "forward"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_center_sparse.UnitedClsHead": [[14, 2, 1, "", "forward"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_center_sparse.UnitedRegHead": [[14, 2, 1, "", "forward"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.det_roi_refine": [[14, 1, 1, "", "KeypointRoIHead"]], "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead": [[14, 2, 1, "", "forward"], [14, 2, 1, "", "get_dense_grid_points"], [14, 2, 1, "", "get_global_grid_points_of_roi"], [14, 2, 1, "", "loss"], [14, 2, 1, "", "roi_grid_pool"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.img_focal": [[14, 1, 1, "", "ImgFocal"]], "cosense3d.modules.heads.img_focal.ImgFocal": [[14, 2, 1, "", "apply_center_offset"], [14, 2, 1, "", "apply_ltrb"], [14, 2, 1, "", "format_output"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.lidar_petr_head": [[14, 1, 1, "", "LidarPETRHead"]], "cosense3d.modules.heads.lidar_petr_head.LidarPETRHead": [[14, 2, 1, "", "format_input"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "gather_topk"], [14, 2, 1, "", "init_weights"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.multitask_head": [[14, 1, 1, "", "MultiTaskHead"]], "cosense3d.modules.heads.multitask_head.MultiTaskHead": [[14, 2, 1, "", "forward"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.nbr_attn_bev": [[14, 1, 1, "", "NbrAttentionBEV"]], "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV": [[14, 2, 1, "", "downsample_tgt_pts"], [14, 2, 1, "", "format_input"], [14, 2, 1, "", "format_output"], [14, 2, 1, "", "forward"], [14, 2, 1, "", "generate_reference_points"], [14, 2, 1, "", "get_tgt"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.petr_head": [[14, 1, 1, "", "PETRHead"]], "cosense3d.modules.heads.petr_head.PETRHead": [[14, 2, 1, "", "forward"], [14, 2, 1, "", "init_weights"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.heads.query_guided_petr_head": [[14, 1, 1, "", "QueryGuidedPETRHead"]], "cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead": [[14, 2, 1, "", "forward"], [14, 2, 1, "", "get_pred_boxes"], [14, 2, 1, "", "get_predictions"], [14, 2, 1, "", "init_weights"], [14, 2, 1, "", "loss"], [14, 5, 1, "", "training"]], "cosense3d.modules.losses": [[15, 0, 0, "-", "base_loss"], [15, 3, 1, "", "build_loss"], [15, 0, 0, "-", "common"], [15, 0, 0, "-", "edl"], [15, 0, 0, "-", "focal_loss"], [15, 0, 0, "-", "iou_loss"], [15, 0, 0, "-", "l1_loss"], [15, 0, 0, "-", "vanilla_seg_loss"]], "cosense3d.modules.losses.base_loss": [[15, 1, 1, "", "BaseLoss"]], "cosense3d.modules.losses.base_loss.BaseLoss": [[15, 2, 1, "", "forward"], [15, 2, 1, "", "loss"], [15, 4, 1, "", "name"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.common": [[15, 3, 1, "", "cross_entroy_with_logits"], [15, 3, 1, "", "focal_loss"], [15, 3, 1, "", 
"indices_to_dense_vector"], [15, 3, 1, "", "sigmoid_binary_cross_entropy"], [15, 3, 1, "", "weighted_l1_loss"], [15, 3, 1, "", "weighted_sigmoid_binary_cross_entropy"], [15, 3, 1, "", "weighted_smooth_l1_loss"]], "cosense3d.modules.losses.edl": [[15, 1, 1, "", "EDLLoss"], [15, 3, 1, "", "edl_mse_loss"], [15, 3, 1, "", "evidence_to_conf_unc"], [15, 3, 1, "", "exp_evidence"], [15, 3, 1, "", "kl_divergence"], [15, 3, 1, "", "loglikelihood_loss"], [15, 3, 1, "", "mse_loss"], [15, 3, 1, "", "pred_to_conf_unc"], [15, 3, 1, "", "relu_evidence"], [15, 3, 1, "", "softplus_evidence"]], "cosense3d.modules.losses.edl.EDLLoss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.focal_loss": [[15, 1, 1, "", "FocalLoss"], [15, 1, 1, "", "GaussianFocalLoss"], [15, 1, 1, "", "QualityFocalLoss"], [15, 3, 1, "", "py_focal_loss_with_prob"], [15, 3, 1, "", "py_sigmoid_focal_loss"], [15, 3, 1, "", "quality_focal_loss"], [15, 3, 1, "", "quality_focal_loss_with_prob"]], "cosense3d.modules.losses.focal_loss.FocalLoss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.focal_loss.GaussianFocalLoss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.focal_loss.QualityFocalLoss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.iou_loss": [[15, 1, 1, "", "GIoULoss"], [15, 1, 1, "", "IoULoss"]], "cosense3d.modules.losses.iou_loss.GIoULoss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.iou_loss.IoULoss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.l1_loss": [[15, 1, 1, "", "L1Loss"], [15, 1, 1, "", "SmoothL1Loss"]], "cosense3d.modules.losses.l1_loss.L1Loss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.l1_loss.SmoothL1Loss": [[15, 2, 1, "", "loss"], [15, 5, 1, "", "training"]], "cosense3d.modules.losses.vanilla_seg_loss": [[15, 1, 1, "", "VanillaSegLoss"]], "cosense3d.modules.losses.vanilla_seg_loss.VanillaSegLoss": [[15, 2, 1, "", "forward"], [15, 5, 1, "", "training"]], "cosense3d.modules.necks": [[16, 0, 0, "-", "cpm_composer"], [16, 0, 0, "-", "dilation_spconv"], [16, 0, 0, "-", "formatting"]], "cosense3d.modules.necks.cpm_composer": [[16, 1, 1, "", "KeypointComposer"]], "cosense3d.modules.necks.cpm_composer.KeypointComposer": [[16, 2, 1, "", "forward"], [16, 5, 1, "", "training"]], "cosense3d.modules.necks.dilation_spconv": [[16, 1, 1, "", "DilationSpconv"], [16, 1, 1, "", "DilationSpconvAblation"]], "cosense3d.modules.necks.dilation_spconv.DilationSpconv": [[16, 2, 1, "", "format_output"], [16, 2, 1, "", "forward"], [16, 2, 1, "", "get_conv_layer"], [16, 2, 1, "", "to_gpu"], [16, 5, 1, "", "training"]], "cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation": [[16, 2, 1, "", "format_output"], [16, 2, 1, "", "forward"], [16, 2, 1, "", "get_conv_layer"], [16, 2, 1, "", "to_gpu"], [16, 5, 1, "", "training"]], "cosense3d.modules.necks.formatting": [[16, 1, 1, "", "DenseToSparse"], [16, 1, 1, "", "DetDenseToSparse"], [16, 1, 1, "", "FPVRCNNToLTS"]], "cosense3d.modules.necks.formatting.DenseToSparse": [[16, 2, 1, "", "forward"], [16, 2, 1, "", "get_centers"], [16, 5, 1, "", "training"]], "cosense3d.modules.necks.formatting.DetDenseToSparse": [[16, 2, 1, "", "forward"], [16, 2, 1, "", "get_centers"], [16, 5, 1, "", "training"]], "cosense3d.modules.necks.formatting.FPVRCNNToLTS": [[16, 2, 1, "", "forward"], [16, 2, 1, "", "get_centers"], [16, 5, 1, "", "training"]], 
"cosense3d.modules.plugin": [[17, 0, 0, "-", "attn"], [17, 0, 0, "-", "bev_rpn"], [17, 3, 1, "", "build_plugin_layer"], [17, 3, 1, "", "build_plugin_module"], [17, 0, 0, "-", "downsample_conv"], [17, 0, 0, "-", "flash_attn"], [17, 0, 0, "-", "fpn"], [17, 0, 0, "-", "gevbev_decoder"], [17, 3, 1, "", "infer_abbr"], [17, 0, 0, "-", "mink_spconv"], [17, 0, 0, "-", "naive_compressor"], [17, 0, 0, "-", "pillar_encoder"], [17, 0, 0, "-", "ssfa"], [17, 0, 0, "-", "target_assigners"], [17, 0, 0, "-", "transformer"], [17, 0, 0, "-", "voxel_encoder"], [17, 0, 0, "-", "voxel_generator"], [17, 0, 0, "-", "voxnet_utils"], [17, 0, 0, "-", "vsa"]], "cosense3d.modules.plugin.attn": [[17, 1, 1, "", "NeighborhoodAttention"], [17, 1, 1, "", "ScaledDotProductAttention"]], "cosense3d.modules.plugin.attn.NeighborhoodAttention": [[17, 2, 1, "", "coor_to_indices"], [17, 2, 1, "", "forward"], [17, 2, 1, "", "get_nbr_mapping"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.attn.ScaledDotProductAttention": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.bev_rpn": [[17, 1, 1, "", "Conv2d"], [17, 1, 1, "", "CustomRPN"], [17, 1, 1, "", "RPN"]], "cosense3d.modules.plugin.bev_rpn.Conv2d": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.bev_rpn.CustomRPN": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.bev_rpn.RPN": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.downsample_conv": [[17, 1, 1, "", "DoubleConv"], [17, 1, 1, "", "DownsampleConv"]], "cosense3d.modules.plugin.downsample_conv.DoubleConv": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.downsample_conv.DownsampleConv": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.flash_attn": [[17, 1, 1, "", "FlashAttention"], [17, 1, 1, "", "FlashMHA"], [17, 3, 1, "", "flash_attn_unpadded_kvpacked_test"], [17, 3, 1, "", "index_first_axis"]], "cosense3d.modules.plugin.flash_attn.FlashAttention": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.flash_attn.FlashMHA": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.fpn": [[17, 1, 1, "", "FPN"]], "cosense3d.modules.plugin.fpn.FPN": [[17, 2, 1, "", "forward"], [17, 2, 1, "", "init_weights"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.gevbev_decoder": [[17, 1, 1, "", "GevBEVDecoder"]], "cosense3d.modules.plugin.gevbev_decoder.GevBEVDecoder": [[17, 2, 1, "", "coor_to_indices"], [17, 2, 1, "", "forward"], [17, 2, 1, "", "get_nbr_mapping"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.mink_spconv": [[17, 1, 1, "", "Spconv"]], "cosense3d.modules.plugin.mink_spconv.Spconv": [[17, 2, 1, "", "forward"], [17, 2, 1, "", "get_2d_stensor"], [17, 2, 1, "", "get_conv_layer"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.naive_compressor": [[17, 1, 1, "", "NaiveCompressor"]], "cosense3d.modules.plugin.naive_compressor.NaiveCompressor": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.pillar_encoder": [[17, 1, 1, "", "PFNLayer"], [17, 1, 1, "", "PillarEncoder"]], "cosense3d.modules.plugin.pillar_encoder.PFNLayer": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.pillar_encoder.PillarEncoder": [[17, 4, 1, "", "absolute_xyz_dim"], [17, 2, 1, "", "compose_voxel_feature"], [17, 4, 1, "", "distance_dim"], [17, 2, 1, "", "forward"], [17, 2, 1, "", 
"get_paddings_indicator"], [17, 4, 1, "", "intensity_dim"], [17, 5, 1, "", "training"], [17, 4, 1, "", "xyz_dim"]], "cosense3d.modules.plugin.ssfa": [[17, 1, 1, "", "SSFA"], [17, 3, 1, "", "get_conv_layers"]], "cosense3d.modules.plugin.ssfa.SSFA": [[17, 2, 1, "", "forward"], [17, 2, 1, "", "init_weights"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.target_assigners": [[17, 1, 1, "", "BEVBoxAssigner"], [17, 1, 1, "", "BEVCenternessAssigner"], [17, 1, 1, "", "BEVPointAssigner"], [17, 1, 1, "", "BEVSemsegAssigner"], [17, 1, 1, "", "BaseAssigner"], [17, 1, 1, "", "BoxAnchorAssigner"], [17, 1, 1, "", "BoxCenterAssigner"], [17, 1, 1, "", "BoxSparseAnchorAssigner"], [17, 1, 1, "", "ContiBEVAssigner"], [17, 1, 1, "", "DiscreteBEVAssigner"], [17, 1, 1, "", "HeatmapAssigner"], [17, 1, 1, "", "HungarianAssigner2D"], [17, 1, 1, "", "HungarianAssigner3D"], [17, 1, 1, "", "MatchCost"], [17, 1, 1, "", "RoIBox3DAssigner"], [17, 1, 1, "", "RoadLineAssigner"], [17, 3, 1, "", "pos_neg_sampling"], [17, 3, 1, "", "sample_mining"]], "cosense3d.modules.plugin.target_assigners.BEVBoxAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "get_labels_single_head"]], "cosense3d.modules.plugin.target_assigners.BEVCenternessAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "get_labels_single_head"]], "cosense3d.modules.plugin.target_assigners.BEVPointAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "downsample_tgt_pts"], [17, 2, 1, "", "get_predictions"]], "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "down_sample_pred_pts"], [17, 2, 1, "", "downsample_tgt_pts"], [17, 2, 1, "", "filter_range"], [17, 2, 1, "", "get_obs_mask"], [17, 2, 1, "", "get_predictions"], [17, 2, 1, "", "pts_to_inds"]], "cosense3d.modules.plugin.target_assigners.BaseAssigner": [[17, 2, 1, "", "assign"]], "cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "box_overlaps"], [17, 2, 1, "", "get_anchor_template"], [17, 2, 1, "", "get_predictions"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.target_assigners.BoxCenterAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "get_predictions"], [17, 2, 1, "", "pts_to_indices"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "box_overlaps"], [17, 2, 1, "", "get_anchor_template"], [17, 2, 1, "", "get_predictions"], [17, 2, 1, "", "me_coor_to_grid_indices"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.target_assigners.ContiBEVAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "get_predictions"], [17, 2, 1, "", "sample_dynamic_tgt_pts"]], "cosense3d.modules.plugin.target_assigners.DiscreteBEVAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "get_obs_mask"], [17, 2, 1, "", "get_predictions"], [17, 2, 1, "", "pts_to_inds"]], "cosense3d.modules.plugin.target_assigners.HeatmapAssigner": [[17, 2, 1, "", "assign"], [17, 2, 1, "", "draw_heatmap_gaussian"]], "cosense3d.modules.plugin.target_assigners.HungarianAssigner2D": [[17, 2, 1, "", "assign"]], "cosense3d.modules.plugin.target_assigners.HungarianAssigner3D": [[17, 2, 1, "", "assign"]], "cosense3d.modules.plugin.target_assigners.MatchCost": [[17, 2, 1, "", "bboxl1"], [17, 2, 1, "", "binary_focal_loss"], [17, 2, 1, "", "build"], [17, 2, 1, "", "classification"], [17, 2, 1, "", "focal_loss"], [17, 2, 1, "", "giou"], [17, 2, 1, "", "iou"], [17, 2, 1, "", "l1"]], "cosense3d.modules.plugin.target_assigners.RoIBox3DAssigner": 
[[17, 2, 1, "", "assign"], [17, 2, 1, "", "get_predictions"]], "cosense3d.modules.plugin.target_assigners.RoadLineAssigner": [[17, 2, 1, "", "assign"]], "cosense3d.modules.plugin.transformer": [[17, 1, 1, "", "FFN"], [17, 1, 1, "", "MultiHeadAttentionWrapper"], [17, 1, 1, "", "MultiheadAttention"], [17, 1, 1, "", "MultiheadFlashAttention"], [17, 1, 1, "", "PETRTemporalTransformer"], [17, 1, 1, "", "PETRTransformer"], [17, 1, 1, "", "TransformerDecoder"], [17, 1, 1, "", "TransformerDecoderLayer"], [17, 1, 1, "", "TransformerLayerSequence"], [17, 3, 1, "", "build_module"]], "cosense3d.modules.plugin.transformer.FFN": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper": [[17, 5, 1, "", "bias_k"], [17, 5, 1, "", "bias_v"], [17, 2, 1, "", "forward"], [17, 2, 1, "", "forward_fp16"], [17, 2, 1, "", "forward_fp32"]], "cosense3d.modules.plugin.transformer.MultiheadAttention": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.transformer.MultiheadFlashAttention": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.transformer.PETRTemporalTransformer": [[17, 2, 1, "", "forward"], [17, 2, 1, "", "init_weights"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.transformer.PETRTransformer": [[17, 2, 1, "", "forward"], [17, 2, 1, "", "init_weights"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.transformer.TransformerDecoder": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.transformer.TransformerDecoderLayer": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.transformer.TransformerLayerSequence": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.voxel_encoder": [[17, 1, 1, "", "MeanVFE"]], "cosense3d.modules.plugin.voxel_encoder.MeanVFE": [[17, 2, 1, "", "forward"], [17, 2, 1, "", "get_output_feature_dim"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.voxel_generator": [[17, 1, 1, "", "VoxelGenerator"]], "cosense3d.modules.plugin.voxnet_utils": [[17, 1, 1, "", "CML"], [17, 1, 1, "", "CMLSparse"], [17, 1, 1, "", "Conv3d"]], "cosense3d.modules.plugin.voxnet_utils.CML": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.voxnet_utils.CMLSparse": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.voxnet_utils.Conv3d": [[17, 2, 1, "", "forward"], [17, 5, 1, "", "training"]], "cosense3d.modules.plugin.vsa": [[17, 1, 1, "", "VoxelSetAbstraction"], [17, 3, 1, "", "bilinear_interpolate_torch"]], "cosense3d.modules.plugin.vsa.VoxelSetAbstraction": [[17, 2, 1, "", "forward"], [17, 2, 1, "", "get_sampled_points"], [17, 2, 1, "", "interpolate_from_bev_features"], [17, 5, 1, "", "training"]], "cosense3d.modules.projection": [[18, 0, 0, "-", "fax"], [18, 0, 0, "-", "petr"], [18, 0, 0, "-", "spatial_transform"]], "cosense3d.modules.projection.fax": [[18, 1, 1, "", "FAXModule"], [18, 3, 1, "", "ResNetBottleNeck"]], "cosense3d.modules.projection.fax.FAXModule": [[18, 2, 1, "", "forward"], [18, 5, 1, "", "training"]], "cosense3d.modules.projection.petr": [[18, 1, 1, "", "PETR"]], "cosense3d.modules.projection.petr.PETR": [[18, 2, 1, "", "format_input"], [18, 2, 1, "", "forward"], [18, 2, 1, "", "gather_topk"], [18, 2, 1, "", "img_position_embeding"], [18, 2, 1, "", "init_weights"], [18, 5, 1, "", "training"]], "cosense3d.modules.projection.spatial_transform": [[18, 1, 1, "", "STTF"]], 
"cosense3d.modules.projection.spatial_transform.STTF": [[18, 2, 1, "", "forward"], [18, 5, 1, "", "training"]], "cosense3d.modules.utils": [[19, 0, 0, "-", "box_coder"], [19, 3, 1, "", "build_torch_module"], [19, 0, 0, "-", "common"], [19, 0, 0, "-", "conv"], [19, 0, 0, "-", "edl_utils"], [19, 0, 0, "-", "gaussian_utils"], [19, 0, 0, "-", "init"], [19, 0, 0, "-", "me_utils"], [19, 0, 0, "-", "misc"], [19, 0, 0, "-", "nbr_attn"], [19, 0, 0, "-", "norm"], [19, 0, 0, "-", "positional_encoding"]], "cosense3d.modules.utils.box_coder": [[19, 1, 1, "", "BoxPredCoder"], [19, 1, 1, "", "CenterBoxCoder"], [19, 1, 1, "", "ResidualBoxCoder"], [19, 3, 1, "", "build_box_coder"]], "cosense3d.modules.utils.box_coder.BoxPredCoder": [[19, 2, 1, "", "decode"], [19, 2, 1, "", "encode"]], "cosense3d.modules.utils.box_coder.CenterBoxCoder": [[19, 2, 1, "", "decode"], [19, 2, 1, "", "encode"]], "cosense3d.modules.utils.box_coder.ResidualBoxCoder": [[19, 2, 1, "", "decode"], [19, 2, 1, "", "decode_direction"], [19, 2, 1, "", "encode"], [19, 2, 1, "", "encode_direction"]], "cosense3d.modules.utils.common": [[19, 3, 1, "", "bias_init_with_prob"], [19, 3, 1, "", "cat_coor_with_idx"], [19, 3, 1, "", "cat_name_str"], [19, 3, 1, "", "clip_sigmoid"], [19, 3, 1, "", "draw_sample_prob"], [19, 3, 1, "", "fuse_batch_indices"], [19, 3, 1, "", "get_conv2d_layers"], [19, 3, 1, "", "get_norm_layer"], [19, 3, 1, "", "get_voxel_centers"], [19, 3, 1, "", "instantiate"], [19, 3, 1, "", "inverse_sigmoid"], [19, 3, 1, "", "limit_period"], [19, 3, 1, "", "linear_last"], [19, 3, 1, "", "linear_layers"], [19, 3, 1, "", "meshgrid"], [19, 3, 1, "", "meshgrid_cross"], [19, 3, 1, "", "pad_l"], [19, 3, 1, "", "pad_r"], [19, 3, 1, "", "topk_gather"], [19, 3, 1, "", "weighted_mahalanobis_dists"], [19, 3, 1, "", "xavier_init"]], "cosense3d.modules.utils.conv": [[19, 1, 1, "", "ConvModule"], [19, 3, 1, "", "build_conv_layer"], [19, 3, 1, "", "build_padding_layer"]], "cosense3d.modules.utils.conv.ConvModule": [[19, 2, 1, "", "forward"], [19, 2, 1, "", "init_weights"], [19, 4, 1, "", "norm"], [19, 5, 1, "", "training"]], "cosense3d.modules.utils.edl_utils": [[19, 3, 1, "", "logit_to_edl"]], "cosense3d.modules.utils.gaussian_utils": [[19, 3, 1, "", "center_to_img_coor"], [19, 3, 1, "", "cornernet_gaussian_radius"], [19, 3, 1, "", "draw_gaussian_map"], [19, 3, 1, "", "gaussian_2d"], [19, 3, 1, "", "gaussian_radius"], [19, 3, 1, "", "mahalanobis_dists_2d"], [19, 3, 1, "", "weighted_mahalanobis_dists"]], "cosense3d.modules.utils.init": [[19, 3, 1, "", "bias_init_with_prob"], [19, 3, 1, "", "constant_init"], [19, 3, 1, "", "kaiming_init"], [19, 3, 1, "", "normal_init"], [19, 3, 1, "", "trunc_normal_init"], [19, 3, 1, "", "uniform_init"], [19, 3, 1, "", "xavier_init"]], "cosense3d.modules.utils.me_utils": [[19, 3, 1, "", "bev_sparse_to_dense"], [19, 3, 1, "", "devoxelize_with_centroids"], [19, 3, 1, "", "downsample_embeddings"], [19, 3, 1, "", "downsample_points"], [19, 3, 1, "", "get_conv_block"], [19, 3, 1, "", "get_kernel_map_and_out_key"], [19, 3, 1, "", "indices2metric"], [19, 3, 1, "", "me_coor_to_grid_indices"], [19, 3, 1, "", "metric2indices"], [19, 3, 1, "", "mink_coor_limit"], [19, 3, 1, "", "minkconv_conv_block"], [19, 3, 1, "", "minkconv_layer"], [19, 3, 1, "", "normalize_centroids"], [19, 3, 1, "", "normalize_points"], [19, 3, 1, "", "prepare_input_data"], [19, 3, 1, "", "sparse_to_dense"], [19, 3, 1, "", "stride_centroids"], [19, 3, 1, "", "update_me_essentials"], [19, 3, 1, "", "voxelize_with_centroids"]], "cosense3d.modules.utils.misc": 
[[19, 1, 1, "", "MLN"], [19, 1, 1, "", "MLN2"], [19, 1, 1, "", "SELayer_Linear"]], "cosense3d.modules.utils.misc.MLN": [[19, 2, 1, "", "forward"], [19, 2, 1, "", "reset_parameters"], [19, 5, 1, "", "training"]], "cosense3d.modules.utils.misc.MLN2": [[19, 2, 1, "", "forward"], [19, 2, 1, "", "reset_parameters"], [19, 5, 1, "", "training"]], "cosense3d.modules.utils.misc.SELayer_Linear": [[19, 2, 1, "", "forward"], [19, 5, 1, "", "training"]], "cosense3d.modules.utils.nbr_attn": [[19, 1, 1, "", "NeighborhoodAttention"]], "cosense3d.modules.utils.nbr_attn.NeighborhoodAttention": [[19, 2, 1, "", "forward"], [19, 5, 1, "", "training"]], "cosense3d.modules.utils.norm": [[19, 3, 1, "", "build_norm_layer"]], "cosense3d.modules.utils.positional_encoding": [[19, 3, 1, "", "coor2ratio"], [19, 3, 1, "", "img_locations"], [19, 3, 1, "", "nerf_positional_encoding"], [19, 3, 1, "", "pos2posemb1d"], [19, 3, 1, "", "pos2posemb2d"], [19, 3, 1, "", "pos2posemb3d"], [19, 3, 1, "", "ratio2coord"]], "cosense3d.utils": [[20, 0, 0, "-", "box_utils"], [20, 0, 0, "-", "eval_detection_utils"], [20, 0, 0, "-", "iou2d_calculator"], [20, 0, 0, "-", "logger"], [20, 0, 0, "-", "lr_scheduler"], [20, 0, 0, "-", "metrics"], [20, 0, 0, "-", "misc"], [20, 0, 0, "-", "module_utils"], [20, 0, 0, "-", "pclib"], [20, 0, 0, "-", "tensor_utils"], [20, 0, 0, "-", "train_utils"], [20, 0, 0, "-", "vislib"]], "cosense3d.utils.box_utils": [[20, 3, 1, "", "bbox_cxcywh_to_xyxy"], [20, 3, 1, "", "bbox_xyxy_to_cxcywh"], [20, 3, 1, "", "boxes3d_to_standup_bboxes"], [20, 3, 1, "", "boxes_to_corners_2d"], [20, 3, 1, "", "boxes_to_corners_3d"], [20, 3, 1, "", "compute_iou"], [20, 3, 1, "", "convert_box_to_polygon"], [20, 3, 1, "", "corners_to_boxes_3d"], [20, 3, 1, "", "decode_boxes"], [20, 3, 1, "", "denormalize_bbox"], [20, 3, 1, "", "enlarge_box3d"], [20, 3, 1, "", "find_rigid_alignment"], [20, 3, 1, "", "limit_period"], [20, 3, 1, "", "mask_boxes_outside_range_numpy"], [20, 3, 1, "", "mask_boxes_outside_range_torch"], [20, 3, 1, "", "normalize_bbox"], [20, 3, 1, "", "remove_points_in_boxes3d"], [20, 3, 1, "", "transform_boxes_3d"]], "cosense3d.utils.eval_detection_utils": [[20, 3, 1, "", "cal_ap_all_point"], [20, 3, 1, "", "cal_precision_recall"], [20, 3, 1, "", "calculate_ap"], [20, 3, 1, "", "caluclate_tp_fp"], [20, 3, 1, "", "eval_final_results"], [20, 3, 1, "", "ops_cal_tp"], [20, 3, 1, "", "voc_ap"]], "cosense3d.utils.iou2d_calculator": [[20, 3, 1, "", "bbox_overlaps"], [20, 3, 1, "", "cast_tensor_type"], [20, 3, 1, "", "fp16_clamp"]], "cosense3d.utils.logger": [[20, 1, 1, "", "LogMeter"], [20, 1, 1, "", "SmoothedValue"], [20, 1, 1, "", "TestLogger"], [20, 3, 1, "", "setup_logger"]], "cosense3d.utils.logger.LogMeter": [[20, 2, 1, "", "add_meter"], [20, 2, 1, "", "log"], [20, 2, 1, "", "update"]], "cosense3d.utils.logger.SmoothedValue": [[20, 4, 1, "", "avg"], [20, 4, 1, "", "global_avg"], [20, 4, 1, "", "max"], [20, 4, 1, "", "median"], [20, 2, 1, "", "update"], [20, 4, 1, "", "value"]], "cosense3d.utils.logger.TestLogger": [[20, 2, 1, "", "log"]], "cosense3d.utils.lr_scheduler": [[20, 1, 1, "", "LRUpdater"], [20, 1, 1, "", "TransformerAdaptiveScheduler"], [20, 3, 1, "", "build_lr_scheduler"]], "cosense3d.utils.lr_scheduler.LRUpdater": [[20, 2, 1, "", "get_last_lr"], [20, 2, 1, "", "load_state_dict"], [20, 2, 1, "", "state_dict"], [20, 2, 1, "", "step_epoch"], [20, 2, 1, "", "step_itr"]], "cosense3d.utils.lr_scheduler.TransformerAdaptiveScheduler": [[20, 2, 1, "", "calc_lr"], [20, 2, 1, "", "get_lr"]], "cosense3d.utils.metrics": [[20, 
1, 1, "", "Metric"], [20, 1, 1, "", "MetricBev"], [20, 1, 1, "", "MetricMOT"], [20, 1, 1, "", "MetricObjDet"], [20, 1, 1, "", "MetricSemSeg"]], "cosense3d.utils.metrics.Metric": [[20, 2, 1, "", "add_samples"], [20, 2, 1, "", "save_detections"], [20, 2, 1, "", "summary"]], "cosense3d.utils.metrics.MetricBev": [[20, 2, 1, "", "add_samples"], [20, 2, 1, "", "format_str"], [20, 2, 1, "", "iou"], [20, 2, 1, "", "summary"], [20, 2, 1, "", "summary_hook"]], "cosense3d.utils.metrics.MetricMOT": [[20, 2, 1, "", "add_samples"]], "cosense3d.utils.metrics.MetricObjDet": [[20, 2, 1, "", "add_sample"], [20, 2, 1, "", "add_samples"], [20, 2, 1, "", "cal_ap_11_point"], [20, 2, 1, "", "cal_ap_all_point"], [20, 2, 1, "", "cal_precision_recall"], [20, 2, 1, "", "save_detections"], [20, 2, 1, "", "summary"]], "cosense3d.utils.metrics.MetricSemSeg": [[20, 2, 1, "", "add_samples"], [20, 2, 1, "", "cal_ious_and_accs"], [20, 2, 1, "", "save_detections"]], "cosense3d.utils.misc": [[20, 3, 1, "", "check_numpy_to_torch"], [20, 3, 1, "", "ensure_dir"], [20, 3, 1, "", "list_dirs"], [20, 3, 1, "", "load_from_pl_state_dict"], [20, 3, 1, "", "load_json"], [20, 3, 1, "", "load_yaml"], [20, 3, 1, "", "multi_apply"], [20, 3, 1, "", "pad_list_to_array_np"], [20, 3, 1, "", "save_json"], [20, 3, 1, "", "save_yaml"], [20, 3, 1, "", "setup_logger"], [20, 3, 1, "", "torch_tensor_to_numpy"], [20, 3, 1, "", "update_dict"]], "cosense3d.utils.module_utils": [[20, 3, 1, "", "build_dropout"], [20, 3, 1, "", "build_norm_layer"], [20, 3, 1, "", "digit_version"], [20, 3, 1, "", "get_target_module"], [20, 3, 1, "", "instantiate_target_module"]], "cosense3d.utils.pclib": [[20, 3, 1, "", "cart2cyl"], [20, 3, 1, "", "cyl2cart"], [20, 3, 1, "", "get_tf_matrix_torch"], [20, 3, 1, "", "header"], [20, 3, 1, "", "lidar_bin2bin"], [20, 3, 1, "", "lidar_bin2pcd"], [20, 3, 1, "", "lidar_bin2pcd_o3d"], [20, 3, 1, "", "lidar_ply2bin"], [20, 3, 1, "", "load_pcd"], [20, 3, 1, "", "mask_points_in_box"], [20, 3, 1, "", "mask_points_in_range"], [20, 3, 1, "", "mask_values_in_range"], [20, 3, 1, "", "mat_pitch"], [20, 3, 1, "", "mat_roll"], [20, 3, 1, "", "mat_yaw"], [20, 3, 1, "", "pose2tf"], [20, 3, 1, "", "pose_err_global2relative_torch"], [20, 3, 1, "", "pose_to_transformation"], [20, 3, 1, "", "project_points_by_matrix_torch"], [20, 3, 1, "", "read_ply"], [20, 3, 1, "", "rotate3d"], [20, 3, 1, "", "rotate_box_corners_with_tf_np"], [20, 3, 1, "", "rotate_points_along_z_np"], [20, 3, 1, "", "rotate_points_along_z_torch"], [20, 3, 1, "", "rotate_points_batch"], [20, 3, 1, "", "rotate_points_with_tf_np"], [20, 3, 1, "", "rotation_mat2euler_torch"], [20, 3, 1, "", "rotation_matrix"], [20, 3, 1, "", "save_cosense_ply"], [20, 3, 1, "", "tf2pose"]], "cosense3d.utils.tensor_utils": [[20, 3, 1, "", "check_numpy_to_torch"], [20, 3, 1, "", "pad_list_to_array_torch"]], "cosense3d.utils.train_utils": [[20, 3, 1, "", "build_lr_scheduler"], [20, 3, 1, "", "build_optimizer"], [20, 3, 1, "", "clip_grads"], [20, 3, 1, "", "get_gpu_architecture"], [20, 3, 1, "", "is_tensor_to_cuda"], [20, 3, 1, "", "load_model_dict"], [20, 3, 1, "", "load_tensors_to_gpu"], [20, 3, 1, "", "seed_everything"]], "cosense3d.utils.vislib": [[20, 3, 1, "", "bbx2linset"], [20, 3, 1, "", "draw_2d_bboxes_on_img"], [20, 3, 1, "", "draw_3d_points_boxes_on_img"], [20, 3, 1, "", "draw_box_plt"], [20, 3, 1, "", "draw_matched_boxes"], [20, 3, 1, "", "draw_points_boxes_plt"], [20, 3, 1, "", "get_palette_colors"], [20, 3, 1, "", "o3d_draw_agent_data"], [20, 3, 1, "", "o3d_draw_frame_data"], [20, 3, 1, "", 
"o3d_draw_pcds_bbxs"], [20, 3, 1, "", "o3d_play_sequence"], [20, 3, 1, "", "plot_cavs_points"], [20, 3, 1, "", "plt_draw_frame_data"], [20, 3, 1, "", "update_axis_linset"], [20, 3, 1, "", "update_lineset_vbo"], [20, 3, 1, "", "visualization"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:function", "4": "py:property", "5": "py:attribute"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "function", "Python function"], "4": ["py", "property", "Python property"], "5": ["py", "attribute", "Python attribute"]}, "titleterms": {"cosense3d": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25], "packag": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "subpackag": [0, 1, 5, 7, 10], "modul": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "content": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], "agent": [1, 2, 3, 4, 5, 6], "submodul": [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "center_control": 1, "cav_prototyp": 2, "base_cav": 2, "streamlts_collect": 2, "core": 3, "base_runn": 3, "cav_manag": 3, "data_manag": 3, "paramet": [3, 7, 9, 13, 14, 15, 17, 19, 20], "return": [3, 7, 9, 15, 17, 19, 20], "forward_runn": 3, "gui": [3, 24], "hook": 3, "task_manag": 3, "test_runn": 3, "train_runn": 3, "vis_runn": 3, "util": [4, 5, 19, 20], "deco": 4, "transform": [4, 8, 17], "viewer": [5, 6], "gl_viewer": 5, "img_anno3d_view": 5, "img_view": 5, "output_view": 5, "item": 6, "graph_item": 6, "dataset": [7, 8, 9, 23], "const": 7, "cosense_dataset": 7, "temporal_cosense_dataset": 7, "pipelin": 8, "load": 8, "toolkit": 9, "cosens": 9, "dairv2x": 9, "opv2v": [9, 23], "opv2v_t": 9, "backbone2d": 11, "resnet_encod": 11, "backbone3d": 12, "mink_unet": 12, "pillar_bev": 12, "spconv": 12, "voxelnet": 12, "fusion": 13, "attn_fus": 13, "box_fus": 13, "fax": [13, 18], "keypoint": 13, "maxout_fus": 13, "naive_fus": 13, "spatial_query_fus": 13, "temporal_fus": 13, "head": 14, "bev": 14, "bev_dens": 14, "det_anchor_dens": 14, "det_anchor_spars": 14, "det_center_spars": 14, "det_roi_refin": 14, "img_foc": 14, "lidar_petr_head": 14, "multitask_head": 14, "nbr_attn_bev": 14, "petr_head": 14, "query_guided_petr_head": 14, "loss": 15, "base_loss": 15, "common": [15, 19], "edl": 15, "focal_loss": 15, "iou_loss": 15, "l1_loss": 15, "vanilla_seg_loss": 15, "neck": 16, "cpm_compos": 16, "dilation_spconv": 16, "format": 16, "plugin": 17, "attn": 17, "bev_rpn": 17, "downsample_conv": 17, "flash_attn": 17, "fpn": 17, "gevbev_decod": 17, "mink_spconv": 17, "naive_compressor": 17, "pillar_encod": 17, "ssfa": 17, "target_assign": 17, "voxel_encod": 17, "voxel_gener": 17, "voxnet_util": 17, "vsa": 17, "project": 18, "petr": 18, "spatial_transform": 18, "box_cod": 19, "conv": 19, "edl_util": 19, "gaussian_util": 19, "init": 19, "me_util": 19, "misc": [19, 20], "nbr_attn": 19, "norm": 19, "positional_encod": 19, "box_util": 20, "eval_detection_util": 20, "iou2d_calcul": 20, "logger": 20, "lr_schedul": 20, "metric": 20, "module_util": 20, "pclib": 20, "tensor_util": 20, "train_util": 20, "vislib": 20, "welcom": 21, "opencosense3d": 21, "": 21, "document": 21, "indic": 21, "tabl": 21, "instal": 22, "requir": 22, "option": 22, "via": 22, "bash": 22, "script": 22, "step": 22, "prepar": 23, "opv2vt": 23, "dairv2xt": 23, "The": 24, "structur": 24, "framework": 24, "dataload": 24, 
"runner": 24, "central": 24, "control": 24}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 58}, "alltitles": {"cosense3d package": [[0, "cosense3d-package"]], "Subpackages": [[0, "subpackages"], [1, "subpackages"], [5, "subpackages"], [7, "subpackages"], [10, "subpackages"]], "Module contents": [[0, "module-cosense3d"], [1, "module-cosense3d.agents"], [2, "module-cosense3d.agents.cav_prototype"], [3, "module-cosense3d.agents.core"], [4, "module-cosense3d.agents.utils"], [5, "module-cosense3d.agents.viewer"], [6, "module-cosense3d.agents.viewer.items"], [7, "module-cosense3d.dataset"], [8, "module-cosense3d.dataset.pipeline"], [9, "module-cosense3d.dataset.toolkit"], [10, "module-cosense3d.modules"], [11, "module-cosense3d.modules.backbone2d"], [12, "module-cosense3d.modules.backbone3d"], [13, "module-cosense3d.modules.fusion"], [14, "module-cosense3d.modules.heads"], [15, "module-cosense3d.modules.losses"], [16, "module-cosense3d.modules.necks"], [17, "module-cosense3d.modules.plugin"], [18, "module-cosense3d.modules.projection"], [19, "module-cosense3d.modules.utils"], [20, "module-cosense3d.utils"]], "cosense3d.agents package": [[1, "cosense3d-agents-package"]], "Submodules": [[1, "submodules"], [2, "submodules"], [3, "submodules"], [4, "submodules"], [5, "submodules"], [6, "submodules"], [7, "submodules"], [8, "submodules"], [9, "submodules"], [11, "submodules"], [12, "submodules"], [13, "submodules"], [14, "submodules"], [15, "submodules"], [16, "submodules"], [17, "submodules"], [18, "submodules"], [19, "submodules"], [20, "submodules"]], "cosense3d.agents.center_controller module": [[1, "module-cosense3d.agents.center_controller"]], "cosense3d.agents.cav_prototype package": [[2, "cosense3d-agents-cav-prototype-package"]], "cosense3d.agents.cav_prototype.base_cav module": [[2, "module-cosense3d.agents.cav_prototype.base_cav"]], "cosense3d.agents.cav_prototype.streamLTS_collection module": [[2, "module-cosense3d.agents.cav_prototype.streamLTS_collection"]], "cosense3d.agents.core package": [[3, "cosense3d-agents-core-package"]], "cosense3d.agents.core.base_runner module": [[3, "module-cosense3d.agents.core.base_runner"]], "cosense3d.agents.core.cav_manager module": [[3, "module-cosense3d.agents.core.cav_manager"]], "cosense3d.agents.core.data_manager module": [[3, "module-cosense3d.agents.core.data_manager"]], "Parameters": [[3, "parameters"], [3, "id1"], [7, "parameters"], [9, "parameters"], [9, "id1"], [9, "id3"], [9, "id5"], [9, "id6"], [13, "parameters"], [14, "parameters"], [15, "parameters"], [15, "id1"], [15, "id5"], [17, "parameters"], [17, "id1"], [19, "parameters"], [20, "parameters"], [20, "id13"], [20, "id14"], [20, "id15"], [20, "id17"]], "Returns": [[3, "returns"], [3, "id2"], [7, "returns"], [9, "returns"], [9, "id2"], [9, "id4"], [9, "id7"], [15, "returns"], [17, "returns"], [17, "id2"], [19, "returns"], [20, "returns"], [20, "id16"], [20, "id18"]], "cosense3d.agents.core.forward_runner module": [[3, "module-cosense3d.agents.core.forward_runner"]], "cosense3d.agents.core.gui module": [[3, "module-cosense3d.agents.core.gui"]], "cosense3d.agents.core.hooks module": [[3, "module-cosense3d.agents.core.hooks"]], "cosense3d.agents.core.task_manager module": [[3, 
"module-cosense3d.agents.core.task_manager"]], "cosense3d.agents.core.test_runner module": [[3, "module-cosense3d.agents.core.test_runner"]], "cosense3d.agents.core.train_runner module": [[3, "module-cosense3d.agents.core.train_runner"]], "cosense3d.agents.core.vis_runner module": [[3, "module-cosense3d.agents.core.vis_runner"]], "cosense3d.agents.utils package": [[4, "cosense3d-agents-utils-package"]], "cosense3d.agents.utils.deco module": [[4, "module-cosense3d.agents.utils.deco"]], "cosense3d.agents.utils.transform module": [[4, "module-cosense3d.agents.utils.transform"]], "cosense3d.agents.viewer package": [[5, "cosense3d-agents-viewer-package"]], "cosense3d.agents.viewer.gl_viewer module": [[5, "module-cosense3d.agents.viewer.gl_viewer"]], "cosense3d.agents.viewer.img_anno3d_viewer module": [[5, "module-cosense3d.agents.viewer.img_anno3d_viewer"]], "cosense3d.agents.viewer.img_viewer module": [[5, "module-cosense3d.agents.viewer.img_viewer"]], "cosense3d.agents.viewer.output_viewer module": [[5, "module-cosense3d.agents.viewer.output_viewer"]], "cosense3d.agents.viewer.utils module": [[5, "module-cosense3d.agents.viewer.utils"]], "cosense3d.agents.viewer.items package": [[6, "cosense3d-agents-viewer-items-package"]], "cosense3d.agents.viewer.items.graph_items module": [[6, "module-cosense3d.agents.viewer.items.graph_items"]], "cosense3d.dataset package": [[7, "cosense3d-dataset-package"]], "cosense3d.dataset.const module": [[7, "module-cosense3d.dataset.const"]], "cosense3d.dataset.cosense_dataset module": [[7, "module-cosense3d.dataset.cosense_dataset"]], "cosense3d.dataset.temporal_cosense_dataset module": [[7, "module-cosense3d.dataset.temporal_cosense_dataset"]], "cosense3d.dataset.pipeline package": [[8, "cosense3d-dataset-pipeline-package"]], "cosense3d.dataset.pipeline.loading module": [[8, "module-cosense3d.dataset.pipeline.loading"]], "cosense3d.dataset.pipeline.transform module": [[8, "module-cosense3d.dataset.pipeline.transform"]], "cosense3d.dataset.toolkit package": [[9, "cosense3d-dataset-toolkit-package"]], "cosense3d.dataset.toolkit.cosense module": [[9, "module-cosense3d.dataset.toolkit.cosense"]], "cosense3d.dataset.toolkit.dairv2x module": [[9, "module-cosense3d.dataset.toolkit.dairv2x"]], "cosense3d.dataset.toolkit.opv2v module": [[9, "module-cosense3d.dataset.toolkit.opv2v"]], "cosense3d.dataset.toolkit.opv2v_t module": [[9, "module-cosense3d.dataset.toolkit.opv2v_t"]], "cosense3d.modules package": [[10, "cosense3d-modules-package"]], "cosense3d.modules.backbone2d package": [[11, "cosense3d-modules-backbone2d-package"]], "cosense3d.modules.backbone2d.resnet_encoder module": [[11, "module-cosense3d.modules.backbone2d.resnet_encoder"]], "cosense3d.modules.backbone3d package": [[12, "cosense3d-modules-backbone3d-package"]], "cosense3d.modules.backbone3d.mink_unet module": [[12, "module-cosense3d.modules.backbone3d.mink_unet"]], "cosense3d.modules.backbone3d.pillar_bev module": [[12, "module-cosense3d.modules.backbone3d.pillar_bev"]], "cosense3d.modules.backbone3d.spconv module": [[12, "module-cosense3d.modules.backbone3d.spconv"]], "cosense3d.modules.backbone3d.voxelnet module": [[12, "module-cosense3d.modules.backbone3d.voxelnet"]], "cosense3d.modules.fusion package": [[13, "cosense3d-modules-fusion-package"]], "cosense3d.modules.fusion.attn_fusion module": [[13, "module-cosense3d.modules.fusion.attn_fusion"]], "cosense3d.modules.fusion.box_fusion module": [[13, "module-cosense3d.modules.fusion.box_fusion"]], "cosense3d.modules.fusion.fax module": [[13, 
"module-cosense3d.modules.fusion.fax"]], "cosense3d.modules.fusion.keypoints module": [[13, "module-cosense3d.modules.fusion.keypoints"]], "cosense3d.modules.fusion.maxout_fusion module": [[13, "module-cosense3d.modules.fusion.maxout_fusion"]], "cosense3d.modules.fusion.naive_fusion module": [[13, "module-cosense3d.modules.fusion.naive_fusion"]], "cosense3d.modules.fusion.spatial_query_fusion module": [[13, "module-cosense3d.modules.fusion.spatial_query_fusion"]], "cosense3d.modules.fusion.temporal_fusion module": [[13, "module-cosense3d.modules.fusion.temporal_fusion"]], "cosense3d.modules.heads package": [[14, "cosense3d-modules-heads-package"]], "cosense3d.modules.heads.bev module": [[14, "module-cosense3d.modules.heads.bev"]], "cosense3d.modules.heads.bev_dense module": [[14, "module-cosense3d.modules.heads.bev_dense"]], "cosense3d.modules.heads.det_anchor_dense module": [[14, "module-cosense3d.modules.heads.det_anchor_dense"]], "cosense3d.modules.heads.det_anchor_sparse module": [[14, "module-cosense3d.modules.heads.det_anchor_sparse"]], "cosense3d.modules.heads.det_center_sparse module": [[14, "module-cosense3d.modules.heads.det_center_sparse"]], "cosense3d.modules.heads.det_roi_refine module": [[14, "module-cosense3d.modules.heads.det_roi_refine"]], "cosense3d.modules.heads.img_focal module": [[14, "module-cosense3d.modules.heads.img_focal"]], "cosense3d.modules.heads.lidar_petr_head module": [[14, "module-cosense3d.modules.heads.lidar_petr_head"]], "cosense3d.modules.heads.multitask_head module": [[14, "module-cosense3d.modules.heads.multitask_head"]], "cosense3d.modules.heads.nbr_attn_bev module": [[14, "module-cosense3d.modules.heads.nbr_attn_bev"]], "cosense3d.modules.heads.petr_head module": [[14, "module-cosense3d.modules.heads.petr_head"]], "cosense3d.modules.heads.query_guided_petr_head module": [[14, "module-cosense3d.modules.heads.query_guided_petr_head"]], "cosense3d.modules.losses package": [[15, "cosense3d-modules-losses-package"]], "cosense3d.modules.losses.base_loss module": [[15, "module-cosense3d.modules.losses.base_loss"]], "cosense3d.modules.losses.common module": [[15, "module-cosense3d.modules.losses.common"]], "cosense3d.modules.losses.edl module": [[15, "module-cosense3d.modules.losses.edl"]], "cosense3d.modules.losses.focal_loss module": [[15, "module-cosense3d.modules.losses.focal_loss"]], "cosense3d.modules.losses.iou_loss module": [[15, "module-cosense3d.modules.losses.iou_loss"]], "cosense3d.modules.losses.l1_loss module": [[15, "module-cosense3d.modules.losses.l1_loss"]], "cosense3d.modules.losses.vanilla_seg_loss module": [[15, "module-cosense3d.modules.losses.vanilla_seg_loss"]], "cosense3d.modules.necks package": [[16, "cosense3d-modules-necks-package"]], "cosense3d.modules.necks.cpm_composer module": [[16, "module-cosense3d.modules.necks.cpm_composer"]], "cosense3d.modules.necks.dilation_spconv module": [[16, "module-cosense3d.modules.necks.dilation_spconv"]], "cosense3d.modules.necks.formatting module": [[16, "module-cosense3d.modules.necks.formatting"]], "cosense3d.modules.plugin package": [[17, "cosense3d-modules-plugin-package"]], "cosense3d.modules.plugin.attn module": [[17, "module-cosense3d.modules.plugin.attn"]], "cosense3d.modules.plugin.bev_rpn module": [[17, "module-cosense3d.modules.plugin.bev_rpn"]], "cosense3d.modules.plugin.downsample_conv module": [[17, "module-cosense3d.modules.plugin.downsample_conv"]], "cosense3d.modules.plugin.flash_attn module": [[17, "module-cosense3d.modules.plugin.flash_attn"]], "cosense3d.modules.plugin.fpn 
module": [[17, "module-cosense3d.modules.plugin.fpn"]], "cosense3d.modules.plugin.gevbev_decoder module": [[17, "module-cosense3d.modules.plugin.gevbev_decoder"]], "cosense3d.modules.plugin.mink_spconv module": [[17, "module-cosense3d.modules.plugin.mink_spconv"]], "cosense3d.modules.plugin.naive_compressor module": [[17, "module-cosense3d.modules.plugin.naive_compressor"]], "cosense3d.modules.plugin.pillar_encoder module": [[17, "module-cosense3d.modules.plugin.pillar_encoder"]], "cosense3d.modules.plugin.ssfa module": [[17, "module-cosense3d.modules.plugin.ssfa"]], "cosense3d.modules.plugin.target_assigners module": [[17, "module-cosense3d.modules.plugin.target_assigners"]], "cosense3d.modules.plugin.transformer module": [[17, "module-cosense3d.modules.plugin.transformer"]], "cosense3d.modules.plugin.voxel_encoder module": [[17, "module-cosense3d.modules.plugin.voxel_encoder"]], "cosense3d.modules.plugin.voxel_generator module": [[17, "module-cosense3d.modules.plugin.voxel_generator"]], "cosense3d.modules.plugin.voxnet_utils module": [[17, "module-cosense3d.modules.plugin.voxnet_utils"]], "cosense3d.modules.plugin.vsa module": [[17, "module-cosense3d.modules.plugin.vsa"]], "cosense3d.modules.projection package": [[18, "cosense3d-modules-projection-package"]], "cosense3d.modules.projection.fax module": [[18, "module-cosense3d.modules.projection.fax"]], "cosense3d.modules.projection.petr module": [[18, "module-cosense3d.modules.projection.petr"]], "cosense3d.modules.projection.spatial_transform module": [[18, "module-cosense3d.modules.projection.spatial_transform"]], "cosense3d.modules.utils package": [[19, "cosense3d-modules-utils-package"]], "cosense3d.modules.utils.box_coder module": [[19, "module-cosense3d.modules.utils.box_coder"]], "cosense3d.modules.utils.common module": [[19, "module-cosense3d.modules.utils.common"]], "cosense3d.modules.utils.conv module": [[19, "module-cosense3d.modules.utils.conv"]], "cosense3d.modules.utils.edl_utils module": [[19, "module-cosense3d.modules.utils.edl_utils"]], "cosense3d.modules.utils.gaussian_utils module": [[19, "module-cosense3d.modules.utils.gaussian_utils"]], "cosense3d.modules.utils.init module": [[19, "module-cosense3d.modules.utils.init"]], "cosense3d.modules.utils.me_utils module": [[19, "module-cosense3d.modules.utils.me_utils"]], "cosense3d.modules.utils.misc module": [[19, "module-cosense3d.modules.utils.misc"]], "cosense3d.modules.utils.nbr_attn module": [[19, "module-cosense3d.modules.utils.nbr_attn"]], "cosense3d.modules.utils.norm module": [[19, "module-cosense3d.modules.utils.norm"]], "cosense3d.modules.utils.positional_encoding module": [[19, "module-cosense3d.modules.utils.positional_encoding"]], "cosense3d.utils package": [[20, "cosense3d-utils-package"]], "cosense3d.utils.box_utils module": [[20, "module-cosense3d.utils.box_utils"]], "cosense3d.utils.eval_detection_utils module": [[20, "module-cosense3d.utils.eval_detection_utils"]], "cosense3d.utils.iou2d_calculator module": [[20, "module-cosense3d.utils.iou2d_calculator"]], "cosense3d.utils.logger module": [[20, "module-cosense3d.utils.logger"]], "cosense3d.utils.lr_scheduler module": [[20, "module-cosense3d.utils.lr_scheduler"]], "cosense3d.utils.metrics module": [[20, "module-cosense3d.utils.metrics"]], "cosense3d.utils.misc module": [[20, "module-cosense3d.utils.misc"]], "cosense3d.utils.module_utils module": [[20, "module-cosense3d.utils.module_utils"]], "cosense3d.utils.pclib module": [[20, "module-cosense3d.utils.pclib"]], "cosense3d.utils.tensor_utils module": [[20, 
"module-cosense3d.utils.tensor_utils"]], "cosense3d.utils.train_utils module": [[20, "module-cosense3d.utils.train_utils"]], "cosense3d.utils.vislib module": [[20, "module-cosense3d.utils.vislib"]], "Welcome to OpenCosense3D\u2019s documentation!": [[21, "welcome-to-opencosense3d-s-documentation"]], "Contents:": [[21, null]], "Indices and tables": [[21, "indices-and-tables"]], "Installation": [[22, "installation"]], "Requirements": [[22, "requirements"]], "Installation options": [[22, "installation-options"]], "Via bash script": [[22, "via-bash-script"]], "Step-by-step": [[22, "step-by-step"]], "Prepare Datasets": [[23, "prepare-datasets"]], "OPV2Vt": [[23, "opv2vt"]], "DairV2Xt": [[23, "dairv2xt"]], "OPV2V": [[23, "opv2v"]], "The Structure of the framework": [[24, "the-structure-of-the-framework"]], "Dataloader": [[24, "dataloader"]], "GUI": [[24, "gui"]], "Runner": [[24, "runner"]], "Central Controller": [[24, "central-controller"]], "CoSense3D": [[25, "cosense3d"]]}, "indexentries": {"cosense3d": [[0, "module-cosense3d"]], "module": [[0, "module-cosense3d"], [1, "module-cosense3d.agents"], [1, "module-cosense3d.agents.center_controller"], [2, "module-cosense3d.agents.cav_prototype"], [2, "module-cosense3d.agents.cav_prototype.base_cav"], [2, "module-cosense3d.agents.cav_prototype.streamLTS_collection"], [3, "module-cosense3d.agents.core"], [3, "module-cosense3d.agents.core.base_runner"], [3, "module-cosense3d.agents.core.cav_manager"], [3, "module-cosense3d.agents.core.data_manager"], [3, "module-cosense3d.agents.core.forward_runner"], [3, "module-cosense3d.agents.core.gui"], [3, "module-cosense3d.agents.core.hooks"], [3, "module-cosense3d.agents.core.task_manager"], [3, "module-cosense3d.agents.core.test_runner"], [3, "module-cosense3d.agents.core.train_runner"], [3, "module-cosense3d.agents.core.vis_runner"], [4, "module-cosense3d.agents.utils"], [4, "module-cosense3d.agents.utils.deco"], [4, "module-cosense3d.agents.utils.transform"], [5, "module-cosense3d.agents.viewer"], [5, "module-cosense3d.agents.viewer.gl_viewer"], [5, "module-cosense3d.agents.viewer.img_anno3d_viewer"], [5, "module-cosense3d.agents.viewer.img_viewer"], [5, "module-cosense3d.agents.viewer.output_viewer"], [5, "module-cosense3d.agents.viewer.utils"], [6, "module-cosense3d.agents.viewer.items"], [6, "module-cosense3d.agents.viewer.items.graph_items"], [7, "module-cosense3d.dataset"], [7, "module-cosense3d.dataset.const"], [7, "module-cosense3d.dataset.cosense_dataset"], [7, "module-cosense3d.dataset.temporal_cosense_dataset"], [8, "module-cosense3d.dataset.pipeline"], [8, "module-cosense3d.dataset.pipeline.loading"], [8, "module-cosense3d.dataset.pipeline.transform"], [9, "module-cosense3d.dataset.toolkit"], [9, "module-cosense3d.dataset.toolkit.cosense"], [9, "module-cosense3d.dataset.toolkit.dairv2x"], [9, "module-cosense3d.dataset.toolkit.opv2v"], [9, "module-cosense3d.dataset.toolkit.opv2v_t"], [10, "module-cosense3d.modules"], [11, "module-cosense3d.modules.backbone2d"], [11, "module-cosense3d.modules.backbone2d.resnet_encoder"], [12, "module-cosense3d.modules.backbone3d"], [12, "module-cosense3d.modules.backbone3d.mink_unet"], [12, "module-cosense3d.modules.backbone3d.pillar_bev"], [12, "module-cosense3d.modules.backbone3d.spconv"], [12, "module-cosense3d.modules.backbone3d.voxelnet"], [13, "module-cosense3d.modules.fusion"], [13, "module-cosense3d.modules.fusion.attn_fusion"], [13, "module-cosense3d.modules.fusion.box_fusion"], [13, "module-cosense3d.modules.fusion.fax"], [13, 
"module-cosense3d.modules.fusion.keypoints"], [13, "module-cosense3d.modules.fusion.maxout_fusion"], [13, "module-cosense3d.modules.fusion.naive_fusion"], [13, "module-cosense3d.modules.fusion.spatial_query_fusion"], [13, "module-cosense3d.modules.fusion.temporal_fusion"], [14, "module-cosense3d.modules.heads"], [14, "module-cosense3d.modules.heads.bev"], [14, "module-cosense3d.modules.heads.bev_dense"], [14, "module-cosense3d.modules.heads.det_anchor_dense"], [14, "module-cosense3d.modules.heads.det_anchor_sparse"], [14, "module-cosense3d.modules.heads.det_center_sparse"], [14, "module-cosense3d.modules.heads.det_roi_refine"], [14, "module-cosense3d.modules.heads.img_focal"], [14, "module-cosense3d.modules.heads.lidar_petr_head"], [14, "module-cosense3d.modules.heads.multitask_head"], [14, "module-cosense3d.modules.heads.nbr_attn_bev"], [14, "module-cosense3d.modules.heads.petr_head"], [14, "module-cosense3d.modules.heads.query_guided_petr_head"], [15, "module-cosense3d.modules.losses"], [15, "module-cosense3d.modules.losses.base_loss"], [15, "module-cosense3d.modules.losses.common"], [15, "module-cosense3d.modules.losses.edl"], [15, "module-cosense3d.modules.losses.focal_loss"], [15, "module-cosense3d.modules.losses.iou_loss"], [15, "module-cosense3d.modules.losses.l1_loss"], [15, "module-cosense3d.modules.losses.vanilla_seg_loss"], [16, "module-cosense3d.modules.necks"], [16, "module-cosense3d.modules.necks.cpm_composer"], [16, "module-cosense3d.modules.necks.dilation_spconv"], [16, "module-cosense3d.modules.necks.formatting"], [17, "module-cosense3d.modules.plugin"], [17, "module-cosense3d.modules.plugin.attn"], [17, "module-cosense3d.modules.plugin.bev_rpn"], [17, "module-cosense3d.modules.plugin.downsample_conv"], [17, "module-cosense3d.modules.plugin.flash_attn"], [17, "module-cosense3d.modules.plugin.fpn"], [17, "module-cosense3d.modules.plugin.gevbev_decoder"], [17, "module-cosense3d.modules.plugin.mink_spconv"], [17, "module-cosense3d.modules.plugin.naive_compressor"], [17, "module-cosense3d.modules.plugin.pillar_encoder"], [17, "module-cosense3d.modules.plugin.ssfa"], [17, "module-cosense3d.modules.plugin.target_assigners"], [17, "module-cosense3d.modules.plugin.transformer"], [17, "module-cosense3d.modules.plugin.voxel_encoder"], [17, "module-cosense3d.modules.plugin.voxel_generator"], [17, "module-cosense3d.modules.plugin.voxnet_utils"], [17, "module-cosense3d.modules.plugin.vsa"], [18, "module-cosense3d.modules.projection"], [18, "module-cosense3d.modules.projection.fax"], [18, "module-cosense3d.modules.projection.petr"], [18, "module-cosense3d.modules.projection.spatial_transform"], [19, "module-cosense3d.modules.utils"], [19, "module-cosense3d.modules.utils.box_coder"], [19, "module-cosense3d.modules.utils.common"], [19, "module-cosense3d.modules.utils.conv"], [19, "module-cosense3d.modules.utils.edl_utils"], [19, "module-cosense3d.modules.utils.gaussian_utils"], [19, "module-cosense3d.modules.utils.init"], [19, "module-cosense3d.modules.utils.me_utils"], [19, "module-cosense3d.modules.utils.misc"], [19, "module-cosense3d.modules.utils.nbr_attn"], [19, "module-cosense3d.modules.utils.norm"], [19, "module-cosense3d.modules.utils.positional_encoding"], [20, "module-cosense3d.utils"], [20, "module-cosense3d.utils.box_utils"], [20, "module-cosense3d.utils.eval_detection_utils"], [20, "module-cosense3d.utils.iou2d_calculator"], [20, "module-cosense3d.utils.logger"], [20, "module-cosense3d.utils.lr_scheduler"], [20, "module-cosense3d.utils.metrics"], [20, 
"module-cosense3d.utils.misc"], [20, "module-cosense3d.utils.module_utils"], [20, "module-cosense3d.utils.pclib"], [20, "module-cosense3d.utils.tensor_utils"], [20, "module-cosense3d.utils.train_utils"], [20, "module-cosense3d.utils.vislib"]], "centercontroller (class in cosense3d.agents.center_controller)": [[1, "cosense3d.agents.center_controller.CenterController"]], "cosense3d.agents": [[1, "module-cosense3d.agents"]], "cosense3d.agents.center_controller": [[1, "module-cosense3d.agents.center_controller"]], "model (cosense3d.agents.center_controller.centercontroller property)": [[1, "cosense3d.agents.center_controller.CenterController.model"]], "modules (cosense3d.agents.center_controller.centercontroller property)": [[1, "cosense3d.agents.center_controller.CenterController.modules"]], "parameters (cosense3d.agents.center_controller.centercontroller property)": [[1, "cosense3d.agents.center_controller.CenterController.parameters"]], "run_frame() (cosense3d.agents.center_controller.centercontroller method)": [[1, "cosense3d.agents.center_controller.CenterController.run_frame"]], "run_seq() (cosense3d.agents.center_controller.centercontroller method)": [[1, "cosense3d.agents.center_controller.CenterController.run_seq"]], "setup_core() (cosense3d.agents.center_controller.centercontroller method)": [[1, "cosense3d.agents.center_controller.CenterController.setup_core"]], "test_forward() (cosense3d.agents.center_controller.centercontroller method)": [[1, "cosense3d.agents.center_controller.CenterController.test_forward"]], "train_forward() (cosense3d.agents.center_controller.centercontroller method)": [[1, "cosense3d.agents.center_controller.CenterController.train_forward"]], "update_cfg() (cosense3d.agents.center_controller.centercontroller method)": [[1, "cosense3d.agents.center_controller.CenterController.update_cfg"]], "vis_forward() (cosense3d.agents.center_controller.centercontroller method)": [[1, "cosense3d.agents.center_controller.CenterController.vis_forward"]], "basecav (class in cosense3d.agents.cav_prototype.base_cav)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV"]], "baseseqcav (class in cosense3d.agents.cav_prototype.base_cav)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV"]], "dairv2xcav (class in cosense3d.agents.cav_prototype.base_cav)": [[2, "cosense3d.agents.cav_prototype.base_cav.DairV2XCAV"]], "ltscavloccorr (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr"]], "ltsdairv2x (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSDairV2X"]], "opv2vtcav (class in cosense3d.agents.cav_prototype.base_cav)": [[2, "cosense3d.agents.cav_prototype.base_cav.OPV2VtCAV"]], "opv2vtcav_v2 (class in cosense3d.agents.cav_prototype.base_cav)": [[2, "cosense3d.agents.cav_prototype.base_cav.OPV2VtCAV_v2"]], "streamlidarcav (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV"]], "apply_transform() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.apply_transform"]], "apply_transform() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.apply_transform"]], "apply_transform() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, 
"cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.apply_transform"]], "apply_transform() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.apply_transform"]], "apply_transform() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.apply_transform"]], "cosense3d.agents.cav_prototype": [[2, "module-cosense3d.agents.cav_prototype"]], "cosense3d.agents.cav_prototype.base_cav": [[2, "module-cosense3d.agents.cav_prototype.base_cav"]], "cosense3d.agents.cav_prototype.streamlts_collection": [[2, "module-cosense3d.agents.cav_prototype.streamLTS_collection"]], "forward() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.forward"]], "forward() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.forward"]], "forward_fusion() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.forward_fusion"]], "forward_fusion() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.forward_fusion"]], "forward_fusion() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.forward_fusion"]], "forward_fusion() (cosense3d.agents.cav_prototype.streamlts_collection.ltsdairv2x method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSDairV2X.forward_fusion"]], "forward_fusion() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.forward_fusion"]], "forward_fusion() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.forward_fusion"]], "forward_head() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.forward_head"]], "forward_head() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.forward_head"]], "forward_head() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.forward_head"]], "forward_head() (cosense3d.agents.cav_prototype.streamlts_collection.ltsdairv2x method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSDairV2X.forward_head"]], "forward_head() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.forward_head"]], "forward_head() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.forward_head"]], "forward_local() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.forward_local"]], "forward_local() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.forward_local"]], "forward_local() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, 
"cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.forward_local"]], "forward_local() (cosense3d.agents.cav_prototype.streamlts_collection.ltsdairv2x method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSDairV2X.forward_local"]], "forward_local() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.forward_local"]], "forward_local() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.forward_local"]], "forward_local() (cosense3d.agents.cav_prototype.streamlts_collection.slcdensetosparse method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcDenseToSparse.forward_local"]], "forward_local() (cosense3d.agents.cav_prototype.streamlts_collection.slcfpvrcnn method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcFPVRCNN.forward_local"]], "forward_localization() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.forward_localization"]], "forward_localization() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.forward_localization"]], "get_data() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.get_data"]], "get_prototype() (in module cosense3d.agents.cav_prototype)": [[2, "cosense3d.agents.cav_prototype.get_prototype"]], "get_request_cpm() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.get_request_cpm"]], "get_request_cpm() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.get_request_cpm"]], "get_response_cpm() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.get_response_cpm"]], "get_response_cpm() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.get_response_cpm"]], "get_response_cpm() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.get_response_cpm"]], "get_response_cpm() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.get_response_cpm"]], "get_response_cpm() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.get_response_cpm"]], "has_request() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.has_request"]], "has_request() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.has_request"]], "loss() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.loss"]], "loss() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.loss"]], "loss() (cosense3d.agents.cav_prototype.streamlts_collection.ltsdairv2x method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSDairV2X.loss"]], "loss() 
(cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.loss"]], "loss() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.loss"]], "post_update_memory() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.post_update_memory"]], "post_update_memory() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.post_update_memory"]], "post_update_memory() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.post_update_memory"]], "post_update_memory() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.post_update_memory"]], "pre_update_memory() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.pre_update_memory"]], "pre_update_memory() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.pre_update_memory"]], "pre_update_memory() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.pre_update_memory"]], "pre_update_memory() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.pre_update_memory"]], "prepare_data() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.streamlts_collection.slcciassd method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.streamlts_collection.slcdensetosparse method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcDenseToSparse.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.streamlts_collection.slcfpvrcnn method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcFPVRCNN.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.streamlts_collection.slcnoboxtime method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTime.prepare_data"]], "prepare_data() (cosense3d.agents.cav_prototype.streamlts_collection.slcnoboxtimedairv2x method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTimeDairV2X.prepare_data"]], "prepare_time_scale() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.prepare_time_scale"]], "receive_request() 
(cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.receive_request"]], "receive_request() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.receive_request"]], "receive_response() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.receive_response"]], "receive_response() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.receive_response"]], "refresh_memory() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.refresh_memory"]], "reset_data() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.reset_data"]], "reset_data() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.reset_data"]], "slcattnfusion (in module cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcAttnFusion"]], "slcciassd (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcCIASSD"]], "slcdensetosparse (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcDenseToSparse"]], "slcfpvrcnn (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcFPVRCNN"]], "slcfcooper (in module cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcFcooper"]], "slcnoboxtime (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTime"]], "slcnoboxtimedairv2x (class in cosense3d.agents.cav_prototype.streamlts_collection)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTimeDairV2X"]], "task_id() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.task_id"]], "timestamp (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav property)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.timestamp"]], "transform_data() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.transform_data"]], "transform_data() (cosense3d.agents.cav_prototype.streamlts_collection.ltscavloccorr method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.LTSCAVLocCorr.transform_data"]], "transform_data() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.transform_data"]], "transform_ref_pts() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.transform_ref_pts"]], "update() (cosense3d.agents.cav_prototype.base_cav.basecav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseCAV.update"]], "update() (cosense3d.agents.cav_prototype.base_cav.baseseqcav method)": [[2, "cosense3d.agents.cav_prototype.base_cav.BaseSeqCAV.update"]], "update_memory_timestamps() 
(cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.update_memory_timestamps"]], "update_memory_timestamps() (cosense3d.agents.cav_prototype.streamlts_collection.slcnoboxtime method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTime.update_memory_timestamps"]], "update_memory_timestamps() (cosense3d.agents.cav_prototype.streamlts_collection.slcnoboxtimedairv2x method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.slcNoBoxTimeDairV2X.update_memory_timestamps"]], "vis_local_detection() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.vis_local_detection"]], "vis_local_pred() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.vis_local_pred"]], "vis_poses() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.vis_poses"]], "vis_ref_pts() (cosense3d.agents.cav_prototype.streamlts_collection.streamlidarcav method)": [[2, "cosense3d.agents.cav_prototype.streamLTS_collection.StreamLidarCAV.vis_ref_pts"]], "basehook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.BaseHook"]], "baserunner (class in cosense3d.agents.core.base_runner)": [[3, "cosense3d.agents.core.base_runner.BaseRunner"]], "cavmanager (class in cosense3d.agents.core.cav_manager)": [[3, "cosense3d.agents.core.cav_manager.CAVManager"]], "cpmstatistichook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.CPMStatisticHook"]], "checkpointshook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.CheckPointsHook"]], "datamanager (class in cosense3d.agents.core.data_manager)": [[3, "cosense3d.agents.core.data_manager.DataManager"]], "detectionnmshook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.DetectionNMSHook"]], "evalbevsemseghook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook"]], "evaldetectionbevhook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.EvalDetectionBEVHook"]], "evaldetectionhook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.EvalDetectionHook"]], "forwardrunner (class in cosense3d.agents.core.forward_runner)": [[3, "cosense3d.agents.core.forward_runner.ForwardRunner"]], "gui (class in cosense3d.agents.core.gui)": [[3, "cosense3d.agents.core.gui.GUI"]], "hooks (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.Hooks"]], "memoryusagehook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.MemoryUsageHook"]], "taskmanager (class in cosense3d.agents.core.task_manager)": [[3, "cosense3d.agents.core.task_manager.TaskManager"]], "testrunner (class in cosense3d.agents.core.test_runner)": [[3, "cosense3d.agents.core.test_runner.TestRunner"]], "trainrunner (class in cosense3d.agents.core.train_runner)": [[3, "cosense3d.agents.core.train_runner.TrainRunner"]], "traintimerhook (class in cosense3d.agents.core.hooks)": [[3, "cosense3d.agents.core.hooks.TrainTimerHook"]], "visrunner (class in cosense3d.agents.core.vis_runner)": [[3, "cosense3d.agents.core.vis_runner.VisRunner"]], "add_loc_err() (cosense3d.agents.core.data_manager.datamanager method)": [[3, 
"cosense3d.agents.core.data_manager.DataManager.add_loc_err"]], "apply_cav_function() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.apply_cav_function"]], "apply_preprocess() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.apply_preprocess"]], "boxes_to_vis_format() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.boxes_to_vis_format"]], "cal_ious() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook.cal_ious"]], "change_color_mode() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.change_color_mode"]], "change_glcolor() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.change_glcolor"]], "change_visible() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.change_visible"]], "connect_events_to_funcs() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.connect_events_to_funcs"]], "cosense3d.agents.core": [[3, "module-cosense3d.agents.core"]], "cosense3d.agents.core.base_runner": [[3, "module-cosense3d.agents.core.base_runner"]], "cosense3d.agents.core.cav_manager": [[3, "module-cosense3d.agents.core.cav_manager"]], "cosense3d.agents.core.data_manager": [[3, "module-cosense3d.agents.core.data_manager"]], "cosense3d.agents.core.forward_runner": [[3, "module-cosense3d.agents.core.forward_runner"]], "cosense3d.agents.core.gui": [[3, "module-cosense3d.agents.core.gui"]], "cosense3d.agents.core.hooks": [[3, "module-cosense3d.agents.core.hooks"]], "cosense3d.agents.core.task_manager": [[3, "module-cosense3d.agents.core.task_manager"]], "cosense3d.agents.core.test_runner": [[3, "module-cosense3d.agents.core.test_runner"]], "cosense3d.agents.core.train_runner": [[3, "module-cosense3d.agents.core.train_runner"]], "cosense3d.agents.core.vis_runner": [[3, "module-cosense3d.agents.core.vis_runner"]], "crop_map() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook.crop_map"]], "distribute_to_cav() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.distribute_to_cav"]], "distribute_to_seq_cav() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.distribute_to_seq_cav"]], "distribute_to_seq_list() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.distribute_to_seq_list"]], "eval_cosense3d_final() (cosense3d.agents.core.hooks.evaldetectionhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionHook.eval_cosense3d_final"]], "filter_box_ranges() (cosense3d.agents.core.hooks.evaldetectionbevhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionBEVHook.filter_box_ranges"]], "filter_box_ranges() (cosense3d.agents.core.hooks.evaldetectionhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionHook.filter_box_ranges"]], "format_final_result() (cosense3d.agents.core.hooks.evaldetectionbevhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionBEVHook.format_final_result"]], "format_final_result() (cosense3d.agents.core.hooks.evaldetectionhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionHook.format_final_result"]], "forward() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, 
"cosense3d.agents.core.cav_manager.CAVManager.forward"]], "forward() (cosense3d.agents.core.forward_runner.forwardrunner method)": [[3, "cosense3d.agents.core.forward_runner.ForwardRunner.forward"]], "frame_loss() (cosense3d.agents.core.forward_runner.forwardrunner method)": [[3, "cosense3d.agents.core.forward_runner.ForwardRunner.frame_loss"]], "gather() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.gather"]], "gather_batch() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.gather_batch"]], "gather_cav_data() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.gather_cav_data"]], "gather_cav_ids() (cosense3d.agents.core.forward_runner.forwardrunner method)": [[3, "cosense3d.agents.core.forward_runner.ForwardRunner.gather_cav_ids"]], "gather_ego_data() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.gather_ego_data"]], "gather_vis_data() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.gather_vis_data"]], "generate_augment_params() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.generate_augment_params"]], "generate_global_non_empty_mask() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.generate_global_non_empty_mask"]], "generate_local_non_empty_mask() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.generate_local_non_empty_mask"]], "get_cav_with_id() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.get_cav_with_id"]], "get_gt_boxes_as_vis_format() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.get_gt_boxes_as_vis_format"]], "get_toolbar() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.get_toolbar"]], "get_vis_data_bev() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.get_vis_data_bev"]], "get_vis_data_detection() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.get_vis_data_detection"]], "get_vis_data_input() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.get_vis_data_input"]], "get_vis_data_meta() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.get_vis_data_meta"]], "gt_dynamic_map() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook.gt_dynamic_map"]], "gt_static_map() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook.gt_static_map"]], "has_cav() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.has_cav"]], "init() (cosense3d.agents.core.base_runner.baserunner method)": [[3, "cosense3d.agents.core.base_runner.BaseRunner.init"]], "initgui() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.initGUI"]], "iou() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, 
"cosense3d.agents.core.hooks.EvalBEVSemsegHook.iou"]], "load() (cosense3d.agents.core.test_runner.testrunner method)": [[3, "cosense3d.agents.core.test_runner.TestRunner.load"]], "load() (cosense3d.agents.core.vis_runner.visrunner method)": [[3, "cosense3d.agents.core.vis_runner.VisRunner.load"]], "logdir (cosense3d.agents.core.base_runner.baserunner property)": [[3, "cosense3d.agents.core.base_runner.BaseRunner.logdir"]], "loss() (cosense3d.agents.core.forward_runner.forwardrunner method)": [[3, "cosense3d.agents.core.forward_runner.ForwardRunner.loss"]], "next_batch() (cosense3d.agents.core.base_runner.baserunner method)": [[3, "cosense3d.agents.core.base_runner.BaseRunner.next_batch"]], "post_epoch() (cosense3d.agents.core.hooks.basehook method)": [[3, "cosense3d.agents.core.hooks.BaseHook.post_epoch"]], "post_epoch() (cosense3d.agents.core.hooks.cpmstatistichook method)": [[3, "cosense3d.agents.core.hooks.CPMStatisticHook.post_epoch"]], "post_epoch() (cosense3d.agents.core.hooks.checkpointshook method)": [[3, "cosense3d.agents.core.hooks.CheckPointsHook.post_epoch"]], "post_epoch() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook.post_epoch"]], "post_epoch() (cosense3d.agents.core.hooks.evaldetectionbevhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionBEVHook.post_epoch"]], "post_epoch() (cosense3d.agents.core.hooks.evaldetectionhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionHook.post_epoch"]], "post_iter() (cosense3d.agents.core.hooks.basehook method)": [[3, "cosense3d.agents.core.hooks.BaseHook.post_iter"]], "post_iter() (cosense3d.agents.core.hooks.checkpointshook method)": [[3, "cosense3d.agents.core.hooks.CheckPointsHook.post_iter"]], "post_iter() (cosense3d.agents.core.hooks.detectionnmshook method)": [[3, "cosense3d.agents.core.hooks.DetectionNMSHook.post_iter"]], "post_iter() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook.post_iter"]], "post_iter() (cosense3d.agents.core.hooks.evaldetectionbevhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionBEVHook.post_iter"]], "post_iter() (cosense3d.agents.core.hooks.evaldetectionhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionHook.post_iter"]], "post_iter() (cosense3d.agents.core.hooks.memoryusagehook method)": [[3, "cosense3d.agents.core.hooks.MemoryUsageHook.post_iter"]], "post_iter() (cosense3d.agents.core.hooks.traintimerhook method)": [[3, "cosense3d.agents.core.hooks.TrainTimerHook.post_iter"]], "pre_epoch() (cosense3d.agents.core.hooks.basehook method)": [[3, "cosense3d.agents.core.hooks.BaseHook.pre_epoch"]], "pre_epoch() (cosense3d.agents.core.hooks.traintimerhook method)": [[3, "cosense3d.agents.core.hooks.TrainTimerHook.pre_epoch"]], "pre_iter() (cosense3d.agents.core.hooks.basehook method)": [[3, "cosense3d.agents.core.hooks.BaseHook.pre_iter"]], "receive_request() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.receive_request"]], "receive_response() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.receive_response"]], "reformat_tasks() (cosense3d.agents.core.task_manager.taskmanager method)": [[3, "cosense3d.agents.core.task_manager.TaskManager.reformat_tasks"]], "refresh() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.refresh"]], "remove_global_empty_boxes() (cosense3d.agents.core.data_manager.datamanager 
method)": [[3, "cosense3d.agents.core.data_manager.DataManager.remove_global_empty_boxes"]], "remove_local_empty_boxes() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.remove_local_empty_boxes"]], "reset() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.reset"]], "resume() (cosense3d.agents.core.train_runner.trainrunner method)": [[3, "cosense3d.agents.core.train_runner.TrainRunner.resume"]], "run() (cosense3d.agents.core.base_runner.baserunner method)": [[3, "cosense3d.agents.core.base_runner.BaseRunner.run"]], "run() (cosense3d.agents.core.test_runner.testrunner method)": [[3, "cosense3d.agents.core.test_runner.TestRunner.run"]], "run() (cosense3d.agents.core.train_runner.trainrunner method)": [[3, "cosense3d.agents.core.train_runner.TrainRunner.run"]], "run() (cosense3d.agents.core.vis_runner.visrunner method)": [[3, "cosense3d.agents.core.vis_runner.VisRunner.run"]], "run_epoch() (cosense3d.agents.core.train_runner.trainrunner method)": [[3, "cosense3d.agents.core.train_runner.TrainRunner.run_epoch"]], "run_itr() (cosense3d.agents.core.test_runner.testrunner method)": [[3, "cosense3d.agents.core.test_runner.TestRunner.run_itr"]], "run_itr() (cosense3d.agents.core.train_runner.trainrunner method)": [[3, "cosense3d.agents.core.train_runner.TrainRunner.run_itr"]], "run_itr() (cosense3d.agents.core.vis_runner.visrunner method)": [[3, "cosense3d.agents.core.vis_runner.VisRunner.run_itr"]], "sample_global_bev_tgt_pts() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.sample_global_bev_tgt_pts"]], "save() (cosense3d.agents.core.hooks.checkpointshook static method)": [[3, "cosense3d.agents.core.hooks.CheckPointsHook.save"]], "scatter() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.scatter"]], "send_request() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.send_request"]], "send_response() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.send_response"]], "setrunner() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.setRunner"]], "set_logdir() (cosense3d.agents.core.base_runner.baserunner method)": [[3, "cosense3d.agents.core.base_runner.BaseRunner.set_logdir"]], "set_logger() (cosense3d.agents.core.hooks.basehook method)": [[3, "cosense3d.agents.core.hooks.BaseHook.set_logger"]], "set_logger() (cosense3d.agents.core.hooks.cpmstatistichook method)": [[3, "cosense3d.agents.core.hooks.CPMStatisticHook.set_logger"]], "set_logger() (cosense3d.agents.core.hooks.evalbevsemseghook method)": [[3, "cosense3d.agents.core.hooks.EvalBEVSemsegHook.set_logger"]], "set_logger() (cosense3d.agents.core.hooks.evaldetectionbevhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionBEVHook.set_logger"]], "set_logger() (cosense3d.agents.core.hooks.evaldetectionhook method)": [[3, "cosense3d.agents.core.hooks.EvalDetectionHook.set_logger"]], "set_logger() (cosense3d.agents.core.hooks.hooks method)": [[3, "cosense3d.agents.core.hooks.Hooks.set_logger"]], "setupui() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.setupUI"]], "setup_logger() (cosense3d.agents.core.base_runner.baserunner method)": [[3, "cosense3d.agents.core.base_runner.BaseRunner.setup_logger"]], "setup_logger() 
(cosense3d.agents.core.test_runner.testrunner method)": [[3, "cosense3d.agents.core.test_runner.TestRunner.setup_logger"]], "setup_logger() (cosense3d.agents.core.train_runner.trainrunner method)": [[3, "cosense3d.agents.core.train_runner.TrainRunner.setup_logger"]], "start() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.start"]], "step() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.step"]], "step() (cosense3d.agents.core.test_runner.testrunner method)": [[3, "cosense3d.agents.core.test_runner.TestRunner.step"]], "step() (cosense3d.agents.core.train_runner.trainrunner method)": [[3, "cosense3d.agents.core.train_runner.TrainRunner.step"]], "step() (cosense3d.agents.core.vis_runner.visrunner method)": [[3, "cosense3d.agents.core.vis_runner.VisRunner.step"]], "stop() (cosense3d.agents.core.gui.gui method)": [[3, "cosense3d.agents.core.gui.GUI.stop"]], "summarize_loss_tasks() (cosense3d.agents.core.task_manager.taskmanager method)": [[3, "cosense3d.agents.core.task_manager.TaskManager.summarize_loss_tasks"]], "summarize_tasks() (cosense3d.agents.core.task_manager.taskmanager method)": [[3, "cosense3d.agents.core.task_manager.TaskManager.summarize_tasks"]], "task_to_ordered_dict() (cosense3d.agents.core.task_manager.taskmanager method)": [[3, "cosense3d.agents.core.task_manager.TaskManager.task_to_ordered_dict"]], "to_gpu() (cosense3d.agents.core.forward_runner.forwardrunner method)": [[3, "cosense3d.agents.core.forward_runner.ForwardRunner.to_gpu"]], "training (cosense3d.agents.core.forward_runner.forwardrunner attribute)": [[3, "cosense3d.agents.core.forward_runner.ForwardRunner.training"]], "update() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.update"]], "update_cav_info() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.update_cav_info"]], "update_cpm_statistic() (cosense3d.agents.core.cav_manager.cavmanager method)": [[3, "cosense3d.agents.core.cav_manager.CAVManager.update_cpm_statistic"]], "vis_data() (cosense3d.agents.core.base_runner.baserunner method)": [[3, "cosense3d.agents.core.base_runner.BaseRunner.vis_data"]], "vis_global_data_plt() (cosense3d.agents.core.data_manager.datamanager method)": [[3, "cosense3d.agents.core.data_manager.DataManager.vis_global_data_plt"]], "dataonlineprocessor (class in cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor"]], "adaptive_free_space_augmentation() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.adaptive_free_space_augmentation"]], "add_flip() (in module cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.add_flip"]], "add_rotate() (in module cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.add_rotate"]], "add_scale() (in module cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.add_scale"]], "apply_transform() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.apply_transform"]], "apply_transform() (in module cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.apply_transform"]], "cav_aug_transform() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.cav_aug_transform"]], 
"cosense3d.agents.utils": [[4, "module-cosense3d.agents.utils"]], "cosense3d.agents.utils.deco": [[4, "module-cosense3d.agents.utils.deco"]], "cosense3d.agents.utils.transform": [[4, "module-cosense3d.agents.utils.transform"]], "filter_range() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.filter_range"]], "filter_range() (in module cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.filter_range"]], "filter_range_mask() (in module cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.filter_range_mask"]], "free_space_augmentation() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.free_space_augmentation"]], "generate_bev_tgt_pts() (in module cosense3d.agents.utils.transform)": [[4, "cosense3d.agents.utils.transform.generate_bev_tgt_pts"]], "generate_sparse_target_bev_points() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.generate_sparse_target_bev_points"]], "generate_sparse_target_roadline_points() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.generate_sparse_target_roadline_points"]], "save_ckpt_on_error() (in module cosense3d.agents.utils.deco)": [[4, "cosense3d.agents.utils.deco.save_ckpt_on_error"]], "update_transform_with_aug() (cosense3d.agents.utils.transform.dataonlineprocessor static method)": [[4, "cosense3d.agents.utils.transform.DataOnlineProcessor.update_transform_with_aug"]], "bevdensecanvas (class in cosense3d.agents.viewer.output_viewer)": [[5, "cosense3d.agents.viewer.output_viewer.BEVDenseCanvas"]], "bevsparsecanvas (class in cosense3d.agents.viewer.output_viewer)": [[5, "cosense3d.agents.viewer.output_viewer.BEVSparseCanvas"]], "detectioncanvas (class in cosense3d.agents.viewer.output_viewer)": [[5, "cosense3d.agents.viewer.output_viewer.DetectionCanvas"]], "detectionscoremap (class in cosense3d.agents.viewer.output_viewer)": [[5, "cosense3d.agents.viewer.output_viewer.DetectionScoreMap"]], "glviewer (class in cosense3d.agents.viewer.gl_viewer)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer"]], "imganno3dviewer (class in cosense3d.agents.viewer.img_anno3d_viewer)": [[5, "cosense3d.agents.viewer.img_anno3d_viewer.ImgAnno3DViewer"]], "imgviewer (class in cosense3d.agents.viewer.img_viewer)": [[5, "cosense3d.agents.viewer.img_viewer.ImgViewer"]], "mplcanvas (class in cosense3d.agents.viewer.output_viewer)": [[5, "cosense3d.agents.viewer.output_viewer.MplCanvas"]], "outputviewer (class in cosense3d.agents.viewer.output_viewer)": [[5, "cosense3d.agents.viewer.output_viewer.OutputViewer"]], "sparsedetectioncanvas (class in cosense3d.agents.viewer.output_viewer)": [[5, "cosense3d.agents.viewer.output_viewer.SparseDetectionCanvas"]], "addbox() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.addBox"]], "box() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.box"]], "change_visibility() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.change_visibility"]], "circular_mask() (in module cosense3d.agents.viewer.utils)": [[5, "cosense3d.agents.viewer.utils.circular_mask"]], "cosense3d.agents.viewer": [[5, "module-cosense3d.agents.viewer"]], 
"cosense3d.agents.viewer.gl_viewer": [[5, "module-cosense3d.agents.viewer.gl_viewer"]], "cosense3d.agents.viewer.img_anno3d_viewer": [[5, "module-cosense3d.agents.viewer.img_anno3d_viewer"]], "cosense3d.agents.viewer.img_viewer": [[5, "module-cosense3d.agents.viewer.img_viewer"]], "cosense3d.agents.viewer.output_viewer": [[5, "module-cosense3d.agents.viewer.output_viewer"]], "cosense3d.agents.viewer.utils": [[5, "module-cosense3d.agents.viewer.utils"]], "depth_min() (in module cosense3d.agents.viewer.utils)": [[5, "cosense3d.agents.viewer.utils.depth_min"]], "drawrectangle() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.drawRectangle"]], "draw_axes() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.draw_axes"]], "draw_depth_buffer() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.draw_depth_buffer"]], "evt_pos_to_world() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.evt_pos_to_world"]], "get_point_depth() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.get_point_depth"]], "get_region_depth() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.get_region_depth"]], "highlightbox() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.highlightBox"]], "initializegl() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.initializeGL"]], "keypressevent() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.keyPressEvent"]], "keyreleaseevent() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.keyReleaseEvent"]], "model_pose_to_world() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.model_pose_to_world"]], "mousedoubleclickevent() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.mouseDoubleClickEvent"]], "mousemoveevent() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.mouseMoveEvent"]], "mousepressevent() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.mousePressEvent"]], "mousereleaseevent() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.mouseReleaseEvent"]], "paintgl() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.paintGL"]], "paintrect() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.paintRect"]], "refresh() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.refresh"]], "refresh() (cosense3d.agents.viewer.img_anno3d_viewer.imganno3dviewer method)": [[5, "cosense3d.agents.viewer.img_anno3d_viewer.ImgAnno3DViewer.refresh"]], "refresh() (cosense3d.agents.viewer.img_viewer.imgviewer method)": [[5, "cosense3d.agents.viewer.img_viewer.ImgViewer.refresh"]], "refresh() (cosense3d.agents.viewer.output_viewer.bevdensecanvas method)": [[5, "cosense3d.agents.viewer.output_viewer.BEVDenseCanvas.refresh"]], "refresh() 
(cosense3d.agents.viewer.output_viewer.bevsparsecanvas method)": [[5, "cosense3d.agents.viewer.output_viewer.BEVSparseCanvas.refresh"]], "refresh() (cosense3d.agents.viewer.output_viewer.detectioncanvas method)": [[5, "cosense3d.agents.viewer.output_viewer.DetectionCanvas.refresh"]], "refresh() (cosense3d.agents.viewer.output_viewer.detectionscoremap method)": [[5, "cosense3d.agents.viewer.output_viewer.DetectionScoreMap.refresh"]], "refresh() (cosense3d.agents.viewer.output_viewer.outputviewer method)": [[5, "cosense3d.agents.viewer.output_viewer.OutputViewer.refresh"]], "refresh() (cosense3d.agents.viewer.output_viewer.sparsedetectioncanvas method)": [[5, "cosense3d.agents.viewer.output_viewer.SparseDetectionCanvas.refresh"]], "removeactivate() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.removeActivate"]], "removeheilight() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.removeHeilight"]], "removerectangle() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.removeRectangle"]], "selectheilight() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.selectHeilight"]], "updateframedata() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.updateFrameData"]], "updatelabel() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.updateLabel"]], "updatepcds() (cosense3d.agents.viewer.gl_viewer.glviewer method)": [[5, "cosense3d.agents.viewer.gl_viewer.GLViewer.updatePCDs"]], "update_title() (cosense3d.agents.viewer.output_viewer.mplcanvas method)": [[5, "cosense3d.agents.viewer.output_viewer.MplCanvas.update_title"]], "lineboxitem (class in cosense3d.agents.viewer.items.graph_items)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem"]], "lineitem (class in cosense3d.agents.viewer.items.graph_items)": [[6, "cosense3d.agents.viewer.items.graph_items.LineItem"]], "meshboxitem (class in cosense3d.agents.viewer.items.graph_items)": [[6, "cosense3d.agents.viewer.items.graph_items.MeshBoxItem"]], "rectangleitem (class in cosense3d.agents.viewer.items.graph_items)": [[6, "cosense3d.agents.viewer.items.graph_items.RectangleItem"]], "activate() (cosense3d.agents.viewer.items.graph_items.lineboxitem method)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem.activate"]], "color() (cosense3d.agents.viewer.items.graph_items.lineboxitem method)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem.color"]], "cosense3d.agents.viewer.items": [[6, "module-cosense3d.agents.viewer.items"]], "cosense3d.agents.viewer.items.graph_items": [[6, "module-cosense3d.agents.viewer.items.graph_items"]], "deactivate() (cosense3d.agents.viewer.items.graph_items.lineboxitem method)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem.deactivate"]], "highlight() (cosense3d.agents.viewer.items.graph_items.lineboxitem method)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem.highlight"]], "hoverevent() (cosense3d.agents.viewer.items.graph_items.lineitem method)": [[6, "cosense3d.agents.viewer.items.graph_items.LineItem.hoverEvent"]], "hoverevent() (cosense3d.agents.viewer.items.graph_items.rectangleitem method)": [[6, "cosense3d.agents.viewer.items.graph_items.RectangleItem.hoverEvent"]], "id_ptr (cosense3d.agents.viewer.items.graph_items.lineboxitem attribute)": [[6, 
"cosense3d.agents.viewer.items.graph_items.LineBoxItem.id_ptr"]], "ids (cosense3d.agents.viewer.items.graph_items.lineboxitem attribute)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem.ids"]], "isactive (cosense3d.agents.viewer.items.graph_items.lineboxitem property)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem.isActive"]], "to_center() (cosense3d.agents.viewer.items.graph_items.lineboxitem method)": [[6, "cosense3d.agents.viewer.items.graph_items.LineBoxItem.to_center"]], "cosensedataset (class in cosense3d.dataset.cosense_dataset)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset"]], "label_colors (cosense3d.dataset.cosense_dataset.cosensedataset attribute)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.LABEL_COLORS"]], "temporalcosensedataset (class in cosense3d.dataset.temporal_cosense_dataset)": [[7, "cosense3d.dataset.temporal_cosense_dataset.TemporalCosenseDataset"]], "valid_cls (cosense3d.dataset.cosense_dataset.cosensedataset attribute)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.VALID_CLS"]], "collate_batch() (cosense3d.dataset.cosense_dataset.cosensedataset static method)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.collate_batch"]], "cosense3d.dataset": [[7, "module-cosense3d.dataset"]], "cosense3d.dataset.const": [[7, "module-cosense3d.dataset.const"]], "cosense3d.dataset.cosense_dataset": [[7, "module-cosense3d.dataset.cosense_dataset"]], "cosense3d.dataset.temporal_cosense_dataset": [[7, "module-cosense3d.dataset.temporal_cosense_dataset"]], "get_dataloader() (in module cosense3d.dataset)": [[7, "cosense3d.dataset.get_dataloader"]], "get_valid_agents() (cosense3d.dataset.cosense_dataset.cosensedataset method)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.get_valid_agents"]], "init_dataset() (cosense3d.dataset.cosense_dataset.cosensedataset method)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.init_dataset"]], "load_frame_data() (cosense3d.dataset.cosense_dataset.cosensedataset method)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.load_frame_data"]], "load_meta() (cosense3d.dataset.cosense_dataset.cosensedataset method)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.load_meta"]], "load_sample_info() (cosense3d.dataset.cosense_dataset.cosensedataset method)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.load_sample_info"]], "parse_samples() (cosense3d.dataset.cosense_dataset.cosensedataset method)": [[7, "cosense3d.dataset.cosense_dataset.CosenseDataset.parse_samples"]], "loadannotations (class in cosense3d.dataset.pipeline.loading)": [[8, "cosense3d.dataset.pipeline.loading.LoadAnnotations"]], "loadcarlaroadlinemaps (class in cosense3d.dataset.pipeline.loading)": [[8, "cosense3d.dataset.pipeline.loading.LoadCarlaRoadlineMaps"]], "loadlidarpoints (class in cosense3d.dataset.pipeline.loading)": [[8, "cosense3d.dataset.pipeline.loading.LoadLidarPoints"]], "loadmultiviewimg (class in cosense3d.dataset.pipeline.loading)": [[8, "cosense3d.dataset.pipeline.loading.LoadMultiViewImg"]], "loadopv2vbevmaps (class in cosense3d.dataset.pipeline.loading)": [[8, "cosense3d.dataset.pipeline.loading.LoadOPV2VBevMaps"]], "loadsparsebevtargetpoints (class in cosense3d.dataset.pipeline.loading)": [[8, "cosense3d.dataset.pipeline.loading.LoadSparseBevTargetPoints"]], "pipeline (class in cosense3d.dataset.pipeline)": [[8, "cosense3d.dataset.pipeline.Pipeline"]], "resizecropfliprotimage (class in cosense3d.dataset.pipeline.transform)": [[8, 
"cosense3d.dataset.pipeline.transform.ResizeCropFlipRotImage"]], "resizeimage (class in cosense3d.dataset.pipeline.transform)": [[8, "cosense3d.dataset.pipeline.transform.ResizeImage"]], "build_process() (cosense3d.dataset.pipeline.pipeline method)": [[8, "cosense3d.dataset.pipeline.Pipeline.build_process"]], "cosense3d.dataset.pipeline": [[8, "module-cosense3d.dataset.pipeline"]], "cosense3d.dataset.pipeline.loading": [[8, "module-cosense3d.dataset.pipeline.loading"]], "cosense3d.dataset.pipeline.transform": [[8, "module-cosense3d.dataset.pipeline.transform"]], "crop_map_for_pose() (cosense3d.dataset.pipeline.loading.loadopv2vbevmaps method)": [[8, "cosense3d.dataset.pipeline.loading.LoadOPV2VBevMaps.crop_map_for_pose"]], "generate_sparse_bev_pts() (cosense3d.dataset.pipeline.loading.loadsparsebevtargetpoints method)": [[8, "cosense3d.dataset.pipeline.loading.LoadSparseBevTargetPoints.generate_sparse_bev_pts"]], "get_lidar2img_transform() (cosense3d.dataset.pipeline.loading.loadannotations method)": [[8, "cosense3d.dataset.pipeline.loading.LoadAnnotations.get_lidar2img_transform"]], "load_single() (cosense3d.dataset.pipeline.loading.loadcarlaroadlinemaps method)": [[8, "cosense3d.dataset.pipeline.loading.LoadCarlaRoadlineMaps.load_single"]], "load_single() (cosense3d.dataset.pipeline.loading.loadopv2vbevmaps method)": [[8, "cosense3d.dataset.pipeline.loading.LoadOPV2VBevMaps.load_single"]], "read_pcd() (cosense3d.dataset.pipeline.loading.loadlidarpoints method)": [[8, "cosense3d.dataset.pipeline.loading.LoadLidarPoints.read_pcd"]], "cosensedataconverter (class in cosense3d.dataset.toolkit.cosense)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter"]], "obj_id2name (cosense3d.dataset.toolkit.cosense.cosensedataconverter attribute)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.OBJ_ID2NAME"]], "obj_list (cosense3d.dataset.toolkit.cosense.cosensedataconverter attribute)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.OBJ_LIST"]], "obj_name2id (cosense3d.dataset.toolkit.cosense.cosensedataconverter attribute)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.OBJ_NAME2ID"]], "add_cam_to_fdict() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.add_cam_to_fdict"]], "boxes_3d_to_2d() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.boxes_3d_to_2d"]], "cal_vbbx_mean_dim() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.cal_vbbx_mean_dim"]], "calib_to_tf_matrix() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.calib_to_tf_matrix"]], "callback_registrations() (in module cosense3d.dataset.toolkit)": [[9, "cosense3d.dataset.toolkit.callback_registrations"]], "click_register() (in module cosense3d.dataset.toolkit)": [[9, "cosense3d.dataset.toolkit.click_register"]], "convert_bev_semantic_map_to_road_height_map() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.convert_bev_semantic_map_to_road_height_map"]], "convert_v2x_c() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.convert_v2x_c"]], "convert_v2x_seq() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.convert_v2x_seq"]], "corner_to_center() (in module cosense3d.dataset.toolkit.opv2v)": [[9, 
"cosense3d.dataset.toolkit.opv2v.corner_to_center"]], "cosense3d.dataset.toolkit": [[9, "module-cosense3d.dataset.toolkit"]], "cosense3d.dataset.toolkit.cosense": [[9, "module-cosense3d.dataset.toolkit.cosense"]], "cosense3d.dataset.toolkit.dairv2x": [[9, "module-cosense3d.dataset.toolkit.dairv2x"]], "cosense3d.dataset.toolkit.opv2v": [[9, "module-cosense3d.dataset.toolkit.opv2v"]], "cosense3d.dataset.toolkit.opv2v_t": [[9, "module-cosense3d.dataset.toolkit.opv2v_t"]], "create_bbx() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.create_bbx"]], "draw_sample_distributions() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.draw_sample_distributions"]], "fdict_template() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.fdict_template"]], "gen_time_offsets() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.gen_time_offsets"]], "generate_bevmaps() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.generate_bevmaps"]], "generate_roadline() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.generate_roadline"]], "generate_roadline_reference_points() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.generate_roadline_reference_points"]], "get_box_velo() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.get_box_velo"]], "get_local_boxes3d() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.get_local_boxes3d"]], "get_velos() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.get_velos"]], "global_boxes_to_local() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.global_boxes_to_local"]], "load_frame_data() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.load_frame_data"]], "load_info_to_dict() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.load_info_to_dict"]], "load_label() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.load_label"]], "load_meta() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.load_meta"]], "load_vehicles_gframe() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.load_vehicles_gframe"]], "obj_from_sustech() (cosense3d.dataset.toolkit.cosense.cosensedataconverter method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.obj_from_sustech"]], "obj_to_opv2v() (cosense3d.dataset.toolkit.cosense.cosensedataconverter method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.obj_to_opv2v"]], "obj_to_sustech() (cosense3d.dataset.toolkit.cosense.cosensedataconverter method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.obj_to_sustech"]], "optimize_poses() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.optimize_poses"]], "optimize_trajectory() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.optimize_trajectory"]], "opv2v_pose_to_cosense() (in module cosense3d.dataset.toolkit.opv2v)": [[9, 
"cosense3d.dataset.toolkit.opv2v.opv2v_pose_to_cosense"]], "opv2v_to_cosense() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.opv2v_to_cosense"]], "opv2vt_to_cosense() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.opv2vt_to_cosense"]], "pad_box_result() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.pad_box_result"]], "parse_global_bbox_velo() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.parse_global_bbox_velo"]], "parse_global_bboxes() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.parse_global_bboxes"]], "parse_speed_from_yamls() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.parse_speed_from_yamls"]], "parse_static_pcd() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.parse_static_pcd"]], "parse_sub_frame() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.parse_sub_frame"]], "parse_timestamped_boxes() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.parse_timestamped_boxes"]], "pose_to_transformation() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.pose_to_transformation"]], "project_points() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.project_points"]], "project_world_objects() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.project_world_objects"]], "read_frame_plys_boxes() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.read_frame_plys_boxes"]], "read_ply() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.read_ply"]], "read_ply_to_dict() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.read_ply_to_dict"]], "read_sub_frame() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.read_sub_frame"]], "register_pcds() (in module cosense3d.dataset.toolkit)": [[9, "cosense3d.dataset.toolkit.register_pcds"]], "register_pcds_to_blocks() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.register_pcds_to_blocks"]], "register_sequence() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.register_sequence"]], "register_step_one() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.register_step_one"]], "register_step_two() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.register_step_two"]], "remove_ego_boxes() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.remove_ego_boxes"]], "remove_lidar_info() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.remove_lidar_info"]], "select_sub_scenes() (in module cosense3d.dataset.toolkit.dairv2x)": [[9, "cosense3d.dataset.toolkit.dairv2x.select_sub_scenes"]], "supervison_full_to_sparse() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.supervison_full_to_sparse"]], "to_kitti() (cosense3d.dataset.toolkit.cosense.cosensedataconverter method)": 
[[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.to_kitti"]], "to_opv2v() (cosense3d.dataset.toolkit.cosense.cosensedataconverter method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.to_opv2v"]], "to_sustech() (cosense3d.dataset.toolkit.cosense.cosensedataconverter method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.to_sustech"]], "transform_boxes_global_to_ref() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.transform_boxes_global_to_ref"]], "update_2d_bboxes() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.update_2d_bboxes"]], "update_agent() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.update_agent"]], "update_agent_gt_boxes() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.update_agent_gt_boxes"]], "update_agent_lidar() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.update_agent_lidar"]], "update_bev_map() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.update_bev_map"]], "update_cam_params() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.update_cam_params"]], "update_frame_bbx() (cosense3d.dataset.toolkit.cosense.cosensedataconverter static method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.update_frame_bbx"]], "update_from_sustech() (cosense3d.dataset.toolkit.cosense.cosensedataconverter method)": [[9, "cosense3d.dataset.toolkit.cosense.CoSenseDataConverter.update_from_sustech"]], "update_global_bboxes_num_pts() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.update_global_bboxes_num_pts"]], "update_global_boxes() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.update_global_boxes"]], "update_local_boxes3d() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.update_local_boxes3d"]], "update_velo() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.update_velo"]], "vis_cosense_scenario() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.vis_cosense_scenario"]], "vis_frame_data() (in module cosense3d.dataset.toolkit.opv2v_t)": [[9, "cosense3d.dataset.toolkit.opv2v_t.vis_frame_data"]], "x1_to_x2() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.x1_to_x2"]], "x_to_world() (in module cosense3d.dataset.toolkit.opv2v)": [[9, "cosense3d.dataset.toolkit.opv2v.x_to_world"]], "basemodule (class in cosense3d.modules)": [[10, "cosense3d.modules.BaseModule"]], "build_module() (in module cosense3d.modules)": [[10, "cosense3d.modules.build_module"]], "cat_data_from_list() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.cat_data_from_list"]], "cat_dict_list() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.cat_dict_list"]], "cat_list() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.cat_list"]], "compose_imgs() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.compose_imgs"]], "compose_result_list() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.compose_result_list"]], 
"compose_stensor() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.compose_stensor"]], "cosense3d.modules": [[10, "module-cosense3d.modules"]], "decompose_stensor() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.decompose_stensor"]], "format_input() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.format_input"]], "format_output() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.format_output"]], "forward() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.forward"]], "freeze_parameters() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.freeze_parameters"]], "loss() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.loss"]], "prepare_vis_data() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.prepare_vis_data"]], "stack_data_from_list() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.stack_data_from_list"]], "stack_dict_list() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.stack_dict_list"]], "to_gpu() (cosense3d.modules.basemodule method)": [[10, "cosense3d.modules.BaseModule.to_gpu"]], "training (cosense3d.modules.basemodule attribute)": [[10, "cosense3d.modules.BaseModule.training"]], "resnetencoder (class in cosense3d.modules.backbone2d.resnet_encoder)": [[11, "cosense3d.modules.backbone2d.resnet_encoder.ResnetEncoder"]], "cosense3d.modules.backbone2d": [[11, "module-cosense3d.modules.backbone2d"]], "cosense3d.modules.backbone2d.resnet_encoder": [[11, "module-cosense3d.modules.backbone2d.resnet_encoder"]], "format_output() (cosense3d.modules.backbone2d.resnet_encoder.resnetencoder method)": [[11, "cosense3d.modules.backbone2d.resnet_encoder.ResnetEncoder.format_output"]], "forward() (cosense3d.modules.backbone2d.resnet_encoder.resnetencoder method)": [[11, "cosense3d.modules.backbone2d.resnet_encoder.ResnetEncoder.forward"]], "training (cosense3d.modules.backbone2d.resnet_encoder.resnetencoder attribute)": [[11, "cosense3d.modules.backbone2d.resnet_encoder.ResnetEncoder.training"]], "minkunet (class in cosense3d.modules.backbone3d.mink_unet)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet"]], "pillarbev (class in cosense3d.modules.backbone3d.pillar_bev)": [[12, "cosense3d.modules.backbone3d.pillar_bev.PillarBEV"]], "qmode (cosense3d.modules.backbone3d.mink_unet.minkunet attribute)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.QMODE"]], "spconv (class in cosense3d.modules.backbone3d.spconv)": [[12, "cosense3d.modules.backbone3d.spconv.Spconv"]], "voxelnet (class in cosense3d.modules.backbone3d.voxelnet)": [[12, "cosense3d.modules.backbone3d.voxelnet.VoxelNet"]], "cosense3d.modules.backbone3d": [[12, "module-cosense3d.modules.backbone3d"]], "cosense3d.modules.backbone3d.mink_unet": [[12, "module-cosense3d.modules.backbone3d.mink_unet"]], "cosense3d.modules.backbone3d.pillar_bev": [[12, "module-cosense3d.modules.backbone3d.pillar_bev"]], "cosense3d.modules.backbone3d.spconv": [[12, "module-cosense3d.modules.backbone3d.spconv"]], "cosense3d.modules.backbone3d.voxelnet": [[12, "module-cosense3d.modules.backbone3d.voxelnet"]], "format_output() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.format_output"]], "format_output() (cosense3d.modules.backbone3d.pillar_bev.pillarbev method)": [[12, 
"cosense3d.modules.backbone3d.pillar_bev.PillarBEV.format_output"]], "format_output() (cosense3d.modules.backbone3d.spconv.spconv method)": [[12, "cosense3d.modules.backbone3d.spconv.Spconv.format_output"]], "forward() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.forward"]], "forward() (cosense3d.modules.backbone3d.pillar_bev.pillarbev method)": [[12, "cosense3d.modules.backbone3d.pillar_bev.PillarBEV.forward"]], "forward() (cosense3d.modules.backbone3d.spconv.spconv method)": [[12, "cosense3d.modules.backbone3d.spconv.Spconv.forward"]], "forward() (cosense3d.modules.backbone3d.voxelnet.voxelnet method)": [[12, "cosense3d.modules.backbone3d.voxelnet.VoxelNet.forward"]], "forward_height_compression() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.forward_height_compression"]], "forward_unet() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.forward_unet"]], "grid_size() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.grid_size"]], "init_weights() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.init_weights"]], "post_act_block() (in module cosense3d.modules.backbone3d.spconv)": [[12, "cosense3d.modules.backbone3d.spconv.post_act_block"]], "stensor_to_dense() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.stensor_to_dense"]], "to_dense() (cosense3d.modules.backbone3d.spconv.spconv method)": [[12, "cosense3d.modules.backbone3d.spconv.Spconv.to_dense"]], "to_dense() (cosense3d.modules.backbone3d.voxelnet.voxelnet method)": [[12, "cosense3d.modules.backbone3d.voxelnet.VoxelNet.to_dense"]], "to_dense_bev() (cosense3d.modules.backbone3d.pillar_bev.pillarbev method)": [[12, "cosense3d.modules.backbone3d.pillar_bev.PillarBEV.to_dense_bev"]], "to_gpu() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.to_gpu"]], "training (cosense3d.modules.backbone3d.mink_unet.minkunet attribute)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.training"]], "training (cosense3d.modules.backbone3d.pillar_bev.pillarbev attribute)": [[12, "cosense3d.modules.backbone3d.pillar_bev.PillarBEV.training"]], "training (cosense3d.modules.backbone3d.spconv.spconv attribute)": [[12, "cosense3d.modules.backbone3d.spconv.Spconv.training"]], "training (cosense3d.modules.backbone3d.voxelnet.voxelnet attribute)": [[12, "cosense3d.modules.backbone3d.voxelnet.VoxelNet.training"]], "valid_coords() (cosense3d.modules.backbone3d.mink_unet.minkunet method)": [[12, "cosense3d.modules.backbone3d.mink_unet.MinkUnet.valid_coords"]], "attention (class in cosense3d.modules.fusion.fax)": [[13, "cosense3d.modules.fusion.fax.Attention"]], "bevmaxoutfusion (class in cosense3d.modules.fusion.maxout_fusion)": [[13, "cosense3d.modules.fusion.maxout_fusion.BEVMaxoutFusion"]], "boxfusion (class in cosense3d.modules.fusion.box_fusion)": [[13, "cosense3d.modules.fusion.box_fusion.BoxFusion"]], "denseattentionfusion (class in cosense3d.modules.fusion.attn_fusion)": [[13, "cosense3d.modules.fusion.attn_fusion.DenseAttentionFusion"]], "feedforward (class in cosense3d.modules.fusion.fax)": [[13, "cosense3d.modules.fusion.fax.FeedForward"]], "keypointsfusion (class in 
cosense3d.modules.fusion.keypoints)": [[13, "cosense3d.modules.fusion.keypoints.KeypointsFusion"]], "localnaivefusion (class in cosense3d.modules.fusion.temporal_fusion)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalNaiveFusion"]], "localtemporalfusion (class in cosense3d.modules.fusion.temporal_fusion)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion"]], "localtemporalfusionv1 (class in cosense3d.modules.fusion.temporal_fusion)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV1"]], "localtemporalfusionv2 (class in cosense3d.modules.fusion.temporal_fusion)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV2"]], "localtemporalfusionv3 (class in cosense3d.modules.fusion.temporal_fusion)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3"]], "naivefusion (class in cosense3d.modules.fusion.naive_fusion)": [[13, "cosense3d.modules.fusion.naive_fusion.NaiveFusion"]], "prenormresidual (class in cosense3d.modules.fusion.fax)": [[13, "cosense3d.modules.fusion.fax.PreNormResidual"]], "sparseattentionfusion (class in cosense3d.modules.fusion.attn_fusion)": [[13, "cosense3d.modules.fusion.attn_fusion.SparseAttentionFusion"]], "sparsebevmaxoutfusion (class in cosense3d.modules.fusion.maxout_fusion)": [[13, "cosense3d.modules.fusion.maxout_fusion.SparseBEVMaxoutFusion"]], "spatialqueryalignfusionrl (class in cosense3d.modules.fusion.spatial_query_fusion)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryAlignFusionRL"]], "spatialqueryfusion (class in cosense3d.modules.fusion.spatial_query_fusion)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryFusion"]], "swapfusionblock (class in cosense3d.modules.fusion.fax)": [[13, "cosense3d.modules.fusion.fax.SwapFusionBlock"]], "swapfusionblockmask (class in cosense3d.modules.fusion.fax)": [[13, "cosense3d.modules.fusion.fax.SwapFusionBlockMask"]], "swapfusionencoder (class in cosense3d.modules.fusion.fax)": [[13, "cosense3d.modules.fusion.fax.SwapFusionEncoder"]], "temporalfusion (class in cosense3d.modules.fusion.temporal_fusion)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalFusion"]], "temporallidarfusion (class in cosense3d.modules.fusion.temporal_fusion)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion"]], "align_coordinates() (cosense3d.modules.fusion.spatial_query_fusion.spatialqueryalignfusionrl method)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryAlignFusionRL.align_coordinates"]], "cluster_fusion() (cosense3d.modules.fusion.box_fusion.boxfusion method)": [[13, "cosense3d.modules.fusion.box_fusion.BoxFusion.cluster_fusion"]], "cluster_fusion() (cosense3d.modules.fusion.keypoints.keypointsfusion method)": [[13, "cosense3d.modules.fusion.keypoints.KeypointsFusion.cluster_fusion"]], "clustering() (cosense3d.modules.fusion.box_fusion.boxfusion method)": [[13, "cosense3d.modules.fusion.box_fusion.BoxFusion.clustering"]], "clustering() (cosense3d.modules.fusion.keypoints.keypointsfusion method)": [[13, "cosense3d.modules.fusion.keypoints.KeypointsFusion.clustering"]], "cosense3d.modules.fusion": [[13, "module-cosense3d.modules.fusion"]], "cosense3d.modules.fusion.attn_fusion": [[13, "module-cosense3d.modules.fusion.attn_fusion"]], "cosense3d.modules.fusion.box_fusion": [[13, "module-cosense3d.modules.fusion.box_fusion"]], "cosense3d.modules.fusion.fax": [[13, "module-cosense3d.modules.fusion.fax"]], "cosense3d.modules.fusion.keypoints": [[13, 
"module-cosense3d.modules.fusion.keypoints"]], "cosense3d.modules.fusion.maxout_fusion": [[13, "module-cosense3d.modules.fusion.maxout_fusion"]], "cosense3d.modules.fusion.naive_fusion": [[13, "module-cosense3d.modules.fusion.naive_fusion"]], "cosense3d.modules.fusion.spatial_query_fusion": [[13, "module-cosense3d.modules.fusion.spatial_query_fusion"]], "cosense3d.modules.fusion.temporal_fusion": [[13, "module-cosense3d.modules.fusion.temporal_fusion"]], "embed_pos() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion.embed_pos"]], "embed_pos() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv3 method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3.embed_pos"]], "embed_pos() (cosense3d.modules.fusion.temporal_fusion.temporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalFusion.embed_pos"]], "embed_pos() (cosense3d.modules.fusion.temporal_fusion.temporallidarfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion.embed_pos"]], "format_output() (cosense3d.modules.fusion.attn_fusion.sparseattentionfusion method)": [[13, "cosense3d.modules.fusion.attn_fusion.SparseAttentionFusion.format_output"]], "format_output() (cosense3d.modules.fusion.maxout_fusion.sparsebevmaxoutfusion method)": [[13, "cosense3d.modules.fusion.maxout_fusion.SparseBEVMaxoutFusion.format_output"]], "format_output() (cosense3d.modules.fusion.naive_fusion.naivefusion method)": [[13, "cosense3d.modules.fusion.naive_fusion.NaiveFusion.format_output"]], "format_output() (cosense3d.modules.fusion.spatial_query_fusion.spatialqueryalignfusionrl method)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryAlignFusionRL.format_output"]], "format_output() (cosense3d.modules.fusion.spatial_query_fusion.spatialqueryfusion method)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryFusion.format_output"]], "forward() (cosense3d.modules.fusion.attn_fusion.denseattentionfusion method)": [[13, "cosense3d.modules.fusion.attn_fusion.DenseAttentionFusion.forward"]], "forward() (cosense3d.modules.fusion.attn_fusion.sparseattentionfusion method)": [[13, "cosense3d.modules.fusion.attn_fusion.SparseAttentionFusion.forward"]], "forward() (cosense3d.modules.fusion.box_fusion.boxfusion method)": [[13, "cosense3d.modules.fusion.box_fusion.BoxFusion.forward"]], "forward() (cosense3d.modules.fusion.fax.attention method)": [[13, "cosense3d.modules.fusion.fax.Attention.forward"]], "forward() (cosense3d.modules.fusion.fax.feedforward method)": [[13, "cosense3d.modules.fusion.fax.FeedForward.forward"]], "forward() (cosense3d.modules.fusion.fax.prenormresidual method)": [[13, "cosense3d.modules.fusion.fax.PreNormResidual.forward"]], "forward() (cosense3d.modules.fusion.fax.swapfusionblock method)": [[13, "cosense3d.modules.fusion.fax.SwapFusionBlock.forward"]], "forward() (cosense3d.modules.fusion.fax.swapfusionblockmask method)": [[13, "cosense3d.modules.fusion.fax.SwapFusionBlockMask.forward"]], "forward() (cosense3d.modules.fusion.fax.swapfusionencoder method)": [[13, "cosense3d.modules.fusion.fax.SwapFusionEncoder.forward"]], "forward() (cosense3d.modules.fusion.keypoints.keypointsfusion method)": [[13, "cosense3d.modules.fusion.keypoints.KeypointsFusion.forward"]], "forward() (cosense3d.modules.fusion.maxout_fusion.bevmaxoutfusion method)": [[13, "cosense3d.modules.fusion.maxout_fusion.BEVMaxoutFusion.forward"]], "forward() 
(cosense3d.modules.fusion.maxout_fusion.sparsebevmaxoutfusion method)": [[13, "cosense3d.modules.fusion.maxout_fusion.SparseBEVMaxoutFusion.forward"]], "forward() (cosense3d.modules.fusion.naive_fusion.naivefusion method)": [[13, "cosense3d.modules.fusion.naive_fusion.NaiveFusion.forward"]], "forward() (cosense3d.modules.fusion.spatial_query_fusion.spatialqueryalignfusionrl method)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryAlignFusionRL.forward"]], "forward() (cosense3d.modules.fusion.spatial_query_fusion.spatialqueryfusion method)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryFusion.forward"]], "forward() (cosense3d.modules.fusion.temporal_fusion.localnaivefusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalNaiveFusion.forward"]], "forward() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion.forward"]], "forward() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv1 method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV1.forward"]], "forward() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv2 method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV2.forward"]], "forward() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv3 method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3.forward"]], "forward() (cosense3d.modules.fusion.temporal_fusion.temporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalFusion.forward"]], "forward() (cosense3d.modules.fusion.temporal_fusion.temporallidarfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion.forward"]], "fuse_feature_at_stride() (cosense3d.modules.fusion.attn_fusion.sparseattentionfusion method)": [[13, "cosense3d.modules.fusion.attn_fusion.SparseAttentionFusion.fuse_feature_at_stride"]], "fuse_feature_at_stride() (cosense3d.modules.fusion.naive_fusion.naivefusion method)": [[13, "cosense3d.modules.fusion.naive_fusion.NaiveFusion.fuse_feature_at_stride"]], "gather_topk() (cosense3d.modules.fusion.temporal_fusion.localnaivefusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalNaiveFusion.gather_topk"]], "gather_topk() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion.gather_topk"]], "gather_topk() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv3 method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3.gather_topk"]], "gather_topk() (cosense3d.modules.fusion.temporal_fusion.temporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalFusion.gather_topk"]], "gather_topk() (cosense3d.modules.fusion.temporal_fusion.temporallidarfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion.gather_topk"]], "init_weights() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion.init_weights"]], "init_weights() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv3 method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3.init_weights"]], "init_weights() (cosense3d.modules.fusion.temporal_fusion.temporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalFusion.init_weights"]], "init_weights() 
(cosense3d.modules.fusion.temporal_fusion.temporallidarfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion.init_weights"]], "limit_period() (in module cosense3d.modules.fusion.box_fusion)": [[13, "cosense3d.modules.fusion.box_fusion.limit_period"]], "limit_period() (in module cosense3d.modules.fusion.keypoints)": [[13, "cosense3d.modules.fusion.keypoints.limit_period"]], "merge_sync_boxes() (cosense3d.modules.fusion.box_fusion.boxfusion method)": [[13, "cosense3d.modules.fusion.box_fusion.BoxFusion.merge_sync_boxes"]], "temporal_alignment() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion.temporal_alignment"]], "temporal_alignment() (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv3 method)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3.temporal_alignment"]], "temporal_alignment() (cosense3d.modules.fusion.temporal_fusion.temporalfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalFusion.temporal_alignment"]], "temporal_alignment() (cosense3d.modules.fusion.temporal_fusion.temporallidarfusion method)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion.temporal_alignment"]], "temporal_cluster_fusion() (cosense3d.modules.fusion.box_fusion.boxfusion method)": [[13, "cosense3d.modules.fusion.box_fusion.BoxFusion.temporal_cluster_fusion"]], "training (cosense3d.modules.fusion.attn_fusion.denseattentionfusion attribute)": [[13, "cosense3d.modules.fusion.attn_fusion.DenseAttentionFusion.training"]], "training (cosense3d.modules.fusion.attn_fusion.sparseattentionfusion attribute)": [[13, "cosense3d.modules.fusion.attn_fusion.SparseAttentionFusion.training"]], "training (cosense3d.modules.fusion.box_fusion.boxfusion attribute)": [[13, "cosense3d.modules.fusion.box_fusion.BoxFusion.training"]], "training (cosense3d.modules.fusion.fax.attention attribute)": [[13, "cosense3d.modules.fusion.fax.Attention.training"]], "training (cosense3d.modules.fusion.fax.feedforward attribute)": [[13, "cosense3d.modules.fusion.fax.FeedForward.training"]], "training (cosense3d.modules.fusion.fax.prenormresidual attribute)": [[13, "cosense3d.modules.fusion.fax.PreNormResidual.training"]], "training (cosense3d.modules.fusion.fax.swapfusionblock attribute)": [[13, "cosense3d.modules.fusion.fax.SwapFusionBlock.training"]], "training (cosense3d.modules.fusion.fax.swapfusionblockmask attribute)": [[13, "cosense3d.modules.fusion.fax.SwapFusionBlockMask.training"]], "training (cosense3d.modules.fusion.fax.swapfusionencoder attribute)": [[13, "cosense3d.modules.fusion.fax.SwapFusionEncoder.training"]], "training (cosense3d.modules.fusion.keypoints.keypointsfusion attribute)": [[13, "cosense3d.modules.fusion.keypoints.KeypointsFusion.training"]], "training (cosense3d.modules.fusion.maxout_fusion.bevmaxoutfusion attribute)": [[13, "cosense3d.modules.fusion.maxout_fusion.BEVMaxoutFusion.training"]], "training (cosense3d.modules.fusion.maxout_fusion.sparsebevmaxoutfusion attribute)": [[13, "cosense3d.modules.fusion.maxout_fusion.SparseBEVMaxoutFusion.training"]], "training (cosense3d.modules.fusion.naive_fusion.naivefusion attribute)": [[13, "cosense3d.modules.fusion.naive_fusion.NaiveFusion.training"]], "training (cosense3d.modules.fusion.spatial_query_fusion.spatialqueryalignfusionrl attribute)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryAlignFusionRL.training"]], "training 
(cosense3d.modules.fusion.spatial_query_fusion.spatialqueryfusion attribute)": [[13, "cosense3d.modules.fusion.spatial_query_fusion.SpatialQueryFusion.training"]], "training (cosense3d.modules.fusion.temporal_fusion.localnaivefusion attribute)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalNaiveFusion.training"]], "training (cosense3d.modules.fusion.temporal_fusion.localtemporalfusion attribute)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusion.training"]], "training (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv1 attribute)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV1.training"]], "training (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv2 attribute)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV2.training"]], "training (cosense3d.modules.fusion.temporal_fusion.localtemporalfusionv3 attribute)": [[13, "cosense3d.modules.fusion.temporal_fusion.LocalTemporalFusionV3.training"]], "training (cosense3d.modules.fusion.temporal_fusion.temporalfusion attribute)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalFusion.training"]], "training (cosense3d.modules.fusion.temporal_fusion.temporallidarfusion attribute)": [[13, "cosense3d.modules.fusion.temporal_fusion.TemporalLidarFusion.training"]], "bev (class in cosense3d.modules.heads.bev)": [[14, "cosense3d.modules.heads.bev.BEV"]], "bevmultiresolution (class in cosense3d.modules.heads.bev)": [[14, "cosense3d.modules.heads.bev.BEVMultiResolution"]], "bevroidensehead (class in cosense3d.modules.heads.bev_dense)": [[14, "cosense3d.modules.heads.bev_dense.BevRoIDenseHead"]], "bevseghead (class in cosense3d.modules.heads.bev_dense)": [[14, "cosense3d.modules.heads.bev_dense.BevSegHead"]], "contiattnbev (class in cosense3d.modules.heads.bev)": [[14, "cosense3d.modules.heads.bev.ContiAttnBEV"]], "contigevbev (class in cosense3d.modules.heads.bev)": [[14, "cosense3d.modules.heads.bev.ContiGevBEV"]], "continuousbev (class in cosense3d.modules.heads.bev)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV"]], "detanchordense (class in cosense3d.modules.heads.det_anchor_dense)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense"]], "detanchorsparse (class in cosense3d.modules.heads.det_anchor_sparse)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse"]], "detcentersparse (class in cosense3d.modules.heads.det_center_sparse)": [[14, "cosense3d.modules.heads.det_center_sparse.DetCenterSparse"]], "imgfocal (class in cosense3d.modules.heads.img_focal)": [[14, "cosense3d.modules.heads.img_focal.ImgFocal"]], "keypointroihead (class in cosense3d.modules.heads.det_roi_refine)": [[14, "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead"]], "lidarpetrhead (class in cosense3d.modules.heads.lidar_petr_head)": [[14, "cosense3d.modules.heads.lidar_petr_head.LidarPETRHead"]], "multilvldetcentersparse (class in cosense3d.modules.heads.det_center_sparse)": [[14, "cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse"]], "multitaskhead (class in cosense3d.modules.heads.multitask_head)": [[14, "cosense3d.modules.heads.multitask_head.MultiTaskHead"]], "nbrattentionbev (class in cosense3d.modules.heads.nbr_attn_bev)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV"]], "petrhead (class in cosense3d.modules.heads.petr_head)": [[14, "cosense3d.modules.heads.petr_head.PETRHead"]], "queryguidedpetrhead (class in cosense3d.modules.heads.query_guided_petr_head)": [[14, 
"cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead"]], "separatedclshead (class in cosense3d.modules.heads.det_center_sparse)": [[14, "cosense3d.modules.heads.det_center_sparse.SeparatedClsHead"]], "unitedclshead (class in cosense3d.modules.heads.det_center_sparse)": [[14, "cosense3d.modules.heads.det_center_sparse.UnitedClsHead"]], "unitedreghead (class in cosense3d.modules.heads.det_center_sparse)": [[14, "cosense3d.modules.heads.det_center_sparse.UnitedRegHead"]], "add_sin_difference() (cosense3d.modules.heads.det_anchor_dense.detanchordense static method)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense.add_sin_difference"]], "add_sin_difference() (cosense3d.modules.heads.det_anchor_sparse.detanchorsparse static method)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse.add_sin_difference"]], "apply_center_offset() (cosense3d.modules.heads.img_focal.imgfocal static method)": [[14, "cosense3d.modules.heads.img_focal.ImgFocal.apply_center_offset"]], "apply_ltrb() (cosense3d.modules.heads.img_focal.imgfocal static method)": [[14, "cosense3d.modules.heads.img_focal.ImgFocal.apply_ltrb"]], "cosense3d.modules.heads": [[14, "module-cosense3d.modules.heads"]], "cosense3d.modules.heads.bev": [[14, "module-cosense3d.modules.heads.bev"]], "cosense3d.modules.heads.bev_dense": [[14, "module-cosense3d.modules.heads.bev_dense"]], "cosense3d.modules.heads.det_anchor_dense": [[14, "module-cosense3d.modules.heads.det_anchor_dense"]], "cosense3d.modules.heads.det_anchor_sparse": [[14, "module-cosense3d.modules.heads.det_anchor_sparse"]], "cosense3d.modules.heads.det_center_sparse": [[14, "module-cosense3d.modules.heads.det_center_sparse"]], "cosense3d.modules.heads.det_roi_refine": [[14, "module-cosense3d.modules.heads.det_roi_refine"]], "cosense3d.modules.heads.img_focal": [[14, "module-cosense3d.modules.heads.img_focal"]], "cosense3d.modules.heads.lidar_petr_head": [[14, "module-cosense3d.modules.heads.lidar_petr_head"]], "cosense3d.modules.heads.multitask_head": [[14, "module-cosense3d.modules.heads.multitask_head"]], "cosense3d.modules.heads.nbr_attn_bev": [[14, "module-cosense3d.modules.heads.nbr_attn_bev"]], "cosense3d.modules.heads.petr_head": [[14, "module-cosense3d.modules.heads.petr_head"]], "cosense3d.modules.heads.query_guided_petr_head": [[14, "module-cosense3d.modules.heads.query_guided_petr_head"]], "down_sample() (cosense3d.modules.heads.bev.bev method)": [[14, "cosense3d.modules.heads.bev.BEV.down_sample"]], "down_sample() (cosense3d.modules.heads.bev.continuousbev method)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.down_sample"]], "downsample_tgt_pts() (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev method)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.downsample_tgt_pts"]], "format() (cosense3d.modules.heads.det_anchor_sparse.detanchorsparse method)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse.format"]], "format_input() (cosense3d.modules.heads.bev.bev method)": [[14, "cosense3d.modules.heads.bev.BEV.format_input"]], "format_input() (cosense3d.modules.heads.bev.continuousbev method)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.format_input"]], "format_input() (cosense3d.modules.heads.det_center_sparse.detcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.DetCenterSparse.format_input"]], "format_input() (cosense3d.modules.heads.det_center_sparse.multilvldetcentersparse method)": [[14, 
"cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse.format_input"]], "format_input() (cosense3d.modules.heads.lidar_petr_head.lidarpetrhead method)": [[14, "cosense3d.modules.heads.lidar_petr_head.LidarPETRHead.format_input"]], "format_input() (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev method)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.format_input"]], "format_output() (cosense3d.modules.heads.bev.bev method)": [[14, "cosense3d.modules.heads.bev.BEV.format_output"]], "format_output() (cosense3d.modules.heads.bev.continuousbev method)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.format_output"]], "format_output() (cosense3d.modules.heads.det_anchor_dense.detanchordense method)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense.format_output"]], "format_output() (cosense3d.modules.heads.det_center_sparse.detcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.DetCenterSparse.format_output"]], "format_output() (cosense3d.modules.heads.det_center_sparse.multilvldetcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse.format_output"]], "format_output() (cosense3d.modules.heads.img_focal.imgfocal method)": [[14, "cosense3d.modules.heads.img_focal.ImgFocal.format_output"]], "format_output() (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev method)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.format_output"]], "forward() (cosense3d.modules.heads.bev.bev method)": [[14, "cosense3d.modules.heads.bev.BEV.forward"]], "forward() (cosense3d.modules.heads.bev.bevmultiresolution method)": [[14, "cosense3d.modules.heads.bev.BEVMultiResolution.forward"]], "forward() (cosense3d.modules.heads.bev.continuousbev method)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.forward"]], "forward() (cosense3d.modules.heads.bev_dense.bevroidensehead method)": [[14, "cosense3d.modules.heads.bev_dense.BevRoIDenseHead.forward"]], "forward() (cosense3d.modules.heads.bev_dense.bevseghead method)": [[14, "cosense3d.modules.heads.bev_dense.BevSegHead.forward"]], "forward() (cosense3d.modules.heads.det_anchor_dense.detanchordense method)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense.forward"]], "forward() (cosense3d.modules.heads.det_anchor_sparse.detanchorsparse method)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse.forward"]], "forward() (cosense3d.modules.heads.det_center_sparse.detcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.DetCenterSparse.forward"]], "forward() (cosense3d.modules.heads.det_center_sparse.multilvldetcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse.forward"]], "forward() (cosense3d.modules.heads.det_center_sparse.separatedclshead method)": [[14, "cosense3d.modules.heads.det_center_sparse.SeparatedClsHead.forward"]], "forward() (cosense3d.modules.heads.det_center_sparse.unitedclshead method)": [[14, "cosense3d.modules.heads.det_center_sparse.UnitedClsHead.forward"]], "forward() (cosense3d.modules.heads.det_center_sparse.unitedreghead method)": [[14, "cosense3d.modules.heads.det_center_sparse.UnitedRegHead.forward"]], "forward() (cosense3d.modules.heads.det_roi_refine.keypointroihead method)": [[14, "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead.forward"]], "forward() (cosense3d.modules.heads.img_focal.imgfocal method)": [[14, "cosense3d.modules.heads.img_focal.ImgFocal.forward"]], "forward() 
(cosense3d.modules.heads.lidar_petr_head.lidarpetrhead method)": [[14, "cosense3d.modules.heads.lidar_petr_head.LidarPETRHead.forward"]], "forward() (cosense3d.modules.heads.multitask_head.multitaskhead method)": [[14, "cosense3d.modules.heads.multitask_head.MultiTaskHead.forward"]], "forward() (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev method)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.forward"]], "forward() (cosense3d.modules.heads.petr_head.petrhead method)": [[14, "cosense3d.modules.heads.petr_head.PETRHead.forward"]], "forward() (cosense3d.modules.heads.query_guided_petr_head.queryguidedpetrhead method)": [[14, "cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead.forward"]], "gather_topk() (cosense3d.modules.heads.lidar_petr_head.lidarpetrhead method)": [[14, "cosense3d.modules.heads.lidar_petr_head.LidarPETRHead.gather_topk"]], "generate_reference_points() (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev method)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.generate_reference_points"]], "get_dense_grid_points() (cosense3d.modules.heads.det_roi_refine.keypointroihead static method)": [[14, "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead.get_dense_grid_points"]], "get_evidence() (cosense3d.modules.heads.bev.contiattnbev method)": [[14, "cosense3d.modules.heads.bev.ContiAttnBEV.get_evidence"]], "get_evidence() (cosense3d.modules.heads.bev.contigevbev method)": [[14, "cosense3d.modules.heads.bev.ContiGevBEV.get_evidence"]], "get_evidence() (cosense3d.modules.heads.bev.continuousbev method)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.get_evidence"]], "get_global_grid_points_of_roi() (cosense3d.modules.heads.det_roi_refine.keypointroihead method)": [[14, "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead.get_global_grid_points_of_roi"]], "get_pred_boxes() (cosense3d.modules.heads.query_guided_petr_head.queryguidedpetrhead method)": [[14, "cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead.get_pred_boxes"]], "get_predictions() (cosense3d.modules.heads.query_guided_petr_head.queryguidedpetrhead method)": [[14, "cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead.get_predictions"]], "get_tgt() (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev method)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.get_tgt"]], "init_weights() (cosense3d.modules.heads.det_anchor_dense.detanchordense method)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense.init_weights"]], "init_weights() (cosense3d.modules.heads.det_anchor_sparse.detanchorsparse method)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse.init_weights"]], "init_weights() (cosense3d.modules.heads.lidar_petr_head.lidarpetrhead method)": [[14, "cosense3d.modules.heads.lidar_petr_head.LidarPETRHead.init_weights"]], "init_weights() (cosense3d.modules.heads.petr_head.petrhead method)": [[14, "cosense3d.modules.heads.petr_head.PETRHead.init_weights"]], "init_weights() (cosense3d.modules.heads.query_guided_petr_head.queryguidedpetrhead method)": [[14, "cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead.init_weights"]], "loss() (cosense3d.modules.heads.bev.bev method)": [[14, "cosense3d.modules.heads.bev.BEV.loss"]], "loss() (cosense3d.modules.heads.bev.bevmultiresolution method)": [[14, "cosense3d.modules.heads.bev.BEVMultiResolution.loss"]], "loss() (cosense3d.modules.heads.bev.continuousbev method)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.loss"]], "loss() 
(cosense3d.modules.heads.bev_dense.bevroidensehead method)": [[14, "cosense3d.modules.heads.bev_dense.BevRoIDenseHead.loss"]], "loss() (cosense3d.modules.heads.bev_dense.bevseghead method)": [[14, "cosense3d.modules.heads.bev_dense.BevSegHead.loss"]], "loss() (cosense3d.modules.heads.det_anchor_dense.detanchordense method)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense.loss"]], "loss() (cosense3d.modules.heads.det_anchor_sparse.detanchorsparse method)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse.loss"]], "loss() (cosense3d.modules.heads.det_center_sparse.detcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.DetCenterSparse.loss"]], "loss() (cosense3d.modules.heads.det_center_sparse.multilvldetcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse.loss"]], "loss() (cosense3d.modules.heads.det_roi_refine.keypointroihead method)": [[14, "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead.loss"]], "loss() (cosense3d.modules.heads.img_focal.imgfocal method)": [[14, "cosense3d.modules.heads.img_focal.ImgFocal.loss"]], "loss() (cosense3d.modules.heads.multitask_head.multitaskhead method)": [[14, "cosense3d.modules.heads.multitask_head.MultiTaskHead.loss"]], "loss() (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev method)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.loss"]], "loss() (cosense3d.modules.heads.petr_head.petrhead method)": [[14, "cosense3d.modules.heads.petr_head.PETRHead.loss"]], "loss() (cosense3d.modules.heads.query_guided_petr_head.queryguidedpetrhead method)": [[14, "cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead.loss"]], "predictions() (cosense3d.modules.heads.det_anchor_dense.detanchordense method)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense.predictions"]], "predictions() (cosense3d.modules.heads.det_anchor_sparse.detanchorsparse method)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse.predictions"]], "predictions() (cosense3d.modules.heads.det_center_sparse.detcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.DetCenterSparse.predictions"]], "predictions() (cosense3d.modules.heads.det_center_sparse.multilvldetcentersparse method)": [[14, "cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse.predictions"]], "roi_grid_pool() (cosense3d.modules.heads.det_roi_refine.keypointroihead method)": [[14, "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead.roi_grid_pool"]], "sample_reference_points() (cosense3d.modules.heads.bev.continuousbev method)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.sample_reference_points"]], "training (cosense3d.modules.heads.bev.bev attribute)": [[14, "cosense3d.modules.heads.bev.BEV.training"]], "training (cosense3d.modules.heads.bev.bevmultiresolution attribute)": [[14, "cosense3d.modules.heads.bev.BEVMultiResolution.training"]], "training (cosense3d.modules.heads.bev.contiattnbev attribute)": [[14, "cosense3d.modules.heads.bev.ContiAttnBEV.training"]], "training (cosense3d.modules.heads.bev.contigevbev attribute)": [[14, "cosense3d.modules.heads.bev.ContiGevBEV.training"]], "training (cosense3d.modules.heads.bev.continuousbev attribute)": [[14, "cosense3d.modules.heads.bev.ContinuousBEV.training"]], "training (cosense3d.modules.heads.bev_dense.bevroidensehead attribute)": [[14, "cosense3d.modules.heads.bev_dense.BevRoIDenseHead.training"]], "training (cosense3d.modules.heads.bev_dense.bevseghead attribute)": [[14, 
"cosense3d.modules.heads.bev_dense.BevSegHead.training"]], "training (cosense3d.modules.heads.det_anchor_dense.detanchordense attribute)": [[14, "cosense3d.modules.heads.det_anchor_dense.DetAnchorDense.training"]], "training (cosense3d.modules.heads.det_anchor_sparse.detanchorsparse attribute)": [[14, "cosense3d.modules.heads.det_anchor_sparse.DetAnchorSparse.training"]], "training (cosense3d.modules.heads.det_center_sparse.detcentersparse attribute)": [[14, "cosense3d.modules.heads.det_center_sparse.DetCenterSparse.training"]], "training (cosense3d.modules.heads.det_center_sparse.multilvldetcentersparse attribute)": [[14, "cosense3d.modules.heads.det_center_sparse.MultiLvlDetCenterSparse.training"]], "training (cosense3d.modules.heads.det_center_sparse.separatedclshead attribute)": [[14, "cosense3d.modules.heads.det_center_sparse.SeparatedClsHead.training"]], "training (cosense3d.modules.heads.det_center_sparse.unitedclshead attribute)": [[14, "cosense3d.modules.heads.det_center_sparse.UnitedClsHead.training"]], "training (cosense3d.modules.heads.det_center_sparse.unitedreghead attribute)": [[14, "cosense3d.modules.heads.det_center_sparse.UnitedRegHead.training"]], "training (cosense3d.modules.heads.det_roi_refine.keypointroihead attribute)": [[14, "cosense3d.modules.heads.det_roi_refine.KeypointRoIHead.training"]], "training (cosense3d.modules.heads.img_focal.imgfocal attribute)": [[14, "cosense3d.modules.heads.img_focal.ImgFocal.training"]], "training (cosense3d.modules.heads.lidar_petr_head.lidarpetrhead attribute)": [[14, "cosense3d.modules.heads.lidar_petr_head.LidarPETRHead.training"]], "training (cosense3d.modules.heads.multitask_head.multitaskhead attribute)": [[14, "cosense3d.modules.heads.multitask_head.MultiTaskHead.training"]], "training (cosense3d.modules.heads.nbr_attn_bev.nbrattentionbev attribute)": [[14, "cosense3d.modules.heads.nbr_attn_bev.NbrAttentionBEV.training"]], "training (cosense3d.modules.heads.petr_head.petrhead attribute)": [[14, "cosense3d.modules.heads.petr_head.PETRHead.training"]], "training (cosense3d.modules.heads.query_guided_petr_head.queryguidedpetrhead attribute)": [[14, "cosense3d.modules.heads.query_guided_petr_head.QueryGuidedPETRHead.training"]], "baseloss (class in cosense3d.modules.losses.base_loss)": [[15, "cosense3d.modules.losses.base_loss.BaseLoss"]], "edlloss (class in cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.EDLLoss"]], "focalloss (class in cosense3d.modules.losses.focal_loss)": [[15, "cosense3d.modules.losses.focal_loss.FocalLoss"]], "giouloss (class in cosense3d.modules.losses.iou_loss)": [[15, "cosense3d.modules.losses.iou_loss.GIoULoss"]], "gaussianfocalloss (class in cosense3d.modules.losses.focal_loss)": [[15, "cosense3d.modules.losses.focal_loss.GaussianFocalLoss"]], "iouloss (class in cosense3d.modules.losses.iou_loss)": [[15, "cosense3d.modules.losses.iou_loss.IoULoss"]], "l1loss (class in cosense3d.modules.losses.l1_loss)": [[15, "cosense3d.modules.losses.l1_loss.L1Loss"]], "qualityfocalloss (class in cosense3d.modules.losses.focal_loss)": [[15, "cosense3d.modules.losses.focal_loss.QualityFocalLoss"]], "smoothl1loss (class in cosense3d.modules.losses.l1_loss)": [[15, "cosense3d.modules.losses.l1_loss.SmoothL1Loss"]], "vanillasegloss (class in cosense3d.modules.losses.vanilla_seg_loss)": [[15, "cosense3d.modules.losses.vanilla_seg_loss.VanillaSegLoss"]], "build_loss() (in module cosense3d.modules.losses)": [[15, "cosense3d.modules.losses.build_loss"]], "cosense3d.modules.losses": [[15, 
"module-cosense3d.modules.losses"]], "cosense3d.modules.losses.base_loss": [[15, "module-cosense3d.modules.losses.base_loss"]], "cosense3d.modules.losses.common": [[15, "module-cosense3d.modules.losses.common"]], "cosense3d.modules.losses.edl": [[15, "module-cosense3d.modules.losses.edl"]], "cosense3d.modules.losses.focal_loss": [[15, "module-cosense3d.modules.losses.focal_loss"]], "cosense3d.modules.losses.iou_loss": [[15, "module-cosense3d.modules.losses.iou_loss"]], "cosense3d.modules.losses.l1_loss": [[15, "module-cosense3d.modules.losses.l1_loss"]], "cosense3d.modules.losses.vanilla_seg_loss": [[15, "module-cosense3d.modules.losses.vanilla_seg_loss"]], "cross_entroy_with_logits() (in module cosense3d.modules.losses.common)": [[15, "cosense3d.modules.losses.common.cross_entroy_with_logits"]], "edl_mse_loss() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.edl_mse_loss"]], "evidence_to_conf_unc() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.evidence_to_conf_unc"]], "exp_evidence() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.exp_evidence"]], "focal_loss() (in module cosense3d.modules.losses.common)": [[15, "cosense3d.modules.losses.common.focal_loss"]], "forward() (cosense3d.modules.losses.base_loss.baseloss method)": [[15, "cosense3d.modules.losses.base_loss.BaseLoss.forward"]], "forward() (cosense3d.modules.losses.vanilla_seg_loss.vanillasegloss method)": [[15, "cosense3d.modules.losses.vanilla_seg_loss.VanillaSegLoss.forward"]], "indices_to_dense_vector() (in module cosense3d.modules.losses.common)": [[15, "cosense3d.modules.losses.common.indices_to_dense_vector"]], "kl_divergence() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.kl_divergence"]], "loglikelihood_loss() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.loglikelihood_loss"]], "loss() (cosense3d.modules.losses.base_loss.baseloss method)": [[15, "cosense3d.modules.losses.base_loss.BaseLoss.loss"]], "loss() (cosense3d.modules.losses.edl.edlloss method)": [[15, "cosense3d.modules.losses.edl.EDLLoss.loss"]], "loss() (cosense3d.modules.losses.focal_loss.focalloss method)": [[15, "cosense3d.modules.losses.focal_loss.FocalLoss.loss"]], "loss() (cosense3d.modules.losses.focal_loss.gaussianfocalloss method)": [[15, "cosense3d.modules.losses.focal_loss.GaussianFocalLoss.loss"]], "loss() (cosense3d.modules.losses.focal_loss.qualityfocalloss method)": [[15, "cosense3d.modules.losses.focal_loss.QualityFocalLoss.loss"]], "loss() (cosense3d.modules.losses.iou_loss.giouloss method)": [[15, "cosense3d.modules.losses.iou_loss.GIoULoss.loss"]], "loss() (cosense3d.modules.losses.iou_loss.iouloss method)": [[15, "cosense3d.modules.losses.iou_loss.IoULoss.loss"]], "loss() (cosense3d.modules.losses.l1_loss.l1loss method)": [[15, "cosense3d.modules.losses.l1_loss.L1Loss.loss"]], "loss() (cosense3d.modules.losses.l1_loss.smoothl1loss method)": [[15, "cosense3d.modules.losses.l1_loss.SmoothL1Loss.loss"]], "mse_loss() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.mse_loss"]], "name (cosense3d.modules.losses.base_loss.baseloss property)": [[15, "cosense3d.modules.losses.base_loss.BaseLoss.name"]], "pred_to_conf_unc() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.pred_to_conf_unc"]], "py_focal_loss_with_prob() (in module cosense3d.modules.losses.focal_loss)": [[15, 
"cosense3d.modules.losses.focal_loss.py_focal_loss_with_prob"]], "py_sigmoid_focal_loss() (in module cosense3d.modules.losses.focal_loss)": [[15, "cosense3d.modules.losses.focal_loss.py_sigmoid_focal_loss"]], "quality_focal_loss() (in module cosense3d.modules.losses.focal_loss)": [[15, "cosense3d.modules.losses.focal_loss.quality_focal_loss"]], "quality_focal_loss_with_prob() (in module cosense3d.modules.losses.focal_loss)": [[15, "cosense3d.modules.losses.focal_loss.quality_focal_loss_with_prob"]], "relu_evidence() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.relu_evidence"]], "sigmoid_binary_cross_entropy() (in module cosense3d.modules.losses.common)": [[15, "cosense3d.modules.losses.common.sigmoid_binary_cross_entropy"]], "softplus_evidence() (in module cosense3d.modules.losses.edl)": [[15, "cosense3d.modules.losses.edl.softplus_evidence"]], "training (cosense3d.modules.losses.base_loss.baseloss attribute)": [[15, "cosense3d.modules.losses.base_loss.BaseLoss.training"]], "training (cosense3d.modules.losses.edl.edlloss attribute)": [[15, "cosense3d.modules.losses.edl.EDLLoss.training"]], "training (cosense3d.modules.losses.focal_loss.focalloss attribute)": [[15, "cosense3d.modules.losses.focal_loss.FocalLoss.training"]], "training (cosense3d.modules.losses.focal_loss.gaussianfocalloss attribute)": [[15, "cosense3d.modules.losses.focal_loss.GaussianFocalLoss.training"]], "training (cosense3d.modules.losses.focal_loss.qualityfocalloss attribute)": [[15, "cosense3d.modules.losses.focal_loss.QualityFocalLoss.training"]], "training (cosense3d.modules.losses.iou_loss.giouloss attribute)": [[15, "cosense3d.modules.losses.iou_loss.GIoULoss.training"]], "training (cosense3d.modules.losses.iou_loss.iouloss attribute)": [[15, "cosense3d.modules.losses.iou_loss.IoULoss.training"]], "training (cosense3d.modules.losses.l1_loss.l1loss attribute)": [[15, "cosense3d.modules.losses.l1_loss.L1Loss.training"]], "training (cosense3d.modules.losses.l1_loss.smoothl1loss attribute)": [[15, "cosense3d.modules.losses.l1_loss.SmoothL1Loss.training"]], "training (cosense3d.modules.losses.vanilla_seg_loss.vanillasegloss attribute)": [[15, "cosense3d.modules.losses.vanilla_seg_loss.VanillaSegLoss.training"]], "weighted_l1_loss() (in module cosense3d.modules.losses.common)": [[15, "cosense3d.modules.losses.common.weighted_l1_loss"]], "weighted_sigmoid_binary_cross_entropy() (in module cosense3d.modules.losses.common)": [[15, "cosense3d.modules.losses.common.weighted_sigmoid_binary_cross_entropy"]], "weighted_smooth_l1_loss() (in module cosense3d.modules.losses.common)": [[15, "cosense3d.modules.losses.common.weighted_smooth_l1_loss"]], "densetosparse (class in cosense3d.modules.necks.formatting)": [[16, "cosense3d.modules.necks.formatting.DenseToSparse"]], "detdensetosparse (class in cosense3d.modules.necks.formatting)": [[16, "cosense3d.modules.necks.formatting.DetDenseToSparse"]], "dilationspconv (class in cosense3d.modules.necks.dilation_spconv)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconv"]], "dilationspconvablation (class in cosense3d.modules.necks.dilation_spconv)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation"]], "fpvrcnntolts (class in cosense3d.modules.necks.formatting)": [[16, "cosense3d.modules.necks.formatting.FPVRCNNToLTS"]], "keypointcomposer (class in cosense3d.modules.necks.cpm_composer)": [[16, "cosense3d.modules.necks.cpm_composer.KeypointComposer"]], "cosense3d.modules.necks": [[16, "module-cosense3d.modules.necks"]], 
"cosense3d.modules.necks.cpm_composer": [[16, "module-cosense3d.modules.necks.cpm_composer"]], "cosense3d.modules.necks.dilation_spconv": [[16, "module-cosense3d.modules.necks.dilation_spconv"]], "cosense3d.modules.necks.formatting": [[16, "module-cosense3d.modules.necks.formatting"]], "format_output() (cosense3d.modules.necks.dilation_spconv.dilationspconv method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconv.format_output"]], "format_output() (cosense3d.modules.necks.dilation_spconv.dilationspconvablation method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation.format_output"]], "forward() (cosense3d.modules.necks.cpm_composer.keypointcomposer method)": [[16, "cosense3d.modules.necks.cpm_composer.KeypointComposer.forward"]], "forward() (cosense3d.modules.necks.dilation_spconv.dilationspconv method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconv.forward"]], "forward() (cosense3d.modules.necks.dilation_spconv.dilationspconvablation method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation.forward"]], "forward() (cosense3d.modules.necks.formatting.densetosparse method)": [[16, "cosense3d.modules.necks.formatting.DenseToSparse.forward"]], "forward() (cosense3d.modules.necks.formatting.detdensetosparse method)": [[16, "cosense3d.modules.necks.formatting.DetDenseToSparse.forward"]], "forward() (cosense3d.modules.necks.formatting.fpvrcnntolts method)": [[16, "cosense3d.modules.necks.formatting.FPVRCNNToLTS.forward"]], "get_centers() (cosense3d.modules.necks.formatting.densetosparse method)": [[16, "cosense3d.modules.necks.formatting.DenseToSparse.get_centers"]], "get_centers() (cosense3d.modules.necks.formatting.detdensetosparse method)": [[16, "cosense3d.modules.necks.formatting.DetDenseToSparse.get_centers"]], "get_centers() (cosense3d.modules.necks.formatting.fpvrcnntolts method)": [[16, "cosense3d.modules.necks.formatting.FPVRCNNToLTS.get_centers"]], "get_conv_layer() (cosense3d.modules.necks.dilation_spconv.dilationspconv method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconv.get_conv_layer"]], "get_conv_layer() (cosense3d.modules.necks.dilation_spconv.dilationspconvablation method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation.get_conv_layer"]], "to_gpu() (cosense3d.modules.necks.dilation_spconv.dilationspconv method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconv.to_gpu"]], "to_gpu() (cosense3d.modules.necks.dilation_spconv.dilationspconvablation method)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation.to_gpu"]], "training (cosense3d.modules.necks.cpm_composer.keypointcomposer attribute)": [[16, "cosense3d.modules.necks.cpm_composer.KeypointComposer.training"]], "training (cosense3d.modules.necks.dilation_spconv.dilationspconv attribute)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconv.training"]], "training (cosense3d.modules.necks.dilation_spconv.dilationspconvablation attribute)": [[16, "cosense3d.modules.necks.dilation_spconv.DilationSpconvAblation.training"]], "training (cosense3d.modules.necks.formatting.densetosparse attribute)": [[16, "cosense3d.modules.necks.formatting.DenseToSparse.training"]], "training (cosense3d.modules.necks.formatting.detdensetosparse attribute)": [[16, "cosense3d.modules.necks.formatting.DetDenseToSparse.training"]], "training (cosense3d.modules.necks.formatting.fpvrcnntolts attribute)": [[16, "cosense3d.modules.necks.formatting.FPVRCNNToLTS.training"]], "bevboxassigner (class in 
cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BEVBoxAssigner"]], "bevcenternessassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BEVCenternessAssigner"]], "bevpointassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BEVPointAssigner"]], "bevsemsegassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner"]], "baseassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BaseAssigner"]], "boxanchorassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner"]], "boxcenterassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BoxCenterAssigner"]], "boxsparseanchorassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner"]], "cml (class in cosense3d.modules.plugin.voxnet_utils)": [[17, "cosense3d.modules.plugin.voxnet_utils.CML"]], "cmlsparse (class in cosense3d.modules.plugin.voxnet_utils)": [[17, "cosense3d.modules.plugin.voxnet_utils.CMLSparse"]], "contibevassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.ContiBEVAssigner"]], "conv2d (class in cosense3d.modules.plugin.bev_rpn)": [[17, "cosense3d.modules.plugin.bev_rpn.Conv2d"]], "conv3d (class in cosense3d.modules.plugin.voxnet_utils)": [[17, "cosense3d.modules.plugin.voxnet_utils.Conv3d"]], "customrpn (class in cosense3d.modules.plugin.bev_rpn)": [[17, "cosense3d.modules.plugin.bev_rpn.CustomRPN"]], "discretebevassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.DiscreteBEVAssigner"]], "doubleconv (class in cosense3d.modules.plugin.downsample_conv)": [[17, "cosense3d.modules.plugin.downsample_conv.DoubleConv"]], "downsampleconv (class in cosense3d.modules.plugin.downsample_conv)": [[17, "cosense3d.modules.plugin.downsample_conv.DownsampleConv"]], "ffn (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.FFN"]], "fpn (class in cosense3d.modules.plugin.fpn)": [[17, "cosense3d.modules.plugin.fpn.FPN"]], "flashattention (class in cosense3d.modules.plugin.flash_attn)": [[17, "cosense3d.modules.plugin.flash_attn.FlashAttention"]], "flashmha (class in cosense3d.modules.plugin.flash_attn)": [[17, "cosense3d.modules.plugin.flash_attn.FlashMHA"]], "gevbevdecoder (class in cosense3d.modules.plugin.gevbev_decoder)": [[17, "cosense3d.modules.plugin.gevbev_decoder.GevBEVDecoder"]], "heatmapassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.HeatmapAssigner"]], "hungarianassigner2d (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.HungarianAssigner2D"]], "hungarianassigner3d (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.HungarianAssigner3D"]], "matchcost (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost"]], "meanvfe (class in cosense3d.modules.plugin.voxel_encoder)": [[17, "cosense3d.modules.plugin.voxel_encoder.MeanVFE"]], "multiheadattentionwrapper (class in 
cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper"]], "multiheadattention (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.MultiheadAttention"]], "multiheadflashattention (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.MultiheadFlashAttention"]], "naivecompressor (class in cosense3d.modules.plugin.naive_compressor)": [[17, "cosense3d.modules.plugin.naive_compressor.NaiveCompressor"]], "neighborhoodattention (class in cosense3d.modules.plugin.attn)": [[17, "cosense3d.modules.plugin.attn.NeighborhoodAttention"]], "petrtemporaltransformer (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.PETRTemporalTransformer"]], "petrtransformer (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.PETRTransformer"]], "pfnlayer (class in cosense3d.modules.plugin.pillar_encoder)": [[17, "cosense3d.modules.plugin.pillar_encoder.PFNLayer"]], "pillarencoder (class in cosense3d.modules.plugin.pillar_encoder)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder"]], "rpn (class in cosense3d.modules.plugin.bev_rpn)": [[17, "cosense3d.modules.plugin.bev_rpn.RPN"]], "roibox3dassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.RoIBox3DAssigner"]], "roadlineassigner (class in cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.RoadLineAssigner"]], "ssfa (class in cosense3d.modules.plugin.ssfa)": [[17, "cosense3d.modules.plugin.ssfa.SSFA"]], "scaleddotproductattention (class in cosense3d.modules.plugin.attn)": [[17, "cosense3d.modules.plugin.attn.ScaledDotProductAttention"]], "spconv (class in cosense3d.modules.plugin.mink_spconv)": [[17, "cosense3d.modules.plugin.mink_spconv.Spconv"]], "transformerdecoder (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.TransformerDecoder"]], "transformerdecoderlayer (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.TransformerDecoderLayer"]], "transformerlayersequence (class in cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.TransformerLayerSequence"]], "voxelgenerator (class in cosense3d.modules.plugin.voxel_generator)": [[17, "cosense3d.modules.plugin.voxel_generator.VoxelGenerator"]], "voxelsetabstraction (class in cosense3d.modules.plugin.vsa)": [[17, "cosense3d.modules.plugin.vsa.VoxelSetAbstraction"]], "absolute_xyz_dim (cosense3d.modules.plugin.pillar_encoder.pillarencoder property)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.absolute_xyz_dim"]], "assign() (cosense3d.modules.plugin.target_assigners.bevboxassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVBoxAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.bevcenternessassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVCenternessAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.bevpointassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVPointAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.bevsemsegassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.baseassigner method)": [[17, 
"cosense3d.modules.plugin.target_assigners.BaseAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.boxanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.boxcenterassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxCenterAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.boxsparseanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.contibevassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.ContiBEVAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.discretebevassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.DiscreteBEVAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.heatmapassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.HeatmapAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.hungarianassigner2d method)": [[17, "cosense3d.modules.plugin.target_assigners.HungarianAssigner2D.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.hungarianassigner3d method)": [[17, "cosense3d.modules.plugin.target_assigners.HungarianAssigner3D.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.roibox3dassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.RoIBox3DAssigner.assign"]], "assign() (cosense3d.modules.plugin.target_assigners.roadlineassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.RoadLineAssigner.assign"]], "bboxl1() (cosense3d.modules.plugin.target_assigners.matchcost static method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.bboxl1"]], "bias_k (cosense3d.modules.plugin.transformer.multiheadattentionwrapper attribute)": [[17, "cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper.bias_k"]], "bias_v (cosense3d.modules.plugin.transformer.multiheadattentionwrapper attribute)": [[17, "cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper.bias_v"]], "bilinear_interpolate_torch() (in module cosense3d.modules.plugin.vsa)": [[17, "cosense3d.modules.plugin.vsa.bilinear_interpolate_torch"]], "binary_focal_loss() (cosense3d.modules.plugin.target_assigners.matchcost static method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.binary_focal_loss"]], "box_overlaps() (cosense3d.modules.plugin.target_assigners.boxanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner.box_overlaps"]], "box_overlaps() (cosense3d.modules.plugin.target_assigners.boxsparseanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner.box_overlaps"]], "build() (cosense3d.modules.plugin.target_assigners.matchcost method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.build"]], "build_module() (in module cosense3d.modules.plugin.transformer)": [[17, "cosense3d.modules.plugin.transformer.build_module"]], "build_plugin_layer() (in module cosense3d.modules.plugin)": [[17, "cosense3d.modules.plugin.build_plugin_layer"]], "build_plugin_module() (in module cosense3d.modules.plugin)": [[17, "cosense3d.modules.plugin.build_plugin_module"]], "classification() (cosense3d.modules.plugin.target_assigners.matchcost static method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.classification"]], "compose_voxel_feature() 
(cosense3d.modules.plugin.pillar_encoder.pillarencoder method)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.compose_voxel_feature"]], "coor_to_indices() (cosense3d.modules.plugin.attn.neighborhoodattention method)": [[17, "cosense3d.modules.plugin.attn.NeighborhoodAttention.coor_to_indices"]], "coor_to_indices() (cosense3d.modules.plugin.gevbev_decoder.gevbevdecoder method)": [[17, "cosense3d.modules.plugin.gevbev_decoder.GevBEVDecoder.coor_to_indices"]], "cosense3d.modules.plugin": [[17, "module-cosense3d.modules.plugin"]], "cosense3d.modules.plugin.attn": [[17, "module-cosense3d.modules.plugin.attn"]], "cosense3d.modules.plugin.bev_rpn": [[17, "module-cosense3d.modules.plugin.bev_rpn"]], "cosense3d.modules.plugin.downsample_conv": [[17, "module-cosense3d.modules.plugin.downsample_conv"]], "cosense3d.modules.plugin.flash_attn": [[17, "module-cosense3d.modules.plugin.flash_attn"]], "cosense3d.modules.plugin.fpn": [[17, "module-cosense3d.modules.plugin.fpn"]], "cosense3d.modules.plugin.gevbev_decoder": [[17, "module-cosense3d.modules.plugin.gevbev_decoder"]], "cosense3d.modules.plugin.mink_spconv": [[17, "module-cosense3d.modules.plugin.mink_spconv"]], "cosense3d.modules.plugin.naive_compressor": [[17, "module-cosense3d.modules.plugin.naive_compressor"]], "cosense3d.modules.plugin.pillar_encoder": [[17, "module-cosense3d.modules.plugin.pillar_encoder"]], "cosense3d.modules.plugin.ssfa": [[17, "module-cosense3d.modules.plugin.ssfa"]], "cosense3d.modules.plugin.target_assigners": [[17, "module-cosense3d.modules.plugin.target_assigners"]], "cosense3d.modules.plugin.transformer": [[17, "module-cosense3d.modules.plugin.transformer"]], "cosense3d.modules.plugin.voxel_encoder": [[17, "module-cosense3d.modules.plugin.voxel_encoder"]], "cosense3d.modules.plugin.voxel_generator": [[17, "module-cosense3d.modules.plugin.voxel_generator"]], "cosense3d.modules.plugin.voxnet_utils": [[17, "module-cosense3d.modules.plugin.voxnet_utils"]], "cosense3d.modules.plugin.vsa": [[17, "module-cosense3d.modules.plugin.vsa"]], "distance_dim (cosense3d.modules.plugin.pillar_encoder.pillarencoder property)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.distance_dim"]], "down_sample_pred_pts() (cosense3d.modules.plugin.target_assigners.bevsemsegassigner static method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner.down_sample_pred_pts"]], "downsample_tgt_pts() (cosense3d.modules.plugin.target_assigners.bevpointassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVPointAssigner.downsample_tgt_pts"]], "downsample_tgt_pts() (cosense3d.modules.plugin.target_assigners.bevsemsegassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner.downsample_tgt_pts"]], "draw_heatmap_gaussian() (cosense3d.modules.plugin.target_assigners.heatmapassigner static method)": [[17, "cosense3d.modules.plugin.target_assigners.HeatmapAssigner.draw_heatmap_gaussian"]], "filter_range() (cosense3d.modules.plugin.target_assigners.bevsemsegassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner.filter_range"]], "flash_attn_unpadded_kvpacked_test() (in module cosense3d.modules.plugin.flash_attn)": [[17, "cosense3d.modules.plugin.flash_attn.flash_attn_unpadded_kvpacked_test"]], "focal_loss() (cosense3d.modules.plugin.target_assigners.matchcost static method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.focal_loss"]], "forward() (cosense3d.modules.plugin.attn.neighborhoodattention method)": [[17, 
"cosense3d.modules.plugin.attn.NeighborhoodAttention.forward"]], "forward() (cosense3d.modules.plugin.attn.scaleddotproductattention method)": [[17, "cosense3d.modules.plugin.attn.ScaledDotProductAttention.forward"]], "forward() (cosense3d.modules.plugin.bev_rpn.conv2d method)": [[17, "cosense3d.modules.plugin.bev_rpn.Conv2d.forward"]], "forward() (cosense3d.modules.plugin.bev_rpn.customrpn method)": [[17, "cosense3d.modules.plugin.bev_rpn.CustomRPN.forward"]], "forward() (cosense3d.modules.plugin.bev_rpn.rpn method)": [[17, "cosense3d.modules.plugin.bev_rpn.RPN.forward"]], "forward() (cosense3d.modules.plugin.downsample_conv.doubleconv method)": [[17, "cosense3d.modules.plugin.downsample_conv.DoubleConv.forward"]], "forward() (cosense3d.modules.plugin.downsample_conv.downsampleconv method)": [[17, "cosense3d.modules.plugin.downsample_conv.DownsampleConv.forward"]], "forward() (cosense3d.modules.plugin.flash_attn.flashattention method)": [[17, "cosense3d.modules.plugin.flash_attn.FlashAttention.forward"]], "forward() (cosense3d.modules.plugin.flash_attn.flashmha method)": [[17, "cosense3d.modules.plugin.flash_attn.FlashMHA.forward"]], "forward() (cosense3d.modules.plugin.fpn.fpn method)": [[17, "cosense3d.modules.plugin.fpn.FPN.forward"]], "forward() (cosense3d.modules.plugin.gevbev_decoder.gevbevdecoder method)": [[17, "cosense3d.modules.plugin.gevbev_decoder.GevBEVDecoder.forward"]], "forward() (cosense3d.modules.plugin.mink_spconv.spconv method)": [[17, "cosense3d.modules.plugin.mink_spconv.Spconv.forward"]], "forward() (cosense3d.modules.plugin.naive_compressor.naivecompressor method)": [[17, "cosense3d.modules.plugin.naive_compressor.NaiveCompressor.forward"]], "forward() (cosense3d.modules.plugin.pillar_encoder.pfnlayer method)": [[17, "cosense3d.modules.plugin.pillar_encoder.PFNLayer.forward"]], "forward() (cosense3d.modules.plugin.pillar_encoder.pillarencoder method)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.forward"]], "forward() (cosense3d.modules.plugin.ssfa.ssfa method)": [[17, "cosense3d.modules.plugin.ssfa.SSFA.forward"]], "forward() (cosense3d.modules.plugin.transformer.ffn method)": [[17, "cosense3d.modules.plugin.transformer.FFN.forward"]], "forward() (cosense3d.modules.plugin.transformer.multiheadattentionwrapper method)": [[17, "cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper.forward"]], "forward() (cosense3d.modules.plugin.transformer.multiheadattention method)": [[17, "cosense3d.modules.plugin.transformer.MultiheadAttention.forward"]], "forward() (cosense3d.modules.plugin.transformer.multiheadflashattention method)": [[17, "cosense3d.modules.plugin.transformer.MultiheadFlashAttention.forward"]], "forward() (cosense3d.modules.plugin.transformer.petrtemporaltransformer method)": [[17, "cosense3d.modules.plugin.transformer.PETRTemporalTransformer.forward"]], "forward() (cosense3d.modules.plugin.transformer.petrtransformer method)": [[17, "cosense3d.modules.plugin.transformer.PETRTransformer.forward"]], "forward() (cosense3d.modules.plugin.transformer.transformerdecoder method)": [[17, "cosense3d.modules.plugin.transformer.TransformerDecoder.forward"]], "forward() (cosense3d.modules.plugin.transformer.transformerdecoderlayer method)": [[17, "cosense3d.modules.plugin.transformer.TransformerDecoderLayer.forward"]], "forward() (cosense3d.modules.plugin.transformer.transformerlayersequence method)": [[17, "cosense3d.modules.plugin.transformer.TransformerLayerSequence.forward"]], "forward() (cosense3d.modules.plugin.voxel_encoder.meanvfe 
method)": [[17, "cosense3d.modules.plugin.voxel_encoder.MeanVFE.forward"]], "forward() (cosense3d.modules.plugin.voxnet_utils.cml method)": [[17, "cosense3d.modules.plugin.voxnet_utils.CML.forward"]], "forward() (cosense3d.modules.plugin.voxnet_utils.cmlsparse method)": [[17, "cosense3d.modules.plugin.voxnet_utils.CMLSparse.forward"]], "forward() (cosense3d.modules.plugin.voxnet_utils.conv3d method)": [[17, "cosense3d.modules.plugin.voxnet_utils.Conv3d.forward"]], "forward() (cosense3d.modules.plugin.vsa.voxelsetabstraction method)": [[17, "cosense3d.modules.plugin.vsa.VoxelSetAbstraction.forward"]], "forward_fp16() (cosense3d.modules.plugin.transformer.multiheadattentionwrapper method)": [[17, "cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper.forward_fp16"]], "forward_fp32() (cosense3d.modules.plugin.transformer.multiheadattentionwrapper method)": [[17, "cosense3d.modules.plugin.transformer.MultiHeadAttentionWrapper.forward_fp32"]], "get_2d_stensor() (cosense3d.modules.plugin.mink_spconv.spconv method)": [[17, "cosense3d.modules.plugin.mink_spconv.Spconv.get_2d_stensor"]], "get_anchor_template() (cosense3d.modules.plugin.target_assigners.boxanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner.get_anchor_template"]], "get_anchor_template() (cosense3d.modules.plugin.target_assigners.boxsparseanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner.get_anchor_template"]], "get_conv_layer() (cosense3d.modules.plugin.mink_spconv.spconv method)": [[17, "cosense3d.modules.plugin.mink_spconv.Spconv.get_conv_layer"]], "get_conv_layers() (in module cosense3d.modules.plugin.ssfa)": [[17, "cosense3d.modules.plugin.ssfa.get_conv_layers"]], "get_labels_single_head() (cosense3d.modules.plugin.target_assigners.bevboxassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVBoxAssigner.get_labels_single_head"]], "get_labels_single_head() (cosense3d.modules.plugin.target_assigners.bevcenternessassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVCenternessAssigner.get_labels_single_head"]], "get_nbr_mapping() (cosense3d.modules.plugin.attn.neighborhoodattention method)": [[17, "cosense3d.modules.plugin.attn.NeighborhoodAttention.get_nbr_mapping"]], "get_nbr_mapping() (cosense3d.modules.plugin.gevbev_decoder.gevbevdecoder method)": [[17, "cosense3d.modules.plugin.gevbev_decoder.GevBEVDecoder.get_nbr_mapping"]], "get_obs_mask() (cosense3d.modules.plugin.target_assigners.bevsemsegassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner.get_obs_mask"]], "get_obs_mask() (cosense3d.modules.plugin.target_assigners.discretebevassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.DiscreteBEVAssigner.get_obs_mask"]], "get_output_feature_dim() (cosense3d.modules.plugin.voxel_encoder.meanvfe method)": [[17, "cosense3d.modules.plugin.voxel_encoder.MeanVFE.get_output_feature_dim"]], "get_paddings_indicator() (cosense3d.modules.plugin.pillar_encoder.pillarencoder static method)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.get_paddings_indicator"]], "get_predictions() (cosense3d.modules.plugin.target_assigners.bevpointassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVPointAssigner.get_predictions"]], "get_predictions() (cosense3d.modules.plugin.target_assigners.bevsemsegassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner.get_predictions"]], "get_predictions() 
(cosense3d.modules.plugin.target_assigners.boxanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner.get_predictions"]], "get_predictions() (cosense3d.modules.plugin.target_assigners.boxcenterassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxCenterAssigner.get_predictions"]], "get_predictions() (cosense3d.modules.plugin.target_assigners.boxsparseanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner.get_predictions"]], "get_predictions() (cosense3d.modules.plugin.target_assigners.contibevassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.ContiBEVAssigner.get_predictions"]], "get_predictions() (cosense3d.modules.plugin.target_assigners.discretebevassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.DiscreteBEVAssigner.get_predictions"]], "get_predictions() (cosense3d.modules.plugin.target_assigners.roibox3dassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.RoIBox3DAssigner.get_predictions"]], "get_sampled_points() (cosense3d.modules.plugin.vsa.voxelsetabstraction method)": [[17, "cosense3d.modules.plugin.vsa.VoxelSetAbstraction.get_sampled_points"]], "giou() (cosense3d.modules.plugin.target_assigners.matchcost static method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.giou"]], "index_first_axis() (in module cosense3d.modules.plugin.flash_attn)": [[17, "cosense3d.modules.plugin.flash_attn.index_first_axis"]], "infer_abbr() (in module cosense3d.modules.plugin)": [[17, "cosense3d.modules.plugin.infer_abbr"]], "init_weights() (cosense3d.modules.plugin.fpn.fpn method)": [[17, "cosense3d.modules.plugin.fpn.FPN.init_weights"]], "init_weights() (cosense3d.modules.plugin.ssfa.ssfa method)": [[17, "cosense3d.modules.plugin.ssfa.SSFA.init_weights"]], "init_weights() (cosense3d.modules.plugin.transformer.petrtemporaltransformer method)": [[17, "cosense3d.modules.plugin.transformer.PETRTemporalTransformer.init_weights"]], "init_weights() (cosense3d.modules.plugin.transformer.petrtransformer method)": [[17, "cosense3d.modules.plugin.transformer.PETRTransformer.init_weights"]], "intensity_dim (cosense3d.modules.plugin.pillar_encoder.pillarencoder property)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.intensity_dim"]], "interpolate_from_bev_features() (cosense3d.modules.plugin.vsa.voxelsetabstraction method)": [[17, "cosense3d.modules.plugin.vsa.VoxelSetAbstraction.interpolate_from_bev_features"]], "iou() (cosense3d.modules.plugin.target_assigners.matchcost static method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.iou"]], "l1() (cosense3d.modules.plugin.target_assigners.matchcost static method)": [[17, "cosense3d.modules.plugin.target_assigners.MatchCost.l1"]], "me_coor_to_grid_indices() (cosense3d.modules.plugin.target_assigners.boxsparseanchorassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner.me_coor_to_grid_indices"]], "pos_neg_sampling() (in module cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.pos_neg_sampling"]], "pts_to_indices() (cosense3d.modules.plugin.target_assigners.boxcenterassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BoxCenterAssigner.pts_to_indices"]], "pts_to_inds() (cosense3d.modules.plugin.target_assigners.bevsemsegassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.BEVSemsegAssigner.pts_to_inds"]], "pts_to_inds() 
(cosense3d.modules.plugin.target_assigners.discretebevassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.DiscreteBEVAssigner.pts_to_inds"]], "sample_dynamic_tgt_pts() (cosense3d.modules.plugin.target_assigners.contibevassigner method)": [[17, "cosense3d.modules.plugin.target_assigners.ContiBEVAssigner.sample_dynamic_tgt_pts"]], "sample_mining() (in module cosense3d.modules.plugin.target_assigners)": [[17, "cosense3d.modules.plugin.target_assigners.sample_mining"]], "training (cosense3d.modules.plugin.attn.neighborhoodattention attribute)": [[17, "cosense3d.modules.plugin.attn.NeighborhoodAttention.training"]], "training (cosense3d.modules.plugin.attn.scaleddotproductattention attribute)": [[17, "cosense3d.modules.plugin.attn.ScaledDotProductAttention.training"]], "training (cosense3d.modules.plugin.bev_rpn.conv2d attribute)": [[17, "cosense3d.modules.plugin.bev_rpn.Conv2d.training"]], "training (cosense3d.modules.plugin.bev_rpn.customrpn attribute)": [[17, "cosense3d.modules.plugin.bev_rpn.CustomRPN.training"]], "training (cosense3d.modules.plugin.bev_rpn.rpn attribute)": [[17, "cosense3d.modules.plugin.bev_rpn.RPN.training"]], "training (cosense3d.modules.plugin.downsample_conv.doubleconv attribute)": [[17, "cosense3d.modules.plugin.downsample_conv.DoubleConv.training"]], "training (cosense3d.modules.plugin.downsample_conv.downsampleconv attribute)": [[17, "cosense3d.modules.plugin.downsample_conv.DownsampleConv.training"]], "training (cosense3d.modules.plugin.flash_attn.flashattention attribute)": [[17, "cosense3d.modules.plugin.flash_attn.FlashAttention.training"]], "training (cosense3d.modules.plugin.flash_attn.flashmha attribute)": [[17, "cosense3d.modules.plugin.flash_attn.FlashMHA.training"]], "training (cosense3d.modules.plugin.fpn.fpn attribute)": [[17, "cosense3d.modules.plugin.fpn.FPN.training"]], "training (cosense3d.modules.plugin.gevbev_decoder.gevbevdecoder attribute)": [[17, "cosense3d.modules.plugin.gevbev_decoder.GevBEVDecoder.training"]], "training (cosense3d.modules.plugin.mink_spconv.spconv attribute)": [[17, "cosense3d.modules.plugin.mink_spconv.Spconv.training"]], "training (cosense3d.modules.plugin.naive_compressor.naivecompressor attribute)": [[17, "cosense3d.modules.plugin.naive_compressor.NaiveCompressor.training"]], "training (cosense3d.modules.plugin.pillar_encoder.pfnlayer attribute)": [[17, "cosense3d.modules.plugin.pillar_encoder.PFNLayer.training"]], "training (cosense3d.modules.plugin.pillar_encoder.pillarencoder attribute)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.training"]], "training (cosense3d.modules.plugin.ssfa.ssfa attribute)": [[17, "cosense3d.modules.plugin.ssfa.SSFA.training"]], "training (cosense3d.modules.plugin.target_assigners.boxanchorassigner attribute)": [[17, "cosense3d.modules.plugin.target_assigners.BoxAnchorAssigner.training"]], "training (cosense3d.modules.plugin.target_assigners.boxcenterassigner attribute)": [[17, "cosense3d.modules.plugin.target_assigners.BoxCenterAssigner.training"]], "training (cosense3d.modules.plugin.target_assigners.boxsparseanchorassigner attribute)": [[17, "cosense3d.modules.plugin.target_assigners.BoxSparseAnchorAssigner.training"]], "training (cosense3d.modules.plugin.transformer.ffn attribute)": [[17, "cosense3d.modules.plugin.transformer.FFN.training"]], "training (cosense3d.modules.plugin.transformer.multiheadattention attribute)": [[17, "cosense3d.modules.plugin.transformer.MultiheadAttention.training"]], "training 
(cosense3d.modules.plugin.transformer.multiheadflashattention attribute)": [[17, "cosense3d.modules.plugin.transformer.MultiheadFlashAttention.training"]], "training (cosense3d.modules.plugin.transformer.petrtemporaltransformer attribute)": [[17, "cosense3d.modules.plugin.transformer.PETRTemporalTransformer.training"]], "training (cosense3d.modules.plugin.transformer.petrtransformer attribute)": [[17, "cosense3d.modules.plugin.transformer.PETRTransformer.training"]], "training (cosense3d.modules.plugin.transformer.transformerdecoder attribute)": [[17, "cosense3d.modules.plugin.transformer.TransformerDecoder.training"]], "training (cosense3d.modules.plugin.transformer.transformerdecoderlayer attribute)": [[17, "cosense3d.modules.plugin.transformer.TransformerDecoderLayer.training"]], "training (cosense3d.modules.plugin.transformer.transformerlayersequence attribute)": [[17, "cosense3d.modules.plugin.transformer.TransformerLayerSequence.training"]], "training (cosense3d.modules.plugin.voxel_encoder.meanvfe attribute)": [[17, "cosense3d.modules.plugin.voxel_encoder.MeanVFE.training"]], "training (cosense3d.modules.plugin.voxnet_utils.cml attribute)": [[17, "cosense3d.modules.plugin.voxnet_utils.CML.training"]], "training (cosense3d.modules.plugin.voxnet_utils.cmlsparse attribute)": [[17, "cosense3d.modules.plugin.voxnet_utils.CMLSparse.training"]], "training (cosense3d.modules.plugin.voxnet_utils.conv3d attribute)": [[17, "cosense3d.modules.plugin.voxnet_utils.Conv3d.training"]], "training (cosense3d.modules.plugin.vsa.voxelsetabstraction attribute)": [[17, "cosense3d.modules.plugin.vsa.VoxelSetAbstraction.training"]], "xyz_dim (cosense3d.modules.plugin.pillar_encoder.pillarencoder property)": [[17, "cosense3d.modules.plugin.pillar_encoder.PillarEncoder.xyz_dim"]], "faxmodule (class in cosense3d.modules.projection.fax)": [[18, "cosense3d.modules.projection.fax.FAXModule"]], "petr (class in cosense3d.modules.projection.petr)": [[18, "cosense3d.modules.projection.petr.PETR"]], "resnetbottleneck() (in module cosense3d.modules.projection.fax)": [[18, "cosense3d.modules.projection.fax.ResNetBottleNeck"]], "sttf (class in cosense3d.modules.projection.spatial_transform)": [[18, "cosense3d.modules.projection.spatial_transform.STTF"]], "cosense3d.modules.projection": [[18, "module-cosense3d.modules.projection"]], "cosense3d.modules.projection.fax": [[18, "module-cosense3d.modules.projection.fax"]], "cosense3d.modules.projection.petr": [[18, "module-cosense3d.modules.projection.petr"]], "cosense3d.modules.projection.spatial_transform": [[18, "module-cosense3d.modules.projection.spatial_transform"]], "format_input() (cosense3d.modules.projection.petr.petr method)": [[18, "cosense3d.modules.projection.petr.PETR.format_input"]], "forward() (cosense3d.modules.projection.fax.faxmodule method)": [[18, "cosense3d.modules.projection.fax.FAXModule.forward"]], "forward() (cosense3d.modules.projection.petr.petr method)": [[18, "cosense3d.modules.projection.petr.PETR.forward"]], "forward() (cosense3d.modules.projection.spatial_transform.sttf method)": [[18, "cosense3d.modules.projection.spatial_transform.STTF.forward"]], "gather_topk() (cosense3d.modules.projection.petr.petr method)": [[18, "cosense3d.modules.projection.petr.PETR.gather_topk"]], "img_position_embeding() (cosense3d.modules.projection.petr.petr method)": [[18, "cosense3d.modules.projection.petr.PETR.img_position_embeding"]], "init_weights() (cosense3d.modules.projection.petr.petr method)": [[18, 
"cosense3d.modules.projection.petr.PETR.init_weights"]], "training (cosense3d.modules.projection.fax.faxmodule attribute)": [[18, "cosense3d.modules.projection.fax.FAXModule.training"]], "training (cosense3d.modules.projection.petr.petr attribute)": [[18, "cosense3d.modules.projection.petr.PETR.training"]], "training (cosense3d.modules.projection.spatial_transform.sttf attribute)": [[18, "cosense3d.modules.projection.spatial_transform.STTF.training"]], "boxpredcoder (class in cosense3d.modules.utils.box_coder)": [[19, "cosense3d.modules.utils.box_coder.BoxPredCoder"]], "centerboxcoder (class in cosense3d.modules.utils.box_coder)": [[19, "cosense3d.modules.utils.box_coder.CenterBoxCoder"]], "convmodule (class in cosense3d.modules.utils.conv)": [[19, "cosense3d.modules.utils.conv.ConvModule"]], "mln (class in cosense3d.modules.utils.misc)": [[19, "cosense3d.modules.utils.misc.MLN"]], "mln2 (class in cosense3d.modules.utils.misc)": [[19, "cosense3d.modules.utils.misc.MLN2"]], "neighborhoodattention (class in cosense3d.modules.utils.nbr_attn)": [[19, "cosense3d.modules.utils.nbr_attn.NeighborhoodAttention"]], "residualboxcoder (class in cosense3d.modules.utils.box_coder)": [[19, "cosense3d.modules.utils.box_coder.ResidualBoxCoder"]], "selayer_linear (class in cosense3d.modules.utils.misc)": [[19, "cosense3d.modules.utils.misc.SELayer_Linear"]], "bev_sparse_to_dense() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.bev_sparse_to_dense"]], "bias_init_with_prob() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.bias_init_with_prob"]], "bias_init_with_prob() (in module cosense3d.modules.utils.init)": [[19, "cosense3d.modules.utils.init.bias_init_with_prob"]], "build_box_coder() (in module cosense3d.modules.utils.box_coder)": [[19, "cosense3d.modules.utils.box_coder.build_box_coder"]], "build_conv_layer() (in module cosense3d.modules.utils.conv)": [[19, "cosense3d.modules.utils.conv.build_conv_layer"]], "build_norm_layer() (in module cosense3d.modules.utils.norm)": [[19, "cosense3d.modules.utils.norm.build_norm_layer"]], "build_padding_layer() (in module cosense3d.modules.utils.conv)": [[19, "cosense3d.modules.utils.conv.build_padding_layer"]], "build_torch_module() (in module cosense3d.modules.utils)": [[19, "cosense3d.modules.utils.build_torch_module"]], "cat_coor_with_idx() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.cat_coor_with_idx"]], "cat_name_str() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.cat_name_str"]], "center_to_img_coor() (in module cosense3d.modules.utils.gaussian_utils)": [[19, "cosense3d.modules.utils.gaussian_utils.center_to_img_coor"]], "clip_sigmoid() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.clip_sigmoid"]], "constant_init() (in module cosense3d.modules.utils.init)": [[19, "cosense3d.modules.utils.init.constant_init"]], "coor2ratio() (in module cosense3d.modules.utils.positional_encoding)": [[19, "cosense3d.modules.utils.positional_encoding.coor2ratio"]], "cornernet_gaussian_radius() (in module cosense3d.modules.utils.gaussian_utils)": [[19, "cosense3d.modules.utils.gaussian_utils.cornernet_gaussian_radius"]], "cosense3d.modules.utils": [[19, "module-cosense3d.modules.utils"]], "cosense3d.modules.utils.box_coder": [[19, "module-cosense3d.modules.utils.box_coder"]], "cosense3d.modules.utils.common": [[19, "module-cosense3d.modules.utils.common"]], "cosense3d.modules.utils.conv": [[19, 
"module-cosense3d.modules.utils.conv"]], "cosense3d.modules.utils.edl_utils": [[19, "module-cosense3d.modules.utils.edl_utils"]], "cosense3d.modules.utils.gaussian_utils": [[19, "module-cosense3d.modules.utils.gaussian_utils"]], "cosense3d.modules.utils.init": [[19, "module-cosense3d.modules.utils.init"]], "cosense3d.modules.utils.me_utils": [[19, "module-cosense3d.modules.utils.me_utils"]], "cosense3d.modules.utils.misc": [[19, "module-cosense3d.modules.utils.misc"]], "cosense3d.modules.utils.nbr_attn": [[19, "module-cosense3d.modules.utils.nbr_attn"]], "cosense3d.modules.utils.norm": [[19, "module-cosense3d.modules.utils.norm"]], "cosense3d.modules.utils.positional_encoding": [[19, "module-cosense3d.modules.utils.positional_encoding"]], "decode() (cosense3d.modules.utils.box_coder.boxpredcoder method)": [[19, "cosense3d.modules.utils.box_coder.BoxPredCoder.decode"]], "decode() (cosense3d.modules.utils.box_coder.centerboxcoder method)": [[19, "cosense3d.modules.utils.box_coder.CenterBoxCoder.decode"]], "decode() (cosense3d.modules.utils.box_coder.residualboxcoder method)": [[19, "cosense3d.modules.utils.box_coder.ResidualBoxCoder.decode"]], "decode_direction() (cosense3d.modules.utils.box_coder.residualboxcoder method)": [[19, "cosense3d.modules.utils.box_coder.ResidualBoxCoder.decode_direction"]], "devoxelize_with_centroids() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.devoxelize_with_centroids"]], "downsample_embeddings() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.downsample_embeddings"]], "downsample_points() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.downsample_points"]], "draw_gaussian_map() (in module cosense3d.modules.utils.gaussian_utils)": [[19, "cosense3d.modules.utils.gaussian_utils.draw_gaussian_map"]], "draw_sample_prob() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.draw_sample_prob"]], "encode() (cosense3d.modules.utils.box_coder.boxpredcoder method)": [[19, "cosense3d.modules.utils.box_coder.BoxPredCoder.encode"]], "encode() (cosense3d.modules.utils.box_coder.centerboxcoder method)": [[19, "cosense3d.modules.utils.box_coder.CenterBoxCoder.encode"]], "encode() (cosense3d.modules.utils.box_coder.residualboxcoder method)": [[19, "cosense3d.modules.utils.box_coder.ResidualBoxCoder.encode"]], "encode_direction() (cosense3d.modules.utils.box_coder.residualboxcoder method)": [[19, "cosense3d.modules.utils.box_coder.ResidualBoxCoder.encode_direction"]], "forward() (cosense3d.modules.utils.conv.convmodule method)": [[19, "cosense3d.modules.utils.conv.ConvModule.forward"]], "forward() (cosense3d.modules.utils.misc.mln method)": [[19, "cosense3d.modules.utils.misc.MLN.forward"]], "forward() (cosense3d.modules.utils.misc.mln2 method)": [[19, "cosense3d.modules.utils.misc.MLN2.forward"]], "forward() (cosense3d.modules.utils.misc.selayer_linear method)": [[19, "cosense3d.modules.utils.misc.SELayer_Linear.forward"]], "forward() (cosense3d.modules.utils.nbr_attn.neighborhoodattention method)": [[19, "cosense3d.modules.utils.nbr_attn.NeighborhoodAttention.forward"]], "fuse_batch_indices() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.fuse_batch_indices"]], "gaussian_2d() (in module cosense3d.modules.utils.gaussian_utils)": [[19, "cosense3d.modules.utils.gaussian_utils.gaussian_2d"]], "gaussian_radius() (in module cosense3d.modules.utils.gaussian_utils)": [[19, 
"cosense3d.modules.utils.gaussian_utils.gaussian_radius"]], "get_conv2d_layers() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.get_conv2d_layers"]], "get_conv_block() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.get_conv_block"]], "get_kernel_map_and_out_key() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.get_kernel_map_and_out_key"]], "get_norm_layer() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.get_norm_layer"]], "get_voxel_centers() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.get_voxel_centers"]], "img_locations() (in module cosense3d.modules.utils.positional_encoding)": [[19, "cosense3d.modules.utils.positional_encoding.img_locations"]], "indices2metric() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.indices2metric"]], "init_weights() (cosense3d.modules.utils.conv.convmodule method)": [[19, "cosense3d.modules.utils.conv.ConvModule.init_weights"]], "instantiate() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.instantiate"]], "inverse_sigmoid() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.inverse_sigmoid"]], "kaiming_init() (in module cosense3d.modules.utils.init)": [[19, "cosense3d.modules.utils.init.kaiming_init"]], "limit_period() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.limit_period"]], "linear_last() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.linear_last"]], "linear_layers() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.linear_layers"]], "logit_to_edl() (in module cosense3d.modules.utils.edl_utils)": [[19, "cosense3d.modules.utils.edl_utils.logit_to_edl"]], "mahalanobis_dists_2d() (in module cosense3d.modules.utils.gaussian_utils)": [[19, "cosense3d.modules.utils.gaussian_utils.mahalanobis_dists_2d"]], "me_coor_to_grid_indices() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.me_coor_to_grid_indices"]], "meshgrid() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.meshgrid"]], "meshgrid_cross() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.meshgrid_cross"]], "metric2indices() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.metric2indices"]], "mink_coor_limit() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.mink_coor_limit"]], "minkconv_conv_block() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.minkconv_conv_block"]], "minkconv_layer() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.minkconv_layer"]], "nerf_positional_encoding() (in module cosense3d.modules.utils.positional_encoding)": [[19, "cosense3d.modules.utils.positional_encoding.nerf_positional_encoding"]], "norm (cosense3d.modules.utils.conv.convmodule property)": [[19, "cosense3d.modules.utils.conv.ConvModule.norm"]], "normal_init() (in module cosense3d.modules.utils.init)": [[19, "cosense3d.modules.utils.init.normal_init"]], "normalize_centroids() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.normalize_centroids"]], "normalize_points() (in module cosense3d.modules.utils.me_utils)": [[19, 
"cosense3d.modules.utils.me_utils.normalize_points"]], "pad_l() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.pad_l"]], "pad_r() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.pad_r"]], "pos2posemb1d() (in module cosense3d.modules.utils.positional_encoding)": [[19, "cosense3d.modules.utils.positional_encoding.pos2posemb1d"]], "pos2posemb2d() (in module cosense3d.modules.utils.positional_encoding)": [[19, "cosense3d.modules.utils.positional_encoding.pos2posemb2d"]], "pos2posemb3d() (in module cosense3d.modules.utils.positional_encoding)": [[19, "cosense3d.modules.utils.positional_encoding.pos2posemb3d"]], "prepare_input_data() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.prepare_input_data"]], "ratio2coord() (in module cosense3d.modules.utils.positional_encoding)": [[19, "cosense3d.modules.utils.positional_encoding.ratio2coord"]], "reset_parameters() (cosense3d.modules.utils.misc.mln method)": [[19, "cosense3d.modules.utils.misc.MLN.reset_parameters"]], "reset_parameters() (cosense3d.modules.utils.misc.mln2 method)": [[19, "cosense3d.modules.utils.misc.MLN2.reset_parameters"]], "sparse_to_dense() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.sparse_to_dense"]], "stride_centroids() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.stride_centroids"]], "topk_gather() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.topk_gather"]], "training (cosense3d.modules.utils.conv.convmodule attribute)": [[19, "cosense3d.modules.utils.conv.ConvModule.training"]], "training (cosense3d.modules.utils.misc.mln attribute)": [[19, "cosense3d.modules.utils.misc.MLN.training"]], "training (cosense3d.modules.utils.misc.mln2 attribute)": [[19, "cosense3d.modules.utils.misc.MLN2.training"]], "training (cosense3d.modules.utils.misc.selayer_linear attribute)": [[19, "cosense3d.modules.utils.misc.SELayer_Linear.training"]], "training (cosense3d.modules.utils.nbr_attn.neighborhoodattention attribute)": [[19, "cosense3d.modules.utils.nbr_attn.NeighborhoodAttention.training"]], "trunc_normal_init() (in module cosense3d.modules.utils.init)": [[19, "cosense3d.modules.utils.init.trunc_normal_init"]], "uniform_init() (in module cosense3d.modules.utils.init)": [[19, "cosense3d.modules.utils.init.uniform_init"]], "update_me_essentials() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.update_me_essentials"]], "voxelize_with_centroids() (in module cosense3d.modules.utils.me_utils)": [[19, "cosense3d.modules.utils.me_utils.voxelize_with_centroids"]], "weighted_mahalanobis_dists() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.weighted_mahalanobis_dists"]], "weighted_mahalanobis_dists() (in module cosense3d.modules.utils.gaussian_utils)": [[19, "cosense3d.modules.utils.gaussian_utils.weighted_mahalanobis_dists"]], "xavier_init() (in module cosense3d.modules.utils.common)": [[19, "cosense3d.modules.utils.common.xavier_init"]], "xavier_init() (in module cosense3d.modules.utils.init)": [[19, "cosense3d.modules.utils.init.xavier_init"]], "lrupdater (class in cosense3d.utils.lr_scheduler)": [[20, "cosense3d.utils.lr_scheduler.LRUpdater"]], "logmeter (class in cosense3d.utils.logger)": [[20, "cosense3d.utils.logger.LogMeter"]], "metric (class in cosense3d.utils.metrics)": [[20, "cosense3d.utils.metrics.Metric"]], "metricbev (class in 
cosense3d.utils.metrics)": [[20, "cosense3d.utils.metrics.MetricBev"]], "metricmot (class in cosense3d.utils.metrics)": [[20, "cosense3d.utils.metrics.MetricMOT"]], "metricobjdet (class in cosense3d.utils.metrics)": [[20, "cosense3d.utils.metrics.MetricObjDet"]], "metricsemseg (class in cosense3d.utils.metrics)": [[20, "cosense3d.utils.metrics.MetricSemSeg"]], "smoothedvalue (class in cosense3d.utils.logger)": [[20, "cosense3d.utils.logger.SmoothedValue"]], "testlogger (class in cosense3d.utils.logger)": [[20, "cosense3d.utils.logger.TestLogger"]], "transformeradaptivescheduler (class in cosense3d.utils.lr_scheduler)": [[20, "cosense3d.utils.lr_scheduler.TransformerAdaptiveScheduler"]], "add_meter() (cosense3d.utils.logger.logmeter method)": [[20, "cosense3d.utils.logger.LogMeter.add_meter"]], "add_sample() (cosense3d.utils.metrics.metricobjdet method)": [[20, "cosense3d.utils.metrics.MetricObjDet.add_sample"]], "add_samples() (cosense3d.utils.metrics.metric method)": [[20, "cosense3d.utils.metrics.Metric.add_samples"]], "add_samples() (cosense3d.utils.metrics.metricbev method)": [[20, "cosense3d.utils.metrics.MetricBev.add_samples"]], "add_samples() (cosense3d.utils.metrics.metricmot method)": [[20, "cosense3d.utils.metrics.MetricMOT.add_samples"]], "add_samples() (cosense3d.utils.metrics.metricobjdet method)": [[20, "cosense3d.utils.metrics.MetricObjDet.add_samples"]], "add_samples() (cosense3d.utils.metrics.metricsemseg method)": [[20, "cosense3d.utils.metrics.MetricSemSeg.add_samples"]], "avg (cosense3d.utils.logger.smoothedvalue property)": [[20, "cosense3d.utils.logger.SmoothedValue.avg"]], "bbox_cxcywh_to_xyxy() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.bbox_cxcywh_to_xyxy"]], "bbox_overlaps() (in module cosense3d.utils.iou2d_calculator)": [[20, "cosense3d.utils.iou2d_calculator.bbox_overlaps"]], "bbox_xyxy_to_cxcywh() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.bbox_xyxy_to_cxcywh"]], "bbx2linset() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.bbx2linset"]], "boxes3d_to_standup_bboxes() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.boxes3d_to_standup_bboxes"]], "boxes_to_corners_2d() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.boxes_to_corners_2d"]], "boxes_to_corners_3d() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.boxes_to_corners_3d"]], "build_dropout() (in module cosense3d.utils.module_utils)": [[20, "cosense3d.utils.module_utils.build_dropout"]], "build_lr_scheduler() (in module cosense3d.utils.lr_scheduler)": [[20, "cosense3d.utils.lr_scheduler.build_lr_scheduler"]], "build_lr_scheduler() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.build_lr_scheduler"]], "build_norm_layer() (in module cosense3d.utils.module_utils)": [[20, "cosense3d.utils.module_utils.build_norm_layer"]], "build_optimizer() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.build_optimizer"]], "cal_ap_11_point() (cosense3d.utils.metrics.metricobjdet method)": [[20, "cosense3d.utils.metrics.MetricObjDet.cal_ap_11_point"]], "cal_ap_all_point() (cosense3d.utils.metrics.metricobjdet method)": [[20, "cosense3d.utils.metrics.MetricObjDet.cal_ap_all_point"]], "cal_ap_all_point() (in module cosense3d.utils.eval_detection_utils)": [[20, "cosense3d.utils.eval_detection_utils.cal_ap_all_point"]], "cal_ious_and_accs() (cosense3d.utils.metrics.metricsemseg method)": [[20, 
"cosense3d.utils.metrics.MetricSemSeg.cal_ious_and_accs"]], "cal_precision_recall() (cosense3d.utils.metrics.metricobjdet method)": [[20, "cosense3d.utils.metrics.MetricObjDet.cal_precision_recall"]], "cal_precision_recall() (in module cosense3d.utils.eval_detection_utils)": [[20, "cosense3d.utils.eval_detection_utils.cal_precision_recall"]], "calc_lr() (cosense3d.utils.lr_scheduler.transformeradaptivescheduler method)": [[20, "cosense3d.utils.lr_scheduler.TransformerAdaptiveScheduler.calc_lr"]], "calculate_ap() (in module cosense3d.utils.eval_detection_utils)": [[20, "cosense3d.utils.eval_detection_utils.calculate_ap"]], "caluclate_tp_fp() (in module cosense3d.utils.eval_detection_utils)": [[20, "cosense3d.utils.eval_detection_utils.caluclate_tp_fp"]], "cart2cyl() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.cart2cyl"]], "cast_tensor_type() (in module cosense3d.utils.iou2d_calculator)": [[20, "cosense3d.utils.iou2d_calculator.cast_tensor_type"]], "check_numpy_to_torch() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.check_numpy_to_torch"]], "check_numpy_to_torch() (in module cosense3d.utils.tensor_utils)": [[20, "cosense3d.utils.tensor_utils.check_numpy_to_torch"]], "clip_grads() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.clip_grads"]], "compute_iou() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.compute_iou"]], "convert_box_to_polygon() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.convert_box_to_polygon"]], "corners_to_boxes_3d() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.corners_to_boxes_3d"]], "cosense3d.utils": [[20, "module-cosense3d.utils"]], "cosense3d.utils.box_utils": [[20, "module-cosense3d.utils.box_utils"]], "cosense3d.utils.eval_detection_utils": [[20, "module-cosense3d.utils.eval_detection_utils"]], "cosense3d.utils.iou2d_calculator": [[20, "module-cosense3d.utils.iou2d_calculator"]], "cosense3d.utils.logger": [[20, "module-cosense3d.utils.logger"]], "cosense3d.utils.lr_scheduler": [[20, "module-cosense3d.utils.lr_scheduler"]], "cosense3d.utils.metrics": [[20, "module-cosense3d.utils.metrics"]], "cosense3d.utils.misc": [[20, "module-cosense3d.utils.misc"]], "cosense3d.utils.module_utils": [[20, "module-cosense3d.utils.module_utils"]], "cosense3d.utils.pclib": [[20, "module-cosense3d.utils.pclib"]], "cosense3d.utils.tensor_utils": [[20, "module-cosense3d.utils.tensor_utils"]], "cosense3d.utils.train_utils": [[20, "module-cosense3d.utils.train_utils"]], "cosense3d.utils.vislib": [[20, "module-cosense3d.utils.vislib"]], "cyl2cart() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.cyl2cart"]], "decode_boxes() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.decode_boxes"]], "denormalize_bbox() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.denormalize_bbox"]], "digit_version() (in module cosense3d.utils.module_utils)": [[20, "cosense3d.utils.module_utils.digit_version"]], "draw_2d_bboxes_on_img() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.draw_2d_bboxes_on_img"]], "draw_3d_points_boxes_on_img() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.draw_3d_points_boxes_on_img"]], "draw_box_plt() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.draw_box_plt"]], "draw_matched_boxes() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.draw_matched_boxes"]], "draw_points_boxes_plt() (in module 
cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.draw_points_boxes_plt"]], "enlarge_box3d() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.enlarge_box3d"]], "ensure_dir() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.ensure_dir"]], "eval_final_results() (in module cosense3d.utils.eval_detection_utils)": [[20, "cosense3d.utils.eval_detection_utils.eval_final_results"]], "find_rigid_alignment() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.find_rigid_alignment"]], "format_str() (cosense3d.utils.metrics.metricbev method)": [[20, "cosense3d.utils.metrics.MetricBev.format_str"]], "fp16_clamp() (in module cosense3d.utils.iou2d_calculator)": [[20, "cosense3d.utils.iou2d_calculator.fp16_clamp"]], "get_gpu_architecture() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.get_gpu_architecture"]], "get_last_lr() (cosense3d.utils.lr_scheduler.lrupdater method)": [[20, "cosense3d.utils.lr_scheduler.LRUpdater.get_last_lr"]], "get_lr() (cosense3d.utils.lr_scheduler.transformeradaptivescheduler method)": [[20, "cosense3d.utils.lr_scheduler.TransformerAdaptiveScheduler.get_lr"]], "get_palette_colors() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.get_palette_colors"]], "get_target_module() (in module cosense3d.utils.module_utils)": [[20, "cosense3d.utils.module_utils.get_target_module"]], "get_tf_matrix_torch() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.get_tf_matrix_torch"]], "global_avg (cosense3d.utils.logger.smoothedvalue property)": [[20, "cosense3d.utils.logger.SmoothedValue.global_avg"]], "header() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.header"]], "instantiate_target_module() (in module cosense3d.utils.module_utils)": [[20, "cosense3d.utils.module_utils.instantiate_target_module"]], "iou() (cosense3d.utils.metrics.metricbev method)": [[20, "cosense3d.utils.metrics.MetricBev.iou"]], "is_tensor_to_cuda() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.is_tensor_to_cuda"]], "lidar_bin2bin() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.lidar_bin2bin"]], "lidar_bin2pcd() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.lidar_bin2pcd"]], "lidar_bin2pcd_o3d() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.lidar_bin2pcd_o3d"]], "lidar_ply2bin() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.lidar_ply2bin"]], "limit_period() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.limit_period"]], "list_dirs() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.list_dirs"]], "load_from_pl_state_dict() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.load_from_pl_state_dict"]], "load_json() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.load_json"]], "load_model_dict() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.load_model_dict"]], "load_pcd() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.load_pcd"]], "load_state_dict() (cosense3d.utils.lr_scheduler.lrupdater method)": [[20, "cosense3d.utils.lr_scheduler.LRUpdater.load_state_dict"]], "load_tensors_to_gpu() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.load_tensors_to_gpu"]], "load_yaml() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.load_yaml"]], "log() (cosense3d.utils.logger.logmeter method)": [[20, 
"cosense3d.utils.logger.LogMeter.log"]], "log() (cosense3d.utils.logger.testlogger method)": [[20, "cosense3d.utils.logger.TestLogger.log"]], "mask_boxes_outside_range_numpy() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.mask_boxes_outside_range_numpy"]], "mask_boxes_outside_range_torch() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.mask_boxes_outside_range_torch"]], "mask_points_in_box() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.mask_points_in_box"]], "mask_points_in_range() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.mask_points_in_range"]], "mask_values_in_range() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.mask_values_in_range"]], "mat_pitch() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.mat_pitch"]], "mat_roll() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.mat_roll"]], "mat_yaw() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.mat_yaw"]], "max (cosense3d.utils.logger.smoothedvalue property)": [[20, "cosense3d.utils.logger.SmoothedValue.max"]], "median (cosense3d.utils.logger.smoothedvalue property)": [[20, "cosense3d.utils.logger.SmoothedValue.median"]], "multi_apply() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.multi_apply"]], "normalize_bbox() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.normalize_bbox"]], "o3d_draw_agent_data() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.o3d_draw_agent_data"]], "o3d_draw_frame_data() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.o3d_draw_frame_data"]], "o3d_draw_pcds_bbxs() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.o3d_draw_pcds_bbxs"]], "o3d_play_sequence() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.o3d_play_sequence"]], "ops_cal_tp() (in module cosense3d.utils.eval_detection_utils)": [[20, "cosense3d.utils.eval_detection_utils.ops_cal_tp"]], "pad_list_to_array_np() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.pad_list_to_array_np"]], "pad_list_to_array_torch() (in module cosense3d.utils.tensor_utils)": [[20, "cosense3d.utils.tensor_utils.pad_list_to_array_torch"]], "plot_cavs_points() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.plot_cavs_points"]], "plt_draw_frame_data() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.plt_draw_frame_data"]], "pose2tf() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.pose2tf"]], "pose_err_global2relative_torch() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.pose_err_global2relative_torch"]], "pose_to_transformation() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.pose_to_transformation"]], "project_points_by_matrix_torch() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.project_points_by_matrix_torch"]], "read_ply() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.read_ply"]], "remove_points_in_boxes3d() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.remove_points_in_boxes3d"]], "rotate3d() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotate3d"]], "rotate_box_corners_with_tf_np() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotate_box_corners_with_tf_np"]], "rotate_points_along_z_np() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotate_points_along_z_np"]], 
"rotate_points_along_z_torch() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotate_points_along_z_torch"]], "rotate_points_batch() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotate_points_batch"]], "rotate_points_with_tf_np() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotate_points_with_tf_np"]], "rotation_mat2euler_torch() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotation_mat2euler_torch"]], "rotation_matrix() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.rotation_matrix"]], "save_cosense_ply() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.save_cosense_ply"]], "save_detections() (cosense3d.utils.metrics.metric method)": [[20, "cosense3d.utils.metrics.Metric.save_detections"]], "save_detections() (cosense3d.utils.metrics.metricobjdet method)": [[20, "cosense3d.utils.metrics.MetricObjDet.save_detections"]], "save_detections() (cosense3d.utils.metrics.metricsemseg method)": [[20, "cosense3d.utils.metrics.MetricSemSeg.save_detections"]], "save_json() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.save_json"]], "save_yaml() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.save_yaml"]], "seed_everything() (in module cosense3d.utils.train_utils)": [[20, "cosense3d.utils.train_utils.seed_everything"]], "setup_logger() (in module cosense3d.utils.logger)": [[20, "cosense3d.utils.logger.setup_logger"]], "setup_logger() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.setup_logger"]], "state_dict() (cosense3d.utils.lr_scheduler.lrupdater method)": [[20, "cosense3d.utils.lr_scheduler.LRUpdater.state_dict"]], "step_epoch() (cosense3d.utils.lr_scheduler.lrupdater method)": [[20, "cosense3d.utils.lr_scheduler.LRUpdater.step_epoch"]], "step_itr() (cosense3d.utils.lr_scheduler.lrupdater method)": [[20, "cosense3d.utils.lr_scheduler.LRUpdater.step_itr"]], "summary() (cosense3d.utils.metrics.metric method)": [[20, "cosense3d.utils.metrics.Metric.summary"]], "summary() (cosense3d.utils.metrics.metricbev method)": [[20, "cosense3d.utils.metrics.MetricBev.summary"]], "summary() (cosense3d.utils.metrics.metricobjdet method)": [[20, "cosense3d.utils.metrics.MetricObjDet.summary"]], "summary_hook() (cosense3d.utils.metrics.metricbev method)": [[20, "cosense3d.utils.metrics.MetricBev.summary_hook"]], "tf2pose() (in module cosense3d.utils.pclib)": [[20, "cosense3d.utils.pclib.tf2pose"]], "torch_tensor_to_numpy() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.torch_tensor_to_numpy"]], "transform_boxes_3d() (in module cosense3d.utils.box_utils)": [[20, "cosense3d.utils.box_utils.transform_boxes_3d"]], "update() (cosense3d.utils.logger.logmeter method)": [[20, "cosense3d.utils.logger.LogMeter.update"]], "update() (cosense3d.utils.logger.smoothedvalue method)": [[20, "cosense3d.utils.logger.SmoothedValue.update"]], "update_axis_linset() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.update_axis_linset"]], "update_dict() (in module cosense3d.utils.misc)": [[20, "cosense3d.utils.misc.update_dict"]], "update_lineset_vbo() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.update_lineset_vbo"]], "value (cosense3d.utils.logger.smoothedvalue property)": [[20, "cosense3d.utils.logger.SmoothedValue.value"]], "visualization() (in module cosense3d.utils.vislib)": [[20, "cosense3d.utils.vislib.visualization"]], "voc_ap() (in module cosense3d.utils.eval_detection_utils)": [[20, 
"cosense3d.utils.eval_detection_utils.voc_ap"]]}}) \ No newline at end of file diff --git a/docs/_static/imgs/buffer_based_sampling.png b/docs/_static/imgs/buffer_based_sampling.png new file mode 100644 index 00000000..6678d0f4 Binary files /dev/null and b/docs/_static/imgs/buffer_based_sampling.png differ diff --git a/docs/_static/imgs/center_controller.png b/docs/_static/imgs/center_controller.png new file mode 100644 index 00000000..3d758489 Binary files /dev/null and b/docs/_static/imgs/center_controller.png differ diff --git a/docs/_static/imgs/cosense_logo.png b/docs/_static/imgs/cosense_logo.png new file mode 100644 index 00000000..d12fa8b0 Binary files /dev/null and b/docs/_static/imgs/cosense_logo.png differ diff --git a/docs/_static/imgs/dairv2xt.gif b/docs/_static/imgs/dairv2xt.gif new file mode 100644 index 00000000..17be4cdd Binary files /dev/null and b/docs/_static/imgs/dairv2xt.gif differ diff --git a/docs/_static/imgs/download.png b/docs/_static/imgs/download.png new file mode 100644 index 00000000..bfb8906a Binary files /dev/null and b/docs/_static/imgs/download.png differ diff --git a/docs/_static/imgs/framework-structure.png b/docs/_static/imgs/framework-structure.png new file mode 100644 index 00000000..78c5136a Binary files /dev/null and b/docs/_static/imgs/framework-structure.png differ diff --git a/docs/_static/imgs/glviewer.png b/docs/_static/imgs/glviewer.png new file mode 100644 index 00000000..ef75085f Binary files /dev/null and b/docs/_static/imgs/glviewer.png differ diff --git a/docs/_static/imgs/imganno2dviewer.png b/docs/_static/imgs/imganno2dviewer.png new file mode 100644 index 00000000..dbd4a6cc Binary files /dev/null and b/docs/_static/imgs/imganno2dviewer.png differ diff --git a/docs/_static/imgs/imgviewer.png b/docs/_static/imgs/imgviewer.png new file mode 100644 index 00000000..0eb6a691 Binary files /dev/null and b/docs/_static/imgs/imgviewer.png differ diff --git a/docs/_static/imgs/opv2vt.gif b/docs/_static/imgs/opv2vt.gif new file mode 100644 index 00000000..cafe71cf Binary files /dev/null and b/docs/_static/imgs/opv2vt.gif differ diff --git a/docs/_static/imgs/outputviewer.png b/docs/_static/imgs/outputviewer.png new file mode 100644 index 00000000..66efa4cf Binary files /dev/null and b/docs/_static/imgs/outputviewer.png differ diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000..2c0c6698 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,41 @@ +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information +import os +import sys + +cur_path = os.path.abspath(os.path.dirname(__file__)) +sys.path.insert(0, os.path.abspath(f"{cur_path}/..")) +sys.path.append(f'{cur_path}/../cosense3d') + + +project = 'Cosense3D' +copyright = '2024, Yunshuang Yuan' +author = 'Yunshuang Yuan' +release = '1.0.0' + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = ['recommonmark', "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinx.ext.autodoc"] + +templates_path = ['_templates'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'cosense3d/config', 'cosense3d/ops'] + +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = 'sphinx_rtd_theme' +html_static_path = ['_static'] +html_context = { + 'image_path': '_static/imgs' # Relative to html_static_path +} diff --git a/docs/cosense3d.agents.cav_prototype.rst b/docs/cosense3d.agents.cav_prototype.rst new file mode 100644 index 00000000..4bb42d0f --- /dev/null +++ b/docs/cosense3d.agents.cav_prototype.rst @@ -0,0 +1,29 @@ +cosense3d.agents.cav\_prototype package +======================================= + +Submodules +---------- + +cosense3d.agents.cav\_prototype.base\_cav module +------------------------------------------------ + +.. automodule:: cosense3d.agents.cav_prototype.base_cav + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.cav\_prototype.streamLTS\_collection module +------------------------------------------------------------ + +.. automodule:: cosense3d.agents.cav_prototype.streamLTS_collection + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.cav_prototype + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.agents.core.rst b/docs/cosense3d.agents.core.rst new file mode 100644 index 00000000..a38dabe3 --- /dev/null +++ b/docs/cosense3d.agents.core.rst @@ -0,0 +1,93 @@ +cosense3d.agents.core package +============================= + +Submodules +---------- + +cosense3d.agents.core.base\_runner module +----------------------------------------- + +.. automodule:: cosense3d.agents.core.base_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.cav\_manager module +----------------------------------------- + +.. automodule:: cosense3d.agents.core.cav_manager + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.data\_manager module +------------------------------------------ + +.. automodule:: cosense3d.agents.core.data_manager + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.forward\_runner module +-------------------------------------------- + +.. automodule:: cosense3d.agents.core.forward_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.gui module +-------------------------------- + +.. 
automodule:: cosense3d.agents.core.gui + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.hooks module +---------------------------------- + +.. automodule:: cosense3d.agents.core.hooks + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.task\_manager module +------------------------------------------ + +.. automodule:: cosense3d.agents.core.task_manager + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.test\_runner module +----------------------------------------- + +.. automodule:: cosense3d.agents.core.test_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.train\_runner module +------------------------------------------ + +.. automodule:: cosense3d.agents.core.train_runner + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.core.vis\_runner module +---------------------------------------- + +.. automodule:: cosense3d.agents.core.vis_runner + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.core + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.agents.rst b/docs/cosense3d.agents.rst new file mode 100644 index 00000000..5db920ff --- /dev/null +++ b/docs/cosense3d.agents.rst @@ -0,0 +1,32 @@ +cosense3d.agents package +======================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.agents.cav_prototype + cosense3d.agents.core + cosense3d.agents.utils + cosense3d.agents.viewer + +Submodules +---------- + +cosense3d.agents.center\_controller module +------------------------------------------ + +.. automodule:: cosense3d.agents.center_controller + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.agents.utils.rst b/docs/cosense3d.agents.utils.rst new file mode 100644 index 00000000..3ca7e6db --- /dev/null +++ b/docs/cosense3d.agents.utils.rst @@ -0,0 +1,29 @@ +cosense3d.agents.utils package +============================== + +Submodules +---------- + +cosense3d.agents.utils.deco module +---------------------------------- + +.. automodule:: cosense3d.agents.utils.deco + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.utils.transform module +--------------------------------------- + +.. automodule:: cosense3d.agents.utils.transform + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.agents.viewer.items.rst b/docs/cosense3d.agents.viewer.items.rst new file mode 100644 index 00000000..d7d493fb --- /dev/null +++ b/docs/cosense3d.agents.viewer.items.rst @@ -0,0 +1,21 @@ +cosense3d.agents.viewer.items package +===================================== + +Submodules +---------- + +cosense3d.agents.viewer.items.graph\_items module +------------------------------------------------- + +.. automodule:: cosense3d.agents.viewer.items.graph_items + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: cosense3d.agents.viewer.items + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.agents.viewer.rst b/docs/cosense3d.agents.viewer.rst new file mode 100644 index 00000000..0fcf4a70 --- /dev/null +++ b/docs/cosense3d.agents.viewer.rst @@ -0,0 +1,61 @@ +cosense3d.agents.viewer package +=============================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.agents.viewer.items + +Submodules +---------- + +cosense3d.agents.viewer.gl\_viewer module +----------------------------------------- + +.. automodule:: cosense3d.agents.viewer.gl_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.img\_anno3d\_viewer module +-------------------------------------------------- + +.. automodule:: cosense3d.agents.viewer.img_anno3d_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.img\_viewer module +------------------------------------------ + +.. automodule:: cosense3d.agents.viewer.img_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.output\_viewer module +--------------------------------------------- + +.. automodule:: cosense3d.agents.viewer.output_viewer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.agents.viewer.utils module +------------------------------------ + +.. automodule:: cosense3d.agents.viewer.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.agents.viewer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.dataset.pipeline.rst b/docs/cosense3d.dataset.pipeline.rst new file mode 100644 index 00000000..cb14002f --- /dev/null +++ b/docs/cosense3d.dataset.pipeline.rst @@ -0,0 +1,29 @@ +cosense3d.dataset.pipeline package +================================== + +Submodules +---------- + +cosense3d.dataset.pipeline.loading module +----------------------------------------- + +.. automodule:: cosense3d.dataset.pipeline.loading + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.pipeline.transform module +------------------------------------------- + +.. automodule:: cosense3d.dataset.pipeline.transform + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.dataset.pipeline + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.dataset.rst b/docs/cosense3d.dataset.rst new file mode 100644 index 00000000..470c5269 --- /dev/null +++ b/docs/cosense3d.dataset.rst @@ -0,0 +1,46 @@ +cosense3d.dataset package +========================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.dataset.pipeline + cosense3d.dataset.toolkit + +Submodules +---------- + +cosense3d.dataset.const module +------------------------------ + +.. automodule:: cosense3d.dataset.const + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.cosense\_dataset module +----------------------------------------- + +.. automodule:: cosense3d.dataset.cosense_dataset + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.temporal\_cosense\_dataset module +--------------------------------------------------- + +.. automodule:: cosense3d.dataset.temporal_cosense_dataset + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: cosense3d.dataset + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.dataset.toolkit.rst b/docs/cosense3d.dataset.toolkit.rst new file mode 100644 index 00000000..a599c35b --- /dev/null +++ b/docs/cosense3d.dataset.toolkit.rst @@ -0,0 +1,45 @@ +cosense3d.dataset.toolkit package +================================= + +Submodules +---------- + +cosense3d.dataset.toolkit.cosense module +---------------------------------------- + +.. automodule:: cosense3d.dataset.toolkit.cosense + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.toolkit.dairv2x module +---------------------------------------- + +.. automodule:: cosense3d.dataset.toolkit.dairv2x + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.toolkit.opv2v module +-------------------------------------- + +.. automodule:: cosense3d.dataset.toolkit.opv2v + :members: + :undoc-members: + :show-inheritance: + +cosense3d.dataset.toolkit.opv2v\_t module +----------------------------------------- + +.. automodule:: cosense3d.dataset.toolkit.opv2v_t + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.dataset.toolkit + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.backbone2d.rst b/docs/cosense3d.modules.backbone2d.rst new file mode 100644 index 00000000..6a2c7fc3 --- /dev/null +++ b/docs/cosense3d.modules.backbone2d.rst @@ -0,0 +1,21 @@ +cosense3d.modules.backbone2d package +==================================== + +Submodules +---------- + +cosense3d.modules.backbone2d.resnet\_encoder module +--------------------------------------------------- + +.. automodule:: cosense3d.modules.backbone2d.resnet_encoder + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.backbone2d + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.backbone3d.rst b/docs/cosense3d.modules.backbone3d.rst new file mode 100644 index 00000000..12c1f5d1 --- /dev/null +++ b/docs/cosense3d.modules.backbone3d.rst @@ -0,0 +1,45 @@ +cosense3d.modules.backbone3d package +==================================== + +Submodules +---------- + +cosense3d.modules.backbone3d.mink\_unet module +---------------------------------------------- + +.. automodule:: cosense3d.modules.backbone3d.mink_unet + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.backbone3d.pillar\_bev module +----------------------------------------------- + +.. automodule:: cosense3d.modules.backbone3d.pillar_bev + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.backbone3d.spconv module +------------------------------------------ + +.. automodule:: cosense3d.modules.backbone3d.spconv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.backbone3d.voxelnet module +-------------------------------------------- + +.. automodule:: cosense3d.modules.backbone3d.voxelnet + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: cosense3d.modules.backbone3d + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.fusion.rst b/docs/cosense3d.modules.fusion.rst new file mode 100644 index 00000000..0ba729e5 --- /dev/null +++ b/docs/cosense3d.modules.fusion.rst @@ -0,0 +1,77 @@ +cosense3d.modules.fusion package +================================ + +Submodules +---------- + +cosense3d.modules.fusion.attn\_fusion module +-------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.attn_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.box\_fusion module +------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.box_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.fax module +----------------------------------- + +.. automodule:: cosense3d.modules.fusion.fax + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.keypoints module +----------------------------------------- + +.. automodule:: cosense3d.modules.fusion.keypoints + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.maxout\_fusion module +---------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.maxout_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.naive\_fusion module +--------------------------------------------- + +.. automodule:: cosense3d.modules.fusion.naive_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.spatial\_query\_fusion module +------------------------------------------------------ + +.. automodule:: cosense3d.modules.fusion.spatial_query_fusion + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.fusion.temporal\_fusion module +------------------------------------------------ + +.. automodule:: cosense3d.modules.fusion.temporal_fusion + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.fusion + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.heads.rst b/docs/cosense3d.modules.heads.rst new file mode 100644 index 00000000..43eaa832 --- /dev/null +++ b/docs/cosense3d.modules.heads.rst @@ -0,0 +1,109 @@ +cosense3d.modules.heads package +=============================== + +Submodules +---------- + +cosense3d.modules.heads.bev module +---------------------------------- + +.. automodule:: cosense3d.modules.heads.bev + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.bev\_dense module +----------------------------------------- + +.. automodule:: cosense3d.modules.heads.bev_dense + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_anchor\_dense module +------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.det_anchor_dense + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_anchor\_sparse module +-------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.det_anchor_sparse + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_center\_sparse module +-------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.det_center_sparse + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.det\_roi\_refine module +----------------------------------------------- + +.. 
automodule:: cosense3d.modules.heads.det_roi_refine + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.img\_focal module +----------------------------------------- + +.. automodule:: cosense3d.modules.heads.img_focal + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.lidar\_petr\_head module +------------------------------------------------ + +.. automodule:: cosense3d.modules.heads.lidar_petr_head + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.multitask\_head module +---------------------------------------------- + +.. automodule:: cosense3d.modules.heads.multitask_head + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.nbr\_attn\_bev module +--------------------------------------------- + +.. automodule:: cosense3d.modules.heads.nbr_attn_bev + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.petr\_head module +----------------------------------------- + +.. automodule:: cosense3d.modules.heads.petr_head + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.heads.query\_guided\_petr\_head module +-------------------------------------------------------- + +.. automodule:: cosense3d.modules.heads.query_guided_petr_head + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.heads + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.losses.rst b/docs/cosense3d.modules.losses.rst new file mode 100644 index 00000000..08a241ed --- /dev/null +++ b/docs/cosense3d.modules.losses.rst @@ -0,0 +1,69 @@ +cosense3d.modules.losses package +================================ + +Submodules +---------- + +cosense3d.modules.losses.base\_loss module +------------------------------------------ + +.. automodule:: cosense3d.modules.losses.base_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.common module +-------------------------------------- + +.. automodule:: cosense3d.modules.losses.common + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.edl module +----------------------------------- + +.. automodule:: cosense3d.modules.losses.edl + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.focal\_loss module +------------------------------------------- + +.. automodule:: cosense3d.modules.losses.focal_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.iou\_loss module +----------------------------------------- + +.. automodule:: cosense3d.modules.losses.iou_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.l1\_loss module +---------------------------------------- + +.. automodule:: cosense3d.modules.losses.l1_loss + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.losses.vanilla\_seg\_loss module +-------------------------------------------------- + +.. automodule:: cosense3d.modules.losses.vanilla_seg_loss + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: cosense3d.modules.losses + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.necks.rst b/docs/cosense3d.modules.necks.rst new file mode 100644 index 00000000..321065e0 --- /dev/null +++ b/docs/cosense3d.modules.necks.rst @@ -0,0 +1,37 @@ +cosense3d.modules.necks package +=============================== + +Submodules +---------- + +cosense3d.modules.necks.cpm\_composer module +-------------------------------------------- + +.. automodule:: cosense3d.modules.necks.cpm_composer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.necks.dilation\_spconv module +----------------------------------------------- + +.. automodule:: cosense3d.modules.necks.dilation_spconv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.necks.formatting module +----------------------------------------- + +.. automodule:: cosense3d.modules.necks.formatting + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.necks + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.plugin.rst b/docs/cosense3d.modules.plugin.rst new file mode 100644 index 00000000..542932ba --- /dev/null +++ b/docs/cosense3d.modules.plugin.rst @@ -0,0 +1,141 @@ +cosense3d.modules.plugin package +================================ + +Submodules +---------- + +cosense3d.modules.plugin.attn module +------------------------------------ + +.. automodule:: cosense3d.modules.plugin.attn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.bev\_rpn module +---------------------------------------- + +.. automodule:: cosense3d.modules.plugin.bev_rpn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.downsample\_conv module +------------------------------------------------ + +.. automodule:: cosense3d.modules.plugin.downsample_conv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.flash\_attn module +------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.flash_attn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.fpn module +----------------------------------- + +.. automodule:: cosense3d.modules.plugin.fpn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.gevbev\_decoder module +----------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.gevbev_decoder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.mink\_spconv module +-------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.mink_spconv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.naive\_compressor module +------------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.naive_compressor + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.pillar\_encoder module +----------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.pillar_encoder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.ssfa module +------------------------------------ + +.. automodule:: cosense3d.modules.plugin.ssfa + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.target\_assigners module +------------------------------------------------- + +.. 
automodule:: cosense3d.modules.plugin.target_assigners + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.transformer module +------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.transformer + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.voxel\_encoder module +---------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.voxel_encoder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.voxel\_generator module +------------------------------------------------ + +.. automodule:: cosense3d.modules.plugin.voxel_generator + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.voxnet\_utils module +--------------------------------------------- + +.. automodule:: cosense3d.modules.plugin.voxnet_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.plugin.vsa module +----------------------------------- + +.. automodule:: cosense3d.modules.plugin.vsa + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.plugin + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.projection.rst b/docs/cosense3d.modules.projection.rst new file mode 100644 index 00000000..96a562c2 --- /dev/null +++ b/docs/cosense3d.modules.projection.rst @@ -0,0 +1,37 @@ +cosense3d.modules.projection package +==================================== + +Submodules +---------- + +cosense3d.modules.projection.fax module +--------------------------------------- + +.. automodule:: cosense3d.modules.projection.fax + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.projection.petr module +---------------------------------------- + +.. automodule:: cosense3d.modules.projection.petr + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.projection.spatial\_transform module +------------------------------------------------------ + +.. automodule:: cosense3d.modules.projection.spatial_transform + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.projection + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.rst b/docs/cosense3d.modules.rst new file mode 100644 index 00000000..af915302 --- /dev/null +++ b/docs/cosense3d.modules.rst @@ -0,0 +1,26 @@ +cosense3d.modules package +========================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.modules.backbone2d + cosense3d.modules.backbone3d + cosense3d.modules.fusion + cosense3d.modules.heads + cosense3d.modules.losses + cosense3d.modules.necks + cosense3d.modules.plugin + cosense3d.modules.projection + cosense3d.modules.utils + +Module contents +--------------- + +.. automodule:: cosense3d.modules + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.modules.utils.rst b/docs/cosense3d.modules.utils.rst new file mode 100644 index 00000000..d52d7625 --- /dev/null +++ b/docs/cosense3d.modules.utils.rst @@ -0,0 +1,101 @@ +cosense3d.modules.utils package +=============================== + +Submodules +---------- + +cosense3d.modules.utils.box\_coder module +----------------------------------------- + +.. automodule:: cosense3d.modules.utils.box_coder + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.common module +------------------------------------- + +.. 
automodule:: cosense3d.modules.utils.common + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.conv module +----------------------------------- + +.. automodule:: cosense3d.modules.utils.conv + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.edl\_utils module +----------------------------------------- + +.. automodule:: cosense3d.modules.utils.edl_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.gaussian\_utils module +---------------------------------------------- + +.. automodule:: cosense3d.modules.utils.gaussian_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.init module +----------------------------------- + +.. automodule:: cosense3d.modules.utils.init + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.me\_utils module +---------------------------------------- + +.. automodule:: cosense3d.modules.utils.me_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.misc module +----------------------------------- + +.. automodule:: cosense3d.modules.utils.misc + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.nbr\_attn module +---------------------------------------- + +.. automodule:: cosense3d.modules.utils.nbr_attn + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.norm module +----------------------------------- + +.. automodule:: cosense3d.modules.utils.norm + :members: + :undoc-members: + :show-inheritance: + +cosense3d.modules.utils.positional\_encoding module +--------------------------------------------------- + +.. automodule:: cosense3d.modules.utils.positional_encoding + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.modules.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.rst b/docs/cosense3d.rst new file mode 100644 index 00000000..3758fc44 --- /dev/null +++ b/docs/cosense3d.rst @@ -0,0 +1,21 @@ +cosense3d package +================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + cosense3d.agents + cosense3d.dataset + cosense3d.modules + cosense3d.utils + +Module contents +--------------- + +.. automodule:: cosense3d + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/cosense3d.utils.rst b/docs/cosense3d.utils.rst new file mode 100644 index 00000000..af2460e5 --- /dev/null +++ b/docs/cosense3d.utils.rst @@ -0,0 +1,109 @@ +cosense3d.utils package +======================= + +Submodules +---------- + +cosense3d.utils.box\_utils module +--------------------------------- + +.. automodule:: cosense3d.utils.box_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.eval\_detection\_utils module +--------------------------------------------- + +.. automodule:: cosense3d.utils.eval_detection_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.iou2d\_calculator module +---------------------------------------- + +.. automodule:: cosense3d.utils.iou2d_calculator + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.logger module +----------------------------- + +.. automodule:: cosense3d.utils.logger + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.lr\_scheduler module +------------------------------------ + +.. 
automodule:: cosense3d.utils.lr_scheduler + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.metrics module +------------------------------ + +.. automodule:: cosense3d.utils.metrics + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.misc module +--------------------------- + +.. automodule:: cosense3d.utils.misc + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.module\_utils module +------------------------------------ + +.. automodule:: cosense3d.utils.module_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.pclib module +---------------------------- + +.. automodule:: cosense3d.utils.pclib + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.tensor\_utils module +------------------------------------ + +.. automodule:: cosense3d.utils.tensor_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.train\_utils module +----------------------------------- + +.. automodule:: cosense3d.utils.train_utils + :members: + :undoc-members: + :show-inheritance: + +cosense3d.utils.vislib module +----------------------------- + +.. automodule:: cosense3d.utils.vislib + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: cosense3d.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 00000000..5bbd97fe --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,24 @@ +.. OpenCosense3D documentation master file, created by + sphinx-quickstart on Tue Feb 27 18:37:05 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to OpenCosense3D's documentation! +========================================= + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + md/installation + md/prepare_data + md/structure + modules + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..32bb2452 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/md/installation.md b/docs/md/installation.md new file mode 100644 index 00000000..526b23f6 --- /dev/null +++ b/docs/md/installation.md @@ -0,0 +1,69 @@ +# Installation + +## Requirements +- Ubuntu LTS 20.04 +- GPU: tested on *Nvidia RTX 3090 Ti* and *Nvidia RTX 4090* +- Python: >= 3.8 + +## Installation options + +### Via bash script +You can install the environment with the provided bash scripts using the following commands: +```bash +conda create -n cosense3d python=3.8 +conda activate cosense3d +cd OpenCosense3D +# for Nvidia RTX 3090 +bash setup_env_3090.sh +# for Nvidia RTX 4090 +bash setup_env_4090.sh +``` + +### Step-by-step +If you encounter any errors during the script installation, please try the step-by-step installation. + +1. Create the conda environment and install dependencies. +```shell +conda create -n cosense3d python=3.8 +conda activate cosense3d +conda install openblas-devel -c anaconda -y +conda install -c conda-forge libstdcxx-ng libffi -y +sudo apt install build-essential python3-dev libopenblas-dev -y +``` + +2. Install PyTorch and compile the local PyTorch extensions (the CUDA nvcc compiler is needed). +```shell +# For 3090 +pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 \ +--extra-index-url https://download.pytorch.org/whl/cu113 +# For 4090 +pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 +# Install extensions +cd cosense3d/ops +pip install . && cd .. +``` + +3. Install Python packages. +```shell +# for 3090 +pip install -r requirements_cosense_3090.txt +# for 4090 +pip install -r requirements_cosense_4090.txt +# for Graphical Interface +pip install -r requirements_ui.txt +``` + +4. Install MinkowskiEngine. +```shell +pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \ + --global-option="--blas_include_dirs=${CONDA_PREFIX}/include" \ + --global-option="--blas=openblas" +export OMP_NUM_THREADS=16 +``` + +5. Check the installation. +```shell +python -c "import torch; print(torch.__version__)" +python -W ignore -c "import MinkowskiEngine as ME; print(ME.__version__)" +``` + diff --git a/docs/md/prepare_data.md b/docs/md/prepare_data.md new file mode 100644 index 00000000..c8132bfb --- /dev/null +++ b/docs/md/prepare_data.md @@ -0,0 +1,32 @@ +# Prepare Datasets +> Check the dataset [page](https://data.uni-hannover.de/dataset/cosense3d) for download links or use the download script with the following commands. +## OPV2Vt +```shell +cd CoSense3D +bash cosense3d/tools/download.sh OPV2Vt path/to/output_dir +``` + +## DairV2Xt + +Download the [DAIR-V2X-C](https://thudair.baai.ac.cn/coop-dtest) dataset and extract it to the following structure. + +```shell +├── dair-v2x +│ ├── cooperative-vehicle-infrastructure +| |── 2021_08_16_22_26_54 +| |── ... 
+│ ├── cooperative-vehicle-infrastructure-infrastructure-side-image +│ ├── cooperative-vehicle-infrastructure-infrastructure-side-velodyne +│ ├── cooperative-vehicle-infrastructure-vehicle-side-image +│ ├── cooperative-vehicle-infrastructure-vehicle-side-velodyne +``` +Then download the meta files with +```shell +bash cosense3d/tools/download.sh DairV2xt /path/to/dair-v2x +``` + +## OPV2V + +```shell +bash cosense3d/tools/download.sh OPV2V path/to/output_dir +``` diff --git a/docs/md/structure.md b/docs/md/structure.md new file mode 100644 index 00000000..17065ec7 --- /dev/null +++ b/docs/md/structure.md @@ -0,0 +1,64 @@ +# The Structure of the framework +![framework](../_static/imgs/framework-structure.png) + +The overall framework contains four main modules, namely Dataloader, +Graphical user interface (GUI), Runner and Central Controller. +The Central Controller is the core module of the framework, which contains four sub-modules: +CAV manager, Data manager, Task manager and Forward runner. Black arrows indicate the instruction flow, +while green arrows show the data flow. The framework can run either with or without visualization in the GUI. + +## Dataloader +The framework standardizes the data loading API for collective perception with a predefined dictionary format +to store the meta information in JSON files. With this API, a new dataset can be easily converted to the +standardized format without rewriting the PyTorch Dataloader or copying the large media files, such as point clouds +and images, to a new data structure. Only the meta information, such as scenarios, frames, timestamps, sensor +parameters and annotations, is parsed and saved to the CoSense3D format in JSON files (a hypothetical sketch of such a meta entry is shown below). This standardized Dataloader is able to load images, point cloud data, 2D annotations for images, +3D local annotations for perception without CAV cooperation and 3D global annotations for collective perception. + +## GUI +The graphical user interface can visualize the training and test data and check the training and test outcomes with one click. +This is helpful for loading new datasets and developing new models. +Before training on a new dataset, it is necessary to check whether the data is converted and loaded correctly. +During and after training, visualizing the model output is also helpful for identifying the drawbacks and problems +of the model and then refining or modifying the model accordingly. + +The GUI can send commands to the runner to start, stop or step the runner process. After each runner step, +it updates the visualization modules: 3D GLViewer, ImgViewer, ImgAnno3DViewer and OutputViewer. +GLViewer is an OpenGL-based visualizer for 3D data, annotations (green boxes) and predictions (red boxes). +ImgViewer shows image data and the corresponding 2D bounding boxes. ImgAnno3DViewer is used to check +whether the transformations and augmentations of images and 3D annotations are correctly loaded and processed. +Each row in ImgViewer and ImgAnno3DViewer shows the images of a single CAV. After training the model, +the OutputViewer can be used to visualize the test results. The OutputViewer can contain multiple canvases +which can be customized by the user. +The screenshots below show the GLViewer, the ImgViewer, the ImgAnno3DViewer and an example OutputViewer with the BEV segmentation (top) and object detection (bottom) results. 
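+As a complement to the Dataloader section above, the sketch below illustrates what a single meta entry in the standardized CoSense3D format might look like. All keys and values here are hypothetical placeholders chosen for illustration; they are not the exact schema used by the framework.
+```python
+import json
+
+# Hypothetical meta entry for one frame of one scenario (all names are placeholders).
+meta_entry = {
+    "scenario": "2021_08_16_22_26_54",
+    "frame": "000068",
+    "timestamp": 1629124014.0,
+    "agents": {
+        "cav_1": {
+            "lidar_pose": [412.1, -13.5, 1.9, 0.0, 0.0, 1.57],  # x, y, z, roll, pitch, yaw
+            "lidar_file": "cav_1/000068.bin",                   # media files stay in their original location
+            "camera_files": ["cav_1/000068_cam0.png"],
+            "annos_local": [],                                  # 3D boxes without CAV cooperation
+        }
+    },
+    "annos_global": [],                                         # 3D boxes for collective perception
+}
+
+# Only this light-weight meta file is written; point clouds and images are not copied.
+with open("meta_example.json", "w") as f:
+    json.dump({meta_entry["frame"]: meta_entry}, f, indent=2)
+```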
+![glviewer](../_static/imgs/glviewer.png) +![imgviewer](../_static/imgs/imgviewer.png) +![imganno2viewer](../_static/imgs/imganno2dviewer.png) +![outputviewer](../_static/imgs/outputviewer.png) + +## Runner +In this framework, three types of Runners are available, namely TrainRunner, TestRunner and VisRunner. +The user can launch these runners with or without the GUI. They are used for training, testing and input +data visualization, respectively. Runners manage the frame-wise data and dispatch orders to the Central Controller, +which then processes these orders with the provided frame data accordingly. + +## Central Controller +![controller](../_static/imgs/center_controller.png) +The Central Controller is the core module of this framework; it communicates with the order-dispatcher (Runner) +and the CAVs through its CAV manager. The Data manager is responsible for data gathering and scattering +between the central controller and the CAVs. Similarly, the Task manager gathers the pseudo tasks generated by the CAVs, +batches these tasks and dispatches them to the forward runner, which contains all shared deep learning modules, +for execution. In this framework, a standardized CAV prototyping API is provided to allow the user to define +the customized workflow for collective perception, including the data augmentations, CAV coordinate transformations, +CPM sharing strategies, the forwarding order of the shared neural network modules and the gradient computation strategies +of these modules. + +Based on the CAV prototype, the Central Controller then executes a standardized pipeline driven by the tasks +generated by the CAV prototypes. Once the Central Controller receives the order and frame data from the Runner (step 0), +the CAV manager updates the CAVs according to the meta information in the frame data and the provided CAV +prototype (step 1). Then the Data manager distributes the input frame data to the updated CAVs (step 2). +Upon receiving the input data, the CAVs pre-process the input data, generate tasks and send them back to the +Central Controller for processing (step 3). To increase the efficiency of the forward process, the Task manager +first summarizes the tasks from all CAVs and batches them into two forward steps, one with and one without +gradient computation, for parallel processing in the Forward Runner (steps 4 and 5). After finishing these tasks, +the generated results are distributed back to the individual CAVs. \ No newline at end of file diff --git a/docs/modules.rst b/docs/modules.rst new file mode 100644 index 00000000..738c8791 --- /dev/null +++ b/docs/modules.rst @@ -0,0 +1,10 @@ +CoSense3D +========= + +.. 
\ No newline at end of file
diff --git a/docs/modules.rst b/docs/modules.rst
new file mode 100644
index 00000000..738c8791
--- /dev/null
+++ b/docs/modules.rst
@@ -0,0 +1,10 @@
+CoSense3D
+=========
+
+.. toctree::
+   :maxdepth: 4
+
+   cosense3d.agents
+   cosense3d.dataset
+   cosense3d.modules
+   cosense3d.utils
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..4accef92
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,15 @@
+flash-attn
+opencv-python
+numpy
+torch-scatter
+PyYAML
+Shapely
+scipy
+rich
+tqdm
+open3d
+plyfile
+einops
+timm
+spconv-cu118
+pypcd
diff --git a/requirements_cosense_3090.txt b/requirements_cosense_3090.txt
new file mode 100644
index 00000000..2e188f34
--- /dev/null
+++ b/requirements_cosense_3090.txt
@@ -0,0 +1,15 @@
+opencv-python==4.2.0.34
+numpy
+torch-scatter
+PyYAML
+Shapely
+scipy
+rich
+tqdm
+open3d
+plyfile
+einops
+timm
+flash-attn==0.2.2
+spconv-cu113
+pypcd
\ No newline at end of file
diff --git a/requirements_cosense_4090.txt b/requirements_cosense_4090.txt
new file mode 100644
index 00000000..2f755219
--- /dev/null
+++ b/requirements_cosense_4090.txt
@@ -0,0 +1,16 @@
+opencv-python==4.2.0.34
+numpy
+torch-scatter -f https://data.pyg.org/whl/torch-2.1.0%2Bcu118/torch_scatter-2.1.2%2Bpt21cu118-cp38-cp38-linux_x86_64.whl
+PyYAML
+Shapely
+scipy
+rich
+tqdm
+open3d
+plyfile
+einops
+timm
+packaging
+flash-attn
+spconv-cu118
+pypcd
\ No newline at end of file
diff --git a/requirements_ui.txt b/requirements_ui.txt
new file mode 100644
index 00000000..e05bd3a9
--- /dev/null
+++ b/requirements_ui.txt
@@ -0,0 +1,4 @@
+PyQt5
+pyopengl
+pyqtgraph
+PyOpenGL-accelerate
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..248daac7
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,15 @@
+from setuptools import setup, find_packages
+
+
+__version__ = '0.0.1'
+
+setup(
+    name='CoSense3D',
+    version=__version__,
+    author='Yunshuang Yuan',
+    author_email='yunshuang.yuan@ikg.uni-hannover.de',
+    url='-',
+    license='MIT',
+    packages=find_packages(include=['cosense3d']),
+    zip_safe=False,
+)
diff --git a/setup_env_3090.sh b/setup_env_3090.sh
new file mode 100644
index 00000000..1f2aa22b
--- /dev/null
+++ b/setup_env_3090.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+set -e
+
+# Set color codes
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+#echo -e "${GREEN}[INFO] Create conda environment...${NC}"
+#conda create -n cosense3d python=3.8
+#conda activate cosense3d
+conda install openblas-devel -c anaconda -y
+conda install -c conda-forge libstdcxx-ng libffi -y
+sudo apt install build-essential python3-dev libopenblas-dev -y
+
+echo -e "${GREEN}[INFO] Installing pytorch essentials...${NC}"
+#conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch
+pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113
+
+echo -e "${GREEN}[INFO] Installing MinkowskiEngine...${NC}"
+# for old version of pip
+#pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
+#    --install-option="--blas_include_dirs=${CONDA_PREFIX}/include" \
+#    --install-option="--blas=openblas"
+pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
+    --global-option="--blas_include_dirs=${CONDA_PREFIX}/include" \
+    --global-option="--blas=openblas"
+#pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
+#    --config-settings="--blas_include_dirs=${CONDA_PREFIX}/include" \
+#    --config-settings="--blas=openblas"
+
+echo -e "${GREEN}[INFO] Installing cuda_ops...${NC}"
+cd cosense3d/ops && pip install . && cd ../..
+
+echo -e "${GREEN}[INFO] Installing requirements...${NC}"
+pip install -r requirements_cosense_3090.txt
+pip install -r requirements_ui.txt
+
+echo -e "${GREEN}[INFO] Done.${NC}"
+
+TORCH="$(python -c "import torch; print(torch.__version__)")"
+export OMP_NUM_THREADS=16
+ME="$(python -W ignore -c "import MinkowskiEngine as ME; print(ME.__version__)")"
+
+echo -e "${GREEN}[INFO] Finished the installation!"
+echo "[INFO] ========== Configurations =========="
+echo "[INFO] PyTorch version: $TORCH"
+echo "[INFO] MinkowskiEngine version: $ME"
+echo -e "[INFO] ====================================${NC}"
+
+
+#### CONDA ENV error
+
+# conda install -c conda-forge libstdcxx-ng
+# conda install -c conda-forge libffi
+
diff --git a/setup_env_4090.sh b/setup_env_4090.sh
new file mode 100644
index 00000000..cad28294
--- /dev/null
+++ b/setup_env_4090.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+######## Base ENV
+# Ubuntu = 20.04
+# Cuda = 11.8
+# Pytorch = 2.1.2
+
+set -e
+
+# Set color codes
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+#echo -e "${GREEN}[INFO] Create conda environment...${NC}"
+#conda create -n cosense3d python=3.8
+#conda activate cosense3d
+conda install openblas-devel -c anaconda -y
+conda install -c conda-forge libstdcxx-ng libffi -y
+sudo apt install build-essential python3-dev libopenblas-dev -y
+
+echo -e "${GREEN}[INFO] Installing pytorch essentials...${NC}"
+pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
+
+echo -e "${GREEN}[INFO] Installing MinkowskiEngine...${NC}"
+# for old version of pip
+#pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
+#    --install-option="--blas_include_dirs=${CONDA_PREFIX}/include" \
+#    --install-option="--blas=openblas"
+pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
+    --global-option="--blas_include_dirs=${CONDA_PREFIX}/include" \
+    --global-option="--blas=openblas"
+#pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
+#    --config-settings="--blas_include_dirs=${CONDA_PREFIX}/include" \
+#    --config-settings="--blas=openblas"
+
+echo -e "${GREEN}[INFO] Installing cuda_ops...${NC}"
+cd cosense3d/ops && pip install . && cd ../..
+
+echo -e "${GREEN}[INFO] Installing requirements...${NC}"
+pip install -r requirements_cosense_4090.txt
+pip install -r requirements_ui.txt
+
+echo -e "${GREEN}[INFO] Done.${NC}"
+
+TORCH="$(python -c "import torch; print(torch.__version__)")"
+export OMP_NUM_THREADS=16
+ME="$(python -W ignore -c "import MinkowskiEngine as ME; print(ME.__version__)")"
+
+echo -e "${GREEN}[INFO] Finished the installation!"
+echo "[INFO] ========== Configurations =========="
+echo "[INFO] PyTorch version: $TORCH"
+echo "[INFO] MinkowskiEngine version: $ME"
+echo -e "[INFO] ====================================${NC}"
+
+
+#
+