diff --git a/src/perception/semantic_segmentation/semantic_segmentation/segmentation_node.py b/src/perception/semantic_segmentation/semantic_segmentation/segmentation_node.py
index 25d781ca..e2a4fdf0 100644
--- a/src/perception/semantic_segmentation/semantic_segmentation/segmentation_node.py
+++ b/src/perception/semantic_segmentation/semantic_segmentation/segmentation_node.py
@@ -24,9 +24,9 @@
 package_name = 'semantic_segmentation'
 package_share_directory = get_package_share_directory(package_name)
 CONFIG = os.path.join(package_share_directory, 'resource', 'model',
-                      'segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py')
+                      'segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py')
 CHECKPOINT = os.path.join(package_share_directory, 'resource', 'model',
-                          'segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth')
+                          'segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth')
 
 IMAGE_H = 900
 IMAGE_W = 1600
@@ -68,7 +68,7 @@ def __init__(self):
         self.declare_parameter('config', "model/segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py")
         self.declare_parameter(
             'checkpoint', "model/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth")
-
+
         self.compressed = self.get_parameter('compressed').value
         self.image_subscription = self.create_subscription(
             Image if not self.compressed else CompressedImage,
@@ -106,7 +106,7 @@ def listener_callback(self, msg):
             self.get_logger().error(str(e))
             return
         with torch.no_grad():
-            out_img = self.model(image, show=False, )['predictions']
+            out_img = self.model(image, show=False)['predictions']
             # logits = torch.tensor(
             #     out_img, dtype=torch.float32).unsqueeze(0).unsqueeze(0)  # Add batch and channel dimensions
@@ -127,7 +127,6 @@ def listener_callback(self, msg):
 
         self.image_publisher.publish(mask_output)
 
 
-
 def main(args=None):
     rclpy.init(args=args)
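
Not part of the patch above: a minimal standalone sketch of the inference call that the third hunk tidies, assuming self.model is an mmseg.apis.MMSegInferencer (consistent with the ['predictions'] lookup). The config and checkpoint file names come from the patch; the instantiation and dummy input shown here are assumptions for illustration, not the node's actual setup.

# Hypothetical standalone reproduction of the call in the third hunk.
# Assumes mmsegmentation 1.x, where calling MMSegInferencer(img, show=False)
# returns a dict whose 'predictions' entry is a per-pixel class-ID array.
import numpy as np
from mmseg.apis import MMSegInferencer

CONFIG = 'segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py'
CHECKPOINT = 'segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth'

inferencer = MMSegInferencer(model=CONFIG, weights=CHECKPOINT)

# Dummy BGR frame standing in for the incoming ROS image (the node uses 900x1600).
image = np.zeros((900, 1600, 3), dtype=np.uint8)

# Same call shape as the '+' line: no stray trailing comma, show=False,
# then take the 'predictions' mask.
out_img = inferencer(image, show=False)['predictions']
print(out_img.shape)  # expected (900, 1600)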