From 90263eca8e457d7a79a57cf39f4de7d49f0aa544 Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Wed, 18 May 2022 14:17:04 -0700
Subject: [PATCH 01/15] Remove checking for pad byte flag. Instead increase
 secondary header size to 13. Add real_pkt_data_len. Compute
 num_garbage_bytes. Update data getter and setter. Update crc calculation.
 Update all places in code where partial or garbage packets are created.

---
 emit_sds_l1a/ccsds_packet.py | 62 ++++++++++++++++++++----------------
 1 file changed, 34 insertions(+), 28 deletions(-)

diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py
index e87c56c..ea3c962 100644
--- a/emit_sds_l1a/ccsds_packet.py
+++ b/emit_sds_l1a/ccsds_packet.py
@@ -126,7 +126,7 @@ class ScienceDataPacket(CCSDSPacket):
 
     HEADER_SYNC_WORD = 0x81FFFF81
     PRIMARY_HDR_LEN = 6
-    SEC_HDR_LEN = 11
+    SEC_HDR_LEN = 13
     CRC_LEN = 4
 
     def __init__(self, stream=None, **kwargs):
@@ -147,19 +147,13 @@ def __init__(self, stream=None, **kwargs):
     @property
     def data(self):
         if self.body:
-            if self.pad_byte_flag == 0:
-                return self.body[self.SEC_HDR_LEN: -self.CRC_LEN]
-            else:
-                return self.body[self.SEC_HDR_LEN: -(self.CRC_LEN + 1)]
+            return self.body[self.SEC_HDR_LEN: -(self.CRC_LEN + self.num_garbage_bytes)]
         else:
             return None
 
     @data.setter
     def data(self, data):
-        if self.pad_byte_flag == 0:
-            self.body = self.body[:self.SEC_HDR_LEN] + data + self.body[-self.CRC_LEN:]
-        else:
-            self.body = self.body[:self.SEC_HDR_LEN] + data + bytearray(1) + self.body[-self.CRC_LEN:]
+        self.body = self.body[:self.SEC_HDR_LEN] + data + self.body[-(self.CRC_LEN + self.num_garbage_bytes):]
 
     @property
     def coarse_time(self):
@@ -169,7 +163,7 @@ def coarse_time(self):
         else:
             logging.error(
                 f"Insufficient data length {len(self.body)} to extract coarse time "
-                f"from EngineeringDataPacket. Returning default value: {t}"
+                f"from ScienceDataPacket. Returning default value: {t}"
             )
 
         return t
@@ -182,7 +176,7 @@ def fine_time(self):
         else:
             logging.error(
                 f"Insufficient data length {len(self.body)} to extract fine time "
-                f"from EngineeringDataPacket. Returning default value: {t}"
+                f"from ScienceDataPacket. Returning default value: {t}"
             )
 
         return t
@@ -203,16 +197,33 @@ def subheader_id(self):
         else:
             logging.error(
                 f"Insufficient data length {len(self.body)} to extract subheader id "
-                f"from EngineeringDataPacket. Returning default value: {shid}"
+                f"from ScienceDataPacket. Returning default value: {shid}"
             )
 
         return shid
 
+    @property
+    def real_pkt_data_len(self):
+        rpdl = -1
+        if len(self.body) >= 13:
+            rpdl = int.from_bytes(self.body[11:13], "big")
+        else:
+            logging.error(
+                f"Insufficient data length {len(self.body)} to extract real packet data length "
+                f"from ScienceDataPacket. Returning default value: {rpdl}"
+            )
+
+        return rpdl
+
+    @property
+    def num_garbage_bytes(self):
+        return self.pkt_data_len + 1 - (self.SEC_HDR_LEN + self.real_pkt_data_len + 1 + self.CRC_LEN)
+
     @property
     def is_valid(self):
         """"""
-        crc = int.from_bytes(self.body[-self.CRC_LEN:], "big")
-        calc_crc = zlib.crc32(self.hdr_data + self.body[:-self.CRC_LEN])
+        crc = int.from_bytes(self.body[-(self.CRC_LEN + self.num_garbage_bytes): -self.num_garbage_bytes], "big")
+        calc_crc = zlib.crc32(self.hdr_data + self.body[:-(self.CRC_LEN + self.num_garbage_bytes)])
         return calc_crc == crc
 
     @property
@@ -357,7 +368,7 @@ def __str__(self):
 class SciencePacketProcessor:
 
     HEADER_SYNC_WORD = bytes.fromhex("81FFFF81")
-    SEC_HDR_LEN = 11
+    SEC_HDR_LEN = 13
     MIN_PROCABLE_PKT_LEN = 8
     CRC_LEN = 4
     MAX_DATA_LEN = 1479
@@ -518,11 +529,8 @@ def _read_pkt_parts(self, start_pkt):
         if expected_frame_len < len(start_pkt.data):
             # Create a partial and then read in short frame
             partial_data = start_pkt.data[expected_frame_len:]
-            # TODO: Add garbage byte here?
-            if start_pkt.pad_byte_flag == 0:
-                body = start_pkt.body[:self.SEC_HDR_LEN] + partial_data + start_pkt.body[-self.CRC_LEN:]
-            else:
-                body = start_pkt.body[:self.SEC_HDR_LEN] + partial_data + bytearray(1) + start_pkt.body[-self.CRC_LEN:]
+            body = start_pkt.body[:self.SEC_HDR_LEN] + partial_data + \
+                start_pkt.body[-(self.CRC_LEN + start_pkt.num_garbage_bytes):]
             partial = ScienceDataPacket(hdr_data=start_pkt.hdr_data, body=body)
             self._pkt_partial = partial
 
@@ -566,12 +574,8 @@ def _read_pkt_parts(self, start_pkt):
             # Create new partial
             remaining_bytes = data_accum_len - expected_frame_len
             partial_data = pkt_parts[-1].data[-remaining_bytes:]
-            # TODO: Add pad byte?
-            if pkt_parts[-1].pad_byte_flag == 0:
-                body = pkt_parts[-1].body[:self.SEC_HDR_LEN] + partial_data + pkt_parts[-1].body[-self.CRC_LEN:]
-            else:
-                body = pkt_parts[-1].body[:self.SEC_HDR_LEN] + partial_data + bytearray(1) + \
-                    pkt_parts[-1].body[-self.CRC_LEN:]
+            body = pkt_parts[-1].body[:self.SEC_HDR_LEN] + partial_data + \
+                pkt_parts[-1].body[-(self.CRC_LEN + pkt_parts[-1].num_garbage_bytes):]
             partial = ScienceDataPacket(hdr_data=pkt_parts[-1].hdr_data, body=body)
             self._pkt_partial = partial
 
@@ -600,7 +604,8 @@ def _read_pkt_parts(self, start_pkt):
                 logger.info(f"Not inserting any more garbage packets because end of frame.")
                 break
             elif remaining_data_len >= self.MAX_DATA_LEN:
-                body = pkt.body[:self.SEC_HDR_LEN] + bytearray(self.MAX_DATA_LEN) + pkt.body[-self.CRC_LEN:]
+                body = pkt.body[:self.SEC_HDR_LEN] + bytearray(self.MAX_DATA_LEN) + \
+                    pkt.body[-(self.CRC_LEN + pkt.num_garbage_bytes):]
                 garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body)
                 pkt_parts.append(garbage_pkt)
                 data_accum_len += self.MAX_DATA_LEN
@@ -608,7 +613,8 @@ def _read_pkt_parts(self, start_pkt):
                 logger.info(f"Inserted garbage packet with {self.MAX_DATA_LEN} bytes of data. Accum data is "
                             f"now {data_accum_len}")
                 self._stats.corrupt_frame(frame)
             elif 0 < remaining_data_len < self.MAX_DATA_LEN:
-                body = pkt.body[:self.SEC_HDR_LEN] + bytearray(remaining_data_len) + pkt.body[-self.CRC_LEN:]
+                body = pkt.body[:self.SEC_HDR_LEN] + bytearray(self.MAX_DATA_LEN) + \
+                    pkt.body[-(self.CRC_LEN + pkt.num_garbage_bytes):]
                 garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body)
                 pkt_parts.append(garbage_pkt)
                 data_accum_len += remaining_data_len

From cfa055814e85c93ec1fb1d55dccaae934b2291f1 Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Wed, 18 May 2022 14:24:16 -0700
Subject: [PATCH 02/15] Change max data length to 1477, 2 less than before due
 to larger secondary header.

---
 emit_sds_l1a/ccsds_packet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py
index ea3c962..ad45dc5 100644
--- a/emit_sds_l1a/ccsds_packet.py
+++ b/emit_sds_l1a/ccsds_packet.py
@@ -371,7 +371,7 @@ class SciencePacketProcessor:
     SEC_HDR_LEN = 13
     MIN_PROCABLE_PKT_LEN = 8
     CRC_LEN = 4
-    MAX_DATA_LEN = 1479
+    MAX_DATA_LEN = 1477
 
     def __init__(self, stream_path):
         logger.debug(f"Initializing SciencePacketProcessor from path {stream_path}")

From 2aaabcdda401fb1da0225735c56ce42fd7c1542b Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Tue, 7 Jun 2022 11:00:37 -0700
Subject: [PATCH 03/15] Fix num_garbage_bytes calc and crc32 calc. Add extra
 debug logging.

---
 emit_sds_l1a/ccsds_packet.py | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py
index ad45dc5..0d16cb5 100644
--- a/emit_sds_l1a/ccsds_packet.py
+++ b/emit_sds_l1a/ccsds_packet.py
@@ -217,12 +217,16 @@ def real_pkt_data_len(self):
 
     @property
     def num_garbage_bytes(self):
-        return self.pkt_data_len + 1 - (self.SEC_HDR_LEN + self.real_pkt_data_len + 1 + self.CRC_LEN)
+        # return self.pkt_data_len + 1 - (self.SEC_HDR_LEN + self.real_pkt_data_len + 1 + self.CRC_LEN)
+        return self.pkt_data_len - self.real_pkt_data_len
 
     @property
     def is_valid(self):
         """"""
-        crc = int.from_bytes(self.body[-(self.CRC_LEN + self.num_garbage_bytes): -self.num_garbage_bytes], "big")
+        if self.num_garbage_bytes == 0:
+            crc = int.from_bytes(self.body[-self.CRC_LEN:], "big")
+        else:
+            crc = int.from_bytes(self.body[-(self.CRC_LEN + self.num_garbage_bytes): -self.num_garbage_bytes], "big")
         calc_crc = zlib.crc32(self.hdr_data + self.body[:-(self.CRC_LEN + self.num_garbage_bytes)])
         return calc_crc == crc
 
@@ -256,8 +260,10 @@ def product_length(self):
 
     def __repr__(self):
         pkt_str = "
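Taken together, patches 01-03 define the FSW 1.3 packet layout: a 13-byte secondary header whose bytes 11:13 carry real_pkt_data_len, a CRC-32 over everything except the CRC and trailing fill, and num_garbage_bytes = pkt_data_len - real_pkt_data_len. The standalone sketch below walks that arithmetic end to end on a synthetic packet. It assumes pkt_data_len follows the usual CCSDS primary-header convention of "total body bytes minus one", and that real_pkt_data_len uses the same convention for the non-garbage portion; neither convention is spelled out in the diffs themselves.

    import zlib

    PRIMARY_HDR_LEN = 6
    SEC_HDR_LEN = 13
    CRC_LEN = 4

    # Synthetic packet: primary header, 13-byte secondary header (bytes 11:13
    # hold real_pkt_data_len), payload, CRC-32, then trailing garbage/fill.
    payload = b"\xab" * 20
    garbage = bytes(3)
    sec_hdr = bytearray(SEC_HDR_LEN)
    real_pkt_data_len = SEC_HDR_LEN + len(payload) + CRC_LEN - 1  # assumed "len - 1" convention
    sec_hdr[11:13] = real_pkt_data_len.to_bytes(2, "big")
    hdr_data = bytes(PRIMARY_HDR_LEN)
    crc = zlib.crc32(hdr_data + bytes(sec_hdr) + payload).to_bytes(CRC_LEN, "big")
    body = bytes(sec_hdr) + payload + crc + garbage

    # Mirror the patched properties (pkt_data_len comes from the primary
    # header in the real class; here it is derived from the body length):
    pkt_data_len = len(body) - 1
    num_garbage_bytes = pkt_data_len - int.from_bytes(body[11:13], "big")
    data = body[SEC_HDR_LEN: -(CRC_LEN + num_garbage_bytes)]
    stored_crc = int.from_bytes(body[-(CRC_LEN + num_garbage_bytes): -num_garbage_bytes or None], "big")
    calc_crc = zlib.crc32(hdr_data + body[:-(CRC_LEN + num_garbage_bytes)])
    assert num_garbage_bytes == len(garbage) and data == payload and stored_crc == calc_crc

The `or None` guard reproduces the zero-garbage special case that patch 03 adds to is_valid: slicing with an end index of `-0` would otherwise yield an empty CRC field.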
From: Winston Olson-Duvall
Date: Wed, 8 Jun 2022 09:13:07 -0700
Subject: [PATCH 04/15] Add newline in report

---
 reassemble_raw_cube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py
index b14b3fe..ae646b7 100644
--- a/reassemble_raw_cube.py
+++ b/reassemble_raw_cube.py
@@ -326,7 +326,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time,
         f.write(f"Total cloudy frames encountered in this acquisition: {len(cloudy_frame_nums)}\n")
         f.write(f"List of cloudy frame numbers (if any):\n")
         if len(cloudy_frame_nums) > 0:
-            f.write("\n".join(i for i in cloudy_frame_nums))
+            f.write("\n".join(i for i in cloudy_frame_nums) + "\n")
         f.write("\n")
 
         # Report on corrupted lines (line count mismatch):
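The one-character fix above matters because str.join only separates items and never terminates the last one, so consecutive report sections would run together:

    nums = ["00003", "00007"]
    print(repr("\n".join(i for i in nums)))          # '00003\n00007' -- no trailing newline
    print(repr("\n".join(i for i in nums) + "\n"))   # '00003\n00007\n'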
From f934b925858892415d98d5d81bd7f0ea3faa06f1 Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Wed, 8 Jun 2022 15:48:57 -0700
Subject: [PATCH 05/15] Update ScienceDataPacket and SciencePacketProcessor to
 use new fsw_ver switch.

---
 emit_sds_l1a/ccsds_packet.py | 144 +++++++++++++++++++++++------------
 1 file changed, 95 insertions(+), 49 deletions(-)

diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py
index 0d16cb5..ecee32d 100644
--- a/emit_sds_l1a/ccsds_packet.py
+++ b/emit_sds_l1a/ccsds_packet.py
@@ -126,13 +126,13 @@ class ScienceDataPacket(CCSDSPacket):
 
     HEADER_SYNC_WORD = 0x81FFFF81
     PRIMARY_HDR_LEN = 6
-    SEC_HDR_LEN = 13
     CRC_LEN = 4
 
-    def __init__(self, stream=None, **kwargs):
+    def __init__(self, stream=None, fsw_ver="1.3", **kwargs):
         """Inititialize EngineeringDataPacket
         Arguments:
             stream - A file object from which to read data (default: None)
+            fsw_ver - FSW version that determines CCSDS format of secondary header and padding
         Keyword Arguments:
             - **hdr_data**: A bytes-like object containing 6-bytes of data that
                 should be processed as a CCSDS Packet header.
@@ -142,18 +142,32 @@ def __init__(self, stream=None, **kwargs):
             enforced if these kwargs are used.
         """
         super(ScienceDataPacket, self).__init__(stream=stream, **kwargs)
-        # logger.debug("SDP primary header: " + str([bin(self.hdr_data[i])[2:].zfill(8) for i in range(self.PRIMARY_HDR_LEN)]))
+        self.fsw_ver = fsw_ver
+        self.SEC_HDR_LEN = 11 if fsw_ver == "1.2.1" else 13
+        self.MAX_DATA_LEN = 1479 if fsw_ver == "1.2.1" else 1477
 
     @property
     def data(self):
         if self.body:
-            return self.body[self.SEC_HDR_LEN: -(self.CRC_LEN + self.num_garbage_bytes)]
+            if self.fsw_ver == "1.2.1":
+                if self.pad_byte_flag == 0:
+                    return self.body[self.SEC_HDR_LEN: -self.CRC_LEN]
+                else:
+                    return self.body[self.SEC_HDR_LEN: -(self.CRC_LEN + 1)]
+            else:
+                return self.body[self.SEC_HDR_LEN: -(self.CRC_LEN + self.num_garbage_bytes)]
         else:
             return None
 
     @data.setter
     def data(self, data):
-        self.body = self.body[:self.SEC_HDR_LEN] + data + self.body[-(self.CRC_LEN + self.num_garbage_bytes):]
+        if self.fsw_ver == "1.2.1":
+            if self.pad_byte_flag == 0:
+                self.body = self.body[:self.SEC_HDR_LEN] + data + self.body[-self.CRC_LEN:]
+            else:
+                self.body = self.body[:self.SEC_HDR_LEN] + data + bytearray(1) + self.body[-self.CRC_LEN:]
+        else:
+            self.body = self.body[:self.SEC_HDR_LEN] + data + self.body[-(self.CRC_LEN + self.num_garbage_bytes):]
 
     @property
     def coarse_time(self):
@@ -204,40 +218,41 @@ def subheader_id(self):
 
     @property
     def real_pkt_data_len(self):
-        rpdl = -1
-        if len(self.body) >= 13:
-            rpdl = int.from_bytes(self.body[11:13], "big")
+        if self.fsw_ver == "1.2.1":
+            return None
         else:
-            logging.error(
-                f"Insufficient data length {len(self.body)} to extract real packet data length "
-                f"from ScienceDataPacket. Returning default value: {rpdl}"
-            )
+            rpdl = -1
+            if len(self.body) >= 13:
+                rpdl = int.from_bytes(self.body[11:13], "big")
+            else:
+                logging.error(
+                    f"Insufficient data length {len(self.body)} to extract real packet data length "
+                    f"from ScienceDataPacket. Returning default value: {rpdl}"
+                )
 
-        return rpdl
+            return rpdl
 
     @property
     def num_garbage_bytes(self):
-        # return self.pkt_data_len + 1 - (self.SEC_HDR_LEN + self.real_pkt_data_len + 1 + self.CRC_LEN)
-        return self.pkt_data_len - self.real_pkt_data_len
+        if self.fsw_ver == "1.2.1":
+            return None
+        else:
+            return self.pkt_data_len - self.real_pkt_data_len
 
     @property
     def is_valid(self):
         """"""
-        if self.num_garbage_bytes == 0:
+        if self.fsw_ver == "1.2.1":
             crc = int.from_bytes(self.body[-self.CRC_LEN:], "big")
+            calc_crc = zlib.crc32(self.hdr_data + self.body[:-self.CRC_LEN])
         else:
-            crc = int.from_bytes(self.body[-(self.CRC_LEN + self.num_garbage_bytes): -self.num_garbage_bytes], "big")
-        calc_crc = zlib.crc32(self.hdr_data + self.body[:-(self.CRC_LEN + self.num_garbage_bytes)])
+            if self.num_garbage_bytes == 0:
+                crc = int.from_bytes(self.body[-self.CRC_LEN:], "big")
+            else:
+                crc = int.from_bytes(self.body[-(self.CRC_LEN + self.num_garbage_bytes): -self.num_garbage_bytes], "big")
+            calc_crc = zlib.crc32(self.hdr_data + self.body[:-(self.CRC_LEN + self.num_garbage_bytes)])
         return calc_crc == crc
 
-    @property
-    def payload_data(self):
-        """"""
-        if len(self.body) >= self.SEC_HDR_LEN + self.CRC_LEN:
-            return self.body[self.SEC_HDR_LEN: -self.CRC_LEN]
-        else:
-            return bytearray()
-
     @property
     def is_header_packet(self):
         stat = False
@@ -374,14 +389,12 @@ def __str__(self):
 class SciencePacketProcessor:
 
     HEADER_SYNC_WORD = bytes.fromhex("81FFFF81")
-    SEC_HDR_LEN = 13
     MIN_PROCABLE_PKT_LEN = 8
-    CRC_LEN = 4
-    MAX_DATA_LEN = 1477
 
-    def __init__(self, stream_path):
-        logger.debug(f"Initializing SciencePacketProcessor from path {stream_path}")
+    def __init__(self, stream_path, fsw_ver="1.3"):
+        logger.debug(f"Initializing SciencePacketProcessor from path {stream_path} using FSW v{fsw_ver}")
         self.stream = open(stream_path, "rb")
+        self.fsw_ver = fsw_ver
         self._cur_psc = -1
         self._cur_coarse = -1
         self._cur_fine = -1
@@ -409,7 +422,7 @@ def read_frame(self):
 
     def _read_next_packet(self):
         while True:
-            pkt = ScienceDataPacket(stream=self.stream)
+            pkt = ScienceDataPacket(stream=self.stream, fsw_ver=self.fsw_ver)
             logger.debug(pkt)
             self._stats.ccsds_read(pkt)
             pkt_hash = str(pkt.coarse_time) + str(pkt.fine_time) + str(pkt.pkt_seq_cnt)
@@ -534,11 +547,19 @@ def _read_pkt_parts(self, start_pkt):
         # TODO: This block is probably never executed since the start packet usually contains only the header and
         # nothing more
         if expected_frame_len < len(start_pkt.data):
+            logger.info("Creating partial packet - frame length is less than the length of the packet's data.")
            # Create a partial and then read in short frame
             partial_data = start_pkt.data[expected_frame_len:]
-            body = start_pkt.body[:self.SEC_HDR_LEN] + partial_data + \
-                start_pkt.body[-(self.CRC_LEN + start_pkt.num_garbage_bytes):]
-            partial = ScienceDataPacket(hdr_data=start_pkt.hdr_data, body=body)
+            if self.fsw_ver == "1.2.1":
+                if start_pkt.pad_byte_flag == 0:
+                    body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + start_pkt.body[-start_pkt.CRC_LEN:]
+                else:
+                    body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + bytearray(1) + \
+                        start_pkt.body[-start_pkt.CRC_LEN:]
+            else:
+                body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + \
+                    start_pkt.body[-(start_pkt.CRC_LEN + start_pkt.num_garbage_bytes):]
+            partial = ScienceDataPacket(hdr_data=start_pkt.hdr_data, body=body, fsw_ver=self.fsw_ver)
             self._pkt_partial = partial
 
             start_pkt.data = start_pkt.data[:expected_frame_len]
@@ -579,18 +600,27 @@ def _read_pkt_parts(self, start_pkt):
                 logger.debug("Case 2 - accumulated data length exceeds expected length. Trimming last packet to "
                              "expected size and creating partial packet of remaining bytes.")
             # Create new partial
+            logger.info("Creating partial packet - the accum data length is greater than the expected frame length")
             remaining_bytes = data_accum_len - expected_frame_len
             partial_data = pkt_parts[-1].data[-remaining_bytes:]
-            body = pkt_parts[-1].body[:self.SEC_HDR_LEN] + partial_data + \
-                pkt_parts[-1].body[-(self.CRC_LEN + pkt_parts[-1].num_garbage_bytes):]
-            partial = ScienceDataPacket(hdr_data=pkt_parts[-1].hdr_data, body=body)
+            if self.fsw_ver == "1.2.1":
+                if pkt_parts[-1].pad_byte_flag == 0:
+                    body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + \
+                        pkt_parts[-1].body[-pkt_parts[-1].CRC_LEN:]
+                else:
+                    body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + bytearray(1) + \
+                        pkt_parts[-1].body[-pkt_parts[-1].CRC_LEN:]
+            else:
+                body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + \
+                    pkt_parts[-1].body[-(pkt_parts[-1].CRC_LEN + pkt_parts[-1].num_garbage_bytes):]
+            partial = ScienceDataPacket(hdr_data=pkt_parts[-1].hdr_data, body=body, fsw_ver=self.fsw_ver)
             self._pkt_partial = partial
 
             # Remove extra data from last packet in packet parts
             pkt_parts[-1].data = pkt_parts[-1].data[:-remaining_bytes]
             return pkt_parts
 
-        # If neither of the above end cases is met, then read the next packet
+        # If neither of the above end cases is met, then read the next packet
         try:
             pkt = self._read_next_packet()
         except PSCMismatchException as e:
@@ -610,19 +640,35 @@ def _read_pkt_parts(self, start_pkt):
             if remaining_data_len == 0:
                 logger.info(f"Not inserting any more garbage packets because end of frame.")
                 break
-            elif remaining_data_len >= self.MAX_DATA_LEN:
-                body = pkt.body[:self.SEC_HDR_LEN] + bytearray(self.MAX_DATA_LEN) + \
-                    pkt.body[-(self.CRC_LEN + pkt.num_garbage_bytes):]
-                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body)
+            elif remaining_data_len >= pkt.MAX_DATA_LEN:
+                if self.fsw_ver == "1.2.1":
+                    if pkt.pad_byte_flag == 0:
+                        body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + \
+                            pkt.body[-pkt.CRC_LEN:]
+                    else:
+                        body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + bytearray(1) + \
+                            pkt.body[-pkt.CRC_LEN:]
+                else:
+                    body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + \
+                        pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
+                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, fsw_ver=self.fsw_ver)
                 pkt_parts.append(garbage_pkt)
-                data_accum_len += self.MAX_DATA_LEN
-                logger.info(f"Inserted garbage packet with {self.MAX_DATA_LEN} bytes of data. Accum data is "
+                data_accum_len += pkt.MAX_DATA_LEN
+                logger.info(f"Inserted garbage packet with {pkt.MAX_DATA_LEN} bytes of data. Accum data is "
                             f"now {data_accum_len}")
                 self._stats.corrupt_frame(frame)
-            elif 0 < remaining_data_len < self.MAX_DATA_LEN:
-                body = pkt.body[:self.SEC_HDR_LEN] + bytearray(self.MAX_DATA_LEN) + \
-                    pkt.body[-(self.CRC_LEN + pkt.num_garbage_bytes):]
-                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body)
+            elif 0 < remaining_data_len < pkt.MAX_DATA_LEN:
+                if self.fsw_ver == "1.2.1":
+                    if pkt.pad_byte_flag == 0:
+                        body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + \
+                            pkt.body[-pkt.CRC_LEN:]
+                    else:
+                        body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + bytearray(1) + \
+                            pkt.body[-pkt.CRC_LEN:]
+                else:
+                    body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + \
+                        pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
+                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, fsw_ver=self.fsw_ver)
                 pkt_parts.append(garbage_pkt)
                 data_accum_len += remaining_data_len
                 logger.info(f"Inserted garbage packet with {remaining_data_len} bytes of data. Accum data is "
                             f"now {data_accum_len}")
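The fsw_ver switch boils down to two format profiles; everything else in this patch is branching on that version string. A minimal sketch of the constants involved, with values taken straight from the diff:

    def format_constants(fsw_ver="1.3"):
        """Return (SEC_HDR_LEN, MAX_DATA_LEN) for a given FSW packet format."""
        if fsw_ver == "1.2.1":
            return 11, 1479   # pad-byte-flag format
        return 13, 1477       # real_pkt_data_len format (FSW 1.3)

    for ver in ("1.2.1", "1.3"):
        print(ver, format_constants(ver))

Note that the two MAX_DATA_LEN values differ by exactly the two bytes the larger secondary header consumes, matching patch 02.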
From fe1e5a9b23beb27d408cf4bda4b0626b95e0cb91 Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Wed, 8 Jun 2022 15:52:50 -0700
Subject: [PATCH 06/15] Add fsw_ver argument.

---
 depacketize_science_frames.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/depacketize_science_frames.py b/depacketize_science_frames.py
index 60b5468..d12e121 100644
--- a/depacketize_science_frames.py
+++ b/depacketize_science_frames.py
@@ -27,6 +27,7 @@ def main():
          " * Depacketization summary/report file named depacketize_science_frames_report.txt (default)\n",
         formatter_class=RawTextHelpFormatter)
     parser.add_argument("stream_path", help="Path to CCSDS stream file")
+    parser.add_argument("--fsw_ver", help="Flight software version to use", default="1.3")
     parser.add_argument("--work_dir", help="Path to working directory", default=".")
     parser.add_argument("--prev_stream_path", help="Path to previous CCSDS stream file")
     parser.add_argument("--prev_bytes_to_read", help="How many bytes to read from the end of the previous stream",
@@ -78,7 +79,7 @@ def main():
         f.write(stream)
 
     logger.info(f"Processing stream file {tmp_stream_path}")
-    processor = SciencePacketProcessor(tmp_stream_path)
+    processor = SciencePacketProcessor(tmp_stream_path, fsw_ver=args.fsw_ver)
 
     frame_count = 0
     while True:
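With the new flag wired through, a caller drives depacketization roughly as follows. This is a hedged usage sketch: the stream path is hypothetical, and exiting the loop on EOFError mirrors how util/find_all_sync_words.py (patch 07, below) consumes packets rather than a documented contract of read_frame.

    from emit_sds_l1a.ccsds_packet import SciencePacketProcessor

    processor = SciencePacketProcessor("ccsds_stream.bin", fsw_ver="1.2.1")  # hypothetical path
    frame_count = 0
    while True:
        try:
            processor.read_frame()   # assembles the packet parts for one science frame
        except EOFError:
            break
        frame_count += 1
    print(f"Depacketized {frame_count} frames")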
From 993b6895c4623bdfb74fe8d953885dabb3f30c80 Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Thu, 9 Jun 2022 13:37:06 -0700
Subject: [PATCH 07/15] Add fsw_ver to calling scripts.

---
 depacketize_science_frames.py | 2 +-
 util/find_all_sync_words.py   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/depacketize_science_frames.py b/depacketize_science_frames.py
index d12e121..41b4e1a 100644
--- a/depacketize_science_frames.py
+++ b/depacketize_science_frames.py
@@ -78,7 +78,7 @@ def main():
     with open(tmp_stream_path, "wb") as f:
         f.write(stream)
 
-    logger.info(f"Processing stream file {tmp_stream_path}")
+    logger.info(f"Processing stream file {tmp_stream_path} using FSW v{args.fsw_ver}")
     processor = SciencePacketProcessor(tmp_stream_path, fsw_ver=args.fsw_ver)
 
     frame_count = 0

diff --git a/util/find_all_sync_words.py b/util/find_all_sync_words.py
index 31b3815..c79166a 100644
--- a/util/find_all_sync_words.py
+++ b/util/find_all_sync_words.py
@@ -12,6 +12,7 @@
 parser = argparse.ArgumentParser()
 parser.add_argument("infile")
 parser.add_argument("method", type=int, default=1)
+parser.add_argument("fsw_ver", default="1.3")
 args = parser.parse_args()
 
 in_file = open(args.infile, "rb")
@@ -22,7 +23,7 @@
 cnt = 0
 while True:
     try:
-        pkt = ScienceDataPacket(in_file)
+        pkt = ScienceDataPacket(in_file, fsw_ver=args.fsw_ver)
         cnt += 1
         data += pkt.data
     except EOFError:
@@ -32,7 +33,6 @@
 print(datetime.datetime.now())
 
 indices = []
-
 if args.method == 1:
     print("Using itertools...")
     data_iters = itertools.tee(data, len(HEADER_SYNC_WORD))

From d4579a652b87b73a89086fe5769086282ff8fb65 Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Thu, 9 Jun 2022 14:18:18 -0700
Subject: [PATCH 08/15] Change fsw_ver to pkt_format

---
 depacketize_science_frames.py |  6 ++---
 emit_sds_l1a/ccsds_packet.py  | 44 +++++++++++++++++------------------
 util/find_all_sync_words.py   |  4 ++--
 3 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/depacketize_science_frames.py b/depacketize_science_frames.py
index 41b4e1a..3464be2 100644
--- a/depacketize_science_frames.py
+++ b/depacketize_science_frames.py
@@ -27,7 +27,7 @@ def main():
          " * Depacketization summary/report file named depacketize_science_frames_report.txt (default)\n",
         formatter_class=RawTextHelpFormatter)
     parser.add_argument("stream_path", help="Path to CCSDS stream file")
-    parser.add_argument("--fsw_ver", help="Flight software version to use", default="1.3")
+    parser.add_argument("--pkt_format", help="Flight software version to use", default="1.3")
     parser.add_argument("--work_dir", help="Path to working directory", default=".")
     parser.add_argument("--prev_stream_path", help="Path to previous CCSDS stream file")
     parser.add_argument("--prev_bytes_to_read", help="How many bytes to read from the end of the previous stream",
@@ -78,8 +78,8 @@ def main():
     with open(tmp_stream_path, "wb") as f:
         f.write(stream)
 
-    logger.info(f"Processing stream file {tmp_stream_path} using FSW v{args.fsw_ver}")
-    processor = SciencePacketProcessor(tmp_stream_path, fsw_ver=args.fsw_ver)
+    logger.info(f"Processing stream file {tmp_stream_path} using packet format from FSW v{args.pkt_format}")
+    processor = SciencePacketProcessor(tmp_stream_path, pkt_format=args.pkt_format)
 
     frame_count = 0
     while True:

diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py
index ecee32d..a55ca46 100644
--- a/emit_sds_l1a/ccsds_packet.py
+++ b/emit_sds_l1a/ccsds_packet.py
@@ -128,11 +128,11 @@ class ScienceDataPacket(CCSDSPacket):
     PRIMARY_HDR_LEN = 6
     CRC_LEN = 4
 
-    def __init__(self, stream=None, fsw_ver="1.3", **kwargs):
+    def __init__(self, stream=None, pkt_format="1.3", **kwargs):
         """Inititialize EngineeringDataPacket
         Arguments:
             stream - A file object from which to read data (default: None)
-            fsw_ver - FSW version that determines CCSDS format of secondary header and padding
+            pkt_format - The format of the CCSDS packet defined by FSW version (typically 1.2.1 or 1.3)
         Keyword Arguments:
             - **hdr_data**: A bytes-like object containing 6-bytes of data that
@@ -142,14 +142,14 @@ def __init__(self, stream=None, **kwargs):
             enforced if these kwargs are used.
         """
         super(ScienceDataPacket, self).__init__(stream=stream, **kwargs)
-        self.fsw_ver = fsw_ver
-        self.SEC_HDR_LEN = 11 if fsw_ver == "1.2.1" else 13
-        self.MAX_DATA_LEN = 1479 if fsw_ver == "1.2.1" else 1477
+        self.pkt_format = pkt_format
+        self.SEC_HDR_LEN = 11 if pkt_format == "1.2.1" else 13
+        self.MAX_DATA_LEN = 1479 if pkt_format == "1.2.1" else 1477
 
     @property
     def data(self):
         if self.body:
-            if self.fsw_ver == "1.2.1":
+            if self.pkt_format == "1.2.1":
                 if self.pad_byte_flag == 0:
                     return self.body[self.SEC_HDR_LEN: -self.CRC_LEN]
                 else:
@@ -161,7 +161,7 @@ def data(self):
 
     @data.setter
     def data(self, data):
-        if self.fsw_ver == "1.2.1":
+        if self.pkt_format == "1.2.1":
             if self.pad_byte_flag == 0:
                 self.body = self.body[:self.SEC_HDR_LEN] + data + self.body[-self.CRC_LEN:]
             else:
@@ -218,7 +218,7 @@ def subheader_id(self):
 
     @property
     def real_pkt_data_len(self):
-        if self.fsw_ver == "1.2.1":
+        if self.pkt_format == "1.2.1":
             return None
         else:
             rpdl = -1
@@ -234,7 +234,7 @@ def real_pkt_data_len(self):
 
     @property
     def num_garbage_bytes(self):
-        if self.fsw_ver == "1.2.1":
+        if self.pkt_format == "1.2.1":
             return None
         else:
             return self.pkt_data_len - self.real_pkt_data_len
@@ -242,7 +242,7 @@ def num_garbage_bytes(self):
     @property
     def is_valid(self):
         """"""
-        if self.fsw_ver == "1.2.1":
+        if self.pkt_format == "1.2.1":
             crc = int.from_bytes(self.body[-self.CRC_LEN:], "big")
             calc_crc = zlib.crc32(self.hdr_data + self.body[:-self.CRC_LEN])
         else:
@@ -391,10 +391,10 @@ class SciencePacketProcessor:
     HEADER_SYNC_WORD = bytes.fromhex("81FFFF81")
     MIN_PROCABLE_PKT_LEN = 8
 
-    def __init__(self, stream_path, fsw_ver="1.3"):
-        logger.debug(f"Initializing SciencePacketProcessor from path {stream_path} using FSW v{fsw_ver}")
+    def __init__(self, stream_path, pkt_format="1.3"):
+        logger.debug(f"Initializing SciencePacketProcessor from path {stream_path} using FSW v{pkt_format}")
         self.stream = open(stream_path, "rb")
-        self.fsw_ver = fsw_ver
+        self.pkt_format = pkt_format
         self._cur_psc = -1
         self._cur_coarse = -1
         self._cur_fine = -1
@@ -422,7 +422,7 @@ def read_frame(self):
 
     def _read_next_packet(self):
         while True:
-            pkt = ScienceDataPacket(stream=self.stream, fsw_ver=self.fsw_ver)
+            pkt = ScienceDataPacket(stream=self.stream, pkt_format=self.pkt_format)
             logger.debug(pkt)
             self._stats.ccsds_read(pkt)
             pkt_hash = str(pkt.coarse_time) + str(pkt.fine_time) + str(pkt.pkt_seq_cnt)
@@ -550,7 +550,7 @@ def _read_pkt_parts(self, start_pkt):
             logger.info("Creating partial packet - frame length is less than the length of the packet's data.")
             # Create a partial and then read in short frame
             partial_data = start_pkt.data[expected_frame_len:]
-            if self.fsw_ver == "1.2.1":
+            if self.pkt_format == "1.2.1":
                 if start_pkt.pad_byte_flag == 0:
                     body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + start_pkt.body[-start_pkt.CRC_LEN:]
                 else:
@@ -559,7 +559,7 @@ def _read_pkt_parts(self, start_pkt):
             else:
                 body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + \
                     start_pkt.body[-(start_pkt.CRC_LEN + start_pkt.num_garbage_bytes):]
-            partial = ScienceDataPacket(hdr_data=start_pkt.hdr_data, body=body, fsw_ver=self.fsw_ver)
+            partial = ScienceDataPacket(hdr_data=start_pkt.hdr_data, body=body, pkt_format=self.pkt_format)
             self._pkt_partial = partial
 
             start_pkt.data = start_pkt.data[:expected_frame_len]
@@ -603,7 +603,7 @@ def _read_pkt_parts(self, start_pkt):
             logger.info("Creating partial packet - the accum data length is greater than the expected frame length")
             remaining_bytes = data_accum_len - expected_frame_len
             partial_data = pkt_parts[-1].data[-remaining_bytes:]
-            if self.fsw_ver == "1.2.1":
+            if self.pkt_format == "1.2.1":
                 if pkt_parts[-1].pad_byte_flag == 0:
                     body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + \
@@ -613,7 +613,7 @@ def _read_pkt_parts(self, start_pkt):
             else:
                 body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + \
                     pkt_parts[-1].body[-(pkt_parts[-1].CRC_LEN + pkt_parts[-1].num_garbage_bytes):]
-            partial = ScienceDataPacket(hdr_data=pkt_parts[-1].hdr_data, body=body, fsw_ver=self.fsw_ver)
+            partial = ScienceDataPacket(hdr_data=pkt_parts[-1].hdr_data, body=body, pkt_format=self.pkt_format)
             self._pkt_partial = partial
 
             # Remove extra data from last packet in packet parts
@@ -641,7 +641,7 @@ def _read_pkt_parts(self, start_pkt):
                 logger.info(f"Not inserting any more garbage packets because end of frame.")
                 break
             elif remaining_data_len >= pkt.MAX_DATA_LEN:
-                if self.fsw_ver == "1.2.1":
+                if self.pkt_format == "1.2.1":
                     if pkt.pad_byte_flag == 0:
                         body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + \
@@ -651,14 +651,14 @@ def _read_pkt_parts(self, start_pkt):
                 else:
                     body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + \
                         pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
-                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, fsw_ver=self.fsw_ver)
+                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, pkt_format=self.pkt_format)
                 pkt_parts.append(garbage_pkt)
                 data_accum_len += pkt.MAX_DATA_LEN
                 logger.info(f"Inserted garbage packet with {pkt.MAX_DATA_LEN} bytes of data. Accum data is "
                             f"now {data_accum_len}")
                 self._stats.corrupt_frame(frame)
             elif 0 < remaining_data_len < pkt.MAX_DATA_LEN:
-                if self.fsw_ver == "1.2.1":
+                if self.pkt_format == "1.2.1":
                     if pkt.pad_byte_flag == 0:
                         body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + \
@@ -668,7 +668,7 @@ def _read_pkt_parts(self, start_pkt):
                 else:
                     body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + \
                         pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
-                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, fsw_ver=self.fsw_ver)
+                garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, pkt_format=self.pkt_format)
                 pkt_parts.append(garbage_pkt)
                 data_accum_len += remaining_data_len
                 logger.info(f"Inserted garbage packet with {remaining_data_len} bytes of data. Accum data is "

diff --git a/util/find_all_sync_words.py b/util/find_all_sync_words.py
index c79166a..3da87a4 100644
--- a/util/find_all_sync_words.py
+++ b/util/find_all_sync_words.py
@@ -12,7 +12,7 @@
 parser = argparse.ArgumentParser()
 parser.add_argument("infile")
 parser.add_argument("method", type=int, default=1)
-parser.add_argument("fsw_ver", default="1.3")
+parser.add_argument("pkt_format", default="1.3")
 args = parser.parse_args()
 
 in_file = open(args.infile, "rb")
@@ -23,7 +23,7 @@
 cnt = 0
 while True:
     try:
-        pkt = ScienceDataPacket(in_file, fsw_ver=args.fsw_ver)
+        pkt = ScienceDataPacket(in_file, pkt_format=args.pkt_format)
         cnt += 1
         data += pkt.data
     except EOFError:

From cedc670180a4df0a38e7590cc16c2648c71a1fe3 Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Fri, 8 Jul 2022 10:55:43 -0700
Subject: [PATCH 09/15] Fix PEP8 indents

---
 emit_sds_l1a/ccsds_packet.py | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py
index a55ca46..4138728 100644
--- a/emit_sds_l1a/ccsds_packet.py
+++ b/emit_sds_l1a/ccsds_packet.py
@@ -555,10 +555,10 @@ def _read_pkt_parts(self, start_pkt):
                     body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + start_pkt.body[-start_pkt.CRC_LEN:]
                 else:
                     body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + bytearray(1) + \
-                        start_pkt.body[-start_pkt.CRC_LEN:]
+                           start_pkt.body[-start_pkt.CRC_LEN:]
             else:
                 body = start_pkt.body[:start_pkt.SEC_HDR_LEN] + partial_data + \
-                    start_pkt.body[-(start_pkt.CRC_LEN + start_pkt.num_garbage_bytes):]
+                       start_pkt.body[-(start_pkt.CRC_LEN + start_pkt.num_garbage_bytes):]
             partial = ScienceDataPacket(hdr_data=start_pkt.hdr_data, body=body, pkt_format=self.pkt_format)
             self._pkt_partial = partial
 
@@ -606,13 +606,13 @@ def _read_pkt_parts(self, start_pkt):
             if self.pkt_format == "1.2.1":
                 if pkt_parts[-1].pad_byte_flag == 0:
                     body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + \
-                        pkt_parts[-1].body[-pkt_parts[-1].CRC_LEN:]
+                           pkt_parts[-1].body[-pkt_parts[-1].CRC_LEN:]
                 else:
                     body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + bytearray(1) + \
-                        pkt_parts[-1].body[-pkt_parts[-1].CRC_LEN:]
+                           pkt_parts[-1].body[-pkt_parts[-1].CRC_LEN:]
             else:
                 body = pkt_parts[-1].body[:pkt_parts[-1].SEC_HDR_LEN] + partial_data + \
-                    pkt_parts[-1].body[-(pkt_parts[-1].CRC_LEN + pkt_parts[-1].num_garbage_bytes):]
+                       pkt_parts[-1].body[-(pkt_parts[-1].CRC_LEN + pkt_parts[-1].num_garbage_bytes):]
             partial = ScienceDataPacket(hdr_data=pkt_parts[-1].hdr_data, body=body, pkt_format=self.pkt_format)
             self._pkt_partial = partial
 
@@ -644,13 +644,13 @@ def _read_pkt_parts(self, start_pkt):
                 if self.pkt_format == "1.2.1":
                     if pkt.pad_byte_flag == 0:
                         body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + \
-                            pkt.body[-pkt.CRC_LEN:]
+                               pkt.body[-pkt.CRC_LEN:]
                     else:
                         body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + bytearray(1) + \
-                            pkt.body[-pkt.CRC_LEN:]
+                               pkt.body[-pkt.CRC_LEN:]
                 else:
                     body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(pkt.MAX_DATA_LEN) + \
-                        pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
+                           pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
                 garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, pkt_format=self.pkt_format)
                 pkt_parts.append(garbage_pkt)
                 data_accum_len += pkt.MAX_DATA_LEN
@@ -661,13 +661,13 @@ def _read_pkt_parts(self, start_pkt):
                 if self.pkt_format == "1.2.1":
                     if pkt.pad_byte_flag == 0:
                         body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + \
-                            pkt.body[-pkt.CRC_LEN:]
+                               pkt.body[-pkt.CRC_LEN:]
                     else:
                         body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + bytearray(1) + \
-                            pkt.body[-pkt.CRC_LEN:]
+                               pkt.body[-pkt.CRC_LEN:]
                 else:
                     body = pkt.body[:pkt.SEC_HDR_LEN] + bytearray(remaining_data_len) + \
-                        pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
+                           pkt.body[-(pkt.CRC_LEN + pkt.num_garbage_bytes):]
                 garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body, pkt_format=self.pkt_format)
                 pkt_parts.append(garbage_pkt)
                 data_accum_len += remaining_data_len

From fc290cd720e3762984f4c338898598950f4ece7c Mon Sep 17 00:00:00 2001
From: Winston Olson-Duvall
Date: Mon, 18 Jul 2022 16:00:52 -0700
Subject: [PATCH 10/15] Add dcid_reassembly_report.txt that includes
 reassembly stats for all acquisitions.

---
 reassemble_raw_cube.py | 111 ++++++++++++++++++++++++++++-------------
 1 file changed, 76 insertions(+), 35 deletions(-)

diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py
index ae646b7..cee3b68 100644
--- a/reassemble_raw_cube.py
+++ b/reassemble_raw_cube.py
@@ -340,6 +340,9 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time,
                     f.write(f"{str(line_num).zfill(6)}\n")
             f.write("\n")
 
+    result = {"corrupt_lines": corrupt_lines}
+    return result
+
 
 def main():
 
@@ -584,51 +587,89 @@ def main():
         raise RuntimeError(f"Chunksize of {args.chunksize} must be a multiple of {num_lines}")
     frame_chunksize = min(args.chunksize // num_lines, num_frames)
     report_txt += f"Partition: {'processed' if processed_flag == 1 else 'raw'}\n"
-    report_txt += f"Number of lines per frame: {num_lines}\n"
+    report_txt += f"Number of lines per frame: {num_lines}\n\n"
     report_txt += f"Chunksize provided by args: {args.chunksize} lines or {args.chunksize // num_lines} frames\n"
     report_txt += f"Chunksize used to to split up acquisitions: {frame_chunksize * num_lines} lines or " \
                   f"{frame_chunksize} frames\n\n"
     logger.info(f"Using frame chunksize of {frame_chunksize} to split data collection into acquisitions.")
 
+    total_corrupt_lines = 0
     # Only do the chunking if there is enough left over for another full chunk
     while i + (2 * frame_chunksize) <= num_frames:
         acq_data_paths = frame_data_paths[i: i + frame_chunksize]
-        reassemble_acquisition(acq_data_paths=acq_data_paths,
-                               start_index=i,
-                               stop_index=i + frame_chunksize - 1,
-                               start_time=start_stop_times[i][0],
-                               stop_time=start_stop_times[i + frame_chunksize - 1][1],
-                               timing_info=timing_info,
-                               processed_flag=processed_flag,
-                               coadd_mode=coadd_mode,
-                               num_bands=num_bands,
-                               num_lines=num_lines,
-                               instrument_mode=instrument_mode,
-                               image_dir=image_dir,
-                               report_text=report_txt,
-                               failed_decompression_list=failed_decompression_list,
-                               uncompressed_list=uncompressed_list,
-                               missing_frame_nums=missing_frame_nums,
-                               logger=logger)
+        result = reassemble_acquisition(acq_data_paths=acq_data_paths,
+                                        start_index=i,
+                                        stop_index=i + frame_chunksize - 1,
+                                        start_time=start_stop_times[i][0],
+                                        stop_time=start_stop_times[i + frame_chunksize - 1][1],
+                                        timing_info=timing_info,
+                                        processed_flag=processed_flag,
+                                        coadd_mode=coadd_mode,
+                                        num_bands=num_bands,
+                                        num_lines=num_lines,
+                                        instrument_mode=instrument_mode,
+                                        image_dir=image_dir,
+                                        report_text=report_txt,
+                                        failed_decompression_list=failed_decompression_list,
+                                        uncompressed_list=uncompressed_list,
+                                        missing_frame_nums=missing_frame_nums,
+                                        logger=logger)
         i += frame_chunksize
+        total_corrupt_lines += len(result["corrupt_lines"])
 
     # There will be one left over at the end that is the frame_chunksize + remaining frames
     acq_data_paths = frame_data_paths[i:]
-    reassemble_acquisition(acq_data_paths=acq_data_paths,
-                           start_index=i,
-                           stop_index=num_frames - 1,
-                           start_time=start_stop_times[i][0],
-                           stop_time=start_stop_times[num_frames - 1][1],
-                           timing_info=timing_info,
-                           processed_flag=processed_flag,
-                           coadd_mode=coadd_mode,
-                           num_bands=num_bands,
-                           num_lines=num_lines,
-                           instrument_mode=instrument_mode,
-                           image_dir=image_dir,
-                           report_text=report_txt,
-                           failed_decompression_list=failed_decompression_list,
-                           uncompressed_list=uncompressed_list,
-                           missing_frame_nums=missing_frame_nums,
-                           logger=logger)
+    result = reassemble_acquisition(acq_data_paths=acq_data_paths,
+                                    start_index=i,
+                                    stop_index=num_frames - 1,
+                                    start_time=start_stop_times[i][0],
+                                    stop_time=start_stop_times[num_frames - 1][1],
+                                    timing_info=timing_info,
+                                    processed_flag=processed_flag,
+                                    coadd_mode=coadd_mode,
+                                    num_bands=num_bands,
+                                    num_lines=num_lines,
+                                    instrument_mode=instrument_mode,
+                                    image_dir=image_dir,
+                                    report_text=report_txt,
+                                    failed_decompression_list=failed_decompression_list,
+                                    uncompressed_list=uncompressed_list,
+                                    missing_frame_nums=missing_frame_nums,
+                                    logger=logger)
+    total_corrupt_lines += len(result["corrupt_lines"])
+
+    # Write out a report for the data collection as a whole
+    dcid_report_path = os.path.join(args.work_dir, f"{dcid}_reassembly_report.txt")
+    with open(dcid_report_path, "w") as f:
+        f.write(report_txt)
+        # Instrument mode
+        f.write(f"Instrument mode: {instrument_mode}\n")
+        f.write(f"Instrument mode description: {INSTRUMENT_MODE_DESCRIPTIONS[instrument_mode]}\n\n")
+        # Decompression errors
+        f.write(f"Total decompression errors in this data collection: {len(failed_decompression_list)}\n")
+        f.write("List of frame numbers that failed decompression (if any):\n")
+        if len(failed_decompression_list) > 0:
+            f.write("\n".join(i for i in failed_decompression_list) + "\n")
+        f.write("\n")
+        # Missing frames
+        f.write(f"Total missing frames in this data collection: {len(missing_frame_nums)}\n")
+        f.write("List of missing frame numbers (if any):\n")
+        if len(missing_frame_nums) > 0:
+            f.write("\n".join(i for i in missing_frame_nums) + "\n")
+        f.write("\n")
+        # Cloudy frames
+        cloudy_frame_nums = []
+        for p in frame_data_paths:
+            if int(os.path.basename(p).split("_")[4]) in (4, 5):
+                cloudy_frame_nums.append(int(os.path.basename(p).split("_")[2]))
+        cloudy_frame_nums = [str(num).zfill(5) for num in cloudy_frame_nums]
+        cloudy_frame_nums.sort()
+        f.write(f"Total cloudy frames in this data collection: {len(cloudy_frame_nums)}\n")
+        f.write(f"List of cloudy frame numbers (if any):\n")
+        if len(cloudy_frame_nums) > 0:
+            f.write("\n".join(i for i in cloudy_frame_nums) + "\n")
+        f.write("\n")
+        # Corrupt Lines
+        f.write(f"Total corrupt lines (line count mismatches) in this data collection: {total_corrupt_lines}\n")
 
     logger.info("Done")
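The structural change in this patch is that reassemble_acquisition now returns its per-acquisition statistics instead of only writing them into its own report, letting main() aggregate across the whole data collection. The pattern, reduced to a runnable stub (the per-acquisition corrupt-line lists below are fabricated):

    def reassemble_acquisition_stub(corrupt_lines):
        # Stand-in for the real function, which returns {"corrupt_lines": [...]}
        return {"corrupt_lines": corrupt_lines}

    total_corrupt_lines = 0
    for per_acq in ([3, 7], [], [12]):
        result = reassemble_acquisition_stub(per_acq)
        total_corrupt_lines += len(result["corrupt_lines"])
    print(f"Total corrupt lines (line count mismatches) in this data collection: {total_corrupt_lines}")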
acquisition.", type=int, default=320000) + parser.add_argument("--orbit", help="Orbit number (padded)", default="00000") + parser.add_argument("--scene", help="Scene in orbit number (padded)", default="000") + parser.add_argument("--submode", help="Submode (science or dark)", default="science") parser.add_argument("--test_mode", action="store_true", help="If enabled, don't throw errors regarding unprocessed or un-coadded data") @@ -410,7 +413,10 @@ def main(): report_txt += f"Input frames directory: {args.frames_dir}\n" expected_frame_num_str = os.path.basename(frame_paths[0]).split("_")[3] report_txt += f"Total number of expected frames (from frame header): " \ - f"{int(expected_frame_num_str)}\n\n" + f"{int(expected_frame_num_str)}\n" + report_txt += f"Orbit: {args.orbit}\n" + report_txt += f"Scene: {args.scene}\n" + report_txt += f"Submode: {args.submode}\n\n" # Set up various lists to track frame parameters (num bands, processed, coadd mode) frame_data_paths = [] @@ -580,14 +586,16 @@ def main(): num, expected_frame_num_str, "7"]))) frame_data_paths.sort(key=lambda x: os.path.basename(x).split("_")[2]) + # Update report based on frames + report_txt += f"Partition: {'processed' if processed_flag == 1 else 'raw'}\n" + report_txt += f"Number of lines per frame: {num_lines}\n\n" + # Loop through the frames and create acquisitions i = 0 num_frames = len(frame_data_paths) if args.chunksize % num_lines != 0: raise RuntimeError(f"Chunksize of {args.chunksize} must be a multiple of {num_lines}") frame_chunksize = min(args.chunksize // num_lines, num_frames) - report_txt += f"Partition: {'processed' if processed_flag == 1 else 'raw'}\n" - report_txt += f"Number of lines per frame: {num_lines}\n\n" report_txt += f"Chunksize provided by args: {args.chunksize} lines or {args.chunksize // num_lines} frames\n" report_txt += f"Chunksize used to to split up acquisitions: {frame_chunksize * num_lines} lines or " \ f"{frame_chunksize} frames\n\n" From 252b4f2e46817d0465bac8f17023c231d0936a0e Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 19 Jul 2022 08:39:16 -0700 Subject: [PATCH 12/15] Tweak report formatting. Move instrument mode to data collection portion. 
--- reassemble_raw_cube.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index c7e40be..70dde98 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -125,8 +125,8 @@ def get_utc_time_from_gps(gps_time): def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, stop_time, timing_info, processed_flag, - coadd_mode, num_bands, num_lines, instrument_mode, image_dir, report_text, - failed_decompression_list, uncompressed_list, missing_frame_nums, logger): + coadd_mode, num_bands, num_lines, image_dir, report_text, failed_decompression_list, + uncompressed_list, missing_frame_nums, logger): # Reassemble frames into ENVI image cube filling in missing and cloudy data with data flags # First create acquisition_id from frame start_time # Assume acquisitions are at least 1 second long @@ -259,9 +259,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, f.write(f'Stop time: {stop_time}\n') f.write(f"Number of samples: 1280\n") f.write(f"Number of bands: {num_bands}\n") - f.write(f"Number of lines: {num_lines_in_acq}\n") - f.write(f"Instrument mode: {instrument_mode}\n") - f.write(f"Instrument mode description: {INSTRUMENT_MODE_DESCRIPTIONS[instrument_mode]}\n\n") + f.write(f"Number of lines: {num_lines_in_acq}\n\n") f.write(f"First frame number in acquisition: {str(start_index).zfill(5)}\n") f.write(f"Last frame number in acquisition: {str(stop_index).zfill(5)}\n\n") @@ -413,7 +411,8 @@ def main(): report_txt += f"Input frames directory: {args.frames_dir}\n" expected_frame_num_str = os.path.basename(frame_paths[0]).split("_")[3] report_txt += f"Total number of expected frames (from frame header): " \ - f"{int(expected_frame_num_str)}\n" + f"{int(expected_frame_num_str)}\n\n" + report_txt += f"Orbit: {args.orbit}\n" report_txt += f"Scene: {args.scene}\n" report_txt += f"Submode: {args.submode}\n\n" @@ -587,7 +586,9 @@ def main(): frame_data_paths.sort(key=lambda x: os.path.basename(x).split("_")[2]) # Update report based on frames - report_txt += f"Partition: {'processed' if processed_flag == 1 else 'raw'}\n" + report_txt += f"Partition: {'processed' if processed_flag == 1 else 'raw'}\n\n" + report_txt += f"Instrument mode: {instrument_mode}\n" + report_txt += f"Instrument mode description: {INSTRUMENT_MODE_DESCRIPTIONS[instrument_mode]}\n\n" report_txt += f"Number of lines per frame: {num_lines}\n\n" # Loop through the frames and create acquisitions @@ -614,7 +615,6 @@ def main(): coadd_mode=coadd_mode, num_bands=num_bands, num_lines=num_lines, - instrument_mode=instrument_mode, image_dir=image_dir, report_text=report_txt, failed_decompression_list=failed_decompression_list, @@ -636,7 +636,6 @@ def main(): coadd_mode=coadd_mode, num_bands=num_bands, num_lines=num_lines, - instrument_mode=instrument_mode, image_dir=image_dir, report_text=report_txt, failed_decompression_list=failed_decompression_list, @@ -649,9 +648,6 @@ def main(): dcid_report_path = os.path.join(args.work_dir, f"{dcid}_reassembly_report.txt") with open(dcid_report_path, "w") as f: f.write(report_txt) - # Instrument mode - f.write(f"Instrument mode: {instrument_mode}\n") - f.write(f"Instrument mode description: {INSTRUMENT_MODE_DESCRIPTIONS[instrument_mode]}\n\n") # Decompression errors f.write(f"Total decompression errors in this data collection: {len(failed_decompression_list)}\n") f.write("List of frame numbers that failed decompression (if any):\n") From 
6fd18358ef4e8a69a8b881dff538cc0d620a6c3f Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 20 Jul 2022 14:28:38 -0700 Subject: [PATCH 13/15] Add new instrument mode used only in testing. --- emit_sds_l1a/frame.py | 9 +++++++++ reassemble_raw_cube.py | 1 + 2 files changed, 10 insertions(+) diff --git a/emit_sds_l1a/frame.py b/emit_sds_l1a/frame.py index 8fb16dc..5d964e5 100644 --- a/emit_sds_l1a/frame.py +++ b/emit_sds_l1a/frame.py @@ -105,6 +105,15 @@ 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, 0x8c, 0xef]) + }, + "warm_img_row0_row327_not_flight": { + "desc": "Older version of Nominal Warm FPA used in testing", + "roic_values": + bytearray([0xc3, 0x34, 0x00, 0x00, 0x47, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x01, 0x02, 0x8c, 0x02, 0x02, 0x0e, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) } } diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index 70dde98..c042b22 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -39,6 +39,7 @@ "cold_img_slow": "Maximum integration time Cold FPA", "warm_img": "Nominal Warm FPA", "warm_img_short_integration": "Minimum integration time Warm FPA", + "warm_img_row0_row327_not_flight": "Older version of Nominal Warm FPA used in testing", "no_match": "No match" } From a7d4d313546443a7f6d77c020ec1a374786160b7 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Fri, 22 Jul 2022 13:14:25 -0700 Subject: [PATCH 14/15] Increment version to 1.4.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 93ca776..9577ccf 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ setuptools.setup( name="emit_sds_l1a", - version="1.3.0", + version="1.4.0", author="Winston Olson-Duvall", author_email="winston.olson-duvall@jpl.nasa.gov", description=""" From 9550cf64269882783dafa53360be13f1632bcf1b Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Fri, 22 Jul 2022 13:15:58 -0700 Subject: [PATCH 15/15] Update change log. --- CHANGELOG.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b16124..2020c30 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,16 +4,27 @@ All notable changes to this project will be documented in this file. Dates are d Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog). +#### [v1.4.0](https://github.com/emit-sds/emit-sds-l1a/compare/v1.3.0...v1.4.0) + +> 22 July 2022 + +- Dcid report [`#5`](https://github.com/emit-sds/emit-sds-l1a/pull/5) +- FSW 1.3 CCSDS update and pkt_format switch [`#4`](https://github.com/emit-sds/emit-sds-l1a/pull/4) +- Update ScienceDataPacket and SciencePacketProcessor to use new fsw_ver switch. [`f934b92`](https://github.com/emit-sds/emit-sds-l1a/commit/f934b925858892415d98d5d81bd7f0ea3faa06f1) +- Add dcid_reassembly_report.txt that includes reassembly stats for all acquisitions. [`fc290cd`](https://github.com/emit-sds/emit-sds-l1a/commit/fc290cd720e3762984f4c338898598950f4ece7c) +- Remove checking for pad byte flag. Instead increase secondary header size to 13. Add real_pkt_data_len. Compute num_garbage_bytes. Update data getter and setter. 
Update crc calculation. Update all places in code where partial or garbage packets are created. [`90263ec`](https://github.com/emit-sds/emit-sds-l1a/commit/90263eca8e457d7a79a57cf39f4de7d49f0aa544) + #### [v1.3.0](https://github.com/emit-sds/emit-sds-l1a/compare/v1.2.0...v1.3.0) > 6 June 2022 +- Merge develop into main for v1.3.0 [`#3`](https://github.com/emit-sds/emit-sds-l1a/pull/3) - J2000 and BAD timing [`#2`](https://github.com/emit-sds/emit-sds-l1a/pull/2) - Check number of valid lines and instrument mode [`#1`](https://github.com/emit-sds/emit-sds-l1a/pull/1) - Pre ioc ccsds updates [`#21`](https://github.com/emit-sds/emit-sds-l1a/pull/21) - update LICENSE and README [`ec26b1e`](https://github.com/emit-sds/emit-sds-l1a/commit/ec26b1ecb48d0601bab1803671cca1deb8a1896f) - Check frame instrument modes for consistency and add mode description to report. [`c3244ab`](https://github.com/emit-sds/emit-sds-l1a/commit/c3244ab2132e8786ef162cfd65a7c8581a26e3a9) -- Add utils to check packet sizes below a threshold and to find all the sync words in a stream. [`975ce53`](https://github.com/emit-sds/emit-sds-l1a/commit/975ce53bc98e4cd7ae8c732e421616c3196d3472) +- Update change log [`8078f17`](https://github.com/emit-sds/emit-sds-l1a/commit/8078f177a39a61b265e7611fe530f5afaaaabd18) #### [v1.2.0](https://github.com/emit-sds/emit-sds-l1a/compare/v1.1.0...v1.2.0)