From 5a24ee61259e05485c620138af58ed487a9ec5c0 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 23 Mar 2022 15:17:48 -0700 Subject: [PATCH 01/26] Sort the frame paths using the basename not the full path --- reassemble_raw_cube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index b8d6315..118d784 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -542,7 +542,7 @@ def main(): frame_data_paths.append( os.path.join(image_dir, "_".join([dcid, start_stop_times[int(num)][0].strftime("%Y%m%dt%H%M%S"), num, expected_frame_num_str, "7"]))) - frame_data_paths.sort(key=lambda x: x.split("_")[2]) + frame_data_paths.sort(key=lambda x: os.path.basename(x).split("_")[2]) # Loop through the frames and create acquisitions i = 0 From 975ce53bc98e4cd7ae8c732e421616c3196d3472 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 29 Mar 2022 09:12:04 -0700 Subject: [PATCH 02/26] Add utils to check packet sizes below a threshold and to find all the sync words in a stream. 
--- util/find_all_sync_words.py | 54 +++++++++++++++++++++++++++++++++++++ util/packet_size_check.py | 29 ++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 util/find_all_sync_words.py create mode 100644 util/packet_size_check.py diff --git a/util/find_all_sync_words.py b/util/find_all_sync_words.py new file mode 100644 index 0000000..b4d64f2 --- /dev/null +++ b/util/find_all_sync_words.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +import argparse +import datetime +import itertools + +from emit_sds_l1a.ccsds_packet import ScienceDataPacket + +PRIMARY_HDR_LEN = 6 +HEADER_SYNC_WORD = bytes.fromhex("81FFFF81") + +parser = argparse.ArgumentParser() +parser.add_argument("infile") +parser.add_argument("method", type=int, default=1) +args = parser.parse_args() + +in_file = open(args.infile, "rb") +data = bytearray() + +print(datetime.datetime.now()) + +cnt = 0 +while True: + try: + pkt = ScienceDataPacket(in_file) + cnt += 1 + data += pkt.data + except EOFError: + break + +print(f"Count of packets: {cnt}") +print(datetime.datetime.now()) + +indices = [] + +if args.method == 1: + print("Using itertools...") + data_iters = itertools.tee(data, len(HEADER_SYNC_WORD)) + print(f"len(data_iters): {len(data_iters)}") + for i, it in enumerate(data_iters): + next(itertools.islice(it, i, i), None) + + for i, chunk in enumerate(zip(*data_iters)): + if bytearray(chunk) == HEADER_SYNC_WORD: + indices.append(i) +else: + print("Not using itertools...") + for i in range(len(data) - len(HEADER_SYNC_WORD)): + if data[i: i + len(HEADER_SYNC_WORD)] == HEADER_SYNC_WORD: + indices.append(i) + +print(indices) +print(datetime.datetime.now()) + diff --git a/util/packet_size_check.py b/util/packet_size_check.py new file mode 100644 index 0000000..2108605 --- /dev/null +++ b/util/packet_size_check.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +import argparse + +import emit.data_products as dp + +PRIMARY_HDR_LEN = 6 + +parser = argparse.ArgumentParser() 
+parser.add_argument("infile") +parser.add_argument("threshold", type=int) +args = parser.parse_args() + +in_file = open(args.infile, "rb") + +cnt = 0 +skip = 4000 +while True: + try: + pkt = dp.CCSDSPacket(in_file) + pkt_size = PRIMARY_HDR_LEN + pkt.pkt_data_len + 1 + if pkt_size < args.threshold: + cnt += 1 + if pkt.pkt_seq_cnt % skip == 0: + print(f"Packet {str(pkt.pkt_seq_cnt).zfill(5)} size: {pkt_size}") + except EOFError: + break + +print(f"Count of packets less than {args.threshold} bytes: {cnt}") From 25d84654571a3afd5222c2ab4b53d0d7727ef92b Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 29 Mar 2022 09:13:05 -0700 Subject: [PATCH 03/26] Fix PEP8 --- util/find_all_sync_words.py | 1 - 1 file changed, 1 deletion(-) diff --git a/util/find_all_sync_words.py b/util/find_all_sync_words.py index b4d64f2..990dd82 100644 --- a/util/find_all_sync_words.py +++ b/util/find_all_sync_words.py @@ -51,4 +51,3 @@ print(indices) print(datetime.datetime.now()) - From 4af5ed53ab5486ce74179b0a557224254dc5ca5c Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 29 Mar 2022 09:23:36 -0700 Subject: [PATCH 04/26] Add total count of sync words. 
--- util/find_all_sync_words.py | 1 + 1 file changed, 1 insertion(+) diff --git a/util/find_all_sync_words.py b/util/find_all_sync_words.py index 990dd82..31b3815 100644 --- a/util/find_all_sync_words.py +++ b/util/find_all_sync_words.py @@ -50,4 +50,5 @@ indices.append(i) print(indices) +print(f"Total sync words found: {len(indices)}") print(datetime.datetime.now()) From 549636ce833085f5f0e62e7b67163c12620821b4 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Mon, 16 May 2022 14:07:20 -0700 Subject: [PATCH 05/26] Fix raw waterfall script to use 'Agg' back-end --- util/raw_waterfall.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/util/raw_waterfall.py b/util/raw_waterfall.py index bc5b3ea..fef23eb 100644 --- a/util/raw_waterfall.py +++ b/util/raw_waterfall.py @@ -9,7 +9,10 @@ from pathlib import Path import sys +import matplotlib +matplotlib.use('Agg') import matplotlib.pyplot as plt + from mpl_toolkits.axes_grid1 import make_axes_locatable import numpy as np import spectral.io.envi as envi From d6e789c39b756470a54540120be1865859e94bf0 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Mon, 16 May 2022 14:11:20 -0700 Subject: [PATCH 06/26] Fix PEP8 --- test/pycodestyle_config.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/pycodestyle_config.txt b/test/pycodestyle_config.txt index 52cf82a..6bb5ed5 100644 --- a/test/pycodestyle_config.txt +++ b/test/pycodestyle_config.txt @@ -1,2 +1,2 @@ [pycodestyle] -ignore = E501 \ No newline at end of file +ignore = E501, E402 \ No newline at end of file From a265bd38ad73641a29e5819c3add0e51e63fe6ed Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 17 May 2022 11:18:36 -0700 Subject: [PATCH 07/26] Add some diagnostic logging for sequence flags and number of data bytes read. Start to implement frame checksum check and PSC mismatch improvement. 
--- emit_sds_l1a/ccsds_packet.py | 33 ++++++++++++++++++++++++++------- util/packet_size_check.py | 22 +++++++++++++++++++++- 2 files changed, 47 insertions(+), 8 deletions(-) diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py index 8d29252..8b8445d 100644 --- a/emit_sds_l1a/ccsds_packet.py +++ b/emit_sds_l1a/ccsds_packet.py @@ -8,6 +8,8 @@ import logging import zlib +from emit_sds_l1a.frame import Frame + from enum import Enum from sortedcontainers import SortedDict @@ -15,7 +17,9 @@ class PSCMismatchException(Exception): - pass + + def __init__(self, msg, next_psc, cur_psc, **kwargs): + super(Exception, self).__init__(msg, **kwargs) class CCSDSPacket: @@ -132,7 +136,7 @@ def __init__(self, stream=None, **kwargs): enforced if these kwargs are used. """ super(ScienceDataPacket, self).__init__(stream=stream, **kwargs) - logger.debug("SDP primary header: " + str([bin(self.hdr_data[i])[2:].zfill(8) for i in range(self.PRIMARY_HDR_LEN)])) + # logger.debug("SDP primary header: " + str([bin(self.hdr_data[i])[2:].zfill(8) for i in range(self.PRIMARY_HDR_LEN)])) @property def data(self): @@ -146,7 +150,6 @@ def data(self): @data.setter def data(self, data): - # TODO: Update this with pad byte if self.pad_byte_flag == 0: self.body = self.body[:self.SEC_HDR_LEN] + data + self.body[-self.CRC_LEN:] else: @@ -236,8 +239,8 @@ def product_length(self): def __repr__(self): pkt_str = "= 1280 and frame is None: + frame = Frame(pkt_parts[:1280]) + if frame.is_valid(): + logger.info(f"Found valid frame checksum for frame: {frame}") + else: + # TODO: Must be invalid, need to start over + pass + if data_accum_len == expected_frame_len: # We're done diff --git a/util/packet_size_check.py b/util/packet_size_check.py index 2108605..9bce1ad 100644 --- a/util/packet_size_check.py +++ b/util/packet_size_check.py @@ -3,6 +3,7 @@ import argparse import emit.data_products as dp +from emit_sds_l1a.ccsds_packet import ScienceDataPacket PRIMARY_HDR_LEN = 6 @@ -13,12 +14,28 @@ 
in_file = open(args.infile, "rb") +seq_flag_counts = { + "0": 0, + "1": 0, + "2": 0, + "3": 0 +} + cnt = 0 skip = 4000 +total = 0 +counts = {} while True: try: - pkt = dp.CCSDSPacket(in_file) + pkt = ScienceDataPacket(in_file) + total += 1 + seq_flag_counts[str(pkt.seq_flags)] += 1 pkt_size = PRIMARY_HDR_LEN + pkt.pkt_data_len + 1 + data_size = len(pkt.data) + if data_size in counts: + counts[data_size] += 1 + else: + counts[data_size] = 1 if pkt_size < args.threshold: cnt += 1 if pkt.pkt_seq_cnt % skip == 0: @@ -26,4 +43,7 @@ except EOFError: break +print(f"Total packets: {total}") print(f"Count of packets less than {args.threshold} bytes: {cnt}") +print(f"seq_flag_counts: {seq_flag_counts}") +print(counts) From a2e6cad2e79e3cb599259790c1f87c38c5fb4582 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 17 May 2022 12:46:43 -0700 Subject: [PATCH 08/26] Add check for valid frame using frame checksum --- emit_sds_l1a/ccsds_packet.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py index 8b8445d..3e38a83 100644 --- a/emit_sds_l1a/ccsds_packet.py +++ b/emit_sds_l1a/ccsds_packet.py @@ -453,7 +453,7 @@ def _read_frame_start_packet(self): self._stats.reset_bytes_since_last_index() # If sync word is found, check minimum processable length and read next packet if needed logger.info(f"Found sync word at index {index} in packet {pkt}") - logger.info(f"Sync word is at data index {self._stats.get_data_bytes_read() - len(pkt.data) + index}") + logger.debug(f"Sync word is at data index {self._stats.get_data_bytes_read() - len(pkt.data) + index}") # Remove data before sync word so packet data starts at the beginning of the frame pkt.data = pkt.data[index:] # Read follow on packet if data doesn't contain enough info (SYNC WORD + frame img size) @@ -515,7 +515,11 @@ def _read_pkt_parts(self, start_pkt): while True: # After the first 1280 bytes are read, check the frame checksum if 
data_accum_len >= 1280 and frame is None: - frame = Frame(pkt_parts[:1280]) + hdr_bytes = bytearray() + while len(hdr_bytes) < 1280: + for pkt in pkt_parts: + hdr_bytes += pkt.data + frame = Frame(hdr_bytes[:1280]) if frame.is_valid(): logger.info(f"Found valid frame checksum for frame: {frame}") else: From 619f7d988b78b89c337c4b1d5acd77bc44d2db7e Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 18 May 2022 09:22:20 -0700 Subject: [PATCH 09/26] Return packet with PSCMismatch exception. Add in garbage packets up until the end of the frame (use expected frame length to identify). --- emit_sds_l1a/ccsds_packet.py | 45 +++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py index 3e38a83..56dd005 100644 --- a/emit_sds_l1a/ccsds_packet.py +++ b/emit_sds_l1a/ccsds_packet.py @@ -18,8 +18,10 @@ class PSCMismatchException(Exception): - def __init__(self, msg, next_psc, cur_psc, **kwargs): + def __init__(self, msg, pkt, next_psc, **kwargs): super(Exception, self).__init__(msg, **kwargs) + self.pkt = pkt + self.next_psc = next_psc class CCSDSPacket: @@ -341,6 +343,7 @@ class SciencePacketProcessor: SEC_HDR_LEN = 11 MIN_PROCABLE_PKT_LEN = 8 CRC_LEN = 4 + MAX_DATA_LEN = 1479 def __init__(self, stream_path): logger.debug(f"Initializing SciencePacketProcessor from path {stream_path}") @@ -417,9 +420,8 @@ def _read_next_packet(self): self._cur_coarse = pkt.coarse_time self._cur_fine = pkt.fine_time self._processed_pkts[pkt_hash] = True - self._pkt_partial = pkt msg = f"Expected next psc of {next_psc} not equal to the psc of the next packet read {pkt.pkt_seq_cnt}" - raise PSCMismatchException(msg, next_psc, pkt.pkt_seq_cnt) + raise PSCMismatchException(msg, pkt, next_psc) def _read_frame_start_packet(self): sync_word_warning_count = 0 @@ -483,6 +485,7 @@ def _read_frame_start_packet(self): except PSCMismatchException as e: logger.warning(e) logger.warning("While 
looking for frame start packet, encountered PSC mismatch.") + self._pkt_partial = e.pkt def _read_pkt_parts(self, start_pkt): # Expected frame size is data length plus 1280 bytes for header @@ -559,9 +562,39 @@ def _read_pkt_parts(self, start_pkt): pkt = self._read_next_packet() except PSCMismatchException as e: logger.warning(e) - logger.warning("While reading packet parts, encountered PSC mismatch. Returning truncated frame.") - self._stats.truncated_frame() - return pkt_parts + # logger.warning("While reading packet parts, encountered PSC mismatch. Returning truncated frame.") + # self._stats.truncated_frame() + # return pkt_parts + + # Determine number of missing packets + pkt = e.pkt + num_missing = self._cur_psc - e.next_psc if self._cur_psc > e.next_psc \ + else self._cur_psc + pkt.CCSDS_PKT_SEC_COUNT_MOD - e.next_psc + + logger.info(f"While reading packet parts, encountered {num_missing} missing packets. Attempting to " + f"insert garbage packets") + + # Only insert garbage packets if the remaining data length can accommodate it + for i in range(num_missing): + remaining_data_len = expected_frame_len - data_accum_len + if remaining_data_len == 0: + logger.info(f"Not inserting any more garbage packets because end of frame.") + break + elif remaining_data_len >= self.MAX_DATA_LEN: + body = pkt.body[:self.SEC_HDR_LEN] + bytearray(self.MAX_DATA_LEN) + pkt.body[-self.CRC_LEN:] + garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body) + pkt_parts.append(garbage_pkt) + data_accum_len += self.MAX_DATA_LEN + logger.info(f"Inserted garbage packet with {self.MAX_DATA_LEN} bytes of data. 
Accum data is " + f"now {data_accum_len}") + elif 0 < remaining_data_len < self.MAX_DATA_LEN: + body = pkt.body[:self.SEC_HDR_LEN] + bytearray(remaining_data_len) + pkt.body[-self.CRC_LEN:] + garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body) + pkt_parts.append(garbage_pkt) + data_accum_len += remaining_data_len + logger.info(f"Inserted garbage packet with {remaining_data_len} bytes of data. Accum data is " + f"now {data_accum_len}") + pkt_parts.append(pkt) data_accum_len += len(pkt.data) logger.debug(f"Adding {len(start_pkt.data)}. Accum data is now {data_accum_len}") From acdac981609a24a7126414cd34b1f71d6d2b4a3d Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 18 May 2022 09:23:03 -0700 Subject: [PATCH 10/26] Add util for creating missing packets. --- util/create_missing_packets.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 util/create_missing_packets.py diff --git a/util/create_missing_packets.py b/util/create_missing_packets.py new file mode 100644 index 0000000..0013ccd --- /dev/null +++ b/util/create_missing_packets.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +import argparse + +import emit.data_products as dp +# from emit_sds_l1a.ccsds_packet import ScienceDataPacket + +PRIMARY_HDR_LEN = 6 + +parser = argparse.ArgumentParser() +parser.add_argument("infile") +args = parser.parse_args() + +in_file = open(args.infile, "rb") + +out_file = f"{args.infile}_mpsc_16383" +out = open(out_file, "wb") + +cnt = 0 +while True: + try: + pkt = dp.CCSDSPacket(in_file) + if cnt % 16383 not in (0,1): + out.write(pkt.hdr_data) + out.write(pkt.body) + cnt += 1 + + except EOFError: + break + +out.close() From 264856cc129b4646fdbb21a353cae61d24ed2ed4 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 18 May 2022 09:46:17 -0700 Subject: [PATCH 11/26] Report on 'corrupt' frames now instead of 'truncated' frames since we are no longer truncating frames but instead adding in garbage packets. 
--- emit_sds_l1a/ccsds_packet.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py index 56dd005..3f17154 100644 --- a/emit_sds_l1a/ccsds_packet.py +++ b/emit_sds_l1a/ccsds_packet.py @@ -262,6 +262,7 @@ def __init__(self): "last_pkt_size": 0, "frames_read": 0, "truncated_frame_errors": 0, + "corrupt_frames": [], "invalid_pkt_errors": 0, "invalid_psc": [], "pkt_seq_errors": 0, @@ -308,10 +309,20 @@ def frame_read(self): def truncated_frame(self): self._stats["truncated_frame_errors"] += 1 + def corrupt_frame(self, frame): + name = "_".join([str(frame.dcid).zfill(10), frame.start_time.strftime("%Y%m%dt%H%M%S"), + str(frame.frame_count_in_acq).zfill(5), str(frame.planned_num_frames).zfill(5), + str(frame.acq_status), str(frame.processed_flag)]) + if name not in self._stats["corrupt_frames"]: + self._stats["corrupt_frames"].append(name) + def get_data_bytes_read(self): return self._stats["data_bytes_read"] def __str__(self): + self._stats["corrupt_frames"].sort() + corrupt_frames_str = "\n".join([i for i in self._stats["corrupt_frames"]]) + self._stats["missing_psc"].sort() missing_pscs_str = "\n".join([i for i in self._stats["missing_psc"]]) @@ -325,8 +336,10 @@ def __str__(self): f"Total CCSDS Packets Read: {self._stats['ccsds_pkts_read']}\n" f"Total bytes read: {self._stats['bytes_read']}\n\n" f"Bytes read since last index: {self._stats['bytes_read_since_last_index']}\n\n" - f"Total Frames Read: {self._stats['frames_read']}\n" - f"Truncated Frame Errors Encountered: {self._stats['truncated_frame_errors']}\n\n" + f"Total Frames Read: {self._stats['frames_read']}\n\n" + f"Corrupt Frame Errors Encountered: {len(self._stats['corrupt_frames'])}\n" + "Corrupt Frames:\n" + f"{corrupt_frames_str}\n\n" f"Invalid Packet Errors Encountered: {self._stats['invalid_pkt_errors']}\n" "Invalid Packet Values:\n" f"{invalid_pscs_str}\n\n" @@ -562,9 +575,6 @@ def _read_pkt_parts(self, 
start_pkt): pkt = self._read_next_packet() except PSCMismatchException as e: logger.warning(e) - # logger.warning("While reading packet parts, encountered PSC mismatch. Returning truncated frame.") - # self._stats.truncated_frame() - # return pkt_parts # Determine number of missing packets pkt = e.pkt @@ -572,7 +582,7 @@ def _read_pkt_parts(self, start_pkt): else self._cur_psc + pkt.CCSDS_PKT_SEC_COUNT_MOD - e.next_psc logger.info(f"While reading packet parts, encountered {num_missing} missing packets. Attempting to " - f"insert garbage packets") + f"insert garbage packets") # Only insert garbage packets if the remaining data length can accommodate it for i in range(num_missing): @@ -587,6 +597,7 @@ def _read_pkt_parts(self, start_pkt): data_accum_len += self.MAX_DATA_LEN logger.info(f"Inserted garbage packet with {self.MAX_DATA_LEN} bytes of data. Accum data is " f"now {data_accum_len}") + self._stats.corrupt_frame(frame) elif 0 < remaining_data_len < self.MAX_DATA_LEN: body = pkt.body[:self.SEC_HDR_LEN] + bytearray(remaining_data_len) + pkt.body[-self.CRC_LEN:] garbage_pkt = ScienceDataPacket(hdr_data=pkt.hdr_data, body=body) @@ -594,6 +605,7 @@ def _read_pkt_parts(self, start_pkt): data_accum_len += remaining_data_len logger.info(f"Inserted garbage packet with {remaining_data_len} bytes of data. Accum data is " f"now {data_accum_len}") + self._stats.corrupt_frame(frame) pkt_parts.append(pkt) data_accum_len += len(pkt.data) From 88aea0a291a9a9ebb8962a67d74b36f0a96d40b6 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 18 May 2022 10:08:17 -0700 Subject: [PATCH 12/26] Add InvalidFrameHeader exception for cases where frame header fails checksum. Drop these frames and continue on to next one. 
--- emit_sds_l1a/ccsds_packet.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/emit_sds_l1a/ccsds_packet.py b/emit_sds_l1a/ccsds_packet.py index 3f17154..e87c56c 100644 --- a/emit_sds_l1a/ccsds_packet.py +++ b/emit_sds_l1a/ccsds_packet.py @@ -24,6 +24,10 @@ def __init__(self, msg, pkt, next_psc, **kwargs): self.next_psc = next_psc +class InvalidFrameHeader(Exception): + pass + + class CCSDSPacket: """CCSDS Space Packet Object Provides an abstraction of a CCSDS Space Packet to simplify handling CCSDS @@ -377,6 +381,9 @@ def read_frame(self): pkt_parts = self._read_pkt_parts(start_pkt) logger.info(f"READ FRAME END") return self._reconstruct_frame(pkt_parts) + except InvalidFrameHeader as e: + logger.warning(e) + logger.info("Skipping invalid frame. Continuing to look for next frame header... ") except EOFError: logger.info( "Received EOFError when reading files. No more data to process" @@ -506,6 +513,8 @@ def _read_pkt_parts(self, start_pkt): logger.debug(f"Start packet says frame img size is {expected_frame_len}") # Handle case where frame data is less than current packet data size + # TODO: This block is probably never executed since the start packet usually contains only the header and + # nothing more if expected_frame_len < len(start_pkt.data): # Create a partial and then read in short frame partial_data = start_pkt.data[expected_frame_len:] @@ -539,8 +548,8 @@ def _read_pkt_parts(self, start_pkt): if frame.is_valid(): logger.info(f"Found valid frame checksum for frame: {frame}") else: - # TODO: Must be invalid, need to start over - pass + self._pkt_partial = None + raise InvalidFrameHeader(f"Frame failed checksum and is invalid: {frame}") if data_accum_len == expected_frame_len: From ced9c9d8182538a88b93aa8845ac32bda311e0ee Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 18 May 2022 12:41:20 -0700 Subject: [PATCH 13/26] Add is_empty flag to reassembly report. 
--- reassemble_raw_cube.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index 118d784..fdb7d93 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -147,6 +147,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, lc_increment = 2 if processed_flag == 1 and coadd_mode == 1 else 1 lc_lookup = None corrupt_lines = [] + is_empty = True for path in acq_data_paths: frame_num_str = os.path.basename(path).split(".")[0].split("_")[2] status = int(os.path.basename(path).split(".")[0].split("_")[4]) @@ -154,6 +155,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, logger.info(f"Adding frame {path}") # Non-cloudy frames if status in (0, 1): + is_empty = False # Write frame to output array frame = np.memmap(path, shape=(num_lines, int(hdr["bands"]), int(hdr["samples"])), dtype=np.int16, mode="r") output[line:line + num_lines, :, :] = frame[:, :, :].copy() @@ -245,6 +247,9 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, f.write(f"First frame number in acquisition: {str(start_index).zfill(5)}\n") f.write(f"Last frame number in acquisition: {str(stop_index).zfill(5)}\n\n") + # If all frames are cloudy, missing, or failed decompression, then indicate it in the report + f.write(f"Acquisition is empty (all frames are cloudy or missing): {is_empty}\n\n") + # Get timing info using loop in case the timing info is missing on the first frame. 
timing_info_found = False for i in range(start_index, stop_index + 1): From 1288d5ae5322db98e8a835a168171f9c4c65dbf9 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 18 May 2022 12:55:23 -0700 Subject: [PATCH 14/26] Fix PEP8 --- util/create_missing_packets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/create_missing_packets.py b/util/create_missing_packets.py index 0013ccd..9b14d5d 100644 --- a/util/create_missing_packets.py +++ b/util/create_missing_packets.py @@ -20,7 +20,7 @@ while True: try: pkt = dp.CCSDSPacket(in_file) - if cnt % 16383 not in (0,1): + if cnt % 16383 not in (0, 1): out.write(pkt.hdr_data) out.write(pkt.body) cnt += 1 From ec26b1ecb48d0601bab1803671cca1deb8a1896f Mon Sep 17 00:00:00 2001 From: pgbrodrick Date: Thu, 19 May 2022 12:33:03 -0700 Subject: [PATCH 15/26] update LICENSE and README --- LICENSE | 201 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 14 ++-- 2 files changed, 211 insertions(+), 4 deletions(-) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index b1f1f5b..55f3e15 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,21 @@ -# emit-sds-l1a +

+emit-sds-l1a +

## Description -Welcome to the emit-sds-l1a repository. This repository contains scripts for executing the various EMIT L1A PGEs. These PGEs include the following functions: + +_NOTE - See the **develop** branch - set as default - for the latest updates._ + +Welcome to the EMIT Level 1A science data system repository. To understand how this repository is linked to the rest of the emit-sds repositories, please see [the repository guide](https://github.com/emit-sds/emit-main/wiki/Repository-Guide). + +This repository contains scripts for executing the various EMIT L1A PGEs. These PGEs include the following functions: * Depacketizing science frames from CCSDS stream files * Depacketizing engineering data from CCSDS * Decompressing and reassembling raw image files in ENVI format * Reformatting BAD STO files into NetCDF files -To understand how this repository is linked to the rest of the emit-sds repositories, please see the [repository guide](https://github.com/emit-sds/emit-main/wiki/Repository-Guide). ## Dependency Requirements @@ -65,4 +71,4 @@ are decompression files. ``` python reformat_bad.py -``` \ No newline at end of file +``` From aa805bddbacf1fd884453e6818f6d63504b932da Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Sun, 22 May 2022 09:24:52 -0700 Subject: [PATCH 16/26] Change is_empty to has_min_proc_lines - look for minimum processable number of lines and return in report. 
--- .gitignore | 1 + reassemble_raw_cube.py | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index cb0f9e2..1706b30 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ emit_sds_l1a.egg-info __pycache__ .coverage +.DS_Store \ No newline at end of file diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index fdb7d93..f40b3f2 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -27,6 +27,7 @@ CORRUPT_FRAME_FLAG = -9997 CORRUPT_LINE_FLAG = -9996 CLOUDY_FRAME_FLAG = -9990 +MIN_PROC_LINES = 256 def get_utc_time_from_gps(gps_time): @@ -147,7 +148,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, lc_increment = 2 if processed_flag == 1 and coadd_mode == 1 else 1 lc_lookup = None corrupt_lines = [] - is_empty = True + num_processable_lines = 0 for path in acq_data_paths: frame_num_str = os.path.basename(path).split(".")[0].split("_")[2] status = int(os.path.basename(path).split(".")[0].split("_")[4]) @@ -155,7 +156,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, logger.info(f"Adding frame {path}") # Non-cloudy frames if status in (0, 1): - is_empty = False + num_processable_lines += num_lines # Write frame to output array frame = np.memmap(path, shape=(num_lines, int(hdr["bands"]), int(hdr["samples"])), dtype=np.int16, mode="r") output[line:line + num_lines, :, :] = frame[:, :, :].copy() @@ -247,8 +248,10 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, f.write(f"First frame number in acquisition: {str(start_index).zfill(5)}\n") f.write(f"Last frame number in acquisition: {str(stop_index).zfill(5)}\n\n") - # If all frames are cloudy, missing, or failed decompression, then indicate it in the report - f.write(f"Acquisition is empty (all frames are cloudy or missing): {is_empty}\n\n") + # Indicate if acquisition has more than the min number of processable lines + has_min_proc_lines = True 
if num_processable_lines > MIN_PROC_LINES else False + f.write(f"Acquisition has min processable lines (>{MIN_PROC_LINES} lines with valid data): " + f"{has_min_proc_lines}\n\n") # Get timing info using loop in case the timing info is missing on the first frame. timing_info_found = False From b0501f61c3890decfe8d792a11a854b2ed592b50 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 24 May 2022 08:21:59 -0700 Subject: [PATCH 17/26] Now just count the number of valid lines and return it in report. --- reassemble_raw_cube.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index f40b3f2..a2746ec 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -27,7 +27,6 @@ CORRUPT_FRAME_FLAG = -9997 CORRUPT_LINE_FLAG = -9996 CLOUDY_FRAME_FLAG = -9990 -MIN_PROC_LINES = 256 def get_utc_time_from_gps(gps_time): @@ -148,7 +147,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, lc_increment = 2 if processed_flag == 1 and coadd_mode == 1 else 1 lc_lookup = None corrupt_lines = [] - num_processable_lines = 0 + num_valid_lines = 0 for path in acq_data_paths: frame_num_str = os.path.basename(path).split(".")[0].split("_")[2] status = int(os.path.basename(path).split(".")[0].split("_")[4]) @@ -156,7 +155,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, logger.info(f"Adding frame {path}") # Non-cloudy frames if status in (0, 1): - num_processable_lines += num_lines + num_valid_lines += num_lines # Write frame to output array frame = np.memmap(path, shape=(num_lines, int(hdr["bands"]), int(hdr["samples"])), dtype=np.int16, mode="r") output[line:line + num_lines, :, :] = frame[:, :, :].copy() @@ -175,8 +174,10 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, # If lc_lookup is still unpopulated it means the entire frame had corrupt lines if lc_lookup is None: # This seems very 
unlikely as it would mean that all or most of the line counts were corrupt - logger.warning(f"Could not find incremental line counts in frame number {frame_num_str}.") + logger.warning(f"Could not find incremental line counts in frame number {frame_num_str}. " + f"Assuming that all lines are corrupt.") corrupt_lines += list(range(start_line_in_frame, start_line_in_frame + num_lines)) + num_valid_lines -= num_lines else: logger.info(f"Found a good line count in frame {frame_num_str} and generated a line count lookup.") @@ -201,6 +202,7 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, if lc_lookup is not None and lc_lookup[start_line_in_frame + i] != line_count: logger.warning(f"Found corrupt line at line number {start_line_in_frame + i}") corrupt_lines.append(start_line_in_frame + i) + num_valid_lines -= 1 # Cloudy frames if status in (4, 5): @@ -248,10 +250,8 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, f.write(f"First frame number in acquisition: {str(start_index).zfill(5)}\n") f.write(f"Last frame number in acquisition: {str(stop_index).zfill(5)}\n\n") - # Indicate if acquisition has more than the min number of processable lines - has_min_proc_lines = True if num_processable_lines > MIN_PROC_LINES else False - f.write(f"Acquisition has min processable lines (>{MIN_PROC_LINES} lines with valid data): " - f"{has_min_proc_lines}\n\n") + # Report on number of valid (not cloudy, missing, or corrupt) lines. + f.write(f"Number of lines with valid data (not cloudy, missing, or corrupt): {num_valid_lines}\n\n") # Get timing info using loop in case the timing info is missing on the first frame. timing_info_found = False From d2ea28ec148bde8747b39683c4a398af2018c97a Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 24 May 2022 10:27:04 -0700 Subject: [PATCH 18/26] Add fine time to timestamp in NetCDF file. 
--- reformat_bad.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/reformat_bad.py b/reformat_bad.py index 62d0e09..7ee826d 100644 --- a/reformat_bad.py +++ b/reformat_bad.py @@ -17,6 +17,8 @@ from ait.core import dmc +J2000_OFFSET = 0 + def lookup_header_indices(hdr): iss_pui_map = { @@ -164,24 +166,18 @@ def main(): # ind = lookup_header_indices(header) out_arr.sort(key=lambda x: x[ind["time_coarse"]]) - # TODO: How to convert to J2000 and where do I do that? - min_time = int(out_arr[0][ind["time_coarse"]]) max_time = int(out_arr[-1][ind["time_coarse"]]) # Create NetCDF file and write out selected fields fout = h5netcdf.File(output_path, "w") - # For now, we have both ephemeris and attitude with same time spacing. - # We could change that in the future if needed. - tspace = 1.0 - # tm = np.arange(min_time, max_time + 1, tspace) - tm = np.asarray([float(row[ind["time_coarse"]]) for row in out_arr], dtype=np.float) + # TODO: J2000 conversion + tm = np.asarray([float(row[ind["time_coarse"]]) + float(row[ind["time_fine"]] + J2000_OFFSET) + for row in out_arr], dtype=np.float) pos = np.zeros((tm.shape[0], 3)) vel = np.zeros((tm.shape[0], 3)) quat = np.zeros((tm.shape[0], 4)) - # TODO: Can I just assume that I have all the time values or do I need to do some checking and add in missing rows? - for i, row in enumerate(out_arr): pos[i, :] = (row[ind["pos_x"]], row[ind["pos_y"]], row[ind["pos_z"]]) vel[i, :] = (row[ind["vel_x"]], row[ind["vel_y"]], row[ind["vel_z"]]) @@ -201,8 +197,8 @@ def main(): t = g.create_variable("time_j2000", ('t',), data=tm) t.attrs["units"] = "s" t = g.create_variable("quaternion", ('t', 'quaternion'), data=quat) - t.attrs[ - "description"] = "Attitude quaternion, goes from spacecraft to ECI. The coefficient convention used has the real part in the first column." + t.attrs["description"] = "Attitude quaternion, goes from spacecraft to ECI. 
The coefficient convention used has " \ + "the real part in the first column." t.attrs["units"] = "dimensionless" fout.close() From 4bab9b38e455db3e266c75d16362c68f207c4dca Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 24 May 2022 15:32:15 -0700 Subject: [PATCH 19/26] Add instrument modes to frame object --- emit_sds_l1a/frame.py | 82 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 80 insertions(+), 2 deletions(-) diff --git a/emit_sds_l1a/frame.py b/emit_sds_l1a/frame.py index 39ac87d..5d044dd 100644 --- a/emit_sds_l1a/frame.py +++ b/emit_sds_l1a/frame.py @@ -15,6 +15,69 @@ NUM_32_BIT_UINTS = 4294967296 +INSTRUMENT_MODES = { + "LD": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x22, + 0xdd, 0xef]), + "LDN": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x00, + 0x00, 0xef]), + "LDN_vdda": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x0f, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x00, + 0x00, 0xef]), + "cold_img": + bytearray([0xc3, 0x34, 0x06, 
0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]), + "cold_img_vdda": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]), + "cold_img_mid": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xa0, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]), + "cold_img_mid_vdda": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xa0, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]), + "cold_img_slow": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xff, + 0x07, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 
0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]), + "warm_img": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x01, 0x02, 0x8c, 0x02, 0x02, 0x0e, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]), + "warm_img_short_integration": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x02, 0x8c, 0x02, 0x02, 0x07, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) +} + class Frame: @@ -36,6 +99,7 @@ def __init__(self, frame_binary): self.cloudy_flag = (self.hdr[32] & 0x04) >> 2 self.line_timestamp = int.from_bytes(self.hdr[36:40], byteorder="little", signed=False) self.line_count = int.from_bytes(self.hdr[44:52], byteorder="little", signed=False) + self.roic_register = self.hdr[108:174] self.frame_count_in_acq = int.from_bytes(self.hdr[810:818], byteorder="little", signed=False) self.solar_zenith = int.from_bytes(self.hdr[822:826], byteorder="little", signed=False) self.planned_num_frames = int.from_bytes(self.hdr[922:926], byteorder="little", signed=False) @@ -50,6 +114,9 @@ def __init__(self, frame_binary): self.start_time_gps = self._calc_start_time_gps() self.start_time = self._get_utc_time_from_gps(self.start_time_gps) + # Get the frame instrument_mode + self.instrument_mode = self._get_instrument_mode() + logger.debug(f"Initialized frame: {self}") def __repr__(self): @@ -60,8 +127,9 @@ def __repr__(self): repr 
+= "line_timestamp={} line_count={} ".format(self.line_timestamp, self.line_count) repr += "frame_count_in_acq={} solar_zenith={} planned_num_frames={} os_time_timestamp={} os_time={} ".format( self.frame_count_in_acq, self.solar_zenith, self.planned_num_frames, self.os_time_timestamp, self.os_time) - repr += " num_bands={} coadd_mode={} checksum={} os_time_utc={} start_time={}>".format( - self.num_bands, self.coadd_mode, self.frame_header_checksum, self.os_time_in_utc, self.start_time) + repr += " num_bands={} coadd_mode={} checksum={} os_time_utc={} start_time={} instrument_mode={}>".format( + self.num_bands, self.coadd_mode, self.frame_header_checksum, self.os_time_in_utc, self.start_time, + self.instrument_mode) return repr def _get_utc_time_from_gps(self, gps_time): @@ -80,6 +148,16 @@ def _calc_start_time_gps(self): nanoseconds_offset = (line_timestamp - self.os_time_timestamp) * 10 ** 4 return self.os_time + nanoseconds_offset + def _get_instrument_mode(self): + instrument_mode = None + for mode, values in INSTRUMENT_MODES.items(): + if self.roic_register == bytes(values): + instrument_mode = mode + break + if instrument_mode is None: + instrument_mode = "no_match" + return instrument_mode + def _compute_hdr_checksum(self): # Compute sum of header fields up to offset 1276 sum = 0 From c3244ab2132e8786ef162cfd65a7c8581a26e3a9 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Tue, 24 May 2022 16:16:48 -0700 Subject: [PATCH 20/26] Check frame instrument modes for consistency and add mode description to report. 
--- emit_sds_l1a/frame.py | 154 +++++++++++++++++++++++++---------------- reassemble_raw_cube.py | 32 +++++++-- 2 files changed, 121 insertions(+), 65 deletions(-) diff --git a/emit_sds_l1a/frame.py b/emit_sds_l1a/frame.py index 5d044dd..c498802 100644 --- a/emit_sds_l1a/frame.py +++ b/emit_sds_l1a/frame.py @@ -16,66 +16,96 @@ NUM_32_BIT_UINTS = 4294967296 INSTRUMENT_MODES = { - "LD": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, - 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x22, - 0xdd, 0xef]), - "LDN": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, - 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x00, - 0x00, 0xef]), - "LDN_vdda": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, - 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x0f, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x00, - 0x00, 0xef]), - "cold_img": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, - 0x01, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, - 0x20, 
0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, - 0x8c, 0xef]), - "cold_img_vdda": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, - 0x01, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, - 0x8c, 0xef]), - "cold_img_mid": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xa0, - 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, - 0x8c, 0xef]), - "cold_img_mid_vdda": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xa0, - 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, - 0x8c, 0xef]), - "cold_img_slow": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xff, - 0x07, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, - 0x8c, 0xef]), - "warm_img": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x01, 0x02, 0x8c, 0x02, 0x02, 0x0e, - 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, - 0x8c, 0xef]), - "warm_img_short_integration": - bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x02, 0x8c, 0x02, 0x02, 0x07, - 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, - 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, - 0x8c, 0xef]) + "LD": { + "desc": "Line driver mode standard pin stripe image", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x22, + 0xdd, 0xef]) + }, + "LDN": { + "desc": "Line driver mode noise measurement", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x00, + 0x00, 0xef]) + }, + "LDN_vdda": { + "desc": "Line driver mode vi test on vdda noise measurement", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x02, 0x00, 0x00, 0x00, 0x0f, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x00, + 0x00, 0xef]) + }, + "cold_img": { + "desc": "Nominal Cold FPA", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) + }, + "cold_img_vdda": { + "desc": "Nominal and vi test set on vdda Cold FPA", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x02, 0x8c, 0x02, 0x02, 0x3f, + 0x01, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) + }, + "cold_img_mid": { + "desc": "Gypsum Cold FPA", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xa0, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) + }, + "cold_img_mid_vdda": { + "desc": "Gypsum and vi test set on vdda Cold FPA", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xa0, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) + }, + "cold_img_slow": { + "desc": "Maximum integration time Cold FPA", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 0x8c, 0x02, 0x02, 0xff, + 0x07, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x00, 0x20, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xaa, 0xf5, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) + }, + "warm_img": { + "desc": "Nominal Warm FPA", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x01, 0x02, 0x8c, 0x02, 0x02, 0x0e, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) + }, + "warm_img_short_integration": { + "desc": "Minimum integration time Warm FPA", + "roic_values": + bytearray([0xc3, 0x34, 0x06, 0x00, 0x4d, 0x01, 0x01, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x02, 0x8c, 0x02, 0x02, 0x07, + 0x00, 0x02, 0x7c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x35, 0x20, 0x20, 0x20, + 0x20, 0x30, 0x38, 0x37, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x9a, 0xfe, 0x8c, 0x8c, 0x8c, + 0x8c, 0xef]) + } } @@ -116,6 +146,8 @@ def __init__(self, frame_binary): # Get the frame instrument_mode self.instrument_mode = self._get_instrument_mode() + self.instrument_mode_desc = "no_match" if 
self.instrument_mode == "no_match" else \ + INSTRUMENT_MODES[self.instrument_mode]["desc"] logger.debug(f"Initialized frame: {self}") @@ -151,7 +183,7 @@ def _calc_start_time_gps(self): def _get_instrument_mode(self): instrument_mode = None for mode, values in INSTRUMENT_MODES.items(): - if self.roic_register == bytes(values): + if self.roic_register == bytes(values["roic_values"]): instrument_mode = mode break if instrument_mode is None: diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index a2746ec..2990209 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -28,6 +28,18 @@ CORRUPT_LINE_FLAG = -9996 CLOUDY_FRAME_FLAG = -9990 +INSTRUMENT_MODE_DESCRIPTIONS = { + "LD": "Line driver mode standard pin stripe image", + "LDN": "Line driver mode noise measurement", + "LDN_vdda": "Line driver mode vi test on vdda noise measurement", + "cold_img": "Nominal Cold FPA", + "cold_img_vdda": "Nominal and vi test set on vdda Cold FPA", + "cold_img_mid": "Gypsum Cold FPA", + "cold_img_mid_vdda": "Gypsum and vi test set on vdda Cold FPA", + "cold_img_slow": "Maximum integration time Cold FPA", + "warm_img": "Nominal Warm FPA", + "warm_img_short_integration": "Minimum integration time Warm FPA" +} def get_utc_time_from_gps(gps_time): # Convert gps_time in nanoseconds to a timestamp in utc @@ -111,8 +123,8 @@ def get_utc_time_from_gps(gps_time): def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, stop_time, timing_info, processed_flag, - coadd_mode, num_bands, num_lines, image_dir, report_text, failed_decompression_list, - uncompressed_list, missing_frame_nums, logger): + coadd_mode, num_bands, num_lines, instrument_mode, image_dir, report_text, + failed_decompression_list, uncompressed_list, missing_frame_nums, logger): # Reassemble frames into ENVI image cube filling in missing and cloudy data with data flags # First create acquisition_id from frame start_time # Assume acquisitions are at least 1 second long @@ -245,7 +257,8 
@@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, f.write(f'Stop time: {stop_time}\n') f.write(f"Number of samples: 1280\n") f.write(f"Number of bands: {num_bands}\n") - f.write(f"Number of lines: {num_lines_in_acq}\n\n") + f.write(f"Number of lines: {num_lines_in_acq}\n") + f.write(f"Instrument mode description: {INSTRUMENT_MODE_DESCRIPTIONS[instrument_mode]} \n\n") f.write(f"First frame number in acquisition: {str(start_index).zfill(5)}\n") f.write(f"Last frame number in acquisition: {str(stop_index).zfill(5)}\n\n") @@ -399,6 +412,7 @@ def main(): num_lines_list = [] processed_flag_list = [] coadd_mode_list = [] + instrument_mode_list = [] failed_decompression_list = [] uncompressed_list = [] line_counts = [None] * int(expected_frame_num_str) @@ -459,7 +473,7 @@ def main(): # Get line count for each frame line_counts[frame_num_index] = uncomp_frame.line_count - # Get timing infor for each frame + # Get timing info for each frame timing_info[frame_num_index] = { "line_timestamp": uncomp_frame.line_timestamp, "os_time_timestamp": uncomp_frame.os_time_timestamp, @@ -468,6 +482,7 @@ def main(): num_bands_list.append(uncomp_frame.num_bands) processed_flag_list.append(uncomp_frame.processed_flag) + instrument_mode_list.append(uncomp_frame.instrument_mode) # Num lines is only 64 in unprocessed frames where data size is 1280 * bands * 64 * 2 size_of_64 = 1280 * uncomp_frame.num_bands * 64 * 2 if uncomp_frame.processed_flag == 0 and uncomp_frame.data_size == size_of_64: @@ -499,6 +514,13 @@ def main(): raise RuntimeError( f"Not all frames have the same number of lines. See list of num_lines: {num_lines_list}") + # Check all frames have same instrument mode + for i in range(len(instrument_mode_list)): + if instrument_mode_list[i] != instrument_mode_list[0]: + raise RuntimeError( + f"Not all frames have the same instrument mode. 
See list of instrument modes: {instrument_mode_list}") + instrument_mode = instrument_mode_list[0] + # Abort if any of the frames are not processed (i.e. they are from the raw partition) processed_flag_list.sort() for processed_flag in processed_flag_list: @@ -577,6 +599,7 @@ def main(): coadd_mode=coadd_mode, num_bands=num_bands, num_lines=num_lines, + instrument_mode=instrument_mode, image_dir=image_dir, report_text=report_txt, failed_decompression_list=failed_decompression_list, @@ -596,6 +619,7 @@ def main(): coadd_mode=coadd_mode, num_bands=num_bands, num_lines=num_lines, + instrument_mode=instrument_mode, image_dir=image_dir, report_text=report_txt, failed_decompression_list=failed_decompression_list, From 08d3e8c1068c06ab28881490de9583c73c7315b0 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 25 May 2022 09:01:54 -0700 Subject: [PATCH 21/26] Tweak 'no match' messaging. Add two lines for instrument mode and description. --- emit_sds_l1a/frame.py | 2 +- reassemble_raw_cube.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/emit_sds_l1a/frame.py b/emit_sds_l1a/frame.py index c498802..8fb16dc 100644 --- a/emit_sds_l1a/frame.py +++ b/emit_sds_l1a/frame.py @@ -146,7 +146,7 @@ def __init__(self, frame_binary): # Get the frame instrument_mode self.instrument_mode = self._get_instrument_mode() - self.instrument_mode_desc = "no_match" if self.instrument_mode == "no_match" else \ + self.instrument_mode_desc = "No match" if self.instrument_mode == "no_match" else \ INSTRUMENT_MODES[self.instrument_mode]["desc"] logger.debug(f"Initialized frame: {self}") diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index 2990209..047f3a7 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -38,7 +38,8 @@ "cold_img_mid_vdda": "Gypsum and vi test set on vdda Cold FPA", "cold_img_slow": "Maximum integration time Cold FPA", "warm_img": "Nominal Warm FPA", - "warm_img_short_integration": "Minimum integration time Warm FPA" 
+ "warm_img_short_integration": "Minimum integration time Warm FPA", + "no_match": "No match" } def get_utc_time_from_gps(gps_time): @@ -258,7 +259,8 @@ def reassemble_acquisition(acq_data_paths, start_index, stop_index, start_time, f.write(f"Number of samples: 1280\n") f.write(f"Number of bands: {num_bands}\n") f.write(f"Number of lines: {num_lines_in_acq}\n") - f.write(f"Instrument mode description: {INSTRUMENT_MODE_DESCRIPTIONS[instrument_mode]} \n\n") + f.write(f"Instrument mode: {instrument_mode}\n") + f.write(f"Instrument mode description: {INSTRUMENT_MODE_DESCRIPTIONS[instrument_mode]}\n\n") f.write(f"First frame number in acquisition: {str(start_index).zfill(5)}\n") f.write(f"Last frame number in acquisition: {str(stop_index).zfill(5)}\n\n") From 048b9ebd47399368f59d917f16871a39f74c82af Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Wed, 1 Jun 2022 13:12:59 -0700 Subject: [PATCH 22/26] Include J2000 offset of 630763148.8160727 --- reformat_bad.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/reformat_bad.py b/reformat_bad.py index 7ee826d..dec49b4 100644 --- a/reformat_bad.py +++ b/reformat_bad.py @@ -17,7 +17,7 @@ from ait.core import dmc -J2000_OFFSET = 0 +J2000_OFFSET = 630763148.8160727 def lookup_header_indices(hdr): @@ -171,8 +171,8 @@ def main(): # Create NetCDF file and write out selected fields fout = h5netcdf.File(output_path, "w") - # TODO: J2000 conversion - tm = np.asarray([float(row[ind["time_coarse"]]) + float(row[ind["time_fine"]] + J2000_OFFSET) + # Get GPS time from coarse and fine time, then subtract J2000 offset to get J2000 time + tm = np.asarray([float(row[ind["time_coarse"]]) + float(row[ind["time_fine"]] - J2000_OFFSET) for row in out_arr], dtype=np.float) pos = np.zeros((tm.shape[0], 3)) vel = np.zeros((tm.shape[0], 3)) From 05a062d6cf064967df4635ad1c2a6c59536e6717 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Mon, 6 Jun 2022 10:17:14 -0700 Subject: [PATCH 23/26] Fix J2000 syntax --- 
reformat_bad.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reformat_bad.py b/reformat_bad.py index dec49b4..bd16e17 100644 --- a/reformat_bad.py +++ b/reformat_bad.py @@ -172,7 +172,7 @@ def main(): # Create NetCDF file and write out selected fields fout = h5netcdf.File(output_path, "w") # Get GPS time from coarse and fine time, then subtract J2000 offset to get J2000 time - tm = np.asarray([float(row[ind["time_coarse"]]) + float(row[ind["time_fine"]] - J2000_OFFSET) + tm = np.asarray([float(row[ind["time_coarse"]]) + float(row[ind["time_fine"]]) - J2000_OFFSET for row in out_arr], dtype=np.float) pos = np.zeros((tm.shape[0], 3)) vel = np.zeros((tm.shape[0], 3)) From 62312e0f67d217a1426e348db5fa8d74293913bb Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Mon, 6 Jun 2022 10:36:29 -0700 Subject: [PATCH 24/26] Fix PEP 8 --- reassemble_raw_cube.py | 1 + 1 file changed, 1 insertion(+) diff --git a/reassemble_raw_cube.py b/reassemble_raw_cube.py index 047f3a7..b14b3fe 100644 --- a/reassemble_raw_cube.py +++ b/reassemble_raw_cube.py @@ -42,6 +42,7 @@ "no_match": "No match" } + def get_utc_time_from_gps(gps_time): # Convert gps_time in nanoseconds to a timestamp in utc d = dmc.GPS_Epoch + dt.timedelta(seconds=(gps_time / 10 ** 9)) From 33cde438b5779f692a4612062ae552529632c1ed Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Mon, 6 Jun 2022 10:37:07 -0700 Subject: [PATCH 25/26] Update version to 1.3.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0b6c2f0..93ca776 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ setuptools.setup( name="emit_sds_l1a", - version="1.2.0", + version="1.3.0", author="Winston Olson-Duvall", author_email="winston.olson-duvall@jpl.nasa.gov", description=""" From 8078f177a39a61b265e7611fe530f5afaaaabd18 Mon Sep 17 00:00:00 2001 From: Winston Olson-Duvall Date: Mon, 6 Jun 2022 10:38:14 -0700 Subject: [PATCH 26/26] Update change log --- 
CHANGELOG.md | 112 ++++++++++++++++++++++++++++----------------------- 1 file changed, 62 insertions(+), 50 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5cd4443..1b16124 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,85 +4,97 @@ All notable changes to this project will be documented in this file. Dates are d Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog). -#### [v1.2.0](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/compare/v1.1.0...v1.2.0) +#### [v1.3.0](https://github.com/emit-sds/emit-sds-l1a/compare/v1.2.0...v1.3.0) + +> 6 June 2022 + +- J2000 and BAD timing [`#2`](https://github.com/emit-sds/emit-sds-l1a/pull/2) +- Check number of valid lines and instrument mode [`#1`](https://github.com/emit-sds/emit-sds-l1a/pull/1) +- Pre ioc ccsds updates [`#21`](https://github.com/emit-sds/emit-sds-l1a/pull/21) +- update LICENSE and README [`ec26b1e`](https://github.com/emit-sds/emit-sds-l1a/commit/ec26b1ecb48d0601bab1803671cca1deb8a1896f) +- Check frame instrument modes for consistency and add mode description to report. [`c3244ab`](https://github.com/emit-sds/emit-sds-l1a/commit/c3244ab2132e8786ef162cfd65a7c8581a26e3a9) +- Add utils to check packet sizes below a threshold and to find all the sync words in a stream. [`975ce53`](https://github.com/emit-sds/emit-sds-l1a/commit/975ce53bc98e4cd7ae8c732e421616c3196d3472) + +#### [v1.2.0](https://github.com/emit-sds/emit-sds-l1a/compare/v1.1.0...v1.2.0) > 18 March 2022 -- Add raw waterfall script [`37f5a31`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/37f5a314e622432c0413d84caa0c0fd9183f6cc9) -- Fix PEP8 [`0ccb1f1`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/0ccb1f1cd9170e560227d7f59302f5640dd1d305) -- Incrememnt line count by 2 if from processed partition AND coadd is enabled. 
[`365ac46`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/365ac462f0ae1baa5f9e2f9bb48f449c127937ae) +- Merge develop to main for v1.2.0 [`#20`](https://github.com/emit-sds/emit-sds-l1a/pull/20) +- Add raw waterfall script [`37f5a31`](https://github.com/emit-sds/emit-sds-l1a/commit/37f5a314e622432c0413d84caa0c0fd9183f6cc9) +- Fix PEP8 [`0ccb1f1`](https://github.com/emit-sds/emit-sds-l1a/commit/0ccb1f1cd9170e560227d7f59302f5640dd1d305) +- Update change log [`b090e59`](https://github.com/emit-sds/emit-sds-l1a/commit/b090e597495bf3d1e5a360a8eaca2d4253ffb7b2) -#### [v1.1.0](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/compare/v1.0.0...v1.1.0) +#### [v1.1.0](https://github.com/emit-sds/emit-sds-l1a/compare/v1.0.0...v1.1.0) > 28 February 2022 -- Merge develop to main for v1.1.0 [`#19`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/19) -- CCSDS format change [`#18`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/18) -- CCSDS format change [`#17`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/17) -- Add pad_byte_flag and use it to get and set data property to include the garbage pad byte or not. [`eb182c3`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/eb182c36b5f34c70586799cee9d6e9327f67ddcb) -- Update changelog. [`b037da5`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/b037da57e32452c67848adf196b5181e9ee6d236) -- Update version to 1.1.0 [`401c442`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/401c4420350206989053e70fc876d693ae458d42) +- Merge develop to main for v1.1.0 [`#19`](https://github.com/emit-sds/emit-sds-l1a/pull/19) +- CCSDS format change [`#18`](https://github.com/emit-sds/emit-sds-l1a/pull/18) +- CCSDS format change [`#17`](https://github.com/emit-sds/emit-sds-l1a/pull/17) +- Add pad_byte_flag and use it to get and set data property to include the garbage pad byte or not. 
[`eb182c3`](https://github.com/emit-sds/emit-sds-l1a/commit/eb182c36b5f34c70586799cee9d6e9327f67ddcb) +- Update changelog. [`b037da5`](https://github.com/emit-sds/emit-sds-l1a/commit/b037da57e32452c67848adf196b5181e9ee6d236) +- Update version to 1.1.0 [`401c442`](https://github.com/emit-sds/emit-sds-l1a/commit/401c4420350206989053e70fc876d693ae458d42) -### [v1.0.0](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/compare/v0.4.1...v1.0.0) +### [v1.0.0](https://github.com/emit-sds/emit-sds-l1a/compare/v0.4.1...v1.0.0) > 9 February 2022 -- Merge develop to main for 1.0.0 [`#16`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/16) -- Add changelog [`b501999`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/b5019998362a6b8cba141073c29e8221544cc8c0) -- Use the out_arr to lookup the BAD coarse time instead of doing a range from min time to max time. [`c06ed6b`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/c06ed6bbeac405fc788053e2281d4f6f4f788a14) -- Change version to 1.0.0 [`824959d`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/824959d6dfd11bcb9f1c139701f32e800d39bb06) +- Merge develop to main for 1.0.0 [`#16`](https://github.com/emit-sds/emit-sds-l1a/pull/16) +- Add changelog [`b501999`](https://github.com/emit-sds/emit-sds-l1a/commit/b5019998362a6b8cba141073c29e8221544cc8c0) +- Use the out_arr to lookup the BAD coarse time instead of doing a range from min time to max time. 
[`c06ed6b`](https://github.com/emit-sds/emit-sds-l1a/commit/c06ed6bbeac405fc788053e2281d4f6f4f788a14) +- Change version to 1.0.0 [`824959d`](https://github.com/emit-sds/emit-sds-l1a/commit/824959d6dfd11bcb9f1c139701f32e800d39bb06) -#### [v0.4.1](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/compare/v0.4.0...v0.4.1) +#### [v0.4.1](https://github.com/emit-sds/emit-sds-l1a/compare/v0.4.0...v0.4.1) > 31 January 2022 -- Merge develop into main for v0.4.1 [`#15`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/15) -- Update readme to conform to PGE specification. [`d0e6d52`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/d0e6d5232626f91213223631974c1248bf65dd5b) -- Update readme [`cb36edd`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/cb36edd188d7e399352cb6fb73d0f23694ebebeb) -- Update changelog [`7e445ac`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/7e445ac4c5d27d52af2e6190fee7e61761dd6863) +- Merge develop into main for v0.4.1 [`#15`](https://github.com/emit-sds/emit-sds-l1a/pull/15) +- Update readme to conform to PGE specification. 
[`d0e6d52`](https://github.com/emit-sds/emit-sds-l1a/commit/d0e6d5232626f91213223631974c1248bf65dd5b) +- Update readme [`cb36edd`](https://github.com/emit-sds/emit-sds-l1a/commit/cb36edd188d7e399352cb6fb73d0f23694ebebeb) +- Update changelog [`7e445ac`](https://github.com/emit-sds/emit-sds-l1a/commit/7e445ac4c5d27d52af2e6190fee7e61761dd6863) -#### [v0.4.0](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/compare/v0.3.0...v0.4.0) +#### [v0.4.0](https://github.com/emit-sds/emit-sds-l1a/compare/v0.3.0...v0.4.0) > 20 January 2022 -- Merge develop for release 0.4.0 [`#14`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/14) -- Bad start stop [`#13`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/13) -- Bad reformatting [`#12`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/12) -- Line timestamp [`#11`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/11) -- Split acquisitions [`#10`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/10) -- Create initial framework of BAD reformatting. Read in STO file. Start to write out NetCDF file. [`dc47683`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/dc47683868950c6290e98aa55dd30086f4d0ea91) -- Use start time in frame filename instead of os time. Write report files per acquisition providing details (missing frames, cloudy frames, decompression failures) per acquisition instead of for the entire data collection. 
[`fa48545`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/fa48545c17f0cad8a2ce083cbb0da4e89524c1a0) -- Move reassembly code to its own function and iterate over frames using chunksize [`391c4d9`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/391c4d9d02279eb9fb92785933b4e5c6303acfc4) +- Merge develop for release 0.4.0 [`#14`](https://github.com/emit-sds/emit-sds-l1a/pull/14) +- Bad start stop [`#13`](https://github.com/emit-sds/emit-sds-l1a/pull/13) +- Bad reformatting [`#12`](https://github.com/emit-sds/emit-sds-l1a/pull/12) +- Line timestamp [`#11`](https://github.com/emit-sds/emit-sds-l1a/pull/11) +- Split acquisitions [`#10`](https://github.com/emit-sds/emit-sds-l1a/pull/10) +- Create initial framework of BAD reformatting. Read in STO file. Start to write out NetCDF file. [`dc47683`](https://github.com/emit-sds/emit-sds-l1a/commit/dc47683868950c6290e98aa55dd30086f4d0ea91) +- Use start time in frame filename instead of os time. Write report files per acquisition providing details (missing frames, cloudy frames, decompression failures) per acquisition instead of for the entire data collection. [`fa48545`](https://github.com/emit-sds/emit-sds-l1a/commit/fa48545c17f0cad8a2ce083cbb0da4e89524c1a0) +- Move reassembly code to its own function and iterate over frames using chunksize [`391c4d9`](https://github.com/emit-sds/emit-sds-l1a/commit/391c4d9d02279eb9fb92785933b4e5c6303acfc4) -#### [v0.3.0](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/compare/v0.2.0...v0.3.0) +#### [v0.3.0](https://github.com/emit-sds/emit-sds-l1a/compare/v0.2.0...v0.3.0) > 25 October 2021 -- Merge develop for version 0.3.0 [`#9`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/9) -- Frame span ccsds [`#8`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/8) -- Test fpa data [`#7`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/7) -- Add bytes read since last index into report. 
[`d1cd9f4`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/d1cd9f42b76e95d3b09b29ed8cd22254bc07269b) -- Use frame_count_pre and frame_count_post to distinguish those fields. Add a utility to rename a frame with different frame num and expected num of frames. [`3cb1d72`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/3cb1d727fffd25bea8b496f058ebdbf5d7393cac) -- Change course to coarse. [`8695c04`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/8695c04f22962bb53c296a46317c9dd6e60e8709) +- Merge develop for version 0.3.0 [`#9`](https://github.com/emit-sds/emit-sds-l1a/pull/9) +- Frame span ccsds [`#8`](https://github.com/emit-sds/emit-sds-l1a/pull/8) +- Test fpa data [`#7`](https://github.com/emit-sds/emit-sds-l1a/pull/7) +- Add bytes read since last index into report. [`d1cd9f4`](https://github.com/emit-sds/emit-sds-l1a/commit/d1cd9f42b76e95d3b09b29ed8cd22254bc07269b) +- Use frame_count_pre and frame_count_post to distinguish those fields. Add a utility to rename a frame with different frame num and expected num of frames. [`3cb1d72`](https://github.com/emit-sds/emit-sds-l1a/commit/3cb1d727fffd25bea8b496f058ebdbf5d7393cac) +- Change course to coarse. 
[`8695c04`](https://github.com/emit-sds/emit-sds-l1a/commit/8695c04f22962bb53c296a46317c9dd6e60e8709) -#### [v0.2.0](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/compare/v0.1.0...v0.2.0) +#### [v0.2.0](https://github.com/emit-sds/emit-sds-l1a/compare/v0.1.0...v0.2.0) > 31 August 2021 -- Merge develop into main for SDS Release 2 [`#6`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/6) -- Check Frame Report [`#5`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/5) -- Rawqa [`#4`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/4) -- Update ccsds depacketization for engineering and science [`#3`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/3) -- Frame updates based on latest frame header [`#2`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/2) -- Integrate decompression [`#1`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/pull/1) -- Add ccsds code (formerly in emit-l0-sds repo). [`c56bbdd`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/c56bbdd064aa3309a0b9efb6b9c837bf65af5403) -- Use CCSDSPacket base class ScienceDataPacket sub-class. Incorporate latest CCSDS format (w/ secondary header flags and crc). Update ccsds spoofer to match new format. [`f4dcc3d`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/f4dcc3d378ec86939d0aab845113ad0e956bafb2) -- When reading next packet, check for already seen packets (to handle overlap) and PSC mismatch. Ignore already seen packets. If there is a mismatch, then save next packet as a partial and raise exception. This has been tested against various ccsds streams to stress edge cases. 
[`8209902`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/820990223befd4d098aefc3af58ebaac0fa1ef3f) +- Merge develop into main for SDS Release 2 [`#6`](https://github.com/emit-sds/emit-sds-l1a/pull/6) +- Check Frame Report [`#5`](https://github.com/emit-sds/emit-sds-l1a/pull/5) +- Rawqa [`#4`](https://github.com/emit-sds/emit-sds-l1a/pull/4) +- Update ccsds depacketization for engineering and science [`#3`](https://github.com/emit-sds/emit-sds-l1a/pull/3) +- Frame updates based on latest frame header [`#2`](https://github.com/emit-sds/emit-sds-l1a/pull/2) +- Integrate decompression [`#1`](https://github.com/emit-sds/emit-sds-l1a/pull/1) +- Add ccsds code (formerly in emit-l0-sds repo). [`c56bbdd`](https://github.com/emit-sds/emit-sds-l1a/commit/c56bbdd064aa3309a0b9efb6b9c837bf65af5403) +- Use CCSDSPacket base class ScienceDataPacket sub-class. Incorporate latest CCSDS format (w/ secondary header flags and crc). Update ccsds spoofer to match new format. [`f4dcc3d`](https://github.com/emit-sds/emit-sds-l1a/commit/f4dcc3d378ec86939d0aab845113ad0e956bafb2) +- When reading next packet, check for already seen packets (to handle overlap) and PSC mismatch. Ignore already seen packets. If there is a mismatch, then save next packet as a partial and raise exception. This has been tested against various ccsds streams to stress edge cases. 
[`8209902`](https://github.com/emit-sds/emit-sds-l1a/commit/820990223befd4d098aefc3af58ebaac0fa1ef3f) #### v0.1.0 > 19 October 2020 -- Add initial run script for L1A engineering data [`ea20082`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/ea20082a93a4d7f2086232c009225ca79a8081e2) -- Update README.md [`0ca6e15`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/0ca6e15a94bf8f4f7e9f823bb3829fe0b12f066d) -- Initial commit [`6e2c913`](https://github.jpl.nasa.gov/emit-sds/emit-sds-l1a/commit/6e2c913b2dd184a3986fb10702cf3e706776b782) +- Add initial run script for L1A engineering data [`ea20082`](https://github.com/emit-sds/emit-sds-l1a/commit/ea20082a93a4d7f2086232c009225ca79a8081e2) +- Update README.md [`0ca6e15`](https://github.com/emit-sds/emit-sds-l1a/commit/0ca6e15a94bf8f4f7e9f823bb3829fe0b12f066d) +- Initial commit [`6e2c913`](https://github.com/emit-sds/emit-sds-l1a/commit/6e2c913b2dd184a3986fb10702cf3e706776b782)