diff --git a/CHANGES.md b/CHANGES.md
index 00890822..48faa0bf 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,17 @@
+1.17.0 (2023-04-24)
+-------------------
+- We now omit sources in the photometry stage that have an area larger than 1000 pixels as they lead to long
+  processing times and are almost invariably spurious.
+
+1.16.1 (2023-04-23)
+-------------------
+- Correction to aperture photometry. We were incorrectly using the radius instead of the diameter
+
+1.16.0 (2023-04-18)
+-------------------
+- Calibration frames are now associated with output data products rather than frames
+  so that we have more than one calibration data product produced per frame.
+
 1.15.2 (2023-04-12)
 -------------------
 - Fix to fpacking data when the image data array is None
diff --git a/banzai/lco.py b/banzai/lco.py
index 8efd4d2b..6f801318 100644
--- a/banzai/lco.py
+++ b/banzai/lco.py
@@ -141,7 +141,7 @@ def write(self, runtime_context):
             if runtime_context.post_to_archive:
                 archived_image_info = file_utils.post_to_ingester(data_product.file_buffer, self,
                                                                  data_product.filename, meta=data_product.meta)
-                self.frame_id = archived_image_info.get('frameid')
+                data_product.frame_id = archived_image_info.get('frameid')
 
             if not runtime_context.no_file_cache:
                 os.makedirs(self.get_output_directory(runtime_context), exist_ok=True)
@@ -170,7 +170,7 @@ def to_db_record(self, output_product):
                              'instrument_id': self.instrument.id,
                              'is_master': self.is_master,
                              'is_bad': self.is_bad,
-                             'frameid': self.frame_id,
+                             'frameid': output_product.frame_id,
                              'attributes': {}}
         for attribute in self.grouping_criteria:
             record_attributes['attributes'][attribute] = str(getattr(self, attribute))
diff --git a/banzai/photometry.py b/banzai/photometry.py
index bb8862e4..01a6c70f 100755
--- a/banzai/photometry.py
+++ b/banzai/photometry.py
@@ -102,6 +102,10 @@ def do_stage(self, image):
         # Do an initial source detection
         segmentation_map = detect_sources(convolved_data, self.threshold, npixels=self.min_area)
 
+        # We now remove any sources with an area > 1000 pixels because they are almost invariably spurious
+        segmentation_map.remove_labels(segmentation_map.labels[segmentation_map.areas > 1000])
+        segmentation_map.relabel_consecutive(1)
+
         logger.info('Deblending sources', image=image)
         # Note that nlevels here is DEBLEND_NTHRESH in source extractor which is 32 by default
         deblended_seg_map = deblend_sources(convolved_data, segmentation_map,
@@ -125,9 +129,9 @@ def do_stage(self, image):
                         'xy': catalog.covar_sigxy.value,
                         'background': catalog.background_mean})
 
-        for r in range(1, 7):
-            radius_arcsec = r / image.pixel_scale
-            sources[f'fluxaper{r}'], sources[f'fluxerr{r}'] = catalog.circular_photometry(radius_arcsec)
+        for d in range(1, 7):
+            radius_arcsec = d / image.pixel_scale / 2.0
+            sources[f'fluxaper{d}'], sources[f'fluxerr{d}'] = catalog.circular_photometry(radius_arcsec)
 
         for r in [0.25, 0.5, 0.75]:
             sources['fluxrad' + f'{r:.2f}'.lstrip("0.")] = catalog.fluxfrac_radius(r)
diff --git a/banzai/tests/test_frames.py b/banzai/tests/test_frames.py
index d3ceedb6..0317958b 100644
--- a/banzai/tests/test_frames.py
+++ b/banzai/tests/test_frames.py
@@ -63,9 +63,8 @@ def test_frame_to_db_record():
                                  'CONFMODE': 'full_frame'}, name='SCI')]
     test_frame = LCOCalibrationFrame(hdu_list=hdu_list, file_path='/foo/bar')
     test_frame.is_bad = False
-    test_frame.frame_id = 1234
     test_frame.instrument = MagicMock(id=7)
-    mock_data_product = MagicMock(filename='test.fits.fz', filepath='/path/to/test/test.fits.fz')
+    mock_data_product = MagicMock(filename='test.fits.fz', filepath='/path/to/test/test.fits.fz', frame_id=1234)
     db_record = test_frame.to_db_record(mock_data_product)
     assert type(db_record) == CalibrationImage
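
A note on the 1.16.1 aperture-photometry change above: the `fluxaper{d}` columns are computed for aperture diameters of 1-6 arcsec, so the value handed to `catalog.circular_photometry` is half of `d`, converted from arcseconds to pixels via the image pixel scale. The sketch below is a standalone illustration of that conversion only, not code from the banzai repository; the pixel-scale value is a made-up example.

```python
# Standalone illustration of the diameter -> radius conversion used in the
# photometry hunk above; not part of the banzai codebase.

def aperture_radius_pixels(diameter_arcsec: float, pixel_scale: float) -> float:
    """Convert an aperture diameter in arcseconds to a radius in pixels.

    pixel_scale is the plate scale in arcseconds per pixel.
    """
    return diameter_arcsec / pixel_scale / 2.0


if __name__ == '__main__':
    # Hypothetical 0.389 arcsec/pixel plate scale, chosen only for the example.
    for d in range(1, 7):
        print(f'fluxaper{d}: {aperture_radius_pixels(d, 0.389):.2f} pixel radius')
```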