From 790fa0d5a83780443107980e906781d905643913 Mon Sep 17 00:00:00 2001
From: Carlos Gomez
Date: Wed, 21 Jun 2017 15:20:20 +0200
Subject: [PATCH] Fixed quite a few bugs based on reports of VIP users

---
 vip/__init__.py            |   2 +-
 vip/negfc/mcmc_sampling.py |   2 +-
 vip/pca/pca_fullfr.py      |   2 +-
 vip/pca/pca_local.py       |  20 +++---
 vip/phot/detection.py      |  64 ++++-------
 vip/phot/fakecomp.py       |  26 ++++---
 vip/preproc/badframes.py   |   6 +-
 vip/preproc/recentering.py |   7 +-
 vip/preproc/rescaling.py   | 136 ++++++++++++++++++++++----------------
 vip/stats/distances.py     |  12 +---
 10 files changed, 136 insertions(+), 141 deletions(-)

diff --git a/vip/__init__.py b/vip/__init__.py
index f99f6611..956b7248 100644
--- a/vip/__init__.py
+++ b/vip/__init__.py
@@ -12,7 +12,7 @@
 from . import stats
 from . import var
 
-__version__ = "0.7.1"
+__version__ = "0.7.2"
 
 print("---------------------------------------------------")
 print(" oooooo oooo ooooo ooooooooo. ")
diff --git a/vip/negfc/mcmc_sampling.py b/vip/negfc/mcmc_sampling.py
index cfe2d46d..5d8a2cab 100644
--- a/vip/negfc/mcmc_sampling.py
+++ b/vip/negfc/mcmc_sampling.py
@@ -555,7 +555,7 @@ def mcmc_negfc_sampling(cubes, angs, psfn, ncomp, plsc, initial_state,
                 geom += 1
                 lastcheck = k
                 if display:
-                    showWalk(chain)
+                    show_walk_plot(chain)
 
                 if save:
                     import pickle
diff --git a/vip/pca/pca_fullfr.py b/vip/pca/pca_fullfr.py
index 5349c530..e5b3888f 100644
--- a/vip/pca/pca_fullfr.py
+++ b/vip/pca/pca_fullfr.py
@@ -710,7 +710,7 @@ def grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, step, inti, intf,
                                      annulus_width=annulus_width, verbose=False)
         if cube_ref is not None:
-            ref_lib = prepare_matrix(cube_ref, scaling, mask_center_px,
+            ref_lib, _ = prepare_matrix(cube_ref, scaling, mask_center_px,
                                      mode='annular', annulus_radius=ann_radius,
                                      annulus_width=annulus_width, verbose=False)
         else:
diff --git a/vip/pca/pca_local.py b/vip/pca/pca_local.py
index 8145d40a..2bb4b259 100644
--- a/vip/pca/pca_local.py
+++ b/vip/pca/pca_local.py
@@ -220,10 +220,14 @@ def pca_adi_annular(cube, angle_list, radius_int=0, fwhm=4, asize=3,
         Known size of the FWHM in pixels to be used. Default is 4.
     asize : int, optional
         The size of the annuli, in FWHM. Default is 3 which is recommended when
-        *quad* is True. Smaller values are valid when *quad* is False.
+        ``quad`` is True. Smaller values are valid when ``quad`` is False.
     delta_rot : int, optional
         Factor for increasing the parallactic angle threshold, expressed in
         FWHM. Default is 1 (excludes 1 FWHM on each side of the considered
         frame).
+        According to Absil+13, a slightly better contrast can be reached for the
+        innermost annuli if we consider a ``delta_rot`` condition as small as
+        0.1 lambda/D. This is because at very small separation, the effect of
+        speckle correlation is more significant than that of self-subtraction.
     ncomp : int or list or 1d numpy array, optional
         How many PCs are kept. If None it will be automatically determined.
         If a list is provided and it matches the number of annuli then a different
@@ -237,14 +241,14 @@ def pca_adi_annular(cube, angle_list, radius_int=0, fwhm=4, asize=3,
         in single-process mode.
     min_frames_pca : int, optional
         Minimum number of frames in the PCA reference library. Be careful, when
-        min_frames_pca <= ncomp, then for certain frames the subtracted low-rank
-        approximation is not optimal (getting a 10 PCs out of 2 frames is not
-        possible so the maximum number of PCs is used = 2). In practice the
-        resulting frame may be more noisy. It is recommended to decrease
-        delta_rot and have enough frames in the libraries to allow getting
-        ncomp PCs.
+        ``min_frames_pca`` <= ``ncomp``, then for certain frames the subtracted
+        low-rank approximation is not optimal (getting 10 PCs out of 2 frames
+        is not possible so the maximum number of PCs is used = 2). In practice
+        the resulting frame may be more noisy. It is recommended to decrease
+        ``delta_rot`` and have enough frames in the libraries to allow getting
+        ``ncomp`` PCs.
     tol : float, optional
-        Stopping criterion for choosing the number of PCs when *ncomp* is None.
+        Stopping criterion for choosing the number of PCs when ``ncomp`` is None.
         Lower values will lead to smaller residuals and more PCs.
     quad : {False, True}, bool optional
         If False the images are processed in annular fashion. If True, quadrants
diff --git a/vip/phot/detection.py b/vip/phot/detection.py
index 0d463df5..33a048f9 100644
--- a/vip/phot/detection.py
+++ b/vip/phot/detection.py
@@ -20,7 +20,6 @@
 from astropy.table import Table
 from astropy.modeling.models import Gaussian2D
 from astropy.modeling.fitting import LevMarLSQFitter
-from photutils.detection import findstars
 from skimage.feature import peak_local_max
 from ..var import (mask_circle, pp_subplots, get_square, frame_center,
                    frame_filter_gaussian2d, fit_2dgaussian)
@@ -28,6 +27,7 @@
 from .snr import snr_ss
 from .frame_analysis import frame_quick_report
 
+# TODO: Add the option of computing and thresholding an S/N map
 
 def detection(array, psf, bkg_sigma=1, mode='lpeaks', matched_filter=False,
               mask=True, snr_thresh=5, plot=True, debug=False,
@@ -47,7 +47,7 @@ def detection(array, psf, bkg_sigma=1, mode='lpeaks', matched_filter=False,
     bkg_sigma : float, optional
         The number of standard deviations above the clipped median for setting
         the background level.
-    mode : {'lpeaks','irafsf','daofind','log','dog'}, optional
+    mode : {'lpeaks','log','dog'}, optional
         Sets which algorithm to use. Each algorithm yields different results.
     matched_filter : {True, False}, bool optional
         Whether to correlate with the psf or not.
@@ -109,21 +109,6 @@ def detection(array, psf, bkg_sigma=1, mode='lpeaks', matched_filter=False,
     Gaussian fit is done on each of the candidates constraining the position on
     the subimage and the sigma of the fit. Finally the blobs are filtered based
     on their SNR.
-
-    Irafsf. starfind algorithm (IRAF software) searches images for local density
-    maxima that have a peak amplitude greater than threshold above the local
-    background and have a PSF full-width half-maximum similar to the input fwhm.
-    The objects' centroid, roundness (ellipticity), and sharpness are calculated
-    using image moments. Finally the blobs are filtered based on its SNR.
-
-    Daofind. Searches images for local density maxima that have a peak amplitude
-    greater than threshold (approximately; threshold is applied to a convolved
-    image) and have a size and shape similar to the defined 2D Gaussian kernel.
-    The Gaussian kernel is defined by the fwhm, ratio, theta, and sigma_radius
-    input parameters. Daofind finds the object centroid by fitting the the
-    marginal x and y 1D distributions of the Gaussian kernel to the marginal x
-    and y distributions of the input (unconvolved) data image. Finally the blobs
-    are filtered based on its SNR.
""" def check_blobs(array_padded, coords_temp, fwhm, debug): @@ -219,9 +204,7 @@ def print_abort(): print 'Sigma clipped stddev = {:.3f}'.format(stddev) print 'Background threshold = {:.3f}'.format(bkg_level) print - - #round = 0.3 # roundness constraint - + if mode=='lpeaks' or mode=='log' or mode=='dog': # Padding the image with zeros to avoid errors at the edges pad = 10 @@ -234,7 +217,8 @@ def print_abort(): if mode=='lpeaks': # Finding local peaks (can be done in the correlated frame) coords_temp = peak_local_max(frame_det, threshold_abs=bkg_level, - min_distance=fwhm, num_peaks=20) + min_distance=int(np.ceil(fwhm)), + num_peaks=20) coords = check_blobs(array_padded, coords_temp, fwhm, debug) coords = np.array(coords) if verbose and coords.shape[0]>0: print_coords(coords) @@ -264,28 +248,9 @@ def print_abort(): coords = check_blobs(array_padded, coords, fwhm, debug) coords = np.array(coords) if coords.shape[0]>0 and verbose: print_coords(coords) - - elif mode=='daofind': - tab = findstars.daofind(frame_det, fwhm=fwhm, threshold=bkg_level, - roundlo=-round,roundhi=round) - coords = np.transpose((np.array(tab['ycentroid']), - np.array(tab['xcentroid']))) - if verbose: - print 'Blobs found:', len(coords) - print tab['ycentroid','xcentroid','roundness1','roundness2','flux'] - - elif mode=='irafsf': - tab = findstars.irafstarfind(frame_det, fwhm=fwhm, - threshold=bkg_level, - roundlo=0, roundhi=round) - coords = np.transpose((np.array(tab['ycentroid']), - np.array(tab['xcentroid']))) - if verbose: - print 'Blobs found:', len(coords) - print tab['ycentroid','xcentroid','fwhm','flux','roundness'] else: - msg = 'Wrong mode. Available modes: lpeaks, daofind, irafsf, log, dog.' + msg = 'Wrong mode. Available modes: lpeaks, log, dog.' raise TypeError(msg) if coords.shape[0]==0: @@ -299,23 +264,20 @@ def print_abort(): yy_out = [] xx_out = [] snr_list = [] - px_list = [] - if mode=='lpeaks' or mode=='log' or mode=='dog': - xx -= pad - yy -= pad + xx -= pad + yy -= pad # Checking SNR for potential sources for i in range(yy.shape[0]): - y = yy[i] - x = xx[i] + y = yy[i] + x = xx[i] if verbose: print sep print 'X,Y = ({:.1f},{:.1f})'.format(x,y) subim = get_square(array, size=15, y=y, x=x) snr = snr_ss(array, (x,y), fwhm, False, verbose=False) snr_list.append(snr) - px_list.append(array[y,x]) - if snr >= snr_thresh and array[y,x]>0: + if snr >= snr_thresh: if plot: pp_subplots(subim) if verbose: @@ -332,8 +294,8 @@ def print_abort(): _ = frame_quick_report(array, fwhm, (x,y), verbose=verbose) if debug or full_output: - table = Table([yy.tolist(), xx.tolist(), px_list, snr_list], - names=('y','x','px_val','px_snr')) + table = Table([yy.tolist(), xx.tolist(), snr_list], + names=('y','x','px_snr')) table.sort('px_snr') yy_final = np.array(yy_final) xx_final = np.array(xx_final) diff --git a/vip/phot/fakecomp.py b/vip/phot/fakecomp.py index 064e80d0..879aeb07 100644 --- a/vip/phot/fakecomp.py +++ b/vip/phot/fakecomp.py @@ -155,7 +155,8 @@ def create_psf_template(array, size, fwhm=4, verbose=True, collapse='mean'): return psf_normd -def psf_norm(array, fwhm=4, size=None, threshold=None, mask_core=None): +def psf_norm(array, fwhm=4, size=None, threshold=None, mask_core=None, + full_output=True, verbose=True): """ Scales a PSF, so the 1*FWHM aperture flux equals 1. Parameters @@ -204,21 +205,26 @@ def psf_norm(array, fwhm=4, size=None, threshold=None, mask_core=None): # we check whether the flux is normalized and fix it if needed fwhm_aper = photutils.CircularAperture((frame_center(psfs)), fwhm/2.) 
-    fwhm_aper_phot = photutils.aperture_photometry(psfs, fwhm_aper, 
+    fwhm_aper_phot = photutils.aperture_photometry(psfs, fwhm_aper,
                                                    method='exact')
     fwhm_flux = np.array(fwhm_aper_phot['aperture_sum'])
+    if verbose:
+        print "Flux in 1xFWHM aperture: {}".format(fwhm_flux)
+
     if fwhm_flux>1.1 or fwhm_flux<0.9:
-        psf_norm = psfs/np.array(fwhm_aper_phot['aperture_sum'])
+        psf_norm_array = psfs/np.array(fwhm_aper_phot['aperture_sum'])
     else:
-        psf_norm = psfs
+        psf_norm_array = psfs
 
-    if threshold is not None:
-        psf_norm[np.where(psf_norm < threshold)] = 0
+    if threshold is not None:
+        psf_norm_array[np.where(psf_norm_array < threshold)] = 0
 
     if mask_core is not None:
-        psf_norm = get_circle(psf_norm, radius=mask_core)
-
-    return psf_norm
+        psf_norm_array = get_circle(psf_norm_array, radius=mask_core)
+
+    if full_output:
+        return psf_norm_array, fwhm_flux
+    else:
+        return psf_norm_array
diff --git a/vip/preproc/badframes.py b/vip/preproc/badframes.py
index fdcc5334..35b2eaa4 100644
--- a/vip/preproc/badframes.py
+++ b/vip/preproc/badframes.py
@@ -231,9 +231,9 @@ def cube_detect_badfr_correlation(array, frame_ref, dist='pearson',
     frame_ref : int
         Index of the frame that will be used as a reference. Must of course be
        a frame taken with good wavefront quality.
-    dist : {'sad','euclidean','mse','pearson','spearman','ssim'}, str optional
+    dist : {'sad','euclidean','mse','pearson','spearman'}, str optional
         One of the similarity or dissimilarity measures from function
-        vortex.stats.distances.cube_distance(). SSIM is recommended.
+        vip.stats.distances.cube_distance().
     percentile : int
         The percentage of frames that will be discarded.
     plot : {True, False}, bool optional
@@ -262,7 +262,7 @@ def cube_detect_badfr_correlation(array, frame_ref, dist='pearson',
     subarray = cube_crop_frames(array, min(30, array.shape[1]), verbose=False)
     distances = cube_distance(subarray, frame_ref, 'full', dist, plot=False)
-    if dist=='ssim' or dist=='pearson' or dist=='spearman':  # measures of correlation or similarity
+    if dist=='pearson' or dist=='spearman':  # measures of correlation or similarity
         minval = np.min(distances[~np.isnan(distances)])
         distances = np.nan_to_num(distances)
         distances[np.where(distances==0)] = minval
diff --git a/vip/preproc/recentering.py b/vip/preproc/recentering.py
index 06973c74..62f2eef0 100644
--- a/vip/preproc/recentering.py
+++ b/vip/preproc/recentering.py
@@ -406,6 +406,8 @@ def frame_center_radon(array, cropsize=101, hsize=0.4, step=0.01,
     optimy, optimx : float
         Values of the Y, X coordinates of the center of the frame based on the
         radon optimization.
+        If full_output is True, the radon cost function surface is returned
+        along with the optimal x and y.
 
     Notes
     -----
@@ -967,9 +969,10 @@ def cube_recenter_moffat2d_fit(array, pos_y, pos_x, fwhm=4, subi_size=5,
         pool = Pool(processes=int(nproc))
         res = pool.map(EFT,itt.izip(itt.repeat(_centroid_2dm_frame),
                                     itt.repeat(array), range(n_frames),
-                                    size.tolist(), pos_y.tolist(),
+                                    size.tolist(), pos_y.tolist(),
                                     pos_x.tolist(), star_approx_coords,
-                                    star_not_present, negative, fwhm))
+                                    star_not_present, itt.repeat(negative),
+                                    fwhm))
         res = np.array(res)
         pool.close()
         y = cy - res[:,0]
diff --git a/vip/preproc/rescaling.py b/vip/preproc/rescaling.py
index 820da1f9..c209bbbe 100644
--- a/vip/preproc/rescaling.py
+++ b/vip/preproc/rescaling.py
@@ -27,23 +27,21 @@
 from ..var import frame_center
 
 
-def frame_px_resampling(array, scale, imlib='opencv', interpolation='bicubic',
-                        scale_y=None, scale_x=None):
+def frame_px_resampling(array, scale, interpolation='bicubic',
+                        scale_y=None, scale_x=None, keep_odd=True,
+                        full_output=False):
     """ Resamples the pixels of a frame wrt the center, changing the size
-    of the frame. If 'scale' < 1 then the frame is downsampled and if
-    'scale' > 1 then its pixels are upsampled. Several modes of interpolation
-    are available.
-
+    of the frame. If 'scale' < 1 then the frame is downsampled and if
+    'scale' > 1 then its pixels are upsampled. Uses opencv for maximum speed.
+    Several modes of interpolation are available.
+
     Parameters
     ----------
     array : array_like
         Input frame, 2d array.
-    scale : float
+    scale : int or float
         Scale factor for upsampling or downsampling the frame.
-    imlib : {'opencv', 'skimage'}, str optional
-        Library used for image transformations. Opencv is faster than ndimage or
-        skimage.
-    interpolation : {'bicubic', 'bilinear', 'nearneig'}, optional
+    interpolation : {'bicubic', 'bilinear', 'nearneig', 'area'}, optional
         'nneighbor' stands for nearest-neighbor interpolation, 'bilinear' stands
         for bilinear interpolation, 'bicubic' for interpolation over 4x4 pixel
         neighborhood.
@@ -52,66 +50,96 @@ def frame_px_resampling(array, scale, imlib='opencv', interpolation='bicubic',
         option for interpolation of noisy astronomical images. Even with
         'bicubic' interpolation, opencv library is several orders of magnitude
         faster than python similar functions (e.g. ndimage).
-    scale_y : float
-        Scale factor for upsampling or downsampling the frame along y. If
+    scale_y : int or float, opt
+        Scale factor for upsampling or downsampling the frame along y only. If
         provided, it takes priority over the scale parameter.
-    scale_x : float
-        Scale factor for upsampling or downsampling the frame along x. If
+    scale_x : int or float, opt
+        Scale factor for upsampling or downsampling the frame along x only. If
         provided, it takes priority over the scale parameter.
-
+    keep_odd : bool, opt
+        Slightly modifies the scale factor so that the resampled frame keeps odd
+        y and x sizes. This keyword has no effect if the input array has even
+        dimensions.
+    full_output : bool, opt
+        If True, the scale factor used (slightly adjusted when keep_odd is True)
+        is also returned.
+
     Returns
     -------
     array_resc : array_like
         Output resampled frame.
+        If full_output is True, the scale factor(s) actually applied are
+        returned as well.
""" if not array.ndim == 2: - raise TypeError('Input array is not a frame or 2d array') + raise TypeError('Input array is not a frame or 2d array.') if scale_y is None: scale_y = scale if scale_x is None: scale_x = scale - if imlib not in ['skimage', 'opencv']: - raise ValueError('Imlib not recognized, try opencv or ndimage') - - if imlib == 'skimage' or no_opencv: - if interpolation == 'bilinear': - order = 1 - elif interpolation == 'bicubic': - order = 3 - elif interpolation == 'nearneig': - order = 0 - else: - raise TypeError('Interpolation method not recognized.') - - min_val = np.min(array) - im_temp = array - min_val - max_val = np.max(im_temp) - im_temp /= max_val - - array_resc = rescale(im_temp, scale=(scale_y, scale_x), order=order) - - array_resc *= max_val - array_resc += min_val - + ny = array.shape[0] + nx = array.shape[1] + + if keep_odd: + if ny % 2 != 0: # original y size is odd? + new_ny = ny * scale_y # check final size + if not new_ny > 0.5: + raise ValueError( + "scale_y is too small; resulting array would be 0 size.") + # final size is decimal? => make it integer + if new_ny % 1 > 0.5: + scale_y = (new_ny + 1 - (new_ny % 1)) / ny + elif new_ny % 1 > 0: + scale_y = (new_ny - (new_ny % 1)) / ny + new_ny = ny * scale_y + # final size is even? + if new_ny % 2 == 0: + if scale_y > 1: # if upscaling => go to closest odd with even-1 + scale_y = float(new_ny - 1) / ny + else: + scale_y = float(new_ny + 1) / ny + # if downscaling => go to closest odd with even+1 (reversible) + + if nx % 2 != 0: # original x size is odd? + new_nx = nx * scale_x # check final size + if not new_nx > 0.5: + raise ValueError( + "scale_x is too small; resulting array would be 0 size.") + # final size is decimal? => make it integer + if new_nx % 1 > 0.5: + scale_x = (new_nx + 1 - (new_nx % 1)) / nx + elif new_nx % 1 > 0: + scale_x = (new_nx - (new_nx % 1)) / nx + new_nx = nx * scale_x + # final size is even? + if new_nx % 2 == 0: + if scale_x > 1: # if upscaling => go to closest odd with even-1 + scale_x = float(new_nx - 1) / nx + else: # if downscaling => go to closest odd with even+1 (reversible) + scale_x = float(new_nx + 1) / nx + + if interpolation == 'bilinear': + intp = cv2.INTER_LINEAR + elif interpolation == 'bicubic': + intp = cv2.INTER_CUBIC + elif interpolation == 'nearneig': + intp = cv2.INTER_NEAREST + elif interpolation == 'area': + intp = cv2.INTER_AREA else: - if interpolation == 'bilinear': - intp = cv2.INTER_LINEAR - elif interpolation == 'bicubic': - intp= cv2.INTER_CUBIC - elif interpolation == 'nearneig': - intp = cv2.INTER_NEAREST - else: - raise TypeError('Interpolation method not recognized.') + raise TypeError('The interpolation method is not recognized.') - array_resc = cv2.resize(array.astype(np.float32), (0,0), fx=scale_x, - fy=scale_y, interpolation=intp) + array_resc = cv2.resize(array.astype(np.float32), (0, 0), fx=scale_x, + fy=scale_y, interpolation=intp) - # TODO: For conservation of flux (e.g. in aperture 1xFWHM), what to do when - # there is a different scaling factor in x and y? 
- if scale_y==scale_x: - array_resc /= scale ** 2 + array_resc /= (scale_y * scale_x) - return array_resc + if full_output: + if scale_y == scale_x: + scale = scale_y + else: + scale = (scale_y, scale_x) + return array_resc, scale + else: + return array_resc diff --git a/vip/stats/distances.py b/vip/stats/distances.py index 8ef4f5ef..21da568c 100644 --- a/vip/stats/distances.py +++ b/vip/stats/distances.py @@ -13,7 +13,6 @@ import numpy as np import scipy.stats from matplotlib import pyplot as plt -from skimage.measure import structural_similarity as ssim from ..var import get_annulus @@ -34,11 +33,7 @@ def cube_distance(array, frame, mode='full', dist='sad', inradius=None, The Spearman and Pearson correlation coefficients, vary between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. - The Structural Similarity Index was proposed by Wang et al. 2004. - (http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf) - SSIM varies between -1 and 1, where 1 means perfect similarity. SSIM - attempts to model the perceived change in the structural information of the - image. + Parameters ---------- @@ -48,7 +43,7 @@ def cube_distance(array, frame, mode='full', dist='sad', inradius=None, Reference frame in the cube. mode : {'full','annulus'}, string optional Whether to use the full frames or a centered annulus. - dist : {'sad','euclidean','mse','pearson','spearman','ssim'}, str optional + dist : {'sad','euclidean','mse','pearson','spearman'}, str optional Which criterion to use. inradius : None or int, optional The inner radius when mode is 'annulus'. @@ -92,9 +87,6 @@ def cube_distance(array, frame, mode='full', dist='sad', inradius=None, elif dist=='spearman': spear, _ = scipy.stats.spearmanr(frame_ref.ravel(), framei.ravel()) lista.append(spear) - elif dist=='ssim': - lista.append(ssim(frame_ref, framei, win_size=7, - dynamic_range=frame_ref.max() - frame_ref.min())) else: raise ValueError('Distance not recognized') lista = np.array(lista)
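Usage sketch for the two public signatures this patch changes (a minimal example,
assuming the patched vip package is importable under Python 2, which this code base
still targets; the arrays and numbers below are synthetic stand-ins, not VIP data):

    import numpy as np
    from vip.phot.fakecomp import psf_norm
    from vip.preproc.rescaling import frame_px_resampling

    # stand-in for a measured, centered PSF frame
    psf = np.random.rand(39, 39)

    # psf_norm now also returns the flux measured in the 1xFWHM aperture
    # when full_output=True
    psf_n, flux = psf_norm(psf, fwhm=4, full_output=True, verbose=False)

    # frame_px_resampling can keep the output frame odd-sized (keep_odd=True)
    # and report the scale factor that was actually applied (full_output=True)
    frame = np.random.rand(101, 101)
    resc, used_scale = frame_px_resampling(frame, scale=0.55, keep_odd=True,
                                           full_output=True)

Since keep_odd may nudge the requested scale, downstream code should rely on the
returned used_scale rather than the requested value when undoing the resampling.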