Skip to content

Commit

Permalink
John Marriner's final (fingers crossed) update
Browse files Browse the repository at this point in the history
  • Loading branch information
ericmorganson committed Jun 22, 2016
1 parent 58eb3dc commit 22b2cef
Show file tree
Hide file tree
Showing 2 changed files with 201 additions and 65 deletions.
149 changes: 117 additions & 32 deletions python/pixcorrect/fix_columns.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
from pixcorrect.corr_util import logger, do_once, items_must_match
from despyfits.DESImage import DESDataImage, DESImage, DESBPMImage
from pixcorrect.PixCorrectDriver import PixCorrectImStep
from pixcorrect.clippedMean import clippedMean
#from pixcorrect.clippedMean import clippedMean
from despyfits import maskbits

# Which section of the config file to read for this step
Expand All @@ -36,21 +36,71 @@ def __str__(self):
class FixColumns(PixCorrectImStep):
description = "Fix the correctable columns"
step_name = config_section

CORR = maskbits.BPMDEF_CORR # BPM flag for correctable pixels
REJECT = maskbits.BADPIX_BPM + \
maskbits.BADPIX_SATURATE +\
maskbits.BADPIX_BADAMP + \
maskbits.BADPIX_SUSPECT # bitmask for comparison pixels
# BPM flag for correctable pixels
CORR = maskbits.BPMDEF_CORR
# These BPM flags may be present for correctable pixels
BPMOK = CORR | maskbits.BPMDEF_BIAS_COL | maskbits.BPMDEF_FUNKY_COL | maskbits.BPMDEF_SUSPECT
BPMOK |= maskbits.BPMDEF_NEAREDGE
# These image mask flags indicate bad pixels in the reference columns
#REJECT = maskbits.BADPIX_BPM + \
# maskbits.BADPIX_SATURATE +\
# maskbits.BADPIX_BADAMP + \
# maskbits.BADPIX_SUSPECT # bitmask for comparison pixels
MINIMUM_PIXELS = 100 # Smallest number of pixels in column to correct
CLIP_SIGMA = 4 # Rejection threshold for mean statistics

    @classmethod
    def _clippedLine(cls,y,z,doslope,nSigma):
        """
        Fit a straight line z = mean + slope*(y - <y>) to data, iteratively
        clipping outliers more than nSigma robust-sigmas from the fit.

        It is implicitly assumed that the slope is small: the initial robust
        scatter is computed for slope=0 and outliers are first clipped with
        that assumption, which avoids outliers biasing the slope excessively.
        If doslope is true, subsequent outlier clipping uses the fitted slope;
        otherwise it does not. The slope is always calculated, and the mean
        value does not depend on the slope result (except insofar as the slope
        affects which points get clipped).

        Parameters:
        - `y`: 1-d array of abscissa values (row ordinals)
        - `z`: 1-d array of data values, same length as y
        - `doslope`: if True, include the fitted slope when clipping outliers
        - `nSigma`: clipping threshold in units of the robust sigma

        Returns (mean, slope, var, n):
        - `mean`: clipped mean of z
        - `slope`: fitted slope of z vs y
        - `var`: RMS of the residuals of the surviving points (despite the
          name, this is sqrt(mean square residual), not a variance)
        - `n`: number of points surviving the clipping
        On failure (fewer than MINIMUM_PIXELS survive) returns (0.0, 0.0, -1.0, 0);
        callers detect this via var <= 0.
        """

        # Robust sigma estimate from percentiles of z.
        # 1.349 converts a full interquartile range to a Gaussian sigma.
        # NOTE(review): p50-p25 is only half the IQR, so this sigma is ~half
        # the usual robust estimate for a symmetric distribution — confirm
        # this is intentional.
        iqdSigma = 1.349
        p25 = np.percentile(z, 25.)
        p50 = np.percentile(z, 50.)
        sigma = (p50-p25)/iqdSigma
        # The clipping half-width is fixed from this initial estimate and is
        # deliberately NOT updated from the fit residuals on later passes.
        err = nSigma*sigma
        lower = p50 - err
        upper = p50 + err
        # mask is True for rejected points; it only ever grows.
        mask = np.bitwise_or(z<lower,z>upper)
        #nrej is number of points rejected in the current pass.
        #Set to arbitrary number>0 for first pass
        nrej = 100
        while nrej:
            # Surviving points only. y[:] / z[:] make copies, so the in-place
            # subtraction of avey below cannot corrupt the caller's arrays.
            yp = y[:][~mask]
            zp = z[:][~mask]
            n = np.size(yp)
            if n<cls.MINIMUM_PIXELS:
                # Too few points left for a trustworthy fit.
                return 0.0, 0.0, -1.0, 0
            avey = np.sum(yp)/n
            yp -= avey
            mean = np.sum(zp)/n
            # Least-squares slope about the mean abscissa.
            slope = np.sum(yp*zp)/np.sum(yp*yp)
            if doslope:
                res = z - slope*(y-avey) - mean
            else:
                res = z - mean
            # Clip against the fixed threshold; count only newly rejected
            # points so the loop terminates when the clip set stabilizes.
            rej = np.absolute(res) > err
            nrej = np.sum(rej & ~mask)
            mask |= rej

        # RMS of the residuals of the surviving points, using res and n from
        # the final pass (valid because the final pass rejected nothing new).
        gres = res[:][~mask]
        var = np.sqrt(np.sum(gres*gres)/n)
        return mean, slope, var, n

@classmethod
def _valid_pix(cls, image, bpm, icol):
"""
Return boolean array saying which pixels in column icol are useful for sky stats
"""
use = (bpm.mask[:,icol] & cls.REJECT)==0
use = (bpm.mask[:,icol] & ~maskbits.BADPIX_NEAREDGE) == 0
use &= ~np.isinf(image.data[:,icol])
use &= ~np.isnan(image.data[:,icol])
return use
Expand All @@ -68,18 +118,21 @@ def __call__(cls, image, bpm):
- `image`: DESImage to fix.
- `bpm`: DESBPMImage for this CCD
"""

#Modified 6/19/2016
#Use clipLine to fit column slope
#Change NEIGHBORS from 10 to 6
#Change VAR_TOLERANCE from 0.5 to 0.25
#Remove lower limit on correction
#Change mask bit usage
# -- correct all correctable pixels, but use only "good" pixels to compute correction

logger.info('Fixing columns')

NEIGHBORS = 10 # Number of comparison columns to seek
RANGE = 20 # Farthest away to look for comparison columns
NEIGHBORS = 6 # Number of comparison columns to seek
RANGE = 12 # Farthest away to look for comparison columns
# Largest allowable fractional difference in variance between the fixable column
# and its neighbors:
VAR_TOLERANCE = 0.5
# John Marriner would not apply correction unless it was this much larger
# than the statistical noise in the correction:
MINIMUM_SIGNIFICANCE = 5

VAR_TOLERANCE = 0.25

if image.mask is None:
raise FixColumnsError('Input image does not have mask')
Expand All @@ -92,23 +145,42 @@ def __call__(cls, image, bpm):
# A "fixable" column will have CORR flag set at either start or end of column
fixable = np.where(np.logical_or(bpm.mask[0,:] & cls.CORR,
bpm.mask[-1,:] & cls.CORR))[0]
#Just an array that gives the ordinal number of each row in the column (for fitting the slope)
colord = np.arange(4096)
#Don't use slope in clipLine
doslope = False

for icol in fixable:
# The fixable column is a hot bias pixel type if COL_BIAS is set
#hotbias = np.logical_or(bpm.mask[0,icol] & maskbits.BPMDEF_BIAS_COL,
# bpm.mask[-1,icol] & maskbits.BPMDEF_BIAS_COL)
# Which pixels in the column are fixable?
# They need to have only the CORR flag set, and be finite, and not saturated.
# They need to have the CORR flag set (other BPM bits specified by BPMOK are allowed)
# Checking for valid NAN's or INF's should no longer be necessary, but is harmless
# Also, we do not use any bad pixels.
coldata = image.data[:,icol]
colbpm = bpm.mask[:,icol]
ignore = np.logical_or( colbpm & ~cls.CORR, np.isinf(coldata))
#Pixels that can't be corrected
ignore = np.logical_or(colbpm & ~cls.BPMOK, np.isinf(coldata))
ignore |= np.isnan(coldata)
ignore |= image.mask[:,icol] & maskbits.BADPIX_SATURATE
corr_rows = np.logical_and(colbpm & cls.CORR, ~ignore)
#Additional pixels that are not used to compute the average
ignore |= image.mask[:,icol] & ~(maskbits.BADPIX_BPM | maskbits.BADPIX_NEAREDGE)
use_rows = np.logical_and(colbpm & cls.CORR, ~ignore)

if np.count_nonzero(use_rows) < cls.MINIMUM_PIXELS:
logger.info("Not enough pixels to fix column {:d}".format(icol))
continue

# Get a robust estimate of mean level in target column
col_mean, col_var, col_n = clippedMean(coldata[use_rows], cls.CLIP_SIGMA)

# Get a robust estimate of mean level and slope in target column
#col_mean, col_var, col_n = clippedMean(coldata[use_rows], cls.CLIP_SIGMA)
y = colord[use_rows]
z = coldata[use_rows]
col_mean, col_slope, col_var, col_n = cls._clippedLine(y,z,doslope,cls.CLIP_SIGMA)
if col_var <= 0.0:
logger.info("Error in clipped line fit for column {:d}".format(icol))
continue

# Now want to collect stats on up to NEIGHBORS nearby columns
norm_stats = []
ilow = icol
Expand All @@ -125,8 +197,14 @@ def __call__(cls, image, bpm):
use &= use_rows
if np.count_nonzero(use) < cls.MINIMUM_PIXELS:
continue
norm_stats.append(clippedMean(image.data[:,ilow][use],cls.CLIP_SIGMA))
y = colord[use]
z = image.data[:,ilow][use]
ref_col,ref_slope,ref_var, ref_n = cls._clippedLine(y,z,doslope,cls.CLIP_SIGMA)
if ref_var<=0.0: continue
norm_stats.append([ref_col,ref_slope,ref_var,ref_n])
#norm_stats.append(clippedMean(image.data[:,ilow][use],cls.CLIP_SIGMA))
break

while ihigh<high_limit:
# get stats from next useful column to right:
ihigh+=1
Expand All @@ -136,7 +214,12 @@ def __call__(cls, image, bpm):
use &= use_rows
if np.count_nonzero(use) < cls.MINIMUM_PIXELS:
continue
norm_stats.append(clippedMean(image.data[:,ihigh][use],cls.CLIP_SIGMA))
y = colord[use]
z = image.data[:,ihigh][use]
ref_col,ref_slope,ref_var,ref_n = cls._clippedLine(y,z,doslope,cls.CLIP_SIGMA)
if ref_var<=0.0: continue
norm_stats.append([ref_col,ref_slope,ref_var,ref_n])

break
if len(norm_stats) < NEIGHBORS:
# Don't fix the column if we did not get comparison columns
Expand All @@ -147,23 +230,25 @@ def __call__(cls, image, bpm):
mean = np.array([i[0] for i in norm_stats])
var = np.array([i[1] for i in norm_stats])
wt = np.array([i[2] for i in norm_stats]) / var

slope = np.array([i[1] for i in norm_stats])
var = np.array([i[2] for i in norm_stats])
wt = np.array([i[3] for i in norm_stats]) / var
nc = np.array([i[3] for i in norm_stats])
# Do not apply correction if the target column's variance is much
# different from the comparison columns
norm_var = np.sum(var*wt)/np.sum(wt)
if np.abs(col_var - norm_var) > VAR_TOLERANCE * norm_var:
logger.info('Too much variance to fix col {:d}'.format(icol))
continue

correction = np.sum(mean*wt)/np.sum(wt) - col_mean
norm_mean = np.sum(mean*wt)/np.sum(wt)
correction = norm_mean - col_mean
correction_var = 1./np.sum(wt) + col_var/col_n

# Apply correction:
image.data[:,icol][use_rows] += correction
image.data[:,icol][corr_rows] += correction
# Promote the corrected pixels from useless to just imperfect:
image.mask[:,icol][use_rows] &= ~maskbits.BADPIX_BPM
image.mask[:,icol][use_rows] |= maskbits.BADPIX_FIXED
print 'correction:::',correction ##
image.mask[:,icol][corr_rows] &= ~maskbits.BADPIX_BPM
image.mask[:,icol][corr_rows] |= maskbits.BADPIX_FIXED
logger.info('Corrected column {:d} by {:f}'.format(icol,float(correction)))

if bpm.sourcefile is None:
Expand Down
117 changes: 84 additions & 33 deletions python/pixcorrect/make_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,30 @@ def __call__(cls, image, bpm_im, saturate, clear):
logger.warning('Null operation requested in make_mask')
return ret_code

#Flag saturated pixels first, if requested
if saturate:
# Check for header keyword of whether it's been done
kw = 'DESSAT'
if kw in image.header.keys() and not clear:
logger.warning('Skipping saturation check ('+kw+' already set)')
else:
logger.info('Flagging saturated pixels')
nsat = 0
for amp in decaminfo.amps:
sec = section2slice(image['DATASEC'+amp])
sat = image['SATURAT'+amp]
satpix = image.data[sec]>=sat
image.mask[sec][satpix] |= BADPIX_SATURATE
nsat += np.count_nonzero(satpix)

image.write_key(kw, time.asctime(time.localtime()),
comment = 'Flag saturated pixels')
image.write_key('NSATPIX',nsat,
comment='Number of saturated pixels')

logger.debug('Finished flagging saturated pixels')

#Now fill in BPM
if bpm_im is not None:
# Check for header keyword of whether it's been done
kw = 'DESBPM'
Expand All @@ -55,7 +79,16 @@ def __call__(cls, image, bpm_im, saturate, clear):
except:
return 1

# Mark the unusable data
# Enable the following kluge code to work with old style BPM's
#Replace CORR with BIAS_COL
#bitmask = BPMDEF_CORR
#mark = (bpm_im.mask & bitmask) != 0
#bpm_im.mask[mark] |= BPMDEF_BIAS_COL
# Clear correctable bits from BPM if any are already set
#bpm_im.mask -= (bpm_im.mask & BPMDEF_CORR)
#====End kluge

# Map the following BPM bits to BADPIX_BPM in the image mask
bitmask = BPMDEF_FLAT_MIN | \
BPMDEF_FLAT_MAX | \
BPMDEF_FLAT_MASK | \
Expand All @@ -65,27 +98,67 @@ def __call__(cls, image, bpm_im, saturate, clear):
BPMDEF_BIAS_COL | \
BPMDEF_FUNKY_COL | \
BPMDEF_WACKY_PIX
# ERICM Removed BPMDEF_CORR and addec FUNKY_COL
# ??? Add FUNKY_COL to the list of unusable pixels?
# ERICM Removed BPMDEF_CORR and added FUNKY_COL to the above list
mark = (bpm_im.mask & bitmask) != 0
image.mask[mark] |= BADPIX_BPM

# Mark edge pixels and bad amplifier with their own bits
# Copy BPM edge pixels to image mask
bitmask = BPMDEF_EDGE
mark = (bpm_im.mask & bitmask) != 0
image.mask[mark] |= BADPIX_EDGE

# Copy bad amplifier bits to image mask
bitmask = BPMDEF_BADAMP
mark = (bpm_im.mask & bitmask) != 0
image.mask[mark] |= BADPIX_BADAMP

# Clearing and then marking correctable pixels
bpm_im.mask -= (bpm_im.mask & BPMDEF_CORR)
bitmask = BPMDEF_FUNKY_COL | \
BPMDEF_BIAS_COL
# Copy SUSPECT BPM bits to image mask
bitmask = BPMDEF_SUSPECT
mark = (bpm_im.mask & bitmask) != 0
image.mask[mark] |= BADPIX_SUSPECT

# Copy NEAREDGE BPM bits to image mask
bitmask = BPMDEF_NEAREDGE
mark = (bpm_im.mask & bitmask) != 0
bpm_im.mask[mark] |= BPMDEF_CORR
image.mask[mark] |= BADPIX_FIXED

image.mask[mark] |= BADPIX_NEAREDGE

# Copy TAPEBUMP BPM bits to image mask
bitmask = BPMDEF_TAPEBUMP
mark = (bpm_im.mask & bitmask) != 0
image.mask[mark] |= BADPIX_TAPEBUMP

# Mark correctable pixels.
# Pixels flagged as BPMDEF_BIAS_COL and BPMDEF_FUNKY_COL may be correctable.
# We flag them in the image as bad (BADPIX_BPM), but - if fix_columns is run,
# the BADPIX_BPM flag will be cleared and the BADPIX_FIX flag will be set
# For each column find the number of pixels flagged as BIAS_HOT and BIAS_COL
N_BIAS_HOT = np.sum((bpm_im.mask & BPMDEF_BIAS_HOT) > 0, axis=0)
N_BIAS_COL = np.sum((bpm_im.mask & BPMDEF_BIAS_COL) > 0, axis=0)
maskwidth=bpm_im.mask.shape[1]
# First do columns with N_BIAS_COL set for 1 or more pixels
biascols=np.arange(maskwidth)[(N_BIAS_COL > 0)]
for icol in biascols:
#Clear FUNKY_COL bit if set for all pixels in this column
#The reason for clearing the bit is that the FUNKY_COL detection is
#sensitive to hot bias pixels and may flag those columns by "mistake"
bpm_im.mask[:,icol] -= (bpm_im.mask[:,icol] & BPMDEF_FUNKY_COL )
#Correctable columns have exactly 1 BIAS_HOT pixel
if N_BIAS_HOT[icol] == 1:
#Correctable pixels have BIAS_COL bit set
bpm_im.mask[:,icol][(bpm_im.mask[:,icol]&BPMDEF_BIAS_COL)>0] |= BPMDEF_CORR
logger.info('Column '+str(icol)+' has 1 hot pixel and is correctable.')
else:
logger.info('Column '+str(icol)+' has '+str(N_BIAS_HOT[icol])+' hot pixels and is NOT correctable.')

#Now do columns with FUNKY_COL set. Note that the FUNKY_COL bits have been cleared above
#for hot bias columns
N_FUNKY_COL = np.sum((bpm_im.mask & BPMDEF_FUNKY_COL) > 0, axis=0)
funkycols=np.arange(maskwidth)[(N_FUNKY_COL > 0)]
for icol in funkycols:
#Correctable pixels have FUNKY_COL bit set
bpm_im.mask[:,icol][(bpm_im.mask[:,icol]&BPMDEF_FUNKY_COL)>0] |= BPMDEF_CORR
logger.info('Column '+str(icol)+' is funky and correctable.')


image[kw] = time.asctime(time.localtime())
image.write_key(kw, time.asctime(time.localtime()),
Expand All @@ -97,28 +170,6 @@ def __call__(cls, image, bpm_im, saturate, clear):

logger.debug('Finished applying BPM')

if saturate:
# Check for header keyword of whether it's been done
kw = 'DESSAT'
if kw in image.header.keys() and not clear:
logger.warning('Skipping saturation check ('+kw+' already set)')
else:
logger.info('Flagging saturated pixels')
nsat = 0
for amp in decaminfo.amps:
sec = section2slice(image['DATASEC'+amp])
sat = image['SATURAT'+amp]
satpix = image.data[sec]>=sat
image.mask[sec][satpix] |= BADPIX_SATURATE
nsat += np.count_nonzero(satpix)

image.write_key(kw, time.asctime(time.localtime()),
comment = 'Flag saturated pixels')
image.write_key('NSATPIX',nsat,
comment='Number of saturated pixels')

logger.debug('Finished flagging saturated pixels')

return ret_code

@classmethod
Expand Down

0 comments on commit 22b2cef

Please sign in to comment.