
Commit

Merge branch 'master' into daniel
benhills authored Oct 9, 2020
2 parents 5b08bb8 + abb76bd commit 5f11b6f
Showing 128 changed files with 3,685 additions and 86,406 deletions.
4 changes: 4 additions & 0 deletions IS2_velocity/__init__.py
@@ -0,0 +1,4 @@
from . import readers
from . import correlation_processing
from . import extract_alongtrack
from . import atl03_reprocessing
57 changes: 57 additions & 0 deletions IS2_velocity/atl03_reprocessing.py
@@ -0,0 +1,57 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np

def reinterpolate_atl03(x_in,h_in,x_step=5,x_win=10):
r"""Recreate an ATL06-like product at finer resolution.
The ICESat-2 ATL06 product is created by linear regression
on the ATL03 photon cloud every 20 meters; then, the ATL06
elevation is evaluated at the center-point of the regression.
This funtion is meant to recrate this processing flow but for any
arbitrary step size.
Parameters
------
x_in : array
along-track photon locations
h_in : array
along-track photon height
x_step : float; optional
along-track step between points in the desired output array
x_win : float; optional
        window over which the linear fit is done (the window extends x_win in both directions)
Output
------
xs : array
along-track distance for reinterpolated points
hs : array
heights corresponding to the distances in 'xs'
"""

# Set up the output arrays to be filled
xs = np.arange(np.nanmin(x_in),np.nanmax(x_in),x_step)
hs = np.empty_like(xs)
print('Total Length:',len(xs))

for i,x in enumerate(xs):
if i%100 == 0:
print(i)
# Find all the points within x_win meters of the current x location
idxs = np.logical_and(x_in>x-x_win,x_in<x+x_win)
# Set up a linear regression on the points inside the window
x_fit = x_in[idxs]
h_fit = h_in[idxs]
# index the points that are not nans (these will be used in the regression)
idx_nonan = ~np.isnan(x_fit) & ~np.isnan(h_fit)
        if not np.any(idx_nonan):
# If all the points in the window are nans, output nan
hs[i] = np.nan
else:
fit = np.polyfit(x_fit[idx_nonan],h_fit[idx_nonan],1)
# Evaluate the regression at the centerpoint to get the height
hs[i] = np.polyval(fit,x)

return xs,hs
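
# A minimal usage sketch (synthetic photon cloud; values are illustrative):

import numpy as np
from IS2_velocity.atl03_reprocessing import reinterpolate_atl03

rng = np.random.default_rng(0)
x_photons = np.sort(rng.uniform(0, 1000, 5000))        # photon locations (m)
h_photons = 0.01*x_photons + rng.normal(0, 0.1, 5000)  # 1% slope plus noise

# regress every 5 m over a +/-10 m window (the defaults)
xs, hs = reinterpolate_atl03(x_photons, h_photons, x_step=5, x_win=10)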
182 changes: 182 additions & 0 deletions IS2_velocity/correlation_processing.py
@@ -0,0 +1,182 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from scipy.signal import correlate
from astropy.time import Time

# -------------------------------------------------------------------------------------------

def velocity(x_in,dh1,dh2,dt,output_xs,search_width=100,segment_length=2000,dx=20,corr_threshold=.65):
r"""Calculate along-track velocity by correlating the along-track elevation between repeat acquisitions.
Parameters
------
x_in : array
along-track distance
dh1 : array
surface slope at cycle 1
dh2 : array
surface slope at cycle 2
dt : float
time difference between cycles
output_xs : array
along-track distances at which the velocities will be calculated and output
search_width : float
        the maximum distance the stencil array is shifted when searching for the best correlation
segment_length : float
the length of the array to be correlated
dx : float
spacing between points in the x array
corr_threshold : float
minimum correlation to be considered an acceptable fit
Output
------
velocities : array
calculated velocities corresponding to locations output_xs
correlations : array
calculated correlations for each of the velocity points
"""

# create output arrays to be filled
velocities = np.nan*np.ones_like(output_xs)
correlations = np.nan*np.ones_like(output_xs)

# iterate over all the output locations
for xi,x_start in enumerate(output_xs):
# find indices for the stencil
idx1 = np.argmin(abs(x_in-x_start))
idx2 = idx1 + int(np.round(segment_length/dx))
# cut out a wider chunk of data at time t2 (second cycle)
idx3 = idx1 - int(np.round(search_width/dx)) # offset on earlier end by # indices in search_width
idx4 = idx2 + int(np.round(search_width/dx)) # offset on later end by # indices in search_width

# get the two arrays that will be correlated to one another
dh_stencil = dh1[idx1:idx2]
dh_wide = dh2[idx3:idx4]

# correlate old with newer data
corr = correlate(dh_stencil, dh_wide, mode = 'valid', method = 'direct')
        norm_val = np.sqrt(np.sum(dh_stencil**2)*np.sum(dh_wide**2))  # normalize so the correlation magnitude is bounded by 1
corr = corr / norm_val
        lagvec = np.arange(-int(np.round(search_width/dx)), int(np.round(search_width/dx)) + 1)  # lags for mode='valid'
shift_vec = lagvec * dx

if all(np.isnan(corr)):
continue
else:
correlation_value = np.nanmax(corr)
if correlation_value >= corr_threshold:
idx_peak = np.arange(len(corr))[corr == correlation_value][0]
best_shift = shift_vec[idx_peak]
velocities[xi] = best_shift/(dt/365)
correlations[xi] = correlation_value
else:
velocities[xi] = np.nan
correlations[xi] = correlation_value

return velocities,correlations
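
# A sanity-check sketch: recover a known displacement from synthetic slope
# profiles (made-up numbers; dt is in days, matching the dt/365 conversion above).

import numpy as np
from IS2_velocity.correlation_processing import velocity

dx = 20
x = np.arange(0, 20000, dx)
rng = np.random.default_rng(1)
dh1 = np.sin(2*np.pi*x/3000) + 0.1*rng.normal(size=x.size)
dh2 = np.interp(x - 60, x, dh1)   # cycle 2: same surface, advected 60 m

output_xs = np.arange(2000, 16000, 1000)
vels, corrs = velocity(x, dh1, dh2, dt=91, output_xs=output_xs)
# where corrs exceeds the threshold, vels should be near 60/(91/365) ~ 241 m/yr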

# -------------------------------------------------------------------------------------------

def smooth_and_diff(x_in,h_in,win=None):
r"""Smooth and differentiate an along-track height array.
Parameters
------
x_in : array
along-track distance
h_in : array
along-track elevation
    win : float; default None
        smoothing window (in meters);
        e.g., win=60 gives a 3-point running average, since ATL06 points are spaced 20 m apart
Output
------
h : array
along-track height (smoothed if win is not None)
dh : array
surface slope
"""

# Default is no smoothing
if win is None:
# calculate the surface slope
dh = np.gradient(h_in,x_in)
return h_in,dh

# running average smoother / filter
else:
# calculate the step between x points
dx = np.mean(np.gradient(x_in))
# number of points (meters / dx)
N = int(np.round(win/dx))
# smooth
h_smooth = np.nan*np.ones_like(x_in)
h_smooth[N//2:-N//2+1] = np.convolve(h_in,np.ones(N)/N, mode='valid')
# calculate the surface slope
dh = np.gradient(h_smooth,x_in)
return h_smooth,dh
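
# A quick sketch on synthetic 20 m data:

import numpy as np
from IS2_velocity.correlation_processing import smooth_and_diff

x = np.arange(0, 4000, 20.0)
h = 0.02*x + np.random.default_rng(2).normal(0, 0.05, x.size)

h_raw, dh_raw = smooth_and_diff(x, h)           # no smoothing
h_sm, dh_sm = smooth_and_diff(x, h, win=100)    # 5-point running mean
# away from the NaN-padded edges, dh_sm should sit near the true slope of 0.02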

# -------------------------------------------------------------------------------------------

def fill_seg_ids(x_in,h_in,seg_ids,dx=20):
r"""Fill the along-track vector so that there are no skipped points
Parameters
------
x_in : array
along-track distance
h_in : array
along-track elevation
seg_ids : array
segment ids as imported with atl06_to_dict function
dx : float; default 20
        step between points (20 m is the default for ATL06)
Output
------
x_full : array
filled array of along-track distance
h_full : array
        filled array of along-track elevation
"""

    # make a monotonically increasing x vector
    # indices start at zero; using the segment_id field keeps any skipped segment in its correct location
    ind = (seg_ids - np.nanmin(seg_ids)).astype(int)  # int cast so the ids can index the output array
    x_full = np.arange(np.max(ind)+1) * dx + x_in[0]
    h_full = np.zeros(np.max(ind)+1) + np.nan
    h_full[ind] = h_in

return x_full,h_full
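
# A small sketch of the gap-filling behavior (hypothetical segment ids):

import numpy as np
from IS2_velocity.correlation_processing import fill_seg_ids

seg_ids = np.array([1001, 1002, 1004, 1005])   # segment 1003 is missing
x = np.array([0., 20., 60., 80.])
h = np.array([10., 10.5, 11.5, 12.])

x_full, h_full = fill_seg_ids(x, h, seg_ids)
# x_full -> [0, 20, 40, 60, 80]; h_full is NaN at the skipped segment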

# -------------------------------------------------------------------------------------------

def time_diff(D1,D2):
r"""Get the time difference between two cycles
Parameters
------
    D1 : dict
        data dictionary for the first cycle (e.g. from atl06_to_dict)
    D2 : dict
        data dictionary for the second cycle
Output
------
    dt : float
        time difference between cycles, in days
"""

    # get time strings
    t1_string = D1['data_start_utc'].astype(str)  # TODO: check whether using just the first start time is OK
    t2_string = D2['data_start_utc'].astype(str)
# use astropy to get time from strings
t1 = Time(t1_string)
t2 = Time(t2_string)
    # time difference in Julian days
dt = (t2 - t1).jd

return dt
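
# A minimal sketch with stand-in dictionaries (real ones come from an ATL06
# reader; the byte strings mimic what h5py returns):

import numpy as np
from IS2_velocity.correlation_processing import time_diff

D1 = {'data_start_utc': np.array([b'2019-01-01T00:00:00.000000'])}
D2 = {'data_start_utc': np.array([b'2019-04-02T00:00:00.000000'])}

dt = time_diff(D1, D2)   # ~91 days between the two cycles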

# -------------------------------------------------------------------------------------------
144 changes: 144 additions & 0 deletions IS2_velocity/draft_functions.py
@@ -0,0 +1,144 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from scipy.interpolate import interp1d

### UNFINISHED FUNCTION - basic structure is good
### working on debugging problems with peaks at edges for the sub-sample routine

def find_correlation_peak(lagvec, shift_vec, corr_normed, max_width, min_width, dx_interp, fit_type, plotting):
    # max_width = the widest the half peak can be, in samples
    # min_width = the narrowest the half peak can be, in samples
    # dx_interp = interpolated sample spacing, in units of samples


    # index of the peak of the un-interpolated correlation
    ix_peak = np.where(corr_normed == np.nanmax(corr_normed))[0][0]

if fit_type == 'raw':
best_lag = lagvec[ix_peak]
best_shift = shift_vec[ix_peak]
peak_corr_value = corr_normed[ix_peak]

else:
### Assume we're doing a sub-sample fit
# find troughs before and after peak

corr_diff = corr_normed[1:] - corr_normed[:-1] # to find troughs, zero-crossings

# cases:
# ix_peak is too close to the start (ix_peak < min_width)
# calculate right width by finding trough or max/min, use ix_peak as left width
if ix_peak < min_width:
left_width = ix_peak # go to left edge
# determine right width
if ix_peak == 0:
ix_next_trough = np.where(corr_diff[ix_peak:] > 0)[0][0] + ix_peak
else:
ix_next_trough = np.where(corr_diff[ix_peak-1:] > 0)[0][0] + ix_peak
width_next_trough = -(ix_peak - ix_next_trough)
if width_next_trough < min_width:
right_width = min_width
elif width_next_trough > max_width:
right_width = max_width
else:
right_width = width_next_trough

# ix_peak is too close to the end (len(corr_normed) - ix_peak < min_width)
# calculate left width by finding trough or max/min, use len(corr_normed) - ix_peak as right width
elif len(corr_normed) - ix_peak < min_width:
right_width = len(corr_normed) - ix_peak -1
            # determine left width
ix_prev_trough = ix_peak - np.where(np.flip(corr_diff[:ix_peak-1]) < 0)[0][0]
width_prev_trough = ix_peak - ix_prev_trough
if width_prev_trough < min_width:
left_width = min_width
elif width_prev_trough > max_width:
left_width = max_width
else:
left_width = width_prev_trough

# other
else:
# calculate both left and right width
ix_next_trough = np.where(corr_diff[ix_peak-1:] > 0)[0][0] + ix_peak
ix_prev_trough = ix_peak - np.where(np.flip(corr_diff[:ix_peak-1]) < 0)[0][0]
# if width is greater than min, pick smaller width, so is same on both sides
width = np.min([ix_peak - ix_prev_trough, -(ix_peak - ix_next_trough)])
if width > max_width:
width = max_width
elif width < min_width:
width = min_width
left_width = width
right_width = width


# cut out data and an x vector before and after the peak (for plotting)
dcut = corr_normed[ix_peak - left_width: ix_peak + right_width +1]
xvec_cut = np.arange(ix_peak - left_width, ix_peak + right_width + 1)
lagvec_cut = lagvec[xvec_cut]

        if fit_type == 'parabola':
            # fit a parabola; in these data, usually a poor fit
            fit_coeffs = np.polyfit(xvec_cut, dcut, deg=2)

            # x vector (in sample units) to evaluate the parabola on
            interp_xvec = np.arange(ix_peak - left_width, ix_peak + right_width + 1, dx_interp)
            fit = np.polyval(fit_coeffs, interp_xvec)

            # interpolated lag vector (assumes lagvec has one-sample spacing)
            lagvec_interp = np.arange(lagvec[ix_peak - left_width], lagvec[ix_peak + right_width + 1], dx_interp)

            # compute peak; report the lag, not the sample index
            ix_peak_interp = np.where(fit == np.max(fit))[0][0]
            best_lag = lagvec_interp[ix_peak_interp]
            peak_corr_value = fit[ix_peak_interp]

elif fit_type == 'cubic':
# cubic spline interpolation
            # x vector (in sample units) to interpolate onto
            interp_xvec = np.arange(ix_peak - left_width, ix_peak + right_width, dx_interp)
            # build the cubic interpolant and evaluate it on the refined x vector
            f = interp1d(xvec_cut, dcut, kind='cubic')
            fit = f(interp_xvec)

# interpolate lagvec
lagvec_interp = np.arange(lagvec[ix_peak - left_width], lagvec[ix_peak + right_width], dx_interp)

# compute peak
ix_peak_interp = np.where(fit == np.max(fit))[0][0]
best_lag = lagvec_interp[ix_peak_interp]
peak_corr_value = fit[ix_peak_interp]

return best_lag, peak_corr_value
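
# A rough exercise sketch for this draft routine, using a synthetic
# correlation peak (all numbers are arbitrary):

import numpy as np
from IS2_velocity.draft_functions import find_correlation_peak

lagvec = np.arange(-100, 101)         # lags in samples
shift_vec = lagvec*20.0               # lags in meters, for 20 m spacing
corr = np.sinc((lagvec - 60)/8.0)     # main lobe centered on lag 60

best_lag, peak_val = find_correlation_peak(lagvec, shift_vec, corr,
                                           max_width=20, min_width=3,
                                           dx_interp=0.1, fit_type='cubic',
                                           plotting=False)
# best_lag should land near 60; fit_type='raw' skips the sub-sample refinement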
