From b23319eef88db16f804b6efd978e0a6f8c429e5e Mon Sep 17 00:00:00 2001
From: Grace Barcheck
Date: Fri, 13 Nov 2020 09:48:44 -0500
Subject: [PATCH 1/4] preprocessing: add h_li_RAW output from interp_nans;
 fill nans forward and backwards. correlation_processing: fix bug in
 “determine_x1s”; update %nans determination to use h_li_RAW and have
 correct logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 gb_test.py | 320 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 320 insertions(+)
 create mode 100644 gb_test.py

diff --git a/gb_test.py b/gb_test.py
new file mode 100644
index 0000000..8a9ce6b
--- /dev/null
+++ b/gb_test.py
@@ -0,0 +1,320 @@
+# Import the basic libraries
+import numpy as np
+import matplotlib.pyplot as plt
+# %matplotlib notebook
+import os,re,h5py,pyproj, glob
+
+# Import the basic libraries
+import numpy as np
+import matplotlib.pyplot as plt
+
+# This cell: put the versions of codes I'm working on first in path
+import sys
+print(sys.path)
+working_module_directory = '/Users/grace/Dropbox/Cornell/projects/003/git_repo'
+if not sys.path[0] == working_module_directory:
+    sys.path.insert(0, working_module_directory)
+print(sys.path)
+
+# As an example, import a function from the ICESat-2 surface velocity library
+from IS2_velocity.correlation_processing import calculate_velocities
+
+from IS2_velocity.readers_atl06 import load_data_by_rgt
+
+########## testing a bunch of data
+# where the data is
+data_path = '/Users/grace/Dropbox/Cornell/projects/003/FIS_03_04/raw_ATL06/'
+ATL06_files=glob.glob(os.path.join(data_path, '*.h5'))
+
+
+# where to save raw data
+save_path = '/Users/grace/Dropbox/Cornell/projects/003/FIS_03_04/corr_results/'
+
+# List of available rgts
+rgts = {}
+rgts_list = []
+for filepath in ATL06_files:
+    filename = filepath.split('/')[-1]
+    rgt = filename.split('_')[3][0:4]
+    track = filename.split('_')[3][4:6]
+#    print(rgt,track)
+    if not rgt in rgts.keys():
+        rgts[rgt] = []
+        rgts[rgt].append(track)
+    else:
+        rgts[rgt].append(track)
+        rgts_list.append(rgt)
+
+
+# all rgt values in our study are in rgts.keys()
+print(rgts.keys())
+
+# available tracks for each rgt are in rgts[rgt]; ex.:
+print(rgts['0848'])
+
+# Calculate velocity between cycles 3 and 4
+cycle1 = '03'
+cycle2 = '04'
+beams = ['gt1l', 'gt1r', 'gt2l', 'gt2r', 'gt3l', 'gt3r']
+
+# Loop over rgts; Load data; preprocess; run cross correlations; write out
+from IS2_velocity.preprocessing import *
+from IS2_velocity.saving import *
+from IS2_velocity.saving import *
+from IS2_velocity.plotting import plot_measures_along_track_comparison
+
+
+# saving options
+write_out_path = save_path
+write_out_prefix = 'run20201112'
+map_data_root = '/Users/grace/Dropbox/Cornell/projects/003/FIS_data/' # where map data are stored; different on each computer
+spatial_extent = np.array([-65, -86, -55, -81])
+product = 'ATL06'
+dx = 20
+segment_length = 2000
+search_width = 1000
+along_track_step = 100
+max_percent_nans = 10
+
+error_rgts = []
+for ir, rgt in enumerate(rgts_list):
+    print(rgt)
+    if cycle1 in rgts[rgt] and cycle2 in rgts[rgt]:
+        # process only if there is data for both cycles (there may not be)
+
+        try:
+            # load data
+            data = load_data_by_rgt(rgt, data_path, product='ATL06', format='hdf5')
+
+            # preprocess
+            data = fill_seg_ids(data)
+            data = interpolate_nans(data)
+            data = filt(data, filter_type='running_average', running_avg_window=60)
+            data = differentiate(data)
+
+            # run correlations
+            data = calculate_velocities(data, cycle1, cycle2, beams)
+
+            # save results
+            save_is2_velocity(data, rgt, write_out_path, write_out_prefix, product, map_data_root,
+                              cycle1, cycle2, dx, segment_length, search_width, along_track_step,
+                              max_percent_nans, spatial_extent)
+            #
+            # plotting = True
+            # if plotting:
+            #     plot_out_location = write_out_path
+            #     correlation_threshold = 0.65
+            #     velocity_number = 0
+            #     spatial_extent = np.array([-65, -86, -55, -81])
+            #     epsg = 3031
+            #     plot_measures_along_track_comparison(rgt, beams, write_out_path, correlation_threshold, spatial_extent,
+            #                                          plot_out_location, map_data_root, velocity_number, epsg)
+        except Exception as e:
+            # print the caught exception instance, not the Exception class
+            print('problem with data from rgt ' + str(rgt))
+            error_rgts += [rgt]
+            print(e)
+
+
+processed_files = glob.glob(write_out_path + '/*hdf5')
+for ir, rgt in enumerate(rgts_list):
+    print(rgt)
+    if cycle1 in rgts[rgt] and cycle2 in rgts[rgt]:
+        try:
+            plot_out_location = write_out_path
+            correlation_threshold = 0.65
+            velocity_number = 0
+            spatial_extent = np.array([-65, -86, -55, -81])
+            epsg = 3031
+            plot_measures_along_track_comparison(rgt, beams, write_out_path, correlation_threshold, spatial_extent,
+                                                 plot_out_location, map_data_root, velocity_number, epsg)
+        except Exception:
+            pass
+
+# problem rgts: 0513, 0842, 0491, 0772, 0771, 0878, 0720, 0482, 0833, 0492
+
+##################################
+
+
+
+
+
+# path to data, relative to folder /notebooks
+data_dir = 'data/' # '../data/'
+rgt = '0848'
+
+# Load data; this step loads raw data, interpolates to constant spacing, filters if requested, and
+# differentiates
+filter_type = 'running_average'
+running_avg_window = 100
+
+data = load_data_by_rgt(rgt, data_dir, product = 'ATL06', format = 'hdf5')
+
+# preprocessing
+from IS2_velocity.preprocessing import *
+
+data = fill_seg_ids(data)
+data = interpolate_nans(data)
+data = filt(data)
+data = differentiate(data)
+
+## calc velocities
+from IS2_velocity.correlation_processing import calculate_velocities
+
+# Calculate velocity between cycles 3 and 4
+cycle1 = '03'
+cycle2 = '04'
+beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
+
+data = calculate_velocities(data, cycle1, cycle2, beams)
+
+################ OLD
+# As an example, import a function from the ICESat-2 surface velocity library
+# from IS2_velocity.correlation_processing import velocity
+# help(velocity)
+
+##### TEST READER atl06
+# Import the reader script
+# from IS2_velocity.readers import atl06_to_dict
+
+# read in dictionaries from two different cycles
+data_dir = 'data/' # '../data/'
+fn_1 = 'processed_ATL06_20190822153035_08480411_003_01.h5'
+D1=atl06_to_dict(data_dir+fn_1,'/gt2l', index=None, epsg=3031, format='hdf5')
+fn_2 = 'processed_ATL06_20190523195046_08480311_003_01.h5'
+D2=atl06_to_dict(data_dir+fn_2,'/gt2l', index=None, epsg=3031, format='hdf5')
+
+# Plot the land-ice elevation along the pass.
+plt.figure(figsize=(8,4))
+
+plt.plot(D1['x_atc']/1000.,D1['h_li'],c='indianred')
+plt.plot(D2['x_atc']/1000.,D2['h_li'],c='steelblue')
+plt.ylabel('Elevation (m)')
+plt.xlabel('Along-Track Distance (km)')
+
+plt.tight_layout()
+
+# Get segment ids from the loaded dictionaries
+x1,h1 = fill_seg_ids(D1['x_atc'],D1['h_li'],D1['segment_id'])
+x2,h2 = fill_seg_ids(D2['x_atc'],D2['h_li'],D2['segment_id'])
+
+# Smooth and differentiate the elevation product (this is a preprocessing step)
+running_avg_window = 100
+h1_filt = filt(x1, h1, running_avg_window, filter_type = 'running_average')
+h2_filt = filt(x2, h2, running_avg_window, filter_type = 'running_average')
+dh1 = differentiate(x1,h1)
+dh2 = differentiate(x2,h2)
+dh1_filt = differentiate(x1,h1_filt)
+dh2_filt = differentiate(x2,h2_filt)
+
+
+
+plt.figure(figsize=(8,6))
+
+# Plot smoothed surface elevation
+plt.subplot(211)
+plt.tick_params(labelbottom=False,bottom=False)
+plt.plot(x1/1000.,h1,c='grey')
+plt.plot(x1/1000.,h1_filt,c='k')
+plt.ylabel('Elevation (m)')
+
+# Plot the surface slope
+plt.subplot(212)
+plt.plot(x1/1000.,dh1,c='grey')
+plt.plot(x1/1000.,dh1_filt,c='k')
+plt.xlabel('Along-Track Distance (km)')
+plt.ylabel('Surface Slope (m/m)')
+
+plt.tight_layout()
+
+# Calculate time offset
+dt = time_diff(D1,D2)
+
+### Control the correlation step:
+segment_length = 2000 # meters, how wide is the window we are correlating in each step
+search_width = 1000 # meters, how far in front of and behind the window to check for correlation
+along_track_step = 100 # meters; how much to jump between each consecutive velocity determination
+max_percent_nans = 10 # Maximum % of segment length that can be nans and still do the correlation step
+
+x_atc, lats, lons, h_li_raw, h_li_raw_NoNans, h_li, h_li_diff, times, min_seg_ids, segment_ids, cycles_this_rgt, x_ps, y_ps = \
+    load_data_by_rgt(rgt = '0848', path_to_data = data_dir, product = 'ATL06', \
+    filter_type = 'running_average', running_avg_window = running_avg_window, \
+    format = 'hdf5')
+
+
+cycles_this_rgt = sorted(cycles_this_rgt)
+cycle1 = cycles_this_rgt[0]
+cycle2 = cycles_this_rgt[1]
+
+# dt = time_diff # Currently coded to use hdf5 data dictionary;
+### Timing of each cycle in the current velocity determination
+t1_string = times[cycle1]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
+t1 = Time(t1_string)
+t2_string = times[cycle2]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
+t2 = Time(t2_string)
+
+### Which beams to process
+beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
+
+### Elapsed time between cycles
+dt = (t2 - t1).jd # difference in julian days
+
+###
+rgt = '0848'
+
+dx = 20
+velocities, correlations, lags, midpoints_x_atc, midpoints_xy, midpoints_lons, midpoints_lats = \
+    velocity(x_atc, h_li_diff, lats, lons, segment_ids, beams, cycle1, cycle2, \
+    segment_length, search_width, along_track_step, max_percent_nans, dx)
+
+
+
+from matplotlib.gridspec import GridSpec
+
+beam = 'gt1l'
+x1 = x_atc['03'][beam]
+x2 = x_atc['04'][beam]
+h1 = h_li['03'][beam]
+h2 = h_li['04'][beam]
+dh1 = h_li_diff['03'][beam]
+dh2 = h_li_diff['04'][beam]
+vel_xs = midpoints_x_atc[rgt][beam]
+velocs = velocities[rgt][beam]
+
+plt.figure(figsize=(8,4))
+gs = GridSpec(2,2)
+
+# Plot the elevation profiles again
+plt.subplot(gs[0,0])
+plt.tick_params(bottom=False,labelbottom=False)
+plt.plot(x1/1000.-29000,h1,'.',c='indianred')
+plt.plot(x2/1000.-29000,h2,'.',c='steelblue',ms=3)
+plt.ylabel('Elevation (m)')
+plt.title('ATL06',fontweight='bold')
+plt.xlim(80,580)
+
+# Plot the slopes again
+plt.subplot(gs[1,0])
+plt.tick_params(bottom=False,labelbottom=False)
+plt.plot(x1/1000.-29000,dh1,'.',c='indianred')
+plt.plot(x2/1000.-29000,dh2,'.',c='steelblue',ms=3)
+plt.ylim(-.05,.05)
+plt.ylabel('Surface Slope (m/m)')
+plt.xlim(80,580)
+
+# Plot the calculated velocities along track
+ax5 = plt.subplot(gs[0,1])
+plt.plot(vel_xs/1000.-29000,velocs,'.',c='k',label='ATL06')
+plt.ylabel('Velocity (m/yr)')
+plt.xlabel('Along-Track Distance (km)')
+plt.xlim(80,580)
+plt.ylim(-500,1500)
+
+plt.tight_layout()
+
+
+
+
+
+
+

From cdb47fa0fc8bcac4adc7efad05e55b5f886c3999 Mon Sep 17 00:00:00 2001
From: Grace Barcheck
Date: Fri, 13 Nov 2020 09:53:24 -0500
Subject: [PATCH 2/4] preprocessing: add h_li_RAW output from interp_nans;
 fill nans forward and backwards. correlation_processing: fix bug in
 “determine_x1s”; update %nans determination to use h_li_RAW and have
 correct logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 IS2_velocity/correlation_processing.py | 18 +++++++++---------
 IS2_velocity/preprocessing.py          | 14 ++++++++++++--
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/IS2_velocity/correlation_processing.py b/IS2_velocity/correlation_processing.py
index 7aff778..7ad9a5e 100644
--- a/IS2_velocity/correlation_processing.py
+++ b/IS2_velocity/correlation_processing.py
@@ -138,11 +138,12 @@ def determine_x1s(data, cycle1, cycle2, beam, search_width=1000, segment_length=
     # pick out the track that starts at greater x_atc, and use that as x1s vector
     if min_x_atc_cycle1 != min_x_atc_cycle2:
         x1 = np.nanmax([min_x_atc_cycle1, min_x_atc_cycle2])
+        # determine which cycle starts at greater x_atc
         cycle_n = np.arange(0, 2)[[min_x_atc_cycle1, min_x_atc_cycle2] == x1][0]
         if cycle_n == 0:
-            cycletmp = cycle2
-        elif cycle_n == 1:
             cycletmp = cycle1
+        elif cycle_n == 1:
+            cycletmp = cycle2
 
         ### Generate the x1s vector, in the case that the repeat tracks don't start in the same place
         x1s = data['x_atc'][cycletmp][beam][
@@ -179,6 +180,7 @@ def correlate_single_x1(x1, data, cycle1, cycle2, beam, search_width=1000, segme
     :param dx:
     :return:
     """
+
     ### Determine elapsed time between cycles
     dt = time_diff(data,cycle1,cycle2)
 
@@ -196,8 +198,7 @@ def correlate_single_x1(x1, data, cycle1, cycle2, beam, search_width=1000, segme
     npts_short = int( np.round(segment_length / dx)) # how many data points in the later, shorter vector to correlate
     npts_extra_long = int(np.round(search_width / dx)) # how many extra datapoints before/after in the earlier, longer vector to correlate
 
-    ix_x1 = np.arange(len(data['x_atc'][cycle1][beam]))[data['x_atc'][cycle1][beam] >= x1][
-        0] # Index of first point that is greater than x1
+    ix_x1 = np.arange(len(data['x_atc'][cycle1][beam]))[data['x_atc'][cycle1][beam] >= x1][0] # Index of first point that is greater than x1
     ix_x2 = ix_x1 + npts_short # ix_x1 + number of datapoints within the desired segment length
     x_t1 = x_full_t1[ix_x1:ix_x2] # cut out x_atc values, first cycle
     lats_t1 = data['latitude'][cycle1][beam][ix_x1:ix_x2] # cut out latitude values, first cycle
@@ -210,8 +211,7 @@ def correlate_single_x1(x1, data, cycle1, cycle2, beam, search_width=1000, segme
     ### Cut out data: wider chunk of data at time t2 (second cycle)
     ix_x3 = ix_x1 - npts_extra_long # extend on earlier end by number of indices in search_width
     ix_x4 = ix_x2 + npts_extra_long # extend on later end by number of indices in search_width
-    h_li2 = data['h_li_diff'][cycle2][beam][
-        ix_x3 - 1:ix_x4 - 1] # cut out land ice height values, second cycle; start 1 index earlier because
+    h_li2 = data['h_li_diff'][cycle2][beam][ix_x3-1:ix_x4-1] # cut out land ice height values, second cycle; start 1 index earlier because
     # the h_li_diff data are differentiated, and therefore one sample shorter
 
     # Find segment midpoints; this is the position where we will assign the velocity measurement from each window
@@ -223,11 +223,11 @@ def correlate_single_x1(x1, data, cycle1, cycle2, beam, search_width=1000, segme
         data['midpoints_seg_ids'][beam][xi] = seg_ids_t1[midpt_ix]
 
     ### Determine number of nans in each data chunk
-    n_nans1 = np.sum(np.isnan(data['h_li'][cycle1][beam][ix_x1:ix_x2]))
-    n_nans2 = np.sum(np.isnan(data['h_li'][cycle2][beam][ix_x3:ix_x4]))
+    n_nans1 = np.sum(np.isnan(data['h_li_RAW'][cycle1][beam][ix_x1:ix_x2]))
+    n_nans2 = np.sum(np.isnan(data['h_li_RAW'][cycle2][beam][ix_x3:ix_x4]))
 
     ### Only process if both data chunks have fewer than max_percent_nans % nans:
-    if not (n_nans1 / len(h_li1) <= max_percent_nans / 100) and (n_nans2 / len(h_li2) <= max_percent_nans / 100):
+    if ((n_nans1 / len(h_li1) >= max_percent_nans / 100) or (n_nans2 / len(h_li2) >= max_percent_nans / 100)):
         ### If there are too many nans, just save a nan
         data['velocities'][beam][xi] = np.nan
         data['correlations'][beam][xi] = np.nan
diff --git a/IS2_velocity/preprocessing.py b/IS2_velocity/preprocessing.py
index d71656d..1603392 100644
--- a/IS2_velocity/preprocessing.py
+++ b/IS2_velocity/preprocessing.py
@@ -73,13 +73,23 @@ def interpolate_nans(data):
 
     Warning("Be careful interpolating, linear interpolation could lead to misleading correlations.")
 
+    # to save raw data
+    data['h_li_RAW'] = {}
+
     for cycle in data['h_li'].keys():
+        # to save raw data
+        data['h_li_RAW'][cycle] = {}
+
         for beam in data['h_li'][cycle].keys():
+            # to save raw data
+            data['h_li_RAW'][cycle][beam] = data['h_li'][cycle][beam]
+
             ### interpolate nans in pandas
             interp_hold = {'x_atc': data['x_atc'][cycle][beam], 'h_li': data['h_li'][cycle][beam]}
             df = pd.DataFrame(interp_hold, columns = ['x_atc','h_li'])
 
-            # linear interpolation for now
-            df['h_li'].interpolate(method = 'linear', inplace = True)
+            # linear interpolation for now; forward and backward, so nans at start and end are filled also
+            df['h_li'].interpolate(method = 'linear', limit_direction = 'forward', inplace = True)
+            df['h_li'].interpolate(method = 'linear', limit_direction = 'backward', inplace = True)
 
             data['h_li'][cycle][beam] = df['h_li'].values
             data['flags'].append('interp_nans')

From bab4b6f4b77eb0ee553a426d8d3c1fe42c77ddba Mon Sep 17 00:00:00 2001
From: Grace Barcheck
Date: Fri, 13 Nov 2020 10:20:18 -0500
Subject: [PATCH 3/4] one more fix in correlation_processing

---
 IS2_velocity/correlation_processing.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/IS2_velocity/correlation_processing.py b/IS2_velocity/correlation_processing.py
index c0ffaab..37e0abc 100644
--- a/IS2_velocity/correlation_processing.py
+++ b/IS2_velocity/correlation_processing.py
@@ -69,8 +69,10 @@ def calculate_velocities(data, cycle1, cycle2, beams, search_width=1000, segment
 
     ### Loop over each beam
     for beam in beams:
-        data = calculate_velocity_single_beam(data,cycle1,cycle2,beam,search_width=search_width,segment_length=segment_length,
-                                              max_percent_nans=max_percent_nans,along_track_step=along_track_step, dx=dx)
+        if (beam in data['x_atc'][cycle1].keys()) and (beam in data['x_atc'][cycle2].keys()): # only process this beam if it has data in both cycles
+            data = calculate_velocity_single_beam(data,cycle1,cycle2,beam,search_width=search_width,segment_length=segment_length,
+                                                  max_percent_nans=max_percent_nans,along_track_step=along_track_step, dx=dx)
+        # eventually: fill in empty data with some flag
 
     return data
 

From a0bebbdece1595ad50489ab973347a17a2463a4c Mon Sep 17 00:00:00 2001
From: Grace Barcheck
Date: Sun, 15 Nov 2020 09:22:27 -0500
Subject: [PATCH 4/4] update extract_alongtrack to use new code; troubleshoot
 ascending/descending; update saving to use new extract_alongtrack code.

---
 gb_test.py | 320 -----------------------------------------------------
 1 file changed, 320 deletions(-)
 delete mode 100644 gb_test.py

diff --git a/gb_test.py b/gb_test.py
deleted file mode 100644
index 8a9ce6b..0000000
--- a/gb_test.py
+++ /dev/null
@@ -1,320 +0,0 @@
-# Import the basic libraries
-import numpy as np
-import matplotlib.pyplot as plt
-# %matplotlib notebook
-import os,re,h5py,pyproj, glob
-
-# Import the basic libraries
-import numpy as np
-import matplotlib.pyplot as plt
-
-# This cell: put the versions of codes I'm working on first in path
-import sys
-print(sys.path)
-working_module_directory = '/Users/grace/Dropbox/Cornell/projects/003/git_repo'
-if not sys.path[0] == working_module_directory:
-    sys.path.insert(0, working_module_directory)
-print(sys.path)
-
-# As an example, import a function from the ICESat-2 surface velocity library
-from IS2_velocity.correlation_processing import calculate_velocities
-
-from IS2_velocity.readers_atl06 import load_data_by_rgt
-
-########## testing a bunch of data
-# where the data is
-data_path = '/Users/grace/Dropbox/Cornell/projects/003/FIS_03_04/raw_ATL06/'
-ATL06_files=glob.glob(os.path.join(data_path, '*.h5'))
-
-
-# where to save raw data
-save_path = '/Users/grace/Dropbox/Cornell/projects/003/FIS_03_04/corr_results/'
-
-# List of available rgts
-rgts = {}
-rgts_list = []
-for filepath in ATL06_files:
-    filename = filepath.split('/')[-1]
-    rgt = filename.split('_')[3][0:4]
-    track = filename.split('_')[3][4:6]
-#    print(rgt,track)
-    if not rgt in rgts.keys():
-        rgts[rgt] = []
-        rgts[rgt].append(track)
-    else:
-        rgts[rgt].append(track)
-        rgts_list.append(rgt)
-
-
-# all rgt values in our study are in rgts.keys()
-print(rgts.keys())
-
-# available tracks for each rgt are in rgts[rgt]; ex.:
-print(rgts['0848'])
-
-# Calculate velocity between cycles 3 and 4
-cycle1 = '03'
-cycle2 = '04'
-beams = ['gt1l', 'gt1r', 'gt2l', 'gt2r', 'gt3l', 'gt3r']
-
-# Loop over rgts; Load data; preprocess; run cross correlations; write out
-from IS2_velocity.preprocessing import *
-from IS2_velocity.saving import *
-from IS2_velocity.saving import *
-from IS2_velocity.plotting import plot_measures_along_track_comparison
-
-
-# saving options
-write_out_path = save_path
-write_out_prefix = 'run20201112'
-map_data_root = '/Users/grace/Dropbox/Cornell/projects/003/FIS_data/' # where map data are stored; different on each computer
-spatial_extent = np.array([-65, -86, -55, -81])
-product = 'ATL06'
-dx = 20
-segment_length = 2000
-search_width = 1000
-along_track_step = 100
-max_percent_nans = 10
-
-error_rgts = []
-for ir, rgt in enumerate(rgts_list):
-    print(rgt)
-    if cycle1 in rgts[rgt] and cycle2 in rgts[rgt]:
-        # process only if there is data for both cycles (there may not be)
-
-        try:
-            # load data
-            data = load_data_by_rgt(rgt, data_path, product='ATL06', format='hdf5')
-
-            # preprocess
-            data = fill_seg_ids(data)
-            data = interpolate_nans(data)
-            data = filt(data, filter_type='running_average', running_avg_window=60)
-            data = differentiate(data)
-
-            # run correlations
-            data = calculate_velocities(data, cycle1, cycle2, beams)
-
-            # save results
-            save_is2_velocity(data, rgt, write_out_path, write_out_prefix, product, map_data_root,
-                              cycle1, cycle2, dx, segment_length, search_width, along_track_step,
-                              max_percent_nans, spatial_extent)
-            #
-            # plotting = True
-            # if plotting:
-            #     plot_out_location = write_out_path
-            #     correlation_threshold = 0.65
-            #     velocity_number = 0
-            #     spatial_extent = np.array([-65, -86, -55, -81])
-            #     epsg = 3031
-            #     plot_measures_along_track_comparison(rgt, beams, write_out_path, correlation_threshold, spatial_extent,
-            #                                          plot_out_location, map_data_root, velocity_number, epsg)
-        except Exception as e:
-            # print the caught exception instance, not the Exception class
-            print('problem with data from rgt ' + str(rgt))
-            error_rgts += [rgt]
-            print(e)
-
-
-processed_files = glob.glob(write_out_path + '/*hdf5')
-for ir, rgt in enumerate(rgts_list):
-    print(rgt)
-    if cycle1 in rgts[rgt] and cycle2 in rgts[rgt]:
-        try:
-            plot_out_location = write_out_path
-            correlation_threshold = 0.65
-            velocity_number = 0
-            spatial_extent = np.array([-65, -86, -55, -81])
-            epsg = 3031
-            plot_measures_along_track_comparison(rgt, beams, write_out_path, correlation_threshold, spatial_extent,
-                                                 plot_out_location, map_data_root, velocity_number, epsg)
-        except Exception:
-            pass
-
-# problem rgts: 0513, 0842, 0491, 0772, 0771, 0878, 0720, 0482, 0833, 0492
-
-##################################
-
-
-
-
-
-# path to data, relative to folder /notebooks
-data_dir = 'data/' # '../data/'
-rgt = '0848'
-
-# Load data; this step loads raw data, interpolates to constant spacing, filters if requested, and
-# differentiates
-filter_type = 'running_average'
-running_avg_window = 100
-
-data = load_data_by_rgt(rgt, data_dir, product = 'ATL06', format = 'hdf5')
-
-# preprocessing
-from IS2_velocity.preprocessing import *
-
-data = fill_seg_ids(data)
-data = interpolate_nans(data)
-data = filt(data)
-data = differentiate(data)
-
-## calc velocities
-from IS2_velocity.correlation_processing import calculate_velocities
-
-# Calculate velocity between cycles 3 and 4
-cycle1 = '03'
-cycle2 = '04'
-beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
-
-data = calculate_velocities(data, cycle1, cycle2, beams)
-
-################ OLD
-# As an example, import a function from the ICESat-2 surface velocity library
-# from IS2_velocity.correlation_processing import velocity
-# help(velocity)
-
-##### TEST READER atl06
-# Import the reader script
-# from IS2_velocity.readers import atl06_to_dict
-
-# read in dictionaries from two different cycles
-data_dir = 'data/' # '../data/'
-fn_1 = 'processed_ATL06_20190822153035_08480411_003_01.h5'
-D1=atl06_to_dict(data_dir+fn_1,'/gt2l', index=None, epsg=3031, format='hdf5')
-fn_2 = 'processed_ATL06_20190523195046_08480311_003_01.h5'
-D2=atl06_to_dict(data_dir+fn_2,'/gt2l', index=None, epsg=3031, format='hdf5')
-
-# Plot the land-ice elevation along the pass.
-plt.figure(figsize=(8,4))
-
-plt.plot(D1['x_atc']/1000.,D1['h_li'],c='indianred')
-plt.plot(D2['x_atc']/1000.,D2['h_li'],c='steelblue')
-plt.ylabel('Elevation (m)')
-plt.xlabel('Along-Track Distance (km)')
-
-plt.tight_layout()
-
-# Get segment ids from the loaded dictionaries
-x1,h1 = fill_seg_ids(D1['x_atc'],D1['h_li'],D1['segment_id'])
-x2,h2 = fill_seg_ids(D2['x_atc'],D2['h_li'],D2['segment_id'])
-
-# Smooth and differentiate the elevation product (this is a preprocessing step)
-running_avg_window = 100
-h1_filt = filt(x1, h1, running_avg_window, filter_type = 'running_average')
-h2_filt = filt(x2, h2, running_avg_window, filter_type = 'running_average')
-dh1 = differentiate(x1,h1)
-dh2 = differentiate(x2,h2)
-dh1_filt = differentiate(x1,h1_filt)
-dh2_filt = differentiate(x2,h2_filt)
-
-
-
-plt.figure(figsize=(8,6))
-
-# Plot smoothed surface elevation
-plt.subplot(211)
-plt.tick_params(labelbottom=False,bottom=False)
-plt.plot(x1/1000.,h1,c='grey')
-plt.plot(x1/1000.,h1_filt,c='k')
-plt.ylabel('Elevation (m)')
-
-# Plot the surface slope
-plt.subplot(212)
-plt.plot(x1/1000.,dh1,c='grey')
-plt.plot(x1/1000.,dh1_filt,c='k')
-plt.xlabel('Along-Track Distance (km)')
-plt.ylabel('Surface Slope (m/m)')
-
-plt.tight_layout()
-
-# Calculate time offset
-dt = time_diff(D1,D2)
-
-### Control the correlation step:
-segment_length = 2000 # meters, how wide is the window we are correlating in each step
-search_width = 1000 # meters, how far in front of and behind the window to check for correlation
-along_track_step = 100 # meters; how much to jump between each consecutive velocity determination
-max_percent_nans = 10 # Maximum % of segment length that can be nans and still do the correlation step
-
-x_atc, lats, lons, h_li_raw, h_li_raw_NoNans, h_li, h_li_diff, times, min_seg_ids, segment_ids, cycles_this_rgt, x_ps, y_ps = \
-    load_data_by_rgt(rgt = '0848', path_to_data = data_dir, product = 'ATL06', \
-    filter_type = 'running_average', running_avg_window = running_avg_window, \
-    format = 'hdf5')
-
-
-cycles_this_rgt = sorted(cycles_this_rgt)
-cycle1 = cycles_this_rgt[0]
-cycle2 = cycles_this_rgt[1]
-
-# dt = time_diff # Currently coded to use hdf5 data dictionary;
-### Timing of each cycle in the current velocity determination
-t1_string = times[cycle1]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
-t1 = Time(t1_string)
-t2_string = times[cycle2]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
-t2 = Time(t2_string)
-
-### Which beams to process
-beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
-
-### Elapsed time between cycles
-dt = (t2 - t1).jd # difference in julian days
-
-###
-rgt = '0848'
-
-dx = 20
-velocities, correlations, lags, midpoints_x_atc, midpoints_xy, midpoints_lons, midpoints_lats = \
-    velocity(x_atc, h_li_diff, lats, lons, segment_ids, beams, cycle1, cycle2, \
-    segment_length, search_width, along_track_step, max_percent_nans, dx)
-
-
-
-from matplotlib.gridspec import GridSpec
-
-beam = 'gt1l'
-x1 = x_atc['03'][beam]
-x2 = x_atc['04'][beam]
-h1 = h_li['03'][beam]
-h2 = h_li['04'][beam]
-dh1 = h_li_diff['03'][beam]
-dh2 = h_li_diff['04'][beam]
-vel_xs = midpoints_x_atc[rgt][beam]
-velocs = velocities[rgt][beam]
-
-plt.figure(figsize=(8,4))
-gs = GridSpec(2,2)
-
-# Plot the elevation profiles again
-plt.subplot(gs[0,0])
-plt.tick_params(bottom=False,labelbottom=False)
-plt.plot(x1/1000.-29000,h1,'.',c='indianred')
-plt.plot(x2/1000.-29000,h2,'.',c='steelblue',ms=3)
-plt.ylabel('Elevation (m)')
-plt.title('ATL06',fontweight='bold') -plt.xlim(80,580) - -# Plot the slopes again -plt.subplot(gs[1,0]) -plt.tick_params(bottom=False,labelbottom=False) -plt.plot(x1/1000.-29000,dh1,'.',c='indianred') -plt.plot(x2/1000.-29000,dh2,'.',c='steelblue',ms=3) -plt.ylim(-.05,.05) -plt.ylabel('Surface Slope (m/m)') -plt.xlim(80,580) - -# Plot the calculated velocities along track -ax5 = plt.subplot(gs[0,1]) -plt.plot(vel_xs/1000.-29000,velocs,'.',c='k',label='ATL06') -plt.ylabel('Velocity (m/yr)') -plt.xlabel('Along-Track Distance (km)') -plt.xlim(80,580) -plt.ylim(-500,1500) - -plt.tight_layout() - - - - - - - -
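
Editor's note on the nan handling introduced in PATCH 2/4: interpolate_nans now runs
the pandas linear interpolation twice, first with limit_direction='forward' and then
with limit_direction='backward', so interior gaps are interpolated and the nans at the
very start and end of each beam are filled as well, while the untouched input is kept
in data['h_li_RAW'] for the %nans check in correlate_single_x1. The sketch below is a
minimal, self-contained illustration of that two-pass behavior; it is not part of any
patch above, assumes only numpy and pandas, and uses made-up h_li values. In recent
pandas, a single call with limit_direction='both' should be equivalent.

# sketch: two-pass nan filling as in interpolate_nans (synthetic data)
import numpy as np
import pandas as pd

h_li = pd.Series([np.nan, 10.0, np.nan, 12.0, np.nan])
h_li_RAW = h_li.copy()  # keep the raw series so nan counts stay meaningful

# forward pass: interpolates interior nans and extends past the last valid value
h_li = h_li.interpolate(method='linear', limit_direction='forward')
# backward pass: fills the leading nans the forward pass cannot reach
h_li = h_li.interpolate(method='linear', limit_direction='backward')

print(h_li_RAW.isna().sum())  # 3 nans in the raw data
print(h_li.values)            # [10. 10. 11. 12. 12.]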