From ed3191e9ceaebb0789d0035f5d7fb2d551e89785 Mon Sep 17 00:00:00 2001
From: B Steele
Date: Mon, 29 Jan 2024 15:46:23 -0700
Subject: [PATCH 1/5] fix LS9
---
modeling/07_Landsat_9_GTB.Rmd | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/modeling/07_Landsat_9_GTB.Rmd b/modeling/07_Landsat_9_GTB.Rmd
index 95a460b..ae5576d 100644
--- a/modeling/07_Landsat_9_GTB.Rmd
+++ b/modeling/07_Landsat_9_GTB.Rmd
@@ -74,7 +74,7 @@ ee.Initialize(project = 'ee-ross-superior')
These assets were created in the 03_Train_Test_Split.Rmd file
-```{r}
+```{python}
training_ls9 = ee.FeatureCollection("projects/ee-ross-superior/assets/train-test/training_ls9")
testing_ls9 = ee.FeatureCollection("projects/ee-ross-superior/assets/train-test/testing_ls9")
```
@@ -165,8 +165,8 @@ the df.copy() function unlinks the original list from the new one. Silly python.
```{python}
accuracy_heads = class_values.copy()
accuracy_heads.extend(['GTB_accuracy', 'GTB_kappa'])
-landsat5_perf = fs_GTB_ls9.copy()
-landsat5_perf.extend([acc_values_GTB_ls9, k_GTB_ls9])
+landsat9_perf = fs_GTB_ls9.copy()
+landsat9_perf.extend([acc_values_GTB_ls9, k_GTB_ls9])
performance_collation = pd.DataFrame(
[landsat9_perf],
@@ -544,7 +544,7 @@ for d in range(date_length_9):
#Check how many existing tasks are running and take a break of 5 mins if it's >10
maximum_no_of_tasks(10, 5*60)
#Send next task.
- export_image.start()
+ export_image.start()
```
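+
+(`maximum_no_of_tasks` is a task-throttling helper defined elsewhere in the
+repository; a hypothetical version, using only documented Earth Engine
+task-list calls, might look like the sketch below.)
+
+```{python}
+import time
+import ee
+
+def maximum_no_of_tasks(max_tasks, wait_seconds):
+    # hypothetical sketch: wait until the number of active (ready or running)
+    # Earth Engine export tasks is at or below max_tasks, re-checking every
+    # wait_seconds seconds
+    while True:
+        active = sum(1 for task in ee.batch.Task.list() if task.active())
+        if active <= max_tasks:
+            return
+        time.sleep(wait_seconds)
+```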
From 79aeda1f3e5eb0619720e0fc9b47736f1ede6505 Mon Sep 17 00:00:00 2001
From: B Steele
Date: Wed, 31 Jan 2024 09:47:56 -0700
Subject: [PATCH 2/5] small tweaks for reproducibility and LS9 output
---
data/output/GTB_2024-01-08_l9_confusion.csv | 6 ++++++
data/output/GTB_2024-01-08_l9_training_confusion.csv | 6 ++++++
data/output/GTB_LS9_2024-01-08_performance_stats.csv | 2 ++
pySetup.R | 3 ++-
4 files changed, 16 insertions(+), 1 deletion(-)
create mode 100644 data/output/GTB_2024-01-08_l9_confusion.csv
create mode 100644 data/output/GTB_2024-01-08_l9_training_confusion.csv
create mode 100644 data/output/GTB_LS9_2024-01-08_performance_stats.csv
diff --git a/data/output/GTB_2024-01-08_l9_confusion.csv b/data/output/GTB_2024-01-08_l9_confusion.csv
new file mode 100644
index 0000000..fd11139
--- /dev/null
+++ b/data/output/GTB_2024-01-08_l9_confusion.csv
@@ -0,0 +1,6 @@
+class,cloud,openWater,lightNearShoreSediment,offShoreSediment,darkNearShoreSediment,mission
+cloud,59,1,0,0,0,Landsat 9
+openWater,0,16,0,4,0,Landsat 9
+lightNearShoreSediment,1,0,22,6,2,Landsat 9
+offShoreSediment,0,0,2,30,0,Landsat 9
+darkNearShoreSediment,0,0,2,4,7,Landsat 9
diff --git a/data/output/GTB_2024-01-08_l9_training_confusion.csv b/data/output/GTB_2024-01-08_l9_training_confusion.csv
new file mode 100644
index 0000000..4ccaf34
--- /dev/null
+++ b/data/output/GTB_2024-01-08_l9_training_confusion.csv
@@ -0,0 +1,6 @@
+class,cloud,openWater,lightNearShoreSediment,offShoreSediment,darkNearShoreSediment,mission
+cloud,124,0,0,0,0,Landsat 9
+openWater,0,47,0,2,0,Landsat 9
+lightNearShoreSediment,0,0,60,5,1,Landsat 9
+offShoreSediment,0,1,0,75,1,Landsat 9
+darkNearShoreSediment,0,0,3,1,22,Landsat 9
diff --git a/data/output/GTB_LS9_2024-01-08_performance_stats.csv b/data/output/GTB_LS9_2024-01-08_performance_stats.csv
new file mode 100644
index 0000000..b98af32
--- /dev/null
+++ b/data/output/GTB_LS9_2024-01-08_performance_stats.csv
@@ -0,0 +1,2 @@
+satellite,cloud,openWater,lightNearShoreSediment,offShoreSediment,darkNearShoreSediment,GTB_accuracy,GTB_kappa
+Landsat 9,0.9833333333333333,0.8648648648648648,0.7719298245614036,0.7894736842105263,0.6363636363636364,0.8589743589743589,0.8100193744810407
diff --git a/pySetup.R b/pySetup.R
index 07aadf9..a0c6fc0 100644
--- a/pySetup.R
+++ b/pySetup.R
@@ -1,5 +1,6 @@
-try(install_miniconda())
+options(timeout = 1000)
+try(install_miniconda(force = T))
# list python modules
py_modules = c('earthengine-api', 'pandas', 'xarray', 'rasterio', 'rioxarray', 'fiona', 'geopandas', 'geemap')
From b91c1f7942e7e232310f8e26592188a946345dea Mon Sep 17 00:00:00 2001
From: B Steele
Date: Wed, 31 Jan 2024 15:55:07 -0700
Subject: [PATCH 3/5] update methods/results/summary
---
Methods_Results_Summary.Rmd | 177 +++++++++++---
Methods_Results_Summary.html | 449 ++++++++++++++++++-----------------
2 files changed, 371 insertions(+), 255 deletions(-)
diff --git a/Methods_Results_Summary.Rmd b/Methods_Results_Summary.Rmd
index df5dfce..e1eb417 100644
--- a/Methods_Results_Summary.Rmd
+++ b/Methods_Results_Summary.Rmd
@@ -22,20 +22,47 @@ package_loader <- function(x) {
invisible(lapply(packages, package_loader))
-model_version = "2024-01-03"
+model_version = "2024-01-08"
```
# Introduction
-Remote sensing image classification is common in terrestrial applications (in particular, land use and land cover), but has not been applied in aquatic environments beyond general presence and absence of water and wetlands. The primary exception to the use of image classification in aquatic environments is assessing the presence of submerged aquatic vegetation ("SAV") (e.g., [@visser2018, @e.l.hestir2012]); however, these classification methods require high resolution imagery with high spectral resolution often from land-based high-resolution photography or unpersoned aerial vehicles ("UAVs").
-
-In the Great Lakes (GL) region, much of the use of image classification is completed using moderate resolution (e.g., Landsat, Sentinel, MODIS) satellite images, focusing on mapping the distribution and types of wetlands throughout the region ([@mohseni2023, @v.l.valenti2020]), as well as SAV distribution throughout the system [@wolter2005]. Most of these analyses focus on a relatively short temporal period (months to years), while a some span the entire Landsat archive from the mid '80s through the recent past (e.g., [@amani2022]).
-
-In the recent past, much attention has been paid to the apparent proliferation of algal blooms in some of the clearest lakes, including Lake Superior (cite). While detecting algal blooms from moderate-resolution satellite imagery is difficult due to low temporal frequency, time of day of acquisition, pixel size, and spectral band metrics (cite), as well as the lack of observed, spatially-explicit bloom observations to validate presence and absence, detecting sediment plumes (which often precede algal blooms) is relatively easy with just the red, green, and blue bands common on nearly all moderate-resolution satellites.
-
-In this analysis, we use the Landsat Collection 2 Surface Reflectance product archive (missions 5 through 9, [@vermote2016, @masek2006]) and the Sentinel 2 Surface Reflectance product archive [@drusch2012], a novel crowd-sourced label data set (eePlumB), and Google Earth Engine to create image classification models to create a time series of rasters that enumerate sediment plumes across the western arm of Lake Superior.
-
-```{r fig-aoi, fig.dpi = 300, fig.align="center", fig.width=2, fig.height=1, fig.cap = "Area of interest for this analysis in purple, consisting of a portion the western extent of Lake Superior, the Apostle Islands, and Chequamegon Bay."}
+Remote sensing image classification is common in terrestrial applications (in
+particular, land use and land cover), but has not been applied in aquatic
+environments beyond general presence and absence of water and wetlands. The
+primary exception to the use of image classification in aquatic environments is
+assessing the presence of submerged aquatic vegetation ("SAV") (e.g.,
+[@visser2018, @e.l.hestir2012]); however, these classification methods require
+imagery with high spatial and spectral resolution, often from land-based
+photography or uncrewed aerial vehicles ("UAVs").
+
+In the Great Lakes (GL) region, much of the use of image classification is
+completed using moderate resolution (e.g., Landsat, Sentinel, MODIS) satellite
+images, focusing on mapping the distribution and types of wetlands throughout
+the region ([@mohseni2023, @v.l.valenti2020]), as well as SAV distribution
+throughout the system [@wolter2005]. Most of these analyses focus on a
+relatively short temporal period (months to years), while some span the
+entire Landsat archive from the mid '80s through the recent past (e.g.,
+[@amani2022]).
+
+In the recent past, much attention has been paid to the apparent proliferation
+of algal blooms in some of the clearest lakes, including Lake Superior (cite).
+Detecting algal blooms from moderate-resolution satellite imagery is difficult
+due to low temporal frequency, time of day of acquisition, pixel size, and
+spectral band characteristics (cite), as well as the lack of observed,
+spatially explicit bloom records to validate presence and absence. In contrast,
+detecting sediment plumes (which often precede algal blooms) is relatively easy
+with just the red, green, and blue bands common to nearly all
+moderate-resolution satellites.
+
+In this analysis, we use the Landsat Collection 2 Surface Reflectance product
+archive (missions 5 through 9, [@vermote2016, @masek2006]), the Sentinel-2
+Surface Reflectance product archive [@drusch2012], a novel crowd-sourced label
+data set (eePlumB), and Google Earth Engine to build image classification
+models that produce a time series of rasters enumerating sediment plumes
+across the western arm of Lake Superior.
+
+```{r fig-aoi, fig.dpi = 300, fig.align="center", fig.width=2, fig.height=1}
aoi <- read_sf("data/aoi/Superior_AOI_modeling.shp") %>%
st_make_valid()
@@ -46,47 +73,92 @@ tm_basemap("CartoDB.Positron") +
```
+:"Area of interest for this analysis in purple, consisting of a portion the
+western extent of Lake Superior, the Apostle Islands, and Chequamegon Bay."
+
# Methods
## eePlumB
-Using the overarching architecture presented in the Global Rivers Obstruction Database (GROD) [@yang2022] to engage volunteer observers, we croudsourced class labels for Landsat and Sentinel-2 images for the following classes: 'cloud', 'open water', 'light near shore sediment', 'dark near shore sediment', 'offshore sediment', 'shoreline contamination', 'other', and 'algae bloom' using our Earth Engine Plume and Bloom labeling interface ("eePlumB"). Dates for labeling were limited to the months of April through November to avoid ice-on.
-
-In order to eliminate outlier band information and reduce noise in the input for our models, the second and ninety-eighth percentiles were calculated for each mission-band combination and label data associated with values outside of those cutoffs were dropped from the analysis. [[Could add the `02_label_class_summaries.Rmd` as supplemental.]]
+Using the overarching architecture presented in the Global Rivers Obstruction
+Database (GROD) [@yang2022] to engage volunteer observers, we crowdsourced
+class labels for Landsat and Sentinel-2 images for the following classes:
+'cloud', 'open water', 'light near shore sediment', 'dark near shore sediment',
+'offshore sediment', 'shoreline contamination', 'other', and 'algae bloom'
+using our Earth Engine Plume and Bloom labeling interface ("eePlumB"). Dates
+for labeling were limited to the months of April through November to avoid
+ice-on.
+
+In order to eliminate outlier band information and reduce noise in the input
+for our models, the second and ninety-eighth percentiles were calculated for
+each mission-band combination and label data associated with values outside of
+those cutoffs were dropped from the analysis. [[Could add the
+`02_label_class_summaries.Rmd` as supplemental.]]
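+
+A minimal sketch of this filter, assuming a long-format pandas data frame
+`labels` with `mission`, `band`, and `value` columns (illustrative names, not
+the repository's own objects):
+
+```{python}
+import pandas as pd
+
+def drop_band_outliers(labels, lower = 0.02, upper = 0.98):
+    # per mission-band combination, compute the 2nd and 98th percentile cutoffs
+    cutoffs = (labels
+               .groupby(["mission", "band"])["value"]
+               .quantile([lower, upper])
+               .unstack())
+    joined = labels.join(cutoffs, on = ["mission", "band"])
+    # keep only labels whose band value falls inside the cutoffs
+    keep = (joined["value"] >= joined[lower]) & (joined["value"] <= joined[upper])
+    return labels.loc[keep]
+```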
## Model development
-We used the built-in gradient tree boost ("GTB") ee.Classifier() method within Google Earth Engine to create classification models from the crowd-sourced label data. Label data were randomly split into training (70%) and test (30%) data sets, with no special handling procedures for classes or satellite missions. Data were examined to assure that all classes and missions were present in both the training and testing data sets.
-
-GTB models for each mission were trained independently on the rescaled band data from red, green, blue, near infrared, and both shortwave infrared bands for Landsat missions to classify 5 categories: cloud, open water, light near shore sediment, dark near shore sediment, and offshore sediment. For Sentinel-2, the bands used to develop the classifier were red, green, blue, red edge 1-3, near infrared, and both short-wave infrared bands. We did not tune the hyperparameters for the GTB model, as performance was already acceptable for discerning open water from sediment plume using 10 trees.
+We used the built-in gradient tree boost ("GTB") ee.Classifier() method within
+Google Earth Engine to create classification models from the crowd-sourced
+label data. Label data were randomly split into training (70%) and test (30%)
+data sets, with no special handling procedures for classes or satellite
+missions. Data were examined to assure that all classes and missions were
+present in both the training and testing data sets.
+
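+One common way to produce such a split in Earth Engine (a hypothetical sketch;
+the project performs the split in `03_Train_Test_Split.Rmd`, and the label
+asset path below is invented for illustration):
+
+```{python}
+import ee
+ee.Initialize(project = 'ee-ross-superior')
+
+# hypothetical collection of crowd-sourced label points
+labels = ee.FeatureCollection("projects/ee-ross-superior/assets/eePlumB_labels")
+
+# attach a uniform random number to each feature, then split 70/30
+labels = labels.randomColumn('random', seed = 42)
+training = labels.filter(ee.Filter.lt('random', 0.7))
+testing = labels.filter(ee.Filter.gte('random', 0.7))
+```
+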
+GTB models for each mission were trained independently on the rescaled band
+data from red, green, blue, near infrared, and both shortwave infrared bands
+for Landsat missions to classify 5 categories: cloud, open water, light near
+shore sediment, dark near shore sediment, and offshore sediment. For
+Sentinel-2, the bands used to develop the classifier were red, green, blue, red
+edge 1-3, near infrared, and both shortwave infrared bands. We did not tune
+the hyperparameters for the GTB model, as performance was already acceptable
+for discerning open water from sediment plume using 10 trees.
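+
+As a minimal sketch (not the project's exact script), training one of these
+classifiers with the Earth Engine Python API might look like the following;
+the training asset path is the one loaded earlier, while the band names and
+the 'class' property name are assumptions:
+
+```{python}
+import ee
+ee.Initialize(project = 'ee-ross-superior')
+
+# labeled training points exported by 03_Train_Test_Split.Rmd
+training_ls9 = ee.FeatureCollection(
+    "projects/ee-ross-superior/assets/train-test/training_ls9")
+
+# blue, green, red, NIR, SWIR1, SWIR2 (assumed names; the pipeline uses rescaled bands)
+ls9_bands = ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7']
+
+# 10-tree gradient tree boost classifier, untuned as described above
+gtb_ls9 = (ee.Classifier.smileGradientTreeBoost(numberOfTrees = 10)
+           .train(features = training_ls9,
+                  classProperty = 'class',      # assumed label property name
+                  inputProperties = ls9_bands))
+```
+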
## Image classification
### Image Pre-processing
-Mosaic-ed images were made for each mission-date as mean band values where any two path row or tiles overlapped. All Landsat mission-date images were pre-processed to remove saturated pixels, and only images with an image quality greater than or equal to 7 were included. Sentinel-2 bands that had a pixel resoluiton greater than 10m x 10m were reprojected (downsampled) to 10m x 10m pixel sizes so that the GTB model could be applied to the composite images more efficiently. No further pre-processing was completed on the Sentinel-2 data.
-
-Three areas of interest (AOIs) were used in this analysis: the complete AOI, the AOI without shoreline contamination, and the AOI with shoreline contamination. The area of shoreline contamination was defined as any area within 60 meters of a volunteer-identified pixel with shoreline contamination. We assumed that shoreline contamination was consistent throughout the analysis and was not specific to any particular satellite or time period.
+Mosaicked images were created for each mission-date as mean band values where
+any two path-rows or tiles overlapped. All Landsat mission-date images were
+pre-processed to remove saturated pixels, and only images with an image quality
+greater than or equal to 7 were included. Sentinel-2 bands that had a pixel
+resolution greater than 10m x 10m were reprojected (downsampled) to 10m x 10m
+pixel sizes so that the GTB model could be applied to the composite images more
+efficiently. No further pre-processing was completed on the Sentinel-2 data.
+
+Three areas of interest (AOIs) were used in this analysis: the complete AOI,
+the AOI without shoreline contamination, and the AOI with shoreline
+contamination. The area of shoreline contamination was defined as any area
+within 60 meters of a volunteer-identified pixel with shoreline contamination.
+We assumed that shoreline contamination was consistent throughout the analysis
+and was not specific to any particular satellite or time period.
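+
+For illustration only, a single-date Sentinel-2 mean mosaic and the 10 m
+downsampling might be sketched as follows (the collection ID, date, and
+bounding box are assumptions, not the project's processing scripts):
+
+```{python}
+import ee
+ee.Initialize(project = 'ee-ross-superior')
+
+# rough illustrative bounding box over the western arm of Lake Superior
+aoi = ee.Geometry.Rectangle([-92.3, 46.5, -90.3, 47.3])
+
+# all Sentinel-2 tiles acquired on a single date over the AOI
+s2_day = (ee.ImageCollection('COPERNICUS/S2_SR_HARMONIZED')
+          .filterBounds(aoi)
+          .filterDate('2023-06-01', '2023-06-02'))
+
+# mean band values wherever two or more tiles overlap
+s2_mosaic = s2_day.mean()
+
+# force the 20 m bands onto the 10 m grid of band B2 so the classifier can be
+# applied to a single-resolution composite
+proj_10m = s2_day.first().select('B2').projection()
+s2_mosaic_10m = s2_mosaic.reproject(crs = proj_10m)
+```
+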
### Model application and summaries
-Each GTB model was applied to the corresponding satellite image stack and two data types were output: a tabular data summary of the area classified and the total area of each class for all three AOIs, as well as a .tif raster at the resolution the GTB was applied (10m for Sentinel-2 and 30m for Landsat) for each classified mission-date image. The .tif rasters were labeled by pixel with the following values:
-
-| Pixel Value \| Pixel Description \|
+Each GTB model was applied to the corresponding satellite image stack and two
+data types were output: a tabular data summary of the area classified and the
+total area of each class for all three AOIs, as well as a .tif raster at the
+resolution at which the GTB was applied (10m for Sentinel-2 and 30m for
+Landsat) for each classified mission-date image. The .tif rasters were labeled
+by pixel with the following values:
-\|-------------\|-----------------------------------------\| \| 1 \| cloud \| \| 2 \| open water \| \| 3 \| light, near-shore sediment \| \| 4 \| dark, near-shore sediment \| \| 5 \| offshore sediment \| \| 0 \| out of area/masked for saturated pixels \|
| Pixel Value | Pixel Description                        |
|-------------|------------------------------------------|
| 1           | cloud                                    |
| 2           | open water                               |
| 3           | light, near-shore sediment               |
| 4           | dark, near-shore sediment                |
| 5           | offshore sediment                        |
| 0           | out of area/masked for saturated pixels  |
-: Pixel values and description for the GTB tif model output.
+: Pixel values and description for the GTB tif model output.
## Model evaluation metrics
-Models were evaluated through error matrices, kappa statistics, and F1 statistics for each class.
+Models were evaluated through error matrices, kappa statistics, and F1
+statistics for each class.
- error matrix - testing: given the test data, does the model assign the correct class? These are tibble-style summaries where the model-assigned class and label class are compared.
- kappa statistic: an indicator of how much better or worse a model performs than random chance. Scores range from -1 to 1, where 0 is equivalent to random chance, positive values indicate better performance than random chance, and negative values indicate poorer performance than random chance.
- F1 score: the harmonic mean of precision and recall per class (beta = 1, hence F1, where precision and recall are evenly weighted). A score of 0 means the model cannot predict the correct class; a score of 1 means the model perfectly predicts the correct class.
-Models were evaluated as 5-class categories and 3-class categories where all sediment categories were compiled into a single class.
+Models were evaluated as 5-class categories and 3-class categories where all
+sediment categories were compiled into a single class.
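+
+As a worked sketch of the kappa and F1 calculations (a sketch only, not the
+repository's implementation):
+
+```{python}
+import numpy as np
+
+def kappa_and_f1(cm):
+    # cm: square error matrix of label class vs. model-assigned class
+    # (orientation does not change either statistic)
+    cm = np.asarray(cm, dtype = float)
+    n = cm.sum()
+    observed = np.trace(cm) / n                                    # overall accuracy
+    expected = (cm.sum(axis = 0) * cm.sum(axis = 1)).sum() / n**2  # chance agreement
+    kappa = (observed - expected) / (1 - expected)
+    precision = np.diag(cm) / cm.sum(axis = 0)
+    recall = np.diag(cm) / cm.sum(axis = 1)
+    f1 = 2 * precision * recall / (precision + recall)
+    return kappa, f1
+
+# Landsat 9 test error matrix from data/output/GTB_2024-01-08_l9_confusion.csv
+ls9_test = [[59, 1, 0, 0, 0],
+            [0, 16, 0, 4, 0],
+            [1, 0, 22, 6, 2],
+            [0, 0, 2, 30, 0],
+            [0, 0, 2, 4, 7]]
+kappa, f1 = kappa_and_f1(ls9_test)
+# kappa ~ 0.81 and cloud F1 ~ 0.98, matching GTB_LS9_2024-01-08_performance_stats.csv
+```
+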
# Results
@@ -122,7 +194,12 @@ filtered_label_table <- filtered_labels %>%
label_table_join <- full_join(label_table, filtered_label_table)
```
-The collated crowdsourced label dataset consisted of `r nrow(labels)` labels for across all classes. There were `r nrow(ml_labels)` labels that were part of the classes of interest (cloud, open water, sediment). After filtering for outliers from each subset of mission-specific labels, there were `r nrow(filtered_labels)` labels with complete band information. Table 1 presents a break down of the labels.
+The collated crowdsourced label dataset consisted of `r nrow(labels)` labels
+across all classes. There were `r nrow(ml_labels)` labels that were part of
+the classes of interest (cloud, open water, sediment). After filtering for
+outliers from each subset of mission-specific labels, there were `r
+nrow(filtered_labels)` labels with complete band information. Table 1 presents
+a breakdown of the labels.
```{r, echo = F}
gt(label_table_join) %>%
@@ -155,7 +232,13 @@ md_summ_table_filt <- mission_date_summary_filtered %>%
summary_table_join <- full_join(md_summ_table, md_summ_table_filt)
```
-Labels were present from `r nrow(mission_date_summary)` individual mission-date combinations spanning the dates of `r min(mission_date_summary$date)` to `r max(mission_date_summary$date)`. Labels in the filtered dataset were present from `r nrow(mission_date_summary_filtered)` mission-date combinations spanning the dates `r min(mission_date_summary_filtered$date)` to `r max(mission_date_summary_filtered$date)`. See Table 2 for a complete breakdown of labels by mission-date combination.
+Labels were present from `r nrow(mission_date_summary)` individual mission-date
+combinations spanning the dates of `r min(mission_date_summary$date)` to `r
+max(mission_date_summary$date)`. Labels in the filtered dataset were present
+from `r nrow(mission_date_summary_filtered)` mission-date combinations spanning
+the dates `r min(mission_date_summary_filtered$date)` to `r
+max(mission_date_summary_filtered$date)`. See Table 2 for a complete breakdown
+of labels by mission-date combination.
```{r, echo = F}
gt(summary_table_join) %>%
@@ -167,10 +250,26 @@ gt(summary_table_join) %>%
## Model evaluation
-Models performance was acceptable across open water, cloud, and discrete sediment categories. All statistics presented in this section represent summary statistics for classes from the testing set. Kappa statistic across all missions was always greater than 0.8, indicating much better performance than random assignment (Table 4). The F1 score, balanced equally between precision and recall, was reasonable across all categories and missions with the minimum F1 score being 0.62 for "dark near-shore sediment" for Landsat 7 (Table 4). Cloud and open water classification F1 scores were always greater than 0.86 (Table 4).
+Model performance was acceptable across open water, cloud, and discrete
+sediment categories. All statistics presented in this section represent summary
+statistics for classes from the testing set. The kappa statistic across all
+missions was always greater than 0.8, indicating much better performance than
+random assignment (Table 4). The F1 score, balanced equally between precision
+and recall, was reasonable across all categories and missions with the minimum
+F1 score being 0.62 for "dark near-shore sediment" for Landsat 7 (Table 4).
+Cloud and open water classification F1 scores were always greater than 0.86
+(Table 4).
```{r, echo = F}
-perf_metrics <- read_csv(paste0("data/output/GTB_", model_version, "_performance_stats.csv")) %>%
+# get a list of the performance metrics files
+perf_metrics_list <- list.files("data/output/",
+ pattern = "_performance_stats.csv",
+ full.names = T)
+# filter for only those for the desired model version
+perf_metrics_list <- perf_metrics_list[grepl(model_version, perf_metrics_list)]
+
+perf_metrics <- map_dfr(perf_metrics_list, read_csv) %>%
+ #drop accuracy, since it's not meaningful for uneven groups
select(-GTB_accuracy) %>%
mutate(across(c(cloud:GTB_kappa),
~ round(., 2))) %>%
@@ -189,11 +288,14 @@ gt(perf_metrics) %>%
tab_footnote('Table 4. Summary of F1 scores per class and across-class kappa statistic for the GTB model.')
```
-When the sediment classes were aggregated to a single sediment class, F1 scores and the kappa statistic increased dramatically (Table 5).
+When the sediment classes were aggregated to a single sediment class, F1 scores
+and the kappa statistic increased dramatically (Table 5).
```{r echo = F}
# load confusion files
-confusion_files <- list.files("data/output", pattern = c(model_version), full.names = T)
+confusion_files <- list.files("data/output",
+ pattern = c(model_version),
+ full.names = T)
confusion_files <- confusion_files[!grepl("performance", confusion_files)]
confusion_files <- confusion_files[!grepl("training", confusion_files)]
# and read them in
@@ -308,9 +410,16 @@ gt(summary_simple) %>%
# Discussion
-The GTB model was applied to all images in the Landsat and Sentinel 2 stacks, irregardless of time of year and presence/absence of ice. Classified images should only be used during ice-free periods, as no attempt was made to mask ice or to classify ice. It is important to note that evaluation of the GTB model was only done on the available by-pixel labels and that accuracy at classification edges may not be precise.
+The GTB model was applied to all images in the Landsat and Sentinel-2 stacks,
+regardless of time of year and presence/absence of ice. Classified images
+should only be used during ice-free periods, as no attempt was made to mask ice
+or to classify ice. It is important to note that evaluation of the GTB model
+was only done on the available by-pixel labels and that accuracy at
+classification edges may not be precise.
-In some cases, hazy dispersed clouds are incorrectly classified as off-shore sediment. Caution should be used clouds characterize a large proportion of the AOI.
+In some cases, hazy dispersed clouds are incorrectly classified as off-shore
+sediment. Caution should be used when clouds characterize a large proportion of
+the AOI.
# References
diff --git a/Methods_Results_Summary.html b/Methods_Results_Summary.html
index 1c18430..c18d14a 100644
--- a/Methods_Results_Summary.html
+++ b/Methods_Results_Summary.html
@@ -11,7 +11,7 @@
-
+
Methods and Results Summary
@@ -5600,7 +5600,7 @@
Methods and Results Summary
ROSSyndicate
-2024-01-08
+2024-01-31
@@ -5644,8 +5644,11 @@ Introduction
create image classification models to create a time series of rasters
that enumerate sediment plumes across the western arm of Lake
Superior.
-
-
+
+
+:“Area of interest for this analysis in purple, consisting of a
+portion the western extent of Lake Superior, the Apostle Islands, and
+Chequamegon Bay.”
Methods
@@ -5658,7 +5661,8 @@
eePlumB
‘open water’, ‘light near shore sediment’, ‘dark near shore sediment’,
‘offshore sediment’, ‘shoreline contamination’, ‘other’, and ‘algae
bloom’ using our Earth Engine Plume and Bloom labeling interface
-(“eePlumB”).
+(“eePlumB”). Dates for labeling were limited to the months of April
+through November to avoid ice-on.
In order to eliminate outlier band information and reduce noise in
the input for our models, the second and ninety-eighth percentiles were
calculated for each mission-band combination and label data associated
@@ -5782,20 +5786,20 @@
Label dataset
from each subset of mission-specific labels, there were 5255 labels with
complete band information. Table 1 presents a break down of the
labels.
-
-
@@ -6299,20 +6303,20 @@
Label dataset
dataset were present from 102 mission-date combinations spanning the
dates 1984-07-07 to 2023-04-11. See Table 2 for a complete breakdown of
labels by mission-date combination.
-
-
@@ -6734,20 +6738,20 @@
Model evaluation
categories and missions with the minimum F1 score being 0.62 for “dark
near-shore sediment” for Landsat 7 (Table 4). Cloud and open water
classification F1 scores were always greater than 0.86 (Table 4).
-
-
@@ -7174,9 +7178,9 @@
Model evaluation
1.00 |
0.98 |
0.81 |
-0.89 |
-0.61 |
-0.91 |
+0.90 |
+0.71 |
+0.92 |