Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Flood Timeseries Dataset #85

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions sample_data/use_cases/boston_floods/datasets.json
Original file line number Diff line number Diff line change
Expand Up @@ -240,6 +240,24 @@
]
}
},
{
"name": "Boston Flood Timeseries",
"description": "46-hour flood simulation over the Boston Harbor Watershed",
"category": "flood",
"type": "raster",
"files": [
{
"url": "https://data.kitware.com/api/v1/item/6564cc5ac5a2b36857ad16cf/download",
"path": "boston/flood_timeseries.zip",
"metadata": {
"source": "Simulated by Jack Watson at Northeastern University"
}
}
],
"style_options": {
"transparency_threshold": -1
}
},
{
"name": "DC Metro",
"description": "DC Metro Lines and Stations",
Expand Down
3 changes: 2 additions & 1 deletion sample_data/use_cases/boston_floods/projects.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@
"Boston Zip Codes",
"Boston Sea Level Rises",
"Boston 10-Year Flood Events",
"Boston 100-Year Flood Events"
"Boston 100-Year Flood Events",
"Boston Flood Timeseries"
]
},
{
Expand Down
3 changes: 3 additions & 0 deletions uvdat/core/tasks/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ def convert_dataset(
dataset.processing = True
dataset.save()

# remove any existing generated files
FileItem.objects.filter(dataset=dataset, metadata__generated=True).delete()

if dataset.dataset_type == dataset.DatasetType.RASTER:
RasterMapLayer.objects.filter(dataset=dataset).delete()
for file_to_convert in FileItem.objects.filter(dataset=dataset):
Expand Down
56 changes: 55 additions & 1 deletion uvdat/core/tasks/map_layers.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import json
import os
from pathlib import Path
import tempfile
import zipfile
Expand All @@ -11,7 +12,7 @@
import shapefile
from webcolors import name_to_hex

from uvdat.core.models import RasterMapLayer, VectorMapLayer
from uvdat.core.models import FileItem, RasterMapLayer, VectorMapLayer
from uvdat.core.models.map_layers import VectorFeature


Expand Down Expand Up @@ -46,8 +47,61 @@ def add_styling(geojson_data, style_options):
return geopandas.GeoDataFrame.from_features(features)


def split_raster_zip(file_item, style_options):
    """Create a FileItem (and RasterMapLayer) for each raster image in a zip.

    NOTE: This implementation is not abstract; it was built specifically for
    the Boston Flood Timeseries dataset, which ships one VRT file per
    timestep inside a single zip archive. Non-zip FileItems are ignored.

    Parameters:
        file_item: the FileItem holding the uploaded zip archive.
        style_options: dict passed through to create_raster_map_layer.
    """
    import large_image_converter

    # Guard clause: only zip archives can be split.
    if file_item.file_type != 'zip':
        return

    # Push the original archive behind the generated frames in ordering.
    file_item.index = -1
    file_item.save()

    with tempfile.TemporaryDirectory() as temp_dir:
        archive_path = Path(temp_dir, 'archive.zip')
        extracted_path = Path(temp_dir, 'extracted')
        converted_path = Path(temp_dir, 'converted')

        # Copy the archive out of the storage backend onto local disk.
        with open(archive_path, 'wb') as archive_file:
            with file_item.file.open('rb') as stored:
                archive_file.write(stored.read())

        # Unzip everything.
        with zipfile.ZipFile(archive_path) as zip_archive:
            zip_archive.extractall(extracted_path)

        # Convert each VRT to a COG TIFF; filenames determine frame order.
        vrt_files = sorted(extracted_path.glob('**/*.vrt'))
        converted_path.mkdir(parents=True, exist_ok=True)
        for index, vrt_filepath in enumerate(vrt_files):
            # with_suffix only touches the extension; the previous
            # name.replace('vrt', 'tiff') would corrupt any filename that
            # merely contained the substring 'vrt'.
            cog_filepath = converted_path / vrt_filepath.with_suffix('.tiff').name
            large_image_converter.convert(str(vrt_filepath), str(cog_filepath))
            cog_size = os.path.getsize(cog_filepath)
            # Copy — don't mutate — the source metadata, so each generated
            # FileItem owns its own dict and the original item is unchanged.
            metadata = dict(file_item.metadata or {})
            metadata['generated'] = True
            new_file_item = FileItem.objects.create(
                name=cog_filepath.name,
                file_size=cog_size,
                file_type='tiff',
                index=index,
                # inherit remaining fields from the original FileItem
                dataset=file_item.dataset,
                chart=file_item.chart,
                metadata=metadata,
            )
            with open(cog_filepath, 'rb') as cog:
                # FieldFile.save expects a name string, not a Path object.
                new_file_item.file.save(cog_filepath.name, ContentFile(cog.read()))
            if cog_size > 0:
                create_raster_map_layer(new_file_item, style_options)


def create_raster_map_layer(file_item, style_options):
"""Save a RasterMapLayer from a FileItem's contents."""
if style_options is None:
style_options = {}
if file_item.file_type == 'zip':
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Similar to my other comment, if the logic for extracting this data is specific to the boston flood timeseries dataset, a comment here explaining that would be good. That way, in the future, we know this is not general behavior.

split_raster_zip(file_item, style_options)
return

import large_image_converter

new_map_layer = RasterMapLayer.objects.create(
Expand Down
41 changes: 41 additions & 0 deletions web/src/components/OptionsDrawerContents.vue
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ export default {
const layerRange = ref<number[]>([]);
const colormapRange = ref<number[]>([]);
const applyToAll = ref<boolean>(true);
const ticker = ref();

const allowOpacityModification = computed(
() =>
Expand Down Expand Up @@ -220,6 +221,37 @@ export default {
opacity.value = 1;
}

// Stop any running playback/rewind timer and clear its handle.
function pause() {
  if (ticker.value !== undefined) {
    clearInterval(ticker.value);
  }
  ticker.value = undefined;
}
// Step forward through the current dataset's layers every 2 seconds,
// stopping automatically once the last layer is reached.
function play() {
  if (!currentDataset.value?.map_layers) return;
  pause();
  ticker.value = setInterval(() => {
    const layers = currentDataset.value?.map_layers;
    const atEnd =
      !layers || currentDatasetLayerIndex.value >= layers.length - 1;
    if (atEnd) {
      pause();
    } else {
      currentDatasetLayerIndex.value += 1;
    }
  }, 2000);
}
// Step backward through the layers every 2 seconds until the first layer.
function rewind() {
  pause();
  ticker.value = setInterval(() => {
    if (currentDatasetLayerIndex.value <= 0) {
      pause();
      return;
    }
    currentDatasetLayerIndex.value -= 1;
  }, 2000);
}

watch(colormap, updateColormap);
watch(opacity, updateLayerOpacity);

Expand All @@ -245,6 +277,9 @@ export default {
getNetworkNodeName,
updateColormap,
resetNetwork,
play,
pause,
rewind,
};
},
};
Expand Down Expand Up @@ -283,8 +318,14 @@ export default {
dense
min="0"
step="1"
hide-details
:max="currentDataset?.map_layers.length - 1"
/>
<div style="width: 100%; text-align: center">
<v-btn @click="play" icon="mdi-play" variant="text" />
<v-btn @click="pause" icon="mdi-pause" variant="text" />
<v-btn @click="rewind" icon="mdi-rewind" variant="text" />
</div>
<v-card-subtitle class="wrap-subtitle">
Current layer name: {{ currentLayerName || "Untitled" }}
</v-card-subtitle>
Expand Down
Loading