Skip to content

Commit

Permalink
Merge branch 'release/1.33.4'
Browse files Browse the repository at this point in the history
  • Loading branch information
rajadain committed Mar 3, 2022
2 parents d74c651 + 9947768 commit 5b7dccc
Show file tree
Hide file tree
Showing 19 changed files with 262 additions and 156 deletions.
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ First, ensure that you have a set of Amazon Web Services (AWS) credentials with
$ aws configure --profile mmw-stg
```

You will also need to set the MMW Datahub AWS credentials as your default. These are stored in LastPass under the name `MMW Azavea DataHub AWS`. Ensure that the AWS credentials file has universal read permissions.

Ensure you have the [vagrant-disksize](https://github.com/sprotheroe/vagrant-disksize) plugin installed:

```bash
Expand Down
6 changes: 5 additions & 1 deletion doc/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,11 @@

## Python Jupyter notebooks demonstrating the use of the Model My Watershed geoprocessing API

2018-8-19. Created by [Emilio Mayorga](https://github.com/emiliom/), University of Washington.
The following Jupyter Notebooks provide example workflows for the Model My Watershed (ModelMW) public web services Application Programming Interface (API) for automating some of the workflows that are provided by the web application.

Detailed ModelMW web service API documentation is provided at: https://modelmywatershed.org/api/docs/

Example notebooks were first created by [Emilio Mayorga](https://github.com/emiliom/) (University of Washington) on 2018-08-19, and have been maintained to work with subsequent changes to the API.

1. [MMW_API_watershed_demo.ipynb](https://github.com/WikiWatershed/model-my-watershed/blob/develop/doc/MMW_API_watershed_demo.ipynb). Go [here](http://nbviewer.jupyter.org/github/WikiWatershed/model-my-watershed/blob/develop/doc/MMW_API_watershed_demo.ipynb) to view the functioning, interactive Folium map at the end of the notebook.

Expand Down
6 changes: 6 additions & 0 deletions src/mmw/apps/core/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,12 @@
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')


class JobStatus:
    """String constants for the lifecycle states stored on Job.status."""

    STARTED = 'started'    # job accepted and dispatched for processing
    COMPLETE = 'complete'  # job finished; result saved on the Job row
    FAILED = 'failed'      # job errored; see Job.error and Job.traceback


class Job(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL,
on_delete=models.SET_NULL,
Expand Down
6 changes: 3 additions & 3 deletions src/mmw/apps/core/tasks.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from django.utils.timezone import now
from celery import shared_task
from apps.core.models import Job
from apps.core.models import Job, JobStatus

import json
import logging
Expand Down Expand Up @@ -29,7 +29,7 @@ def save_job_error(request, exc, traceback, job_id):
job.error = exc
job.traceback = traceback or 'No traceback'
job.delivered_at = now()
job.status = 'failed'
job.status = JobStatus.FAILED
job.save()
except Exception as e:
logger.error('Failed to save job error status. Job will appear hung.'
Expand All @@ -47,5 +47,5 @@ def save_job_result(self, result, id, model_input):
job.delivered_at = now()
job.uuid = self.request.id
job.model_input = model_input
job.status = 'complete'
job.status = JobStatus.COMPLETE
job.save()
6 changes: 3 additions & 3 deletions src/mmw/apps/export/hydroshare.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import json

from io import StringIO
from io import BytesIO
from zipfile import ZipFile
from rauth import OAuth2Service
from urllib.parse import urljoin, urlparse
Expand Down Expand Up @@ -87,7 +87,7 @@ def add_files(self, resource_id, files):
{'name': 'String', 'contents': 'String'}
"""
zippath = resource_id + '.zip'
stream = StringIO()
stream = BytesIO()

# Zip all given files into the stream
with ZipFile(stream, 'w') as zf:
Expand Down Expand Up @@ -124,7 +124,7 @@ def get_project_snapshot(self, resource_id):
snapshot_path = 'mmw_project_snapshot.json'
try:
stream = self.getResourceFile(resource_id, snapshot_path)
fio = StringIO()
fio = BytesIO()
for chunk in stream:
fio.write(chunk)

Expand Down
2 changes: 1 addition & 1 deletion src/mmw/apps/export/tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ def create_resource(user_id, project_id, params):

for ext in SHAPEFILE_EXTENSIONS:
filename = f'/tmp/{resource}.{ext}'
with open(filename) as shapefile:
with open(filename, 'rb') as shapefile:
hs.addResourceFile(resource, shapefile, f'area-of-interest.{ext}')
os.remove(filename)

Expand Down
14 changes: 14 additions & 0 deletions src/mmw/apps/geoprocessing_api/exceptions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from rest_framework import status
from rest_framework.exceptions import APIException


class JobNotReadyError(APIException):
    """Raised when a caller references a prepare job that is still running.

    Rendered by DRF as HTTP 428 Precondition Required.
    """
    status_code = status.HTTP_428_PRECONDITION_REQUIRED
    default_code = 'precondition_required'
    default_detail = 'The prepare job has not finished yet.'


class JobFailedError(APIException):
    """Raised when a caller references a prepare job that ended in failure.

    Rendered by DRF as HTTP 412 Precondition Failed.
    """
    status_code = status.HTTP_412_PRECONDITION_FAILED
    default_code = 'precondition_failed'
    default_detail = 'The prepare job has failed.'
82 changes: 80 additions & 2 deletions src/mmw/apps/geoprocessing_api/schemas.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@

from django.conf import settings

from apps.core.models import JobStatus

STREAM_DATASOURCE = Parameter(
'datasource',
IN_PATH,
Expand Down Expand Up @@ -125,7 +127,7 @@
properties={
'job': Schema(type=TYPE_STRING, format=FORMAT_UUID,
example='6e514e69-f46b-47e7-9476-c1f5be0bac01'),
'status': Schema(type=TYPE_STRING, example='started'),
'status': Schema(type=TYPE_STRING, example=JobStatus.STARTED),
}
)

Expand All @@ -135,7 +137,7 @@
properties={
'job_uuid': Schema(type=TYPE_STRING, format=FORMAT_UUID,
example='6e514e69-f46b-47e7-9476-c1f5be0bac01'),
'status': Schema(type=TYPE_STRING, example='started'),
'status': Schema(type=TYPE_STRING, example=JobStatus.STARTED),
'result': Schema(type=TYPE_OBJECT),
'error': Schema(type=TYPE_STRING),
'started': Schema(type=TYPE_STRING, format=FORMAT_DATETIME,
Expand Down Expand Up @@ -178,3 +180,79 @@
},
required=['location'],
)

# Valid values for the "__LAND__" layer override, pre-joined with `", "` so
# the list can be embedded inside a double-quoted API description string.
nlcd_override_allowed_values = '", "'.join(
    [f'nlcd-{year}-30m-epsg5070-512-byte'
     for year in (2019, 2016, 2011, 2006, 2001)]
    + ['nlcd-2011-30m-epsg5070-512-int8']
)
# OpenAPI schema for the optional "layer_overrides" request object: lets API
# clients replace the default land-cover (__LAND__) and streams (__STREAMS__)
# datasets used in model runs. Referenced by MODELING_REQUEST below.
LAYER_OVERRIDES = Schema(
    title='Layer Overrides',
    type=TYPE_OBJECT,
    description='MMW combines different datasets in model runs. These have '
                'default values, but can be overridden by specifying them '
                'here. Only specify a value for the layers you want to '
                'override.',
    properties={
        # NLCD land-cover layer; valid names come from
        # nlcd_override_allowed_values above.
        '__LAND__': Schema(
            type=TYPE_STRING,
            example='nlcd-2019-30m-epsg5070-512-byte',
            description='The NLCD layer to use. Valid options are: '
                        f'"{nlcd_override_allowed_values}". All "-byte" '
                        'layers are from the NLCD19 product. The "-int8" '
                        'layer is from the NLCD11 product. The default value '
                        'is NLCD19 2019 "nlcd-2019-30m-epsg5070-512-byte".',
        ),
        # Streams layer: "nhdhr", "nhd", or "drb" (Delaware River Basin only).
        '__STREAMS__': Schema(
            type=TYPE_STRING,
            example='nhdhr',
            description='The streams layer to use. Valid options are: '
                        '"nhdhr" for NHD High Resolution Streams, "nhd" for '
                        'NHD Medium Resolution Streams, and "drb" for '
                        'Delaware High Resolution. The area of interest must '
                        'be completely within the Delaware River Basin for '
                        '"drb". "nhdhr" and "nhd" can be used within the '
                        'Continental United States. In some cases, "nhdhr" '
                        'may timeout. In such cases, "nhd" can be used as a '
                        'fallback. "nhdhr" is the default.'
        )
    },
)

# OpenAPI request-body schema for the modeling endpoints: accepts an
# 'area_of_interest' geometry, a 'wkaoi' well-known-area reference, and
# optional 'layer_overrides'.
MODELING_REQUEST = Schema(
    title='Modeling Request',
    type=TYPE_OBJECT,
    properties={
        # GeoJSON MultiPolygon schema defined elsewhere in this module.
        'area_of_interest': MULTIPOLYGON,
        'wkaoi': Schema(
            title='Well-Known Area of Interest',
            type=TYPE_STRING,
            example='huc12__55174',
            description='The table and ID for a well-known area of interest, '
                        'such as a HUC. '
                        'Format "table__id", eg. "huc12__55174" will analyze '
                        'the HUC-12 City of Philadelphia-Schuylkill River.',
        ),
        'layer_overrides': LAYER_OVERRIDES,
    },
)

# OpenAPI request-body schema for running GWLF-E: takes the output of
# modeling/gwlf-e/prepare/ either inline ('input') or by reference to the
# prepare job's uuid ('job_uuid').
GWLFE_REQUEST = Schema(
    title='GWLF-E Request',
    type=TYPE_OBJECT,
    properties={
        'input': Schema(
            type=TYPE_OBJECT,
            description='The result of modeling/gwlf-e/prepare/',
        ),
        'job_uuid': Schema(
            type=TYPE_STRING,
            format=FORMAT_UUID,
            example='6e514e69-f46b-47e7-9476-c1f5be0bac01',
            description='The job uuid of modeling/gwlf-e/prepare/',
        ),
    },
)
4 changes: 4 additions & 0 deletions src/mmw/apps/geoprocessing_api/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,5 +36,9 @@
re_path(r'jobs/' + uuid_regex, get_job, name='get_job'),
re_path(r'modeling/worksheet/$', views.start_modeling_worksheet,
name='start_modeling_worksheet'),
re_path(r'modeling/gwlf-e/prepare/$', views.start_modeling_gwlfe_prepare,
name='start_modeling_gwlfe_prepare'),
re_path(r'modeling/gwlf-e/run/$', views.start_modeling_gwlfe_run,
name='start_modeling_gwlfe_run'),
re_path(r'watershed/$', views.start_rwd, name='start_rwd'),
]
Loading

0 comments on commit 5b7dccc

Please sign in to comment.