adding git paths to inputs #9

Merged
merged 12 commits on Jun 14, 2024
2 changes: 1 addition & 1 deletion environment.yml
@@ -17,7 +17,7 @@ dependencies:
- ipykernel
- scipy
- birdy
- tensorflow
- tensorflow=2.15.0
- numpy=1.26.0
# tests
- pytest
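The environment now pins tensorflow next to the existing numpy pin. A minimal, illustrative sketch (not from the PR) to confirm at runtime that the pinned versions are the ones actually imported:

```python
# Illustrative check only: verify that the versions pinned in environment.yml
# (tensorflow=2.15.0, numpy=1.26.0) are the ones resolved in the environment.
import numpy
import tensorflow as tf

assert tf.__version__.startswith("2.15"), tf.__version__
assert numpy.__version__.startswith("1.26"), numpy.__version__
```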
33 changes: 25 additions & 8 deletions shearwater/processes/wps_cyclone.py
@@ -1,15 +1,15 @@
from pywps import Process, LiteralInput, ComplexOutput
from pywps.app.Common import Metadata
# import birdy
# from tensorflow.keras import models
import pickle
# import pickle
import numpy as np
import numpy
import pandas as pd
# from datetime import datetime
import os
from pywps import FORMATS
from pathlib import Path
import urllib.request

# import intake

@@ -107,14 +107,21 @@ def _handler(self, request, response):
# area = request.inputs['area'][0].data

# to be updated with data repository
data1 = pd.read_csv("../shearwater/data/test_dailymeans_Sindian_1.zip")
data2 = pd.read_csv("../shearwater/data/test_dailymeans_Sindian_2.zip")
data1 = pd.read_csv(
"https://github.com/climateintelligence/shearwater/raw/main/data/test_dailymeans_Sindian_1.zip")
# ("../shearwater/data/test_dailymeans_Sindian_1.zip")
data2 = pd.read_csv(
"https://github.com/climateintelligence/shearwater/raw/main/data/test_dailymeans_Sindian_2.zip")
# ("../shearwater/data/test_dailymeans_Sindian_2.zip")
data = pd.concat((data1, data2), ignore_index=True)
data = data.loc[(data.time >= start_date) & (data.time <= end_date)]
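The test data is now fetched from raw GitHub URLs instead of relative paths; pandas reads a zipped CSV directly over HTTPS and infers the compression from the .zip suffix. A minimal sketch of that pattern, assuming each archive holds a single CSV:

```python
import pandas as pd

# pandas downloads the archive and decompresses it transparently;
# the URL is the raw-content link used in the handler above.
url = "https://github.com/climateintelligence/shearwater/raw/main/data/test_dailymeans_Sindian_1.zip"
frame = pd.read_csv(url)
print(frame.columns.tolist())
```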

variables = ['vo', 'r', 'u_200', 'u_850', 'v_200', 'v_850', 'tcwv', 'sst', 'shear']
with open('../shearwater/data/full_statistics.pkl', 'rb') as f:
means, stds = pickle.load(f)
# with open("https://github.com/climateintelligence/shearwater/raw/main/data/full_statistics.pkl", 'rb') as f:
# means, stds = pickle.load(f)
means, stds = pd.read_pickle(
"https://github.com/climateintelligence/shearwater/raw/main/data/full_statistics.zip")

data[variables] = (data[variables]-means[variables])/stds[variables]
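Since the built-in open() cannot read from a URL, the remote statistics are loaded with pandas' read_pickle, which accepts HTTPS URLs and infers the zip compression from the file suffix. A minimal sketch, with the (means, stds) tuple layout taken from the surrounding code:

```python
import pandas as pd

# read_pickle handles both the remote URL and the zip compression;
# the file is assumed to contain a pickled (means, stds) pair indexed by variable name.
stats_url = "https://github.com/climateintelligence/shearwater/raw/main/data/full_statistics.zip"
means, stds = pd.read_pickle(stats_url)
print(means["sst"], stds["sst"])
```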

number_of_img, rows, cols = len(data.time.unique()), len(data.latitude.unique()), len(data.longitude.unique())
@@ -137,14 +144,24 @@ def _handler(self, request, response):

test_img_std = np.pad(test_img_std, ((0, 0), (1, 2), (1, 2), (0, 0)), 'constant')
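For reference, the padding adds one row/column before and two after on the two spatial axes only, leaving the image and channel axes untouched. A small shape check, with the grid size assumed for illustration and the channel count matching the nine variables above:

```python
import numpy as np

# Hypothetical grid size; only the spatial axes (rows, cols) are padded.
x = np.zeros((4, 57, 57, 9))                                 # (images, rows, cols, variables)
y = np.pad(x, ((0, 0), (1, 2), (1, 2), (0, 0)), 'constant')
print(y.shape)                                               # (4, 60, 60, 9)
```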

model_trained = models.load_model('../shearwater/data/Unet_sevenAreas_fullStd_0lag_model.keras')
workdir = Path(self.workdir)
model_path = os.path.join(workdir, "Unet_sevenAreas_fullStd_0lag_model.keras")
urllib.request.urlretrieve(
"https://github.com/climateintelligence/shearwater/raw/main/data/Unet_sevenAreas_fullStd_0lag_model.keras",
model_path # "Unet_sevenAreas_fullStd_0lag_model.keras"
)

# model_trained = models.load_model(
# "https://github.com/climateintelligence/shearwater/raw/main/data/Unet_sevenAreas_fullStd_0lag_model.keras")
# ('../shearwater/data/Unet_sevenAreas_fullStd_0lag_model.keras')

model_trained = models.load_model(model_path)
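The model weights are now downloaded into the PyWPS working directory and loaded from there rather than from a relative path. A hypothetical variant of the same step that skips the download when the file is already present, assuming the same handler context (self.workdir available and the keras model loader imported):

```python
import os
import urllib.request
from tensorflow.keras import models

# Hypothetical caching variant: fetch the Keras model only if it is not
# already in the working directory of the current process run.
model_url = ("https://github.com/climateintelligence/shearwater/raw/main/"
             "data/Unet_sevenAreas_fullStd_0lag_model.keras")
model_path = os.path.join(self.workdir, "Unet_sevenAreas_fullStd_0lag_model.keras")
if not os.path.exists(model_path):
    urllib.request.urlretrieve(model_url, model_path)
model_trained = models.load_model(model_path)
```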

prediction = model_trained.predict(test_img_std)

data = data[["latitude", "longitude", "time"]]
data['predictions_lag0'] = prediction.reshape(-1, 1)

workdir = Path(self.workdir)
prediction_path = os.path.join(workdir, "prediction_Sindian.csv")
data.to_csv(prediction_path)
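The predictions CSV is written into the working directory; presumably it is then attached to one of the process outputs declared earlier (not shown in this hunk). A hypothetical sketch with an assumed ComplexOutput identifier:

```python
# Hypothetical: 'output_csv' is an assumed identifier, not taken from this
# diff; use whatever identifier the process declares in its outputs list.
response.outputs['output_csv'].file = prediction_path
return response
```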
