diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 1dc0d97..46e64f6 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -61,7 +61,7 @@ jobs:
           fetch-depth: '0'
 
       - name: Build wheels
-        uses: pypa/cibuildwheel@v2.8.1
+        uses: pypa/cibuildwheel@v2.16.2
         env:
           CIBW_ARCHS: ${{ matrix.cfg.arch }}
         with:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e8e6b9..7559b1a 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,24 @@
 Changelog
 =========
 
+Version 1.7.1
+-------------
+*October 14, 2023*
+
+**New features**
+
+* added wheels for Python v3.11 and v3.12 ([#277](https://github.com/GeoStat-Framework/PyKrige/pull/277))
+
+**Changes**
+
+* dropped Python 3.7 support ([#277](https://github.com/GeoStat-Framework/PyKrige/pull/277))
+
+**Bug fixes**
+
+* fixed print statement in uk3d ([#272](https://github.com/GeoStat-Framework/PyKrige/issues/272))
+* fixed exact_values behavior in C backend ([#256](https://github.com/GeoStat-Framework/PyKrige/pull/256))
+
+
 Version 1.7.0
 -------------
 *August 18, 2022*
diff --git a/benchmarks/kriging_benchmarks.py b/benchmarks/kriging_benchmarks.py
index ed0cf84..47ebb03 100644
--- a/benchmarks/kriging_benchmarks.py
+++ b/benchmarks/kriging_benchmarks.py
@@ -51,7 +51,6 @@ def make_benchark(n_train, n_test, n_dim=2):
     # All the following tests are performed with the linear variogram model
     for backend in BACKENDS:
         for n_closest_points in N_MOVING_WINDOW:
-
             if backend == "vectorized" and n_closest_points is not None:
                 continue  # this is not supported
diff --git a/examples/09_kriging_meuse.ipynb b/examples/09_kriging_meuse.ipynb
index f7f09e6..625e05c 100644
--- a/examples/09_kriging_meuse.ipynb
+++ b/examples/09_kriging_meuse.ipynb
@@ -17,7 +17,7 @@
    },
    "outputs": [],
    "source": [
-    "# This example requires some extra packages compared to the PyKrige package. 
\n", + "# This example requires some extra packages compared to the PyKrige package.\n", "# At the time of the creation, I used the conda package manager\n", "# and installed the following (with the versions at the time):\n", "# pandas 0.18.1, geopandas 0.2.1, seaborn 0.7.1, folium 0.2.1, shapely 1.5.16\n", @@ -33,6 +33,7 @@ "import numpy as np\n", "import folium\n", "from folium import plugins\n", + "\n", "%matplotlib inline\n", "from pykrige.ok import OrdinaryKriging\n", "from sklearn.neighbors import KNeighborsRegressor\n", @@ -40,6 +41,7 @@ "import pykrige.kriging_tools as kt\n", "import seaborn as sb\n", "from IPython.core.display import HTML\n", + "\n", "HTML(\"\")" ] }, @@ -62,16 +64,16 @@ }, "outputs": [], "source": [ - "if(~os.path.isfile('meuse.zip')):\n", - " url = 'http://spatial-analyst.net/book/system/files/meuse.zip'\n", + "if ~os.path.isfile(\"meuse.zip\"):\n", + " url = \"http://spatial-analyst.net/book/system/files/meuse.zip\"\n", " results = requests.get(url)\n", - " print('Status code download: {}'.format(results.status_code))\n", - "with open('meuse.zip', 'wb') as f:\n", + " print(\"Status code download: {}\".format(results.status_code))\n", + "with open(\"meuse.zip\", \"wb\") as f:\n", " f.write(results.content)\n", - "zip_ref = zipfile.ZipFile('meuse.zip', 'r')\n", - "zip_ref.extractall('meuse_example_data/')\n", + "zip_ref = zipfile.ZipFile(\"meuse.zip\", \"r\")\n", + "zip_ref.extractall(\"meuse_example_data/\")\n", "zip_ref.close()\n", - "os.remove('meuse.zip')" + "os.remove(\"meuse.zip\")" ] }, { @@ -82,10 +84,10 @@ }, "outputs": [], "source": [ - "meuse = gpd.read_file('meuse_example_data/meuse.shp')\n", - "meuse.crs = {'init':'epsg:28992'}\n", - "meuse['x'] = meuse['geometry'].apply(lambda x: x.x)\n", - "meuse['y'] = meuse['geometry'].apply(lambda x: x.y)\n", + "meuse = gpd.read_file(\"meuse_example_data/meuse.shp\")\n", + "meuse.crs = {\"init\": \"epsg:28992\"}\n", + "meuse[\"x\"] = meuse[\"geometry\"].apply(lambda x: x.x)\n", + "meuse[\"y\"] = meuse[\"geometry\"].apply(lambda x: x.y)\n", "meuse.sample()" ] }, @@ -105,18 +107,25 @@ }, "outputs": [], "source": [ - "feature_to_plot = 'lead'\n", + "feature_to_plot = \"lead\"\n", "\n", - "meuse_lat_long = meuse.to_crs({'init': 'epsg:4326'})\n", - "meuse_lat_long['long'] = meuse_lat_long.geometry.apply(lambda x: x.x)\n", - "meuse_lat_long['lat'] = meuse_lat_long.geometry.apply(lambda x: x.y)\n", - "mean_long = np.mean(meuse_lat_long['long'])\n", - "mean_lat = np.mean(meuse_lat_long['lat'])\n", - "m = folium.Map([mean_lat, mean_long], zoom_start=13, tiles='Stamen Toner')\n", - "scale = folium.colormap.linear.YlOrRd.scale(vmin=0, vmax=meuse_lat_long[feature_to_plot].max())\n", + "meuse_lat_long = meuse.to_crs({\"init\": \"epsg:4326\"})\n", + "meuse_lat_long[\"long\"] = meuse_lat_long.geometry.apply(lambda x: x.x)\n", + "meuse_lat_long[\"lat\"] = meuse_lat_long.geometry.apply(lambda x: x.y)\n", + "mean_long = np.mean(meuse_lat_long[\"long\"])\n", + "mean_lat = np.mean(meuse_lat_long[\"lat\"])\n", + "m = folium.Map([mean_lat, mean_long], zoom_start=13, tiles=\"Stamen Toner\")\n", + "scale = folium.colormap.linear.YlOrRd.scale(\n", + " vmin=0, vmax=meuse_lat_long[feature_to_plot].max()\n", + ")\n", "for row in meuse_lat_long.iterrows():\n", - " folium.CircleMarker(location=[row[1]['lat'], row[1]['long']], radius=50, color=None, fill_opacity=1,\n", - " fill_color=scale(row[1][feature_to_plot])).add_to(m)\n", + " folium.CircleMarker(\n", + " location=[row[1][\"lat\"], row[1][\"long\"]],\n", + " radius=50,\n", + " 
color=None,\n", + " fill_opacity=1,\n", + " fill_color=scale(row[1][feature_to_plot]),\n", + " ).add_to(m)\n", "m.add_children(scale)" ] }, @@ -138,11 +147,17 @@ "outputs": [], "source": [ "np.random.seed(0)\n", - "test_indexes = np.random.choice(a=meuse.index, size=int(np.round(len(meuse.index.values)/4)))\n", + "test_indexes = np.random.choice(\n", + " a=meuse.index, size=int(np.round(len(meuse.index.values) / 4))\n", + ")\n", "train_indexes = [index for index in meuse.index if index not in test_indexes]\n", - "meuse_test = meuse.loc[test_indexes,:].copy()\n", - "meuse_train = meuse.loc[train_indexes,:].copy()\n", - "print('Number of observations in training: {}, in test: {}'.format(len(meuse_train), len(meuse_test)))" + "meuse_test = meuse.loc[test_indexes, :].copy()\n", + "meuse_train = meuse.loc[train_indexes, :].copy()\n", + "print(\n", + " \"Number of observations in training: {}, in test: {}\".format(\n", + " len(meuse_train), len(meuse_test)\n", + " )\n", + ")" ] }, { @@ -162,13 +177,25 @@ }, "outputs": [], "source": [ - "model = OrdinaryKriging(x=meuse_train['x'], y=meuse_train['y'], z=meuse_train['lead'], verbose=True,\n", - " variogram_parameters=[13500, 900, 4000],\n", - " enable_plotting=True, nlags=30, weight=True, variogram_model='spherical')\n", - "meuse_train['prediction'] = model.execute(style='points',xpoints=meuse_train['x'], ypoints=meuse_train['y'] )[0].data\n", - "meuse_train['kriging_residual'] = meuse_train['lead'] - meuse_train['prediction']\n", - "meuse_test['prediction'] = model.execute(style='points', xpoints=meuse_test['x'], ypoints=meuse_test['y'] )[0].data\n", - "meuse_test['kriging_residual'] = meuse_test['lead'] - meuse_test['prediction']" + "model = OrdinaryKriging(\n", + " x=meuse_train[\"x\"],\n", + " y=meuse_train[\"y\"],\n", + " z=meuse_train[\"lead\"],\n", + " verbose=True,\n", + " variogram_parameters=[13500, 900, 4000],\n", + " enable_plotting=True,\n", + " nlags=30,\n", + " weight=True,\n", + " variogram_model=\"spherical\",\n", + ")\n", + "meuse_train[\"prediction\"] = model.execute(\n", + " style=\"points\", xpoints=meuse_train[\"x\"], ypoints=meuse_train[\"y\"]\n", + ")[0].data\n", + "meuse_train[\"kriging_residual\"] = meuse_train[\"lead\"] - meuse_train[\"prediction\"]\n", + "meuse_test[\"prediction\"] = model.execute(\n", + " style=\"points\", xpoints=meuse_test[\"x\"], ypoints=meuse_test[\"y\"]\n", + ")[0].data\n", + "meuse_test[\"kriging_residual\"] = meuse_test[\"lead\"] - meuse_test[\"prediction\"]" ] }, { @@ -189,29 +216,37 @@ }, "outputs": [], "source": [ - "plt.figure(figsize=(6,6))\n", + "plt.figure(figsize=(6, 6))\n", "plt.subplot(221)\n", - "plt.plot(meuse_train['prediction'], meuse_train['lead'], '.')\n", - "plt.title('Training: pred vs obs')\n", - "plt.xlabel('Predictions')\n", - "plt.ylabel('True value')\n", - "plt.plot([0,700], [0,700], 'g--')\n", - "plt.ylim(0,700)\n", - "plt.xlim(0,700)\n", + "plt.plot(meuse_train[\"prediction\"], meuse_train[\"lead\"], \".\")\n", + "plt.title(\"Training: pred vs obs\")\n", + "plt.xlabel(\"Predictions\")\n", + "plt.ylabel(\"True value\")\n", + "plt.plot([0, 700], [0, 700], \"g--\")\n", + "plt.ylim(0, 700)\n", + "plt.xlim(0, 700)\n", "plt.subplot(222)\n", - "meuse_train['kriging_residual'].hist()\n", - "plt.title('Hist training res\\nMedian absolute error: {:.1f}'.format(np.median(np.abs(meuse_train['kriging_residual']))))\n", + "meuse_train[\"kriging_residual\"].hist()\n", + "plt.title(\n", + " \"Hist training res\\nMedian absolute error: {:.1f}\".format(\n", + " 
np.median(np.abs(meuse_train[\"kriging_residual\"]))\n", + " )\n", + ")\n", "plt.subplot(223)\n", - "plt.plot(meuse_test['prediction'], meuse_test['lead'], '.')\n", - "plt.plot([0,700], [0,700], 'g--')\n", - "plt.title('Test: pred vs obs')\n", - "plt.xlabel('Predictions')\n", - "plt.ylabel('True value')\n", - "plt.ylim(0,700)\n", - "plt.xlim(0,700)\n", + "plt.plot(meuse_test[\"prediction\"], meuse_test[\"lead\"], \".\")\n", + "plt.plot([0, 700], [0, 700], \"g--\")\n", + "plt.title(\"Test: pred vs obs\")\n", + "plt.xlabel(\"Predictions\")\n", + "plt.ylabel(\"True value\")\n", + "plt.ylim(0, 700)\n", + "plt.xlim(0, 700)\n", "plt.subplot(224)\n", - "meuse_test['kriging_residual'].hist()\n", - "plt.title('Hist test res\\nMedian absolute error: {:.1f}'.format(np.median(np.abs(meuse_test['kriging_residual']))))\n", + "meuse_test[\"kriging_residual\"].hist()\n", + "plt.title(\n", + " \"Hist test res\\nMedian absolute error: {:.1f}\".format(\n", + " np.median(np.abs(meuse_test[\"kriging_residual\"]))\n", + " )\n", + ")\n", "plt.tight_layout()" ] }, @@ -232,14 +267,14 @@ }, "outputs": [], "source": [ - "parameters = {'n_neighbors':np.arange(1,10)}\n", + "parameters = {\"n_neighbors\": np.arange(1, 10)}\n", "nn_model = KNeighborsRegressor()\n", "nn_model_cv = GridSearchCV(nn_model, parameters)\n", - "nn_model_cv = nn_model_cv.fit(meuse_train[['x', 'y']], meuse_train['lead']) \n", - "print('Optimal number of neighbours {}'.format(nn_model_cv.best_params_))\n", + "nn_model_cv = nn_model_cv.fit(meuse_train[[\"x\", \"y\"]], meuse_train[\"lead\"])\n", + "print(\"Optimal number of neighbours {}\".format(nn_model_cv.best_params_))\n", "nn_model = nn_model_cv.best_estimator_\n", - "meuse_test['nn_prediction'] = nn_model.predict(meuse_test[['x', 'y']])\n", - "meuse_test['nn_residual'] = meuse_test['lead'] - meuse_test['nn_prediction']" + "meuse_test[\"nn_prediction\"] = nn_model.predict(meuse_test[[\"x\", \"y\"]])\n", + "meuse_test[\"nn_residual\"] = meuse_test[\"lead\"] - meuse_test[\"nn_prediction\"]" ] }, { @@ -251,11 +286,16 @@ "outputs": [], "source": [ "sb.set_style(\"whitegrid\")\n", - "plt.figure(figsize=(4,4))\n", - "sb.boxplot(data=meuse_test[[\"nn_residual\",\"kriging_residual\"]] )\n", - "plt.title('Compairing residuals\\nmedian abs res NN: {:.1f}, Kriging {:.1f}\\nmean abs res NN: {:.1f}, Kriging: {:.1f}'\\\n", - " .format(np.median(np.abs(meuse_test['nn_residual'])), np.median(np.abs(meuse_test['kriging_residual'])),\n", - " np.mean(np.abs(meuse_test['nn_residual'])), np.mean(np.abs(meuse_test['kriging_residual']))))" + "plt.figure(figsize=(4, 4))\n", + "sb.boxplot(data=meuse_test[[\"nn_residual\", \"kriging_residual\"]])\n", + "plt.title(\n", + " \"Compairing residuals\\nmedian abs res NN: {:.1f}, Kriging {:.1f}\\nmean abs res NN: {:.1f}, Kriging: {:.1f}\".format(\n", + " np.median(np.abs(meuse_test[\"nn_residual\"])),\n", + " np.median(np.abs(meuse_test[\"kriging_residual\"])),\n", + " np.mean(np.abs(meuse_test[\"nn_residual\"])),\n", + " np.mean(np.abs(meuse_test[\"kriging_residual\"])),\n", + " )\n", + ")" ] }, { @@ -276,9 +316,8 @@ "outputs": [], "source": [ "class PolygonPointSampler(object):\n", - " \n", - " def __init__(self, polygon=''):\n", - " u\"\"\"\n", + " def __init__(self, polygon=\"\"):\n", + " \"\"\"\n", " Initialize a new PolygonPointSampler object using the specified polygon\n", " object (as allocated by Shapely). 
If no polygon is given a new empty\n", " one is created and set as the base polygon.\n", @@ -290,56 +329,59 @@ " self.samples = list()\n", " self.sample_count = 0\n", " self.prepared = False\n", - " \n", + "\n", " def add_polygon(self, polygon):\n", - " u\"\"\"\n", + " \"\"\"\n", " Add another polygon entity to the base polygon by geometrically unifying\n", " it with the current one.\n", " \"\"\"\n", " self.polygon = self.polygon.union(polygon)\n", " self.prepared = False\n", - " \n", + "\n", " def get_spatial_df(self):\n", - " geo_df = pd.DataFrame(self.samples, columns=['geometry']).set_geometry('geometry')\n", - " geo_df['x'] = geo_df['geometry'].apply(lambda x: x.coords[0][0])\n", - " geo_df['y'] = geo_df['geometry'].apply(lambda x: x.coords[0][1])\n", + " geo_df = pd.DataFrame(self.samples, columns=[\"geometry\"]).set_geometry(\n", + " \"geometry\"\n", + " )\n", + " geo_df[\"x\"] = geo_df[\"geometry\"].apply(lambda x: x.coords[0][0])\n", + " geo_df[\"y\"] = geo_df[\"geometry\"].apply(lambda x: x.coords[0][1])\n", " return geo_df\n", - " \n", + "\n", " def print_samples(self):\n", - " u\"\"\"\n", + " \"\"\"\n", " Print all sample points using their WKT representation.\n", " \"\"\"\n", " for sample_pt in self.samples:\n", " print(sample_pt)\n", - " \n", + "\n", " def prepare_sampling(self):\n", - " u\"\"\"\n", + " \"\"\"\n", " Prepare the actual sampling procedure by splitting up the specified base\n", " polygon (that may consist of multiple simple polygons) and appending its\n", " compartments to a dedicated list.\n", " \"\"\"\n", " self.src = list()\n", - " if hasattr(self.polygon, 'geoms'):\n", + " if hasattr(self.polygon, \"geoms\"):\n", " for py in self.polygon:\n", " self.src.append(py)\n", " else:\n", " self.src.append(self.polygon)\n", " self.prepared = True\n", - " \n", + "\n", " def perform_sampling(self):\n", - " u\"\"\"\n", + " \"\"\"\n", " Create a stub for the actual sampling procedure.\n", " \"\"\"\n", " raise NotImplementedError\n", - " \n", + "\n", + "\n", "class RegularGridSampler(PolygonPointSampler):\n", - " def __init__(self, polygon = '', x_interval = 100, y_interval = 100):\n", + " def __init__(self, polygon=\"\", x_interval=100, y_interval=100):\n", " super(self.__class__, self).__init__(polygon)\n", " self.x_interval = x_interval\n", " self.y_interval = y_interval\n", - " \n", + "\n", " def perform_sampling(self):\n", - " u\"\"\"\n", + " \"\"\"\n", " Perform sampling by substituting the polygon with a regular grid of\n", " sample points within it. 
The distance between the sample points is\n", " given by x_interval and y_interval.\n", @@ -352,17 +394,18 @@ " upp_x = int(ur[0]) / self.x_interval * self.x_interval + self.x_interval\n", " low_y = int(ll[1]) / self.y_interval * self.y_interval\n", " upp_y = int(ur[1]) / self.y_interval * self.y_interval + self.y_interval\n", - " \n", + "\n", " for x in floatrange(low_x, upp_x, self.x_interval):\n", " for y in floatrange(low_y, upp_y, self.y_interval):\n", " p = shapely.geometry.Point(x, y)\n", " if p.within(self.polygon):\n", " self.samples.append(p)\n", "\n", + "\n", "def floatrange(start, stop, step):\n", " while start < stop:\n", " yield start\n", - " start += step\n" + " start += step" ] }, { @@ -387,10 +430,10 @@ "sampler = RegularGridSampler(convex_hull, x_interval=50, y_interval=50)\n", "sampler.perform_sampling()\n", "grid_points = sampler.get_spatial_df()\n", - "plt.figure(figsize=(4,4))\n", - "plt.plot(grid_points['x'], grid_points['y'], '.')\n", - "plt.plot(meuse['x'], meuse['y'], 'r.')\n", - "plt.title('Sampled grid')" + "plt.figure(figsize=(4, 4))\n", + "plt.plot(grid_points[\"x\"], grid_points[\"y\"], \".\")\n", + "plt.plot(meuse[\"x\"], meuse[\"y\"], \"r.\")\n", + "plt.title(\"Sampled grid\")" ] }, { @@ -408,7 +451,9 @@ }, "outputs": [], "source": [ - "grid_points['prediction'] = model.execute(style='points', xpoints=grid_points['x'], ypoints=grid_points['y'])[0].data" + "grid_points[\"prediction\"] = model.execute(\n", + " style=\"points\", xpoints=grid_points[\"x\"], ypoints=grid_points[\"y\"]\n", + ")[0].data" ] }, { @@ -430,11 +475,11 @@ }, "outputs": [], "source": [ - "grid_points_gpd = grid_points.set_geometry('geometry')\n", - "grid_points_gpd.crs = {'init':'epsg:28992'}\n", - "grid_points_gpd = grid_points_gpd.to_crs({'init': 'epsg:4326'})\n", - "grid_points_gpd['long'] = grid_points_gpd.geometry.apply(lambda x: x.x)\n", - "grid_points_gpd['lat'] = grid_points_gpd.geometry.apply(lambda x: x.y)" + "grid_points_gpd = grid_points.set_geometry(\"geometry\")\n", + "grid_points_gpd.crs = {\"init\": \"epsg:28992\"}\n", + "grid_points_gpd = grid_points_gpd.to_crs({\"init\": \"epsg:4326\"})\n", + "grid_points_gpd[\"long\"] = grid_points_gpd.geometry.apply(lambda x: x.x)\n", + "grid_points_gpd[\"lat\"] = grid_points_gpd.geometry.apply(lambda x: x.y)" ] }, { @@ -445,9 +490,15 @@ }, "outputs": [], "source": [ - "grid_points_pivot = grid_points_gpd.pivot(values='prediction', columns='x', index='y').fillna(0)\n", - "grid_points_pivot = grid_points_pivot.loc[:,grid_points_pivot.columns.sort_values(ascending=True)]\n", - "grid_points_pivot = grid_points_pivot.loc[grid_points_pivot.index.sort_values(ascending=True),:]" + "grid_points_pivot = grid_points_gpd.pivot(\n", + " values=\"prediction\", columns=\"x\", index=\"y\"\n", + ").fillna(0)\n", + "grid_points_pivot = grid_points_pivot.loc[\n", + " :, grid_points_pivot.columns.sort_values(ascending=True)\n", + "]\n", + "grid_points_pivot = grid_points_pivot.loc[\n", + " grid_points_pivot.index.sort_values(ascending=True), :\n", + "]" ] }, { @@ -458,10 +509,15 @@ }, "outputs": [], "source": [ - "plt.contourf(np.unique(grid_points_pivot.columns.values), np.unique(grid_points_pivot.index.values), \n", - " grid_points_pivot.values/np.nanmax(grid_points_pivot.values),20,cmap='GnBu')\n", - "plt.plot(meuse['x'], meuse['y'], '.')\n", - "plt.title('Kriged grid values')" + "plt.contourf(\n", + " np.unique(grid_points_pivot.columns.values),\n", + " np.unique(grid_points_pivot.index.values),\n", + " grid_points_pivot.values / 
np.nanmax(grid_points_pivot.values),\n", + " 20,\n", + " cmap=\"GnBu\",\n", + ")\n", + "plt.plot(meuse[\"x\"], meuse[\"y\"], \".\")\n", + "plt.title(\"Kriged grid values\")" ] }, { @@ -480,9 +536,10 @@ "outputs": [], "source": [ "def color_function(value):\n", - " if (value==0) | (value==np.nan) : return (0,0,0,0)\n", - " else: \n", - " color = matplotlib.cm.YlOrRd(value) \n", + " if (value == 0) | (value == np.nan):\n", + " return (0, 0, 0, 0)\n", + " else:\n", + " color = matplotlib.cm.YlOrRd(value)\n", " return color" ] }, @@ -494,15 +551,27 @@ }, "outputs": [], "source": [ - "m = folium.Map([mean_lat, mean_long], zoom_start=13, tiles='Stamen Toner')\n", - "m.add_children(plugins.ImageOverlay(image = (grid_points_pivot.values/np.nanmax(grid_points_pivot.values)), \n", - " opacity=0.7,origin='lower',\n", - " colormap=color_function,\n", - " bounds = [[np.min(grid_points_gpd['lat']), np.min(grid_points_gpd['long'])],\n", - " [np.max(grid_points_gpd['lat']), np.max(grid_points_gpd['long'])]]))\n", + "m = folium.Map([mean_lat, mean_long], zoom_start=13, tiles=\"Stamen Toner\")\n", + "m.add_children(\n", + " plugins.ImageOverlay(\n", + " image=(grid_points_pivot.values / np.nanmax(grid_points_pivot.values)),\n", + " opacity=0.7,\n", + " origin=\"lower\",\n", + " colormap=color_function,\n", + " bounds=[\n", + " [np.min(grid_points_gpd[\"lat\"]), np.min(grid_points_gpd[\"long\"])],\n", + " [np.max(grid_points_gpd[\"lat\"]), np.max(grid_points_gpd[\"long\"])],\n", + " ],\n", + " )\n", + ")\n", "for row in meuse_lat_long.iterrows():\n", - " folium.CircleMarker(location=[row[1]['lat'], row[1]['long']], radius=50, color=None, fill_opacity=1,\n", - " fill_color=scale(row[1][feature_to_plot])).add_to(m)\n", + " folium.CircleMarker(\n", + " location=[row[1][\"lat\"], row[1][\"long\"]],\n", + " radius=50,\n", + " color=None,\n", + " fill_opacity=1,\n", + " fill_color=scale(row[1][feature_to_plot]),\n", + " ).add_to(m)\n", "m.add_children(scale)" ] }, diff --git a/pyproject.toml b/pyproject.toml index b51514c..69ac969 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,7 @@ [build-system] requires = [ - "setuptools>=62", - "wheel", - "setuptools_scm[toml]>=6.4", + "setuptools>=64", + "setuptools_scm>=7", "oldest-supported-numpy", "scipy>=1.1.0,<2", "Cython>=0.28.3,<3.0", @@ -10,7 +9,7 @@ requires = [ build-backend = "setuptools.build_meta" [project] -requires-python = ">=3.7" +requires-python = ">=3.8" name = "PyKrige" description = "Kriging Toolkit for Python." 
authors = [ @@ -35,10 +34,11 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: GIS", "Topic :: Scientific/Engineering :: Mathematics", @@ -93,11 +93,7 @@ profile = "black" multi_line_output = 3 [tool.black] -target-version = [ - "py36", - "py37", - "py38", -] +target-version = ["py38"] [tool.coverage] [tool.coverage.run] @@ -142,8 +138,8 @@ target-version = [ [tool.cibuildwheel] # Switch to using build build-frontend = "build" -# Disable building PyPy wheels on all platforms, 32bit for py3.10 and musllinux builds, py3.6 -skip = ["cp36-*", "pp*", "cp310-win32", "cp310-manylinux_i686", "*-musllinux_*"] +# Disable building PyPy wheels on all platforms, 32bit for py3.10/11/12 and musllinux builds, py3.6/7 +skip = ["cp36-*", "cp37-*", "pp*", "cp31*-win32", "cp31*-manylinux_i686", "*-musllinux_*"] # Run the package tests using `pytest` test-extras = "test" test-command = "pytest -v {package}/tests" diff --git a/src/pykrige/core.py b/src/pykrige/core.py index 0a01c36..25ad3e4 100755 --- a/src/pykrige/core.py +++ b/src/pykrige/core.py @@ -231,18 +231,14 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): """ if variogram_model_parameters is None: - parameter_list = None elif type(variogram_model_parameters) is dict: - if variogram_model in ["linear"]: - if ( "slope" not in variogram_model_parameters.keys() or "nugget" not in variogram_model_parameters.keys() ): - raise KeyError( "'linear' variogram model requires 'slope' " "and 'nugget' specified in variogram model " @@ -250,20 +246,17 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ) else: - parameter_list = [ variogram_model_parameters["slope"], variogram_model_parameters["nugget"], ] elif variogram_model in ["power"]: - if ( "scale" not in variogram_model_parameters.keys() or "exponent" not in variogram_model_parameters.keys() or "nugget" not in variogram_model_parameters.keys() ): - raise KeyError( "'power' variogram model requires 'scale', " "'exponent', and 'nugget' specified in " @@ -271,7 +264,6 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ) else: - parameter_list = [ variogram_model_parameters["scale"], variogram_model_parameters["exponent"], @@ -279,12 +271,10 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ] elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]: - if ( "range" not in variogram_model_parameters.keys() or "nugget" not in variogram_model_parameters.keys() ): - raise KeyError( "'%s' variogram model requires 'range', " "'nugget', and either 'sill' or 'psill' " @@ -293,9 +283,7 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ) else: - if "sill" in variogram_model_parameters.keys(): - parameter_list = [ variogram_model_parameters["sill"] - variogram_model_parameters["nugget"], @@ -304,7 +292,6 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ] elif "psill" in variogram_model_parameters.keys(): - parameter_list = [ variogram_model_parameters["psill"], 
variogram_model_parameters["range"], @@ -312,7 +299,6 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ] else: - raise KeyError( "'%s' variogram model requires either " "'sill' or 'psill' specified in " @@ -321,7 +307,6 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ) elif variogram_model in ["custom"]: - raise TypeError( "For user-specified custom variogram model, " "parameters must be specified in a list, " @@ -329,7 +314,6 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ) else: - raise ValueError( "Specified variogram model must be one of the " "following: 'linear', 'power', 'gaussian', " @@ -338,11 +322,8 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ) elif type(variogram_model_parameters) is list: - if variogram_model in ["linear"]: - if len(variogram_model_parameters) != 2: - raise ValueError( "Variogram model parameter list must have " "exactly two entries when variogram model " @@ -352,9 +333,7 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): parameter_list = variogram_model_parameters elif variogram_model in ["power"]: - if len(variogram_model_parameters) != 3: - raise ValueError( "Variogram model parameter list must have " "exactly three entries when variogram model " @@ -364,9 +343,7 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): parameter_list = variogram_model_parameters elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]: - if len(variogram_model_parameters) != 3: - raise ValueError( "Variogram model parameter list must have " "exactly three entries when variogram model " @@ -380,11 +357,9 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ] elif variogram_model in ["custom"]: - parameter_list = variogram_model_parameters else: - raise ValueError( "Specified variogram model must be one of the " "following: 'linear', 'power', 'gaussian', " @@ -393,7 +368,6 @@ def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): ) else: - raise TypeError( "Variogram model parameters must be provided in either " "a list or a dict when they are explicitly specified." 
@@ -825,7 +799,6 @@ def _find_statistics( sigma = np.zeros(y.shape) for i in range(y.shape[0]): - # skip the first value in the kriging problem if i == 0: continue diff --git a/src/pykrige/uk.py b/src/pykrige/uk.py index 233b711..471802a 100644 --- a/src/pykrige/uk.py +++ b/src/pykrige/uk.py @@ -535,7 +535,6 @@ def _calculate_data_point_zscalars(self, x, y, type_="array"): for m in range(ny): for n in range(nx): - if type_ == "scalar": xn = x yn = y diff --git a/tests/test_classification_krige.py b/tests/test_classification_krige.py index 8d389f5..c5c9975 100644 --- a/tests/test_classification_krige.py +++ b/tests/test_classification_krige.py @@ -90,7 +90,6 @@ def test_krige_classification_housing(): ) for ml_model, krige_method in _methods(): - class_model = ClassificationKriging( classification_model=ml_model, method=krige_method, n_closest_points=2 ) diff --git a/tests/test_core.py b/tests/test_core.py index 9e197a8..fbd4fd6 100755 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -26,7 +26,6 @@ @pytest.fixture def validation_ref(): - data = np.genfromtxt(os.path.join(BASE_DIR, "test_data/test_data.txt")) ok_test_answer, ok_test_gridx, ok_test_gridy, cellsize, no_data = kt.read_asc_grid( os.path.join(BASE_DIR, "test_data/test1_answer.asc"), footer=2 @@ -44,7 +43,6 @@ def validation_ref(): @pytest.fixture def sample_data_2d(): - data = np.array( [ [0.3, 1.2, 0.47], @@ -82,7 +80,6 @@ def sample_data_3d(): def test_core_adjust_for_anisotropy(): - X = np.array([[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]]).T X_adj = core._adjust_for_anisotropy(X, [0.0, 0.0], [2.0], [90.0]) assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0, -1.0]), **allclose_pars) @@ -90,7 +87,6 @@ def test_core_adjust_for_anisotropy(): def test_core_adjust_for_anisotropy_3d(): - # this is a bad examples, as the X matrix is symmetric # and insensitive to transpositions X = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]).T @@ -115,7 +111,6 @@ def test_core_adjust_for_anisotropy_3d(): def test_core_make_variogram_parameter_list(): - # test of first case - variogram_model_parameters is None # function should return None unaffected result = core._make_variogram_parameter_list("linear", None) @@ -186,7 +181,6 @@ def test_core_make_variogram_parameter_list(): def test_core_initialize_variogram_model(validation_ref): - data, _, _ = validation_ref # Note the variogram_function argument is not a string in real life... @@ -242,7 +236,6 @@ def test_core_initialize_variogram_model(validation_ref): def test_core_initialize_variogram_model_3d(sample_data_3d): - data, _, _ = sample_data_3d # Note the variogram_function argument is not a string in real life... @@ -308,7 +301,6 @@ def test_core_initialize_variogram_model_3d(sample_data_3d): def test_core_calculate_variogram_model(): - res = core._calculate_variogram_model( np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.05, 2.95, 4.05, 4.95]), @@ -383,7 +375,6 @@ def test_core_calculate_variogram_model(): def test_core_krige(): - # Example 3.2 from Kitanidis data = np.array([[9.7, 47.6, 1.22], [43.8, 24.6, 2.822]]) z, ss = core._krige( @@ -410,7 +401,6 @@ def test_core_krige(): def test_core_krige_3d(): - # Adapted from example 3.2 from Kitanidis data = np.array([[9.7, 47.6, 1.0, 1.22], [43.8, 24.6, 1.0, 2.822]]) z, ss = core._krige( @@ -497,7 +487,6 @@ def test_non_exact(): def test_ok(validation_ref): - # Test to compare OK results to those obtained using KT3D_H2O. # (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, # vol. 47, no. 4, 580-586.) 
@@ -518,7 +507,6 @@ def test_ok(validation_ref): def test_ok_update_variogram_model(validation_ref): - data, (ok_test_answer, gridx, gridy), _ = validation_ref with pytest.raises(ValueError): @@ -574,7 +562,6 @@ def test_ok_get_variogram_points(validation_ref): def test_ok_execute(sample_data_2d): - data, (gridx, gridy, _), mask_ref = sample_data_2d ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2]) @@ -717,7 +704,6 @@ def test_cython_ok(sample_data_2d): def test_uk(validation_ref): - # Test to compare UK with linear drift to results from KT3D_H2O. # (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, # vol. 47, no. 4, 580-586.) @@ -739,7 +725,6 @@ def test_uk(validation_ref): def test_uk_update_variogram_model(sample_data_2d): - data, (gridx, gridy, _), mask_ref = sample_data_2d with pytest.raises(ValueError): @@ -806,7 +791,6 @@ def test_uk_get_variogram_points(validation_ref): def test_uk_calculate_data_point_zscalars(sample_data_2d): - data, (gridx, gridy, _), mask_ref = sample_data_2d dem = np.arange(0.0, 5.1, 0.1) @@ -869,7 +853,6 @@ def test_uk_calculate_data_point_zscalars(sample_data_2d): def test_uk_execute_single_point(): - # Test data and answer from lecture notes by Nicolas Christou, UCLA Stats data = np.array( [ @@ -912,7 +895,6 @@ def test_uk_execute_single_point(): def test_uk_execute(sample_data_2d): - data, (gridx, gridy, _), mask_ref = sample_data_2d uk = UniversalKriging( @@ -1035,7 +1017,6 @@ def test_uk_execute(sample_data_2d): def test_ok_uk_produce_same_result(validation_ref): - data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref gridx = np.linspace(1067000.0, 1072000.0, 100) @@ -1085,7 +1066,6 @@ def test_ok_uk_produce_same_result(validation_ref): def test_ok_backends_produce_same_result(validation_ref): - data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref gridx = np.linspace(1067000.0, 1072000.0, 100) @@ -1105,7 +1085,6 @@ def test_ok_backends_produce_same_result(validation_ref): def test_uk_backends_produce_same_result(validation_ref): - data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref gridx = np.linspace(1067000.0, 1072000.0, 100) @@ -1125,7 +1104,6 @@ def test_uk_backends_produce_same_result(validation_ref): def test_kriging_tools(sample_data_2d): - data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2]) @@ -1238,7 +1216,6 @@ def test_kriging_tools(sample_data_2d): # http://doc.pytest.org/en/latest/skipping.html#id1 @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") def test_uk_three_primary_drifts(sample_data_2d): - data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d well = np.array([[1.1, 1.1, -1.0]]) @@ -1277,7 +1254,6 @@ def test_uk_three_primary_drifts(sample_data_2d): def test_uk_specified_drift(sample_data_2d): - data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d xg, yg = np.meshgrid(gridx, gridy) @@ -1407,7 +1383,6 @@ def test_uk_specified_drift(sample_data_2d): def test_uk_functional_drift(sample_data_2d): - data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d well = np.array([[1.1, 1.1, -1.0]]) @@ -1501,7 +1476,6 @@ def func_well(x, y): def test_uk_with_external_drift(validation_ref): - data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref dem, demx, demy, cellsize, no_data = kt.read_asc_grid( @@ -1937,7 +1911,6 @@ def func(params, dist): def test_ok3d(validation_ref): - data, (ok_test_answer, gridx_ref, gridy_ref), _ = validation_ref # Test to compare K3D results to those obtained 
using KT3D_H2O. @@ -2016,7 +1989,6 @@ def test_ok3d(validation_ref): def test_ok3d_moving_window(): - # Test to compare K3D results to those obtained using KT3D. data = np.genfromtxt( os.path.join(BASE_DIR, "test_data", "test3d_data.txt"), skip_header=1 @@ -2045,7 +2017,6 @@ def test_ok3d_moving_window(): def test_ok3d_uk3d_and_backends_produce_same_results(validation_ref): - data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref ok3d = OrdinaryKriging3D( @@ -2123,7 +2094,6 @@ def test_ok3d_uk3d_and_backends_produce_same_results(validation_ref): def test_ok3d_update_variogram_model(sample_data_3d): - data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d with pytest.raises(ValueError): @@ -2160,7 +2130,6 @@ def test_ok3d_update_variogram_model(sample_data_3d): def test_uk3d_update_variogram_model(sample_data_3d): - data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d with pytest.raises(ValueError): @@ -2197,7 +2166,6 @@ def test_uk3d_update_variogram_model(sample_data_3d): def test_ok3d_backends_produce_same_result(sample_data_3d): - data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d k3d = OrdinaryKriging3D( @@ -2234,7 +2202,6 @@ def test_ok3d_backends_produce_same_result(sample_data_3d): def test_ok3d_execute(sample_data_3d): - data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3]) @@ -2385,7 +2352,6 @@ def test_ok3d_execute(sample_data_3d): def test_uk3d_execute(sample_data_3d): - data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3]) @@ -2536,7 +2502,6 @@ def test_uk3d_execute(sample_data_3d): def test_force_exact_3d(sample_data_3d): - data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d k3d = OrdinaryKriging3D( @@ -2595,7 +2560,6 @@ def test_force_exact_3d(sample_data_3d): def test_uk3d_specified_drift(sample_data_3d): - data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d zg, yg, xg = np.meshgrid(gridz_ref, gridy_ref, gridx_ref, indexing="ij") @@ -2674,7 +2638,6 @@ def test_uk3d_specified_drift(sample_data_3d): def test_uk3d_functional_drift(sample_data_3d): - data, (gridx, gridy, gridz), mask_ref = sample_data_3d func_x = lambda x, y, z: x # noqa @@ -2725,7 +2688,6 @@ def test_uk3d_functional_drift(sample_data_3d): def test_geometric_code(): - # Create selected points distributed across the sphere: N = 4 lon = np.array([7.0, 7.0, 187.0, 73.231]) diff --git a/tests/test_regression_krige.py b/tests/test_regression_krige.py index a222d51..ca5da2a 100644 --- a/tests/test_regression_krige.py +++ b/tests/test_regression_krige.py @@ -88,7 +88,6 @@ def test_krige_housing(): ) for ml_model, krige_method in _methods(): - reg_kr_model = RegressionKriging( regression_model=ml_model, method=krige_method, n_closest_points=2 )
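
For context on the `exact_values` fix referenced in the 1.7.1 changelog entry (PR #256), here is a minimal sketch of how that option is exercised. It assumes the public `pykrige.ok.OrdinaryKriging` API with its `exact_values` keyword and the `backend="C"` / `backend="loop"` options of `execute`; the toy data are purely illustrative and not part of the patch.

```python
import numpy as np
from pykrige.ok import OrdinaryKriging

rng = np.random.default_rng(42)
x = rng.uniform(0.0, 1.0, 30)
y = rng.uniform(0.0, 1.0, 30)
z = np.sin(4.0 * x) + np.cos(4.0 * y)

# exact_values=False allows the kriged surface to deviate from the observations
# at the sample locations instead of reproducing them exactly.
ok = OrdinaryKriging(x, y, z, variogram_model="spherical", exact_values=False)

gridx = np.linspace(0.0, 1.0, 20)
gridy = np.linspace(0.0, 1.0, 20)

# Run the same interpolation through the C extension and the pure-Python loop backend.
z_c, ss_c = ok.execute("grid", gridx, gridy, backend="C")
z_loop, ss_loop = ok.execute("grid", gridx, gridy, backend="loop")

print(np.allclose(z_c, z_loop), np.allclose(ss_c, ss_loop))
```

With `exact_values=False`, the two backends are expected to agree within numerical tolerance, which is the behavior the C-backend fix is meant to ensure.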