diff --git a/dev-environment.yml b/dev-environment.yml
index 2186f4b9..e17b7561 100644
--- a/dev-environment.yml
+++ b/dev-environment.yml
@@ -8,7 +8,7 @@ dependencies:
   - numpy=1.*
   - matplotlib=3.*
   - pyproj>=3.4,<4
-  - rasterio>=1.3,<2
+  - rasterio>=1.3,<1.4
   - scipy=1.*
   - tqdm
   - scikit-image=0.*
@@ -23,6 +23,7 @@ dependencies:
   - richdem
 
   # Test dependencies
+  - gdal # To test against GDAL
   - pytest
   - pytest-xdist
   - pyyaml
@@ -43,10 +44,9 @@ dependencies:
   - numpydoc
 
   - pip:
+    # Optional dependency
     - -e ./
 
-    # Optional dependencies
-    - noisyopt
 
     # "Headless" needed for opencv to install without requiring system dependencies
     - opencv-contrib-python-headless
diff --git a/environment.yml b/environment.yml
index 8f5279c5..cd855041 100644
--- a/environment.yml
+++ b/environment.yml
@@ -8,7 +8,7 @@ dependencies:
   - numpy=1.*
   - matplotlib=3.*
   - pyproj>=3.4,<4
-  - rasterio>=1.3,<2
+  - rasterio>=1.3,<1.4
   - scipy=1.*
   - tqdm
   - scikit-image=0.*
diff --git a/requirements.txt b/requirements.txt
index 98df6ee8..6fced653 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,7 @@ numba==0.*
 numpy==1.*
 matplotlib==3.*
 pyproj>=3.4,<4
-rasterio>=1.3,<2
+rasterio>=1.3,<1.4
 scipy==1.*
 tqdm
 scikit-image==0.*
""" - # Initiate coregistration + warnings.filterwarnings("ignore", message="Covariance of the parameters*") + horizontal_coreg = coreg_method() # Copy dictionary and remove inlier mask @@ -249,7 +252,7 @@ def test_coreg_translations__synthetic(self, fit_args, shifts, coreg_method) -> fit_shifts = [-horizontal_coreg.meta["outputs"]["affine"][k] for k in ["shift_x", "shift_y", "shift_z"]] # ICP can be less precise than other methods - rtol = 10e-2 if coreg_method != coreg.ICP else 10e-1 + rtol = 10e-2 if coreg_method == coreg.NuthKaab else 10e-1 assert np.allclose(shifts, fit_shifts, rtol=rtol) # For a point cloud output, need to interpolate with the other DEM to get dh @@ -274,14 +277,14 @@ def test_coreg_translations__synthetic(self, fit_args, shifts, coreg_method) -> # Check applying the coregistration removes 99% of the variance (95% for ICP) # Need to standardize by the elevation difference spread to avoid huge/small values close to infinity - tol = 0.01 if coreg_method != coreg.ICP else 0.05 + tol = 0.01 if coreg_method == coreg.NuthKaab else 0.05 assert np.nanvar(dh / np.nanstd(init_dh)) < tol @pytest.mark.parametrize( "coreg_method__shift", [ (coreg.NuthKaab, (9.202739, 2.735573, -1.97733)), - (coreg.GradientDescending, (10.0, 2.5, -1.964539)), + (coreg.DhMinimize, (10.0850892, 2.898166, -1.943001)), (coreg.ICP, (8.73833, 1.584255, -1.943957)), ], ) # type: ignore @@ -299,7 +302,6 @@ def test_coreg_translations__example( # Get the coregistration method and expected shifts from the inputs coreg_method, expected_shifts = coreg_method__shift - # Run co-registration c = coreg_method(subsample=50000) c.fit(ref, tba, inlier_mask=inlier_mask, verbose=verbose, random_state=42) diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index d65eb592..5632f7eb 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -113,16 +113,11 @@ def recursive_typeddict_items(typed_dict: Mapping[str, Any]) -> Iterable[str]: # Assert all keys exist in the mapping key to str dictionary used for info list_info_keys = list(dict_key_to_str.keys()) - # TODO: Remove GradientDescending + ICP keys here once generic optimizer is used + # TODO: Remove ICP keys here once generic optimizer is used # Temporary exceptions: pipeline/blockwise + gradientdescending/icp list_exceptions = [ "step_meta", "pipeline", - "x0", - "bounds", - "deltainit", - "deltatol", - "feps", "rejection_scale", "num_levels", ] @@ -771,7 +766,7 @@ def test_pipeline__errors(self) -> None: def test_pipeline_pts(self) -> None: - pipeline = coreg.NuthKaab() + coreg.GradientDescending() + pipeline = coreg.NuthKaab() + coreg.DhMinimize() ref_points = self.ref.to_pointcloud(subsample=5000, random_state=42).ds ref_points["E"] = ref_points.geometry.x ref_points["N"] = ref_points.geometry.y diff --git a/xdem/coreg/__init__.py b/xdem/coreg/__init__.py index a942c740..d21446b6 100644 --- a/xdem/coreg/__init__.py +++ b/xdem/coreg/__init__.py @@ -5,7 +5,7 @@ from xdem.coreg.affine import ( # noqa ICP, AffineCoreg, - GradientDescending, + DhMinimize, NuthKaab, VerticalShift, ) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 700559fd..c2288f7f 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -27,7 +27,6 @@ CoregDict, InFitOrBinDict, InRandomDict, - InSpecificDict, OutAffineDict, _bin_or_and_fit_nd, _get_subsample_mask_pts_rst, @@ -45,9 +44,9 @@ try: from noisyopt import minimizeCompass - _has_noisyopt = True + _HAS_NOISYOPT = True except ImportError: - _has_noisyopt = False + _HAS_NOISYOPT 
diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py
index d65eb592..5632f7eb 100644
--- a/tests/test_coreg/test_base.py
+++ b/tests/test_coreg/test_base.py
@@ -113,16 +113,11 @@ def recursive_typeddict_items(typed_dict: Mapping[str, Any]) -> Iterable[str]:
 
     # Assert all keys exist in the mapping key to str dictionary used for info
     list_info_keys = list(dict_key_to_str.keys())
-    # TODO: Remove GradientDescending + ICP keys here once generic optimizer is used
+    # TODO: Remove ICP keys here once generic optimizer is used
     # Temporary exceptions: pipeline/blockwise + gradientdescending/icp
     list_exceptions = [
         "step_meta",
         "pipeline",
-        "x0",
-        "bounds",
-        "deltainit",
-        "deltatol",
-        "feps",
         "rejection_scale",
         "num_levels",
     ]
@@ -771,7 +766,7 @@ def test_pipeline__errors(self) -> None:
 
     def test_pipeline_pts(self) -> None:
 
-        pipeline = coreg.NuthKaab() + coreg.GradientDescending()
+        pipeline = coreg.NuthKaab() + coreg.DhMinimize()
         ref_points = self.ref.to_pointcloud(subsample=5000, random_state=42).ds
         ref_points["E"] = ref_points.geometry.x
         ref_points["N"] = ref_points.geometry.y
diff --git a/xdem/coreg/__init__.py b/xdem/coreg/__init__.py
index a942c740..d21446b6 100644
--- a/xdem/coreg/__init__.py
+++ b/xdem/coreg/__init__.py
@@ -5,7 +5,7 @@
 from xdem.coreg.affine import (  # noqa
     ICP,
     AffineCoreg,
-    GradientDescending,
+    DhMinimize,
     NuthKaab,
     VerticalShift,
 )
diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py
index 700559fd..c2288f7f 100644
--- a/xdem/coreg/affine.py
+++ b/xdem/coreg/affine.py
@@ -27,7 +26,6 @@
     CoregDict,
     InFitOrBinDict,
     InRandomDict,
-    InSpecificDict,
     OutAffineDict,
     _bin_or_and_fit_nd,
     _get_subsample_mask_pts_rst,
@@ -45,9 +44,9 @@
 try:
     from noisyopt import minimizeCompass
 
-    _has_noisyopt = True
+    _HAS_NOISYOPT = True
 except ImportError:
-    _has_noisyopt = False
+    _HAS_NOISYOPT = False
 
 ######################################
 # Generic functions for affine methods
@@ -629,17 +628,17 @@ def nuth_kaab(
 
     return final_offsets, subsample_final
 
 
-########################
-# 2/ Gradient descending
-########################
+####################
+# 2/ Dh minimization
+####################
 
 
-def _gradient_descending_fit_func(
+def _dh_minimize_fit_func(
     coords_offsets: tuple[float, float],
     dh_interpolator: Callable[[float, float], NDArrayf],
-) -> float:
+) -> NDArrayf:
     """
-    Fitting function of gradient descending method, returns the NMAD of elevation residuals.
+    Fitting function of dh minimization method, returns the elevation differences.
 
     :param coords_offsets: Coordinate offsets at this iteration (easting, northing) in georeferenced unit.
     :param dh_interpolator: Interpolator returning elevation differences at the subsampled points for a certain
@@ -648,81 +647,84 @@
     """
 
     # Calculate the elevation difference
-    dh = dh_interpolator(coords_offsets[0], coords_offsets[1])
+    dh = dh_interpolator(coords_offsets[0], coords_offsets[1]).flatten()
 
-    # Return NMAD of residuals
-    return float(nmad(dh))
+    return dh
""" - # Define cost function - def func_cost(offset: tuple[float, float]) -> float: - return _gradient_descending_fit_func(offset, dh_interpolator=dh_interpolator) - - # Mean resolution - mean_res = (res[0] + res[1]) / 2 - # Run pattern search minimization - res = minimizeCompass( - func_cost, - x0=tuple(x * mean_res for x in params_noisyopt["x0"]), - deltainit=params_noisyopt["deltainit"] * mean_res, - deltatol=params_noisyopt["deltatol"] * mean_res, - feps=params_noisyopt["feps"] * mean_res, - bounds=( - tuple(b * mean_res for b in params_noisyopt["bounds"]), - tuple(b * mean_res for b in params_noisyopt["bounds"]), - ), - disp=verbose, - errorcontrol=False, - ) + # Define partial function + loss_func = params_fit_or_bin["fit_loss_func"] + + def fit_func(coords_offsets: tuple[float, float]) -> np.floating[Any]: + return loss_func(_dh_minimize_fit_func(coords_offsets=coords_offsets, dh_interpolator=dh_interpolator)) + + # Initial offset near zero + init_offsets = (0, 0) + + # Default parameters depending on optimizer used + if params_fit_or_bin["fit_minimizer"] == scipy.optimize.minimize: + if "method" not in kwargs.keys(): + kwargs.update({"method": "Nelder-Mead"}) + kwargs.update({"options": {"xatol": 10e-6, "maxiter": 500}}) + # This method has trouble when initialized with 0,0, so defaulting to 1,1 + # (tip from Simon Gascoin: https://github.com/GlacioHack/xdem/pull/595#issuecomment-2387104719) + init_offsets = (1, 1) + + elif _HAS_NOISYOPT and params_fit_or_bin["fit_minimizer"] == minimizeCompass: + kwargs.update({"errorcontrol": False}) + if "deltatol" not in kwargs.keys(): + kwargs.update({"deltatol": 0.004}) + if "feps" not in kwargs.keys(): + kwargs.update({"feps": 10e-5}) + + results = params_fit_or_bin["fit_minimizer"](fit_func, init_offsets, **kwargs) # Get final offsets with the right sign direction - offset_east = -res.x[0] - offset_north = -res.x[1] + offset_east = -results.x[0] + offset_north = -results.x[1] offset_vertical = float(np.nanmedian(dh_interpolator(-offset_east, -offset_north))) return offset_east, offset_north, offset_vertical -def gradient_descending( +def dh_minimize( ref_elev: NDArrayf | gpd.GeoDataFrame, tba_elev: NDArrayf | gpd.GeoDataFrame, inlier_mask: NDArrayb, transform: rio.transform.Affine, area_or_point: Literal["Area", "Point"] | None, params_random: InRandomDict, - params_noisyopt: InSpecificDict, + params_fit_or_bin: InFitOrBinDict, z_name: str, weights: NDArrayf | None = None, verbose: bool = False, + **kwargs: Any, ) -> tuple[tuple[float, float, float], int]: """ - Gradient descending coregistration method (Zhihao, in prep.), for any point-raster or raster-raster input, + Elevation difference minimization coregistration method, for any point-raster or raster-raster input, including subsampling and interpolation to the same points. :return: Final estimated offset: east, north, vertical (in georeferenced units). """ - if not _has_noisyopt: - raise ValueError("Optional dependency needed. 
@@ -1432,9 +1433,9 @@ def _to_matrix_func(self) -> NDArrayf:
 
         return matrix
 
 
-class GradientDescending(AffineCoreg):
+class DhMinimize(AffineCoreg):
     """
-    Gradient descending coregistration.
+    Elevation difference minimization coregistration.
 
     Estimates vertical and horizontal translations.
 
@@ -1445,29 +1446,20 @@ def __init__(
         self,
-        x0: tuple[float, float] = (0, 0),
-        bounds: tuple[float, float] = (-10, 10),
-        deltainit: int = 2,
-        deltatol: float = 0.004,
-        feps: float = 0.0001,
-        subsample: int | float = 6000,
+        fit_minimizer: Callable[..., tuple[NDArrayf, Any]] = scipy.optimize.minimize,
+        fit_loss_func: Callable[[NDArrayf], np.floating[Any]] = nmad,
+        subsample: int | float = 5e5,
     ) -> None:
         """
-        Instantiate gradient descending coregistration object.
+        Instantiate dh minimization object.
 
-        :param x0: The initial point of gradient descending iteration.
-        :param bounds: The boundary of the maximum shift.
-        :param deltainit: Initial pattern size.
-        :param deltatol: Target pattern size, or the precision you want achieve.
-        :param feps: Parameters for algorithm. Smallest difference in function value to resolve.
+        :param fit_minimizer: Minimizer for the coregistration function.
+        :param fit_loss_func: Loss function for the minimization of residuals.
         :param subsample: Subsample the input for speed-up. <1 is parsed as a fraction. >1 is a pixel count.
-
-        The algorithm terminates when the iteration is locally optimal at the target pattern size 'deltatol',
-        or when the function value differs by less than the tolerance 'feps' along all directions.
-        """
-        meta = {"bounds": bounds, "x0": x0, "deltainit": deltainit, "deltatol": deltatol, "feps": feps}
-        super().__init__(subsample=subsample, meta=meta)
+        """
+
+        meta_fit = {"fit_or_bin": "fit", "fit_minimizer": fit_minimizer, "fit_loss_func": fit_loss_func}
+        super().__init__(subsample=subsample, meta=meta_fit)  # type: ignore
 
     def _fit_rst_rst(
         self,
@@ -1516,11 +1508,10 @@ def _fit_rst_pts(
 
         # Get parameters stored in class
         params_random = self._meta["inputs"]["random"]
-        # TODO: Replace params noisyopt by kwargs? (=classic optimizer parameters)
-        params_noisyopt = self._meta["inputs"]["specific"]
+        params_fit_or_bin = self._meta["inputs"]["fitorbin"]
 
         # Call method
-        (easting_offset, northing_offset, vertical_offset), subsample_final = gradient_descending(
+        (easting_offset, northing_offset, vertical_offset), subsample_final = dh_minimize(
             ref_elev=ref_elev,
             tba_elev=tba_elev,
             inlier_mask=inlier_mask,
@@ -1530,7 +1521,8 @@ def _fit_rst_pts(
             weights=weights,
             verbose=verbose,
             params_random=params_random,
-            params_noisyopt=params_noisyopt,
+            params_fit_or_bin=params_fit_or_bin,
+            **kwargs,
         )
 
         # Write output to class
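For orientation on the renamed API above, a hedged usage sketch with xdem's bundled Longyearbyen example data; the values shown mirror the defaults set in DhMinimize.__init__ (scipy.optimize.minimize with Nelder-Mead, NMAD loss, 5e5 subsample), and variable names are illustrative:

import numpy as np
import xdem
from xdem.coreg import DhMinimize

ref = xdem.DEM(xdem.examples.get_path("longyearbyen_ref_dem"))
tba = xdem.DEM(xdem.examples.get_path("longyearbyen_tba_dem"))

# Default setup: scipy.optimize.minimize (Nelder-Mead) minimizing the NMAD of dh
dh_min = DhMinimize(subsample=5e5)
dh_min.fit(ref, tba, random_state=42)
aligned = dh_min.apply(tba)

# Minimizer and loss are pluggable, e.g. minimizing the standard deviation instead
dh_min_std = DhMinimize(fit_loss_func=np.std)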
diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py
index 4bf96f5c..4e64d37d 100644
--- a/xdem/coreg/base.py
+++ b/xdem/coreg/base.py
@@ -77,6 +77,8 @@
     "fit_or_bin": "Fit, bin or bin+fit",
     "fit_func": "Function to fit",
     "fit_optimizer": "Optimizer for fitting",
+    "fit_minimizer": "Minimizer of method",
+    "fit_loss_func": "Loss function of method",
     "bin_statistic": "Binning statistic",
     "bin_sizes": "Bin sizes or edges",
     "bin_apply_method": "Bin apply method",
@@ -1428,6 +1430,12 @@ class InFitOrBinDict(TypedDict, total=False):
     # Fit parameters: function to fit and optimizer
     fit_func: Callable[..., NDArrayf]
     fit_optimizer: Callable[..., tuple[NDArrayf, Any]]
+
+    # TODO: Solve redundancy between optimizer and minimizer (curve_fit or minimize as default?)
+    # For a minimization problem
+    fit_minimizer: Callable[..., tuple[NDArrayf, Any]]
+    fit_loss_func: Callable[[NDArrayf], np.floating[Any]]
+
     # Bin parameters: bin sizes, statistic and apply method
     bin_sizes: int | dict[str, int | Iterable[float]]
     bin_statistic: Callable[[NDArrayf], np.floating[Any]]
@@ -1475,15 +1483,6 @@ class InSpecificDict(TypedDict, total=False):
     # (Using Deramp) Polynomial order selected for deramping
     poly_order: int
 
-    # (Using GradientDescending)
-    # (Temporary) Parameters of gradient descending
-    # TODO: Remove in favor of kwargs like for curve_fit?
-    x0: tuple[float, float]
-    bounds: tuple[float, float]
-    deltainit: int
-    deltatol: float
-    feps: float
-
     # (Using ICP)
     rejection_scale: float
     num_levels: int
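The noisyopt pattern search that GradientDescending previously hard-coded remains reachable through the generic fit_minimizer parameter. A sketch assuming noisyopt is installed and reusing ref/tba from the previous example; per _dh_minimize_fit above, deltatol and feps default to 0.004 and 10e-5 when not given, errorcontrol is always disabled, and extra keyword arguments should reach the minimizer through the **kwargs chain added in _fit_rst_pts:

from noisyopt import minimizeCompass
from xdem.coreg import DhMinimize

# Pattern-search minimization instead of scipy's Nelder-Mead; the deltatol
# keyword is forwarded down to minimizeCompass, overriding the 0.004 default
dh_min_compass = DhMinimize(fit_minimizer=minimizeCompass)
dh_min_compass.fit(ref, tba, random_state=42, deltatol=0.01)

# Estimated translation, read from the output metadata as in the tests above
shifts = [dh_min_compass.meta["outputs"]["affine"][k] for k in ("shift_x", "shift_y", "shift_z")]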