Consistent Generic Steppers (#52)
* Use consistent attribute names

* Forward API changes to tests

* Forward changes to qualitative rollout
Ceyron authored Oct 22, 2024
1 parent ad869e4 commit ac70930
Showing 8 changed files with 109 additions and 108 deletions.
22 changes: 11 additions & 11 deletions exponax/stepper/generic/_convection.py
@@ -10,7 +10,7 @@


class GeneralConvectionStepper(BaseStepper):
coefficients: tuple[float, ...]
linear_coefficients: tuple[float, ...]
convection_scale: float
dealiasing_fraction: float
single_channel: bool
@@ -23,7 +23,7 @@ def __init__(
num_points: int,
dt: float,
*,
coefficients: tuple[float, ...] = (0.0, 0.0, 0.01),
linear_coefficients: tuple[float, ...] = (0.0, 0.0, 0.01),
convection_scale: float = 1.0,
single_channel: bool = False,
conservative: bool = False,
@@ -74,7 +74,7 @@ def __init__(
in each dimension is the same. Hence, the total number of degrees of
freedom is `Nᵈ`.
- `dt`: The timestep size `Δt` between two consecutive states.
- `coefficients` (keyword-only): The list of coefficients `a_j`
- `linear_coefficients` (keyword-only): The list of coefficients `a_j`
corresponding to the derivatives. The length of this tuple
represents the highest occurring derivative. The default value `(0.0,
0.0, 0.01)` corresponds to the Burgers equation (because of the
@@ -103,7 +103,7 @@ def __init__(
coefficients of the exponential time differencing Runge Kutta
method. Default: 1.0.
"""
self.coefficients = coefficients
self.linear_coefficients = linear_coefficients
self.convection_scale = convection_scale
self.single_channel = single_channel
self.dealiasing_fraction = dealiasing_fraction
@@ -136,7 +136,7 @@ def _build_linear_operator(
axis=0,
keepdims=True,
)
for i, c in enumerate(self.coefficients)
for i, c in enumerate(self.linear_coefficients)
)
return linear_operator

@@ -156,15 +156,15 @@ def _build_nonlinear_fun(


class NormalizedConvectionStepper(GeneralConvectionStepper):
normalized_coefficients: tuple[float, ...]
normalized_linear_coefficients: tuple[float, ...]
normalized_convection_scale: float

def __init__(
self,
num_spatial_dims: int,
num_points: int,
*,
normalized_coefficients: tuple[float, ...] = (0.0, 0.0, 0.01 * 0.1),
normalized_linear_coefficients: tuple[float, ...] = (0.0, 0.0, 0.01 * 0.1),
normalized_convection_scale: float = 1.0 * 0.1,
single_channel: bool = False,
conservative: bool = False,
@@ -205,7 +205,7 @@ def __init__(
boundary point. In higher dimensions, the number of points in each
dimension is the same. Hence, the total number of degrees of freedom
is `Nᵈ`.
- `normalized_coefficients`: The list of coefficients
- `normalized_linear_coefficients`: The list of coefficients
`α_j` corresponding to the derivatives. The length of this tuple
represents the highest occurring derivative. The default value `(0.0,
0.0, 0.01)` corresponds to the Burgers equation (because of the
@@ -235,14 +235,14 @@ def __init__(
coefficients of the exponential time differencing Runge Kutta
method. Default: 1.0.
"""
self.normalized_coefficients = normalized_coefficients
self.normalized_linear_coefficients = normalized_linear_coefficients
self.normalized_convection_scale = normalized_convection_scale
super().__init__(
num_spatial_dims=num_spatial_dims,
domain_extent=1.0, # Derivative operator is just scaled with 2 * jnp.pi
num_points=num_points,
dt=1.0,
coefficients=normalized_coefficients,
linear_coefficients=normalized_linear_coefficients,
convection_scale=normalized_convection_scale,
order=order,
dealiasing_fraction=dealiasing_fraction,
@@ -364,7 +364,7 @@ def __init__(
super().__init__(
num_spatial_dims=num_spatial_dims,
num_points=num_points,
normalized_coefficients=normalized_coefficients,
normalized_linear_coefficients=normalized_coefficients,
normalized_convection_scale=normalized_convection_scale,
single_channel=single_channel,
order=order,
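For downstream users, the rename only touches the constructor keyword. Below is a minimal sketch of the updated calls, assuming the classes are re-exported under `exponax.stepper.generic` (matching the file path above) and that a stepper advances a `(channel, space)` state array via `u_next = stepper(u)`; resolution, time step, and the initial condition are illustrative only:

```python
import jax.numpy as jnp

from exponax.stepper.generic import (
    GeneralConvectionStepper,
    NormalizedConvectionStepper,
)

# Burgers-type setup: the coefficient at derivative order 2 (0.01) acts as a diffusivity.
burgers_stepper = GeneralConvectionStepper(
    num_spatial_dims=1,
    domain_extent=1.0,
    num_points=64,
    dt=0.01,
    linear_coefficients=(0.0, 0.0, 0.01),  # formerly `coefficients=`
    convection_scale=1.0,
)

# Normalized variant: domain_extent and dt are fixed to 1.0 internally,
# so the time step is folded into the coefficients (hence the `* 0.1` factors).
normalized_stepper = NormalizedConvectionStepper(
    num_spatial_dims=1,
    num_points=64,
    normalized_linear_coefficients=(0.0, 0.0, 0.01 * 0.1),  # formerly `normalized_coefficients=`
    normalized_convection_scale=1.0 * 0.1,
)

# Assumed call convention: advance a (channel, space) state by one step of size dt.
grid = jnp.linspace(0.0, 1.0, 64, endpoint=False)
u_0 = jnp.sin(2 * jnp.pi * grid)[None, :]
u_1 = burgers_stepper(u_0)
```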
20 changes: 10 additions & 10 deletions exponax/stepper/generic/_gradient_norm.py
@@ -10,7 +10,7 @@


class GeneralGradientNormStepper(BaseStepper):
coefficients: tuple[float, ...]
linear_coefficients: tuple[float, ...]
gradient_norm_scale: float
dealiasing_fraction: float

@@ -21,7 +21,7 @@ def __init__(
num_points: int,
dt: float,
*,
coefficients: tuple[float, ...] = (0.0, 0.0, -1.0, 0.0, -1.0),
linear_coefficients: tuple[float, ...] = (0.0, 0.0, -1.0, 0.0, -1.0),
gradient_norm_scale: float = 1.0,
order=2,
dealiasing_fraction: float = 2 / 3,
@@ -66,7 +66,7 @@ def __init__(
in each dimension is the same. Hence, the total number of degrees of
freedom is `Nᵈ`.
- `dt`: The timestep size `Δt` between two consecutive states.
- `coefficients` (keyword-only): The list of coefficients `a_j`
- `linear_coefficients` (keyword-only): The list of coefficients `a_j`
corresponding to the derivatives. The length of this tuple
represents the highest occurring derivative. The default value `(0.0,
0.0, -1.0, 0.0, -1.0)` corresponds to the Kuramoto-Sivashinsky
@@ -89,7 +89,7 @@ def __init__(
coefficients of the exponential time differencing Runge Kutta
method. Default: 1.0.
"""
self.coefficients = coefficients
self.linear_coefficients = linear_coefficients
self.gradient_norm_scale = gradient_norm_scale
self.dealiasing_fraction = dealiasing_fraction
super().__init__(
@@ -113,7 +113,7 @@ def _build_linear_operator(
axis=0,
keepdims=True,
)
for i, c in enumerate(self.coefficients)
for i, c in enumerate(self.linear_coefficients)
)
return linear_operator

@@ -132,15 +132,15 @@ def _build_nonlinear_fun(


class NormalizedGradientNormStepper(GeneralGradientNormStepper):
normalized_coefficients: tuple[float, ...]
normalized_linear_coefficients: tuple[float, ...]
normalized_gradient_norm_scale: float

def __init__(
self,
num_spatial_dims: int,
num_points: int,
*,
normalized_coefficients: tuple[float, ...] = (
normalized_linear_coefficients: tuple[float, ...] = (
0.0,
0.0,
-1.0 * 0.1 / (60.0**2),
@@ -217,14 +217,14 @@ def __init__(
coefficients of the exponential time differencing Runge Kutta
method. Default: 1.0.
"""
self.normalized_coefficients = normalized_coefficients
self.normalized_linear_coefficients = normalized_linear_coefficients
self.normalized_gradient_norm_scale = normalized_gradient_norm_scale
super().__init__(
num_spatial_dims=num_spatial_dims,
domain_extent=1.0,
num_points=num_points,
dt=1.0,
coefficients=normalized_coefficients,
linear_coefficients=normalized_linear_coefficients,
gradient_norm_scale=normalized_gradient_norm_scale,
order=order,
dealiasing_fraction=dealiasing_fraction,
@@ -339,7 +339,7 @@ def __init__(
super().__init__(
num_spatial_dims=num_spatial_dims,
num_points=num_points,
normalized_coefficients=normalized_coefficients,
normalized_linear_coefficients=normalized_coefficients,
normalized_gradient_norm_scale=normalized_gradient_norm_scale,
order=order,
dealiasing_fraction=dealiasing_fraction,
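The gradient-norm steppers change in the same way. A short sketch of the renamed keyword, under the same export-path assumption as above; the domain size and resolution are illustrative:

```python
from exponax.stepper.generic import (
    GeneralGradientNormStepper,
    NormalizedGradientNormStepper,
)

# Kuramoto-Sivashinsky-type defaults: destabilizing Laplacian (order 2) and
# stabilizing biharmonic term (order 4), combined with the gradient-norm nonlinearity.
ks_stepper = GeneralGradientNormStepper(
    num_spatial_dims=1,
    domain_extent=60.0,
    num_points=128,
    dt=0.1,
    linear_coefficients=(0.0, 0.0, -1.0, 0.0, -1.0),  # formerly `coefficients=`
    gradient_norm_scale=1.0,
)

# Normalized variant, relying on the (renamed) keyword defaults.
normalized_ks_stepper = NormalizedGradientNormStepper(
    num_spatial_dims=1,
    num_points=128,
)
```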
30 changes: 15 additions & 15 deletions exponax/stepper/generic/_linear.py
@@ -11,7 +11,7 @@


class GeneralLinearStepper(BaseStepper):
coefficients: tuple[float, ...]
linear_coefficients: tuple[float, ...]

def __init__(
self,
@@ -20,7 +20,7 @@ def __init__(
num_points: int,
dt: float,
*,
coefficients: tuple[float, ...] = (0.0, -0.1, 0.01),
linear_coefficients: tuple[float, ...] = (0.0, -0.1, 0.01),
):
"""
General timestepper for a d-dimensional (`d ∈ {1, 2, 3}`) linear
@@ -67,7 +67,7 @@ def __init__(
number of points in each dimension is the same. Hence, the total
number of degrees of freedom is `Nᵈ`.
- `dt`: The timestep size `Δt` between two consecutive states.
- `coefficients` (keyword-only): The list of coefficients `a_j`
- `linear_coefficients` (keyword-only): The list of coefficients `a_j`
corresponding to the derivatives. Default: `[0.0, -0.1, 0.01]`.
**Notes:**
@@ -137,7 +137,7 @@ def __init__(
the function [`exponax.stepper.generic.normalize_coefficients`][] to
obtain the normalized coefficients.
"""
self.coefficients = coefficients
self.linear_coefficients = linear_coefficients
super().__init__(
num_spatial_dims=num_spatial_dims,
domain_extent=domain_extent,
@@ -157,7 +157,7 @@ def _build_linear_operator(
axis=0,
keepdims=True,
)
for i, c in enumerate(self.coefficients)
for i, c in enumerate(self.linear_coefficients)
)
return linear_operator

@@ -172,14 +172,14 @@ def _build_nonlinear_fun(


class NormalizedLinearStepper(GeneralLinearStepper):
normalized_coefficients: tuple[float, ...]
normalized_linear_coefficients: tuple[float, ...]

def __init__(
self,
num_spatial_dims: int,
num_points: int,
*,
normalized_coefficients: tuple[float, ...] = (0.0, -0.5, 0.01),
normalized_linear_coefficients: tuple[float, ...] = (0.0, -0.5, 0.01),
):
"""
Timestepper for d-dimensional (`d ∈ {1, 2, 3}`) linear PDEs on periodic
@@ -218,25 +218,25 @@ def __init__(
dynamics. This must be a tuple of floats. The length of the tuple
defines the highest occurring linear derivative in the PDE.
"""
self.normalized_coefficients = normalized_coefficients
self.normalized_linear_coefficients = normalized_linear_coefficients
super().__init__(
num_spatial_dims=num_spatial_dims,
domain_extent=1.0,
num_points=num_points,
dt=1.0,
coefficients=normalized_coefficients,
linear_coefficients=normalized_linear_coefficients,
)


class DifficultyLinearStepper(NormalizedLinearStepper):
difficulties: tuple[float, ...]
linear_difficulties: tuple[float, ...]

def __init__(
self,
num_spatial_dims: int = 1,
num_points: int = 48,
*,
difficulties: tuple[float, ...] = (0.0, -2.0),
linear_difficulties: tuple[float, ...] = (0.0, -2.0),
):
"""
Timestepper for d-dimensional (`d ∈ {1, 2, 3}`) linear PDEs on periodic
@@ -275,17 +275,17 @@ def __init__(
be a tuple of floats. The length of the tuple defines the highest
occurring linear derivative in the PDE. Default is `(0.0, -2.0)`.
"""
self.difficulties = difficulties
self.linear_difficulties = linear_difficulties
normalized_coefficients = extract_normalized_coefficients_from_difficulty(
difficulties,
linear_difficulties,
num_spatial_dims=num_spatial_dims,
num_points=num_points,
)

super().__init__(
num_spatial_dims=num_spatial_dims,
num_points=num_points,
normalized_coefficients=normalized_coefficients,
normalized_linear_coefficients=normalized_coefficients,
)


@@ -318,7 +318,7 @@ def __init__(
"""
difficulties = (0.0,) * (order) + (difficulty,)
super().__init__(
difficulties=difficulties,
linear_difficulties=difficulties,
num_spatial_dims=num_spatial_dims,
num_points=num_points,
)
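The linear steppers follow suit, including the difficulty-based variant where `difficulties` became `linear_difficulties`. A sketch under the same assumptions, with illustrative values; the default `(0.0, -0.1, 0.01)` combines a first-order (advection-like) and a second-order (diffusion-like) term:

```python
from exponax.stepper.generic import (
    DifficultyLinearStepper,
    GeneralLinearStepper,
    NormalizedLinearStepper,
)

# Physical coefficients on an explicit domain with an explicit time step.
linear_stepper = GeneralLinearStepper(
    num_spatial_dims=1,
    domain_extent=1.0,
    num_points=64,
    dt=0.01,
    linear_coefficients=(0.0, -0.1, 0.01),  # formerly `coefficients=`
)

# Normalized variant on the unit domain with unit time step.
normalized_stepper = NormalizedLinearStepper(
    num_spatial_dims=1,
    num_points=64,
    normalized_linear_coefficients=(0.0, -0.5, 0.01),  # formerly `normalized_coefficients=`
)

# Difficulty-based variant; the difficulties are converted to normalized
# coefficients internally via extract_normalized_coefficients_from_difficulty.
difficulty_stepper = DifficultyLinearStepper(
    num_spatial_dims=1,
    num_points=48,
    linear_difficulties=(0.0, -2.0),  # formerly `difficulties=`
)
```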