From f84b72206af04fe2cb1113948e2f55644607224f Mon Sep 17 00:00:00 2001 From: David Zwicker Date: Wed, 31 Jul 2024 09:17:12 +0200 Subject: [PATCH] Format docstrings automatically (#580) --- docs/source/create_performance_plots.py | 11 +- docs/source/parse_examples.py | 2 +- docs/source/run_autodoc.py | 5 +- docs/sphinx_ext/package_config.py | 2 +- docs/sphinx_ext/simplify_typehints.py | 12 +- docs/sphinx_ext/toctree_filter.py | 2 +- examples/heterogeneous_bcs.py | 2 +- examples/pde_1d_class.py | 4 +- examples/pde_brusselator_class.py | 8 +- examples/pde_coupled.py | 2 +- examples/pde_custom_class.py | 4 +- examples/pde_custom_numba.py | 6 +- examples/pde_sir.py | 4 +- examples/py_modelrunner.py | 2 +- pde/__init__.py | 6 +- pde/fields/__init__.py | 3 +- pde/fields/base.py | 102 ++++---- pde/fields/collection.py | 79 +++---- pde/fields/datafield_base.py | 95 ++++---- pde/fields/scalar.py | 29 ++- pde/fields/tensorial.py | 33 ++- pde/fields/vectorial.py | 51 ++-- pde/grids/__init__.py | 3 +- pde/grids/_mesh.py | 59 +++-- pde/grids/base.py | 165 ++++++------- pde/grids/boundaries/__init__.py | 12 +- pde/grids/boundaries/axes.py | 33 ++- pde/grids/boundaries/axis.py | 40 ++-- pde/grids/boundaries/local.py | 222 +++++++++--------- pde/grids/cartesian.py | 31 ++- pde/grids/coordinates/__init__.py | 3 +- pde/grids/coordinates/base.py | 26 +- pde/grids/coordinates/bipolar.py | 4 +- pde/grids/coordinates/bispherical.py | 4 +- pde/grids/coordinates/cartesian.py | 4 +- pde/grids/coordinates/cylindrical.py | 2 +- pde/grids/coordinates/polar.py | 4 +- pde/grids/coordinates/spherical.py | 4 +- pde/grids/cylindrical.py | 21 +- pde/grids/operators/__init__.py | 3 +- pde/grids/operators/cartesian.py | 123 +++++----- pde/grids/operators/common.py | 29 ++- pde/grids/operators/cylindrical_sym.py | 39 ++- pde/grids/operators/polar_sym.py | 33 ++- pde/grids/operators/spherical_sym.py | 45 ++-- pde/grids/spherical.py | 35 ++- pde/pdes/__init__.py | 5 +- pde/pdes/allen_cahn.py | 13 +- pde/pdes/base.py | 47 ++-- pde/pdes/cahn_hilliard.py | 13 +- pde/pdes/diffusion.py | 13 +- pde/pdes/kpz_interface.py | 13 +- pde/pdes/kuramoto_sivashinsky.py | 13 +- pde/pdes/laplace.py | 5 +- pde/pdes/pde.py | 29 ++- pde/pdes/swift_hohenberg.py | 13 +- pde/pdes/wave.py | 15 +- pde/solvers/__init__.py | 7 +- pde/solvers/adams_bashforth.py | 13 +- pde/solvers/base.py | 54 ++--- pde/solvers/controller.py | 17 +- pde/solvers/crank_nicolson.py | 11 +- pde/solvers/explicit.py | 33 ++- pde/solvers/explicit_mpi.py | 18 +- pde/solvers/implicit.py | 17 +- pde/solvers/scipy.py | 14 +- pde/storage/__init__.py | 5 +- pde/storage/base.py | 63 +++-- pde/storage/file.py | 32 +-- pde/storage/memory.py | 21 +- pde/storage/modelrunner.py | 30 ++- pde/storage/movie.py | 41 ++-- pde/tools/__init__.py | 5 +- pde/tools/cache.py | 50 ++-- pde/tools/config.py | 27 +-- pde/tools/cuboid.py | 31 ++- pde/tools/docstrings.py | 13 +- pde/tools/expressions.py | 69 +++--- pde/tools/ffmpeg.py | 19 +- pde/tools/math.py | 21 +- pde/tools/misc.py | 30 ++- pde/tools/modelrunner.py | 11 +- pde/tools/mpi.py | 29 +-- pde/tools/numba.py | 31 ++- pde/tools/output.py | 20 +- pde/tools/parameters.py | 39 ++- pde/tools/parse_duration.py | 3 +- pde/tools/plotting.py | 63 +++-- pde/tools/spectral.py | 9 +- pde/tools/typing.py | 19 +- pde/trackers/__init__.py | 3 +- pde/trackers/base.py | 31 ++- pde/trackers/interactive.py | 23 +- pde/trackers/interrupts.py | 23 +- pde/trackers/trackers.py | 55 +++-- pde/visualization/__init__.py | 4 +- pde/visualization/movies.py | 19 +- 
pde/visualization/plotting.py | 31 ++- scripts/create_requirements.py | 19 +- scripts/create_storage_test_resources.py | 8 +- scripts/format_code.sh | 3 + scripts/performance_boundaries.py | 7 +- scripts/performance_laplace.py | 26 +- scripts/performance_solvers.py | 6 +- scripts/profile_import.py | 6 +- scripts/run_tests.py | 12 +- scripts/show_environment.py | 8 +- tests/conftest.py | 13 +- tests/fields/fixtures/fields.py | 6 +- tests/fields/test_field_collections.py | 32 +-- tests/fields/test_generic_fields.py | 56 ++--- tests/fields/test_scalar_fields.py | 74 +++--- tests/fields/test_tensorial_fields.py | 14 +- tests/fields/test_vectorial_fields.py | 32 +-- .../grids/boundaries/test_axes_boundaries.py | 20 +- .../grids/boundaries/test_axis_boundaries.py | 4 +- .../grids/boundaries/test_local_boundaries.py | 50 ++-- .../operators/test_cartesian_operators.py | 46 ++-- .../grids/operators/test_common_operators.py | 4 +- .../operators/test_cylindrical_operators.py | 30 +-- tests/grids/operators/test_polar_operators.py | 22 +- .../operators/test_spherical_operators.py | 30 +-- tests/grids/test_cartesian_grids.py | 32 +-- tests/grids/test_coordinates.py | 10 +- tests/grids/test_cylindrical_grids.py | 6 +- tests/grids/test_generic_grids.py | 28 +-- tests/grids/test_grid_mesh.py | 22 +- tests/grids/test_spherical_grids.py | 14 +- tests/pdes/test_diffusion_pdes.py | 14 +- tests/pdes/test_generic_pdes.py | 4 +- tests/pdes/test_laplace_pdes.py | 4 +- tests/pdes/test_pde_class.py | 50 ++-- tests/pdes/test_pdes_mpi.py | 8 +- tests/pdes/test_wave_pdes.py | 2 +- tests/requirements.txt | 1 + tests/resources/run_pde.py | 2 +- tests/solvers/test_adams_bashforth_solver.py | 2 +- tests/solvers/test_controller.py | 2 +- tests/solvers/test_explicit_mpi_solvers.py | 4 +- tests/solvers/test_explicit_solvers.py | 16 +- tests/solvers/test_generic_solvers.py | 10 +- tests/solvers/test_implicit_solvers.py | 4 +- tests/solvers/test_scipy_solvers.py | 4 +- tests/storage/test_file_storages.py | 14 +- tests/storage/test_generic_storages.py | 24 +- tests/storage/test_memory_storages.py | 4 +- tests/storage/test_modelrunner_storages.py | 2 +- tests/storage/test_movie_storages.py | 22 +- tests/test_examples.py | 4 +- tests/test_integration.py | 17 +- tests/tools/test_cache.py | 58 +++-- tests/tools/test_config.py | 10 +- tests/tools/test_cuboid.py | 8 +- tests/tools/test_expressions.py | 42 ++-- tests/tools/test_ffmpeg.py | 2 +- tests/tools/test_math.py | 4 +- tests/tools/test_misc.py | 14 +- tests/tools/test_mpi.py | 4 +- tests/tools/test_numba.py | 8 +- tests/tools/test_output.py | 6 +- tests/tools/test_parameters.py | 10 +- tests/tools/test_parse_duration.py | 2 +- tests/tools/test_plotting_tools.py | 6 +- tests/tools/test_spectral.py | 8 +- tests/trackers/test_interrupts.py | 10 +- tests/trackers/test_trackers.py | 28 +-- tests/visualization/test_movies.py | 6 +- tests/visualization/test_plotting.py | 12 +- 168 files changed, 1825 insertions(+), 1908 deletions(-) diff --git a/docs/source/create_performance_plots.py b/docs/source/create_performance_plots.py index 06d1ccb4..27b563d8 100755 --- a/docs/source/create_performance_plots.py +++ b/docs/source/create_performance_plots.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -""" -Code for creating performance plots +"""Code for creating performance plots. .. codeauthor:: David Zwicker """ @@ -32,7 +31,7 @@ def time_function(func, arg, repeat=3, use_out=False): - """estimates the computation speed of a function + """Estimates the computation speed of a function. 
Args: func (callable): The function to test @@ -54,7 +53,7 @@ def time_function(func, arg, repeat=3, use_out=False): def get_performance_data(periodic=False): - """obtain the data used in the performance plot + """Obtain the data used in the performance plot. Args: periodic (bool): The boundary conditions of the underlying grid @@ -90,7 +89,7 @@ def get_performance_data(periodic=False): def plot_performance(performance_data, title=None): - """plot the performance data + """Plot the performance data. Args: performance_data: The data obtained from calling @@ -129,7 +128,7 @@ def plot_performance(performance_data, title=None): def main(): - """run main scripts""" + """Run main scripts.""" data = get_performance_data(periodic=False) plot_performance(data, title="2D Laplacian (reflecting BCs)") plt.savefig("performance_noflux.pdf", transparent=True) diff --git a/docs/source/parse_examples.py b/docs/source/parse_examples.py index 7fc79e8a..7c5fc7ad 100755 --- a/docs/source/parse_examples.py +++ b/docs/source/parse_examples.py @@ -11,7 +11,7 @@ def main(): - """parse all examples and write them in a special example module""" + """Parse all examples and write them in a special example module.""" # create the output directory OUTPUT.mkdir(parents=True, exist_ok=True) diff --git a/docs/source/run_autodoc.py b/docs/source/run_autodoc.py index 26b3157e..ae7fc760 100755 --- a/docs/source/run_autodoc.py +++ b/docs/source/run_autodoc.py @@ -16,8 +16,8 @@ def replace_in_file(infile, replacements, outfile=None): - """reads in a file, replaces the given data using python formatting and - writes back the result to a file. + """Reads in a file, replaces the given data using python formatting and writes back + the result to a file. Args: infile (str): @@ -27,7 +27,6 @@ def replace_in_file(infile, replacements, outfile=None): outfile (str): Output file to which the data is written. 
If it is omitted, the input file will be overwritten instead - """ if outfile is None: outfile = infile diff --git a/docs/sphinx_ext/package_config.py b/docs/sphinx_ext/package_config.py index a5e2676c..12077315 100644 --- a/docs/sphinx_ext/package_config.py +++ b/docs/sphinx_ext/package_config.py @@ -3,7 +3,7 @@ class PackageConfigDirective(SphinxDirective): - """directive that displays all package configuration items""" + """Directive that displays all package configuration items.""" has_content = True required_arguments = 0 diff --git a/docs/sphinx_ext/simplify_typehints.py b/docs/sphinx_ext/simplify_typehints.py index a2411f64..71308c0b 100644 --- a/docs/sphinx_ext/simplify_typehints.py +++ b/docs/sphinx_ext/simplify_typehints.py @@ -1,6 +1,4 @@ -""" -Simple sphinx plug-in that simplifies type information in function signatures -""" +"""Simple sphinx plug-in that simplifies type information in function signatures.""" import re @@ -56,10 +54,10 @@ def process_signature( app, what: str, name: str, obj, options, signature, return_annotation ): - """Process signature by applying replacement rules""" + """Process signature by applying replacement rules.""" def process(sig_obj): - """process the signature object""" + """Process the signature object.""" if sig_obj is not None: for key, value in REPLACEMENTS_REGEX.items(): sig_obj = re.sub(key, value, sig_obj) @@ -74,7 +72,7 @@ def process(sig_obj): def process_docstring(app, what: str, name: str, obj, options, lines): - """Process docstring by applying replacement rules""" + """Process docstring by applying replacement rules.""" for i, line in enumerate(lines): for key, value in REPLACEMENTS: line = line.replace(key, value) @@ -82,6 +80,6 @@ def process_docstring(app, what: str, name: str, obj, options, lines): def setup(app): - """set up hooks for this sphinx plugin""" + """Set up hooks for this sphinx plugin.""" app.connect("autodoc-process-signature", process_signature) app.connect("autodoc-process-docstring", process_docstring) diff --git a/docs/sphinx_ext/toctree_filter.py b/docs/sphinx_ext/toctree_filter.py index 964aaed5..f4d45d75 100644 --- a/docs/sphinx_ext/toctree_filter.py +++ b/docs/sphinx_ext/toctree_filter.py @@ -4,7 +4,7 @@ class TocTreeFilter(TocTree): - """directive to filter table-of-contents entries""" + """Directive to filter table-of-contents entries.""" hasPat = re.compile(r"^\s*:(.+):(.+)$") diff --git a/examples/heterogeneous_bcs.py b/examples/heterogeneous_bcs.py index b9bfa3ec..f31cc79e 100644 --- a/examples/heterogeneous_bcs.py +++ b/examples/heterogeneous_bcs.py @@ -28,7 +28,7 @@ # define the boundary conditions, which here are calculated from a function def bc_value(adjacent_value, dx, x, y, t): - """return boundary value""" + """Return boundary value.""" return np.sign(x) diff --git a/examples/pde_1d_class.py b/examples/pde_1d_class.py index d625d64d..43df85ed 100644 --- a/examples/pde_1d_class.py +++ b/examples/pde_1d_class.py @@ -18,10 +18,10 @@ class KortewegDeVriesPDE(PDEBase): - """Korteweg-de Vries equation""" + """Korteweg-de Vries equation.""" def evolution_rate(self, state, t=0): - """implement the python version of the evolution equation""" + """Implement the python version of the evolution equation.""" assert state.grid.dim == 1 # ensure the state is one-dimensional grad_x = state.gradient("auto_periodic_neumann")[0] return 6 * state * grad_x - grad_x.laplace("auto_periodic_neumann") diff --git a/examples/pde_brusselator_class.py b/examples/pde_brusselator_class.py index d99aa4bf..47ffce9a 100644 --- 
a/examples/pde_brusselator_class.py +++ b/examples/pde_brusselator_class.py @@ -25,7 +25,7 @@ class BrusselatorPDE(PDEBase): - """Brusselator with diffusive mobility""" + """Brusselator with diffusive mobility.""" def __init__(self, a=1, b=3, diffusivity=[1, 0.1], bc="auto_periodic_neumann"): super().__init__() @@ -35,13 +35,13 @@ def __init__(self, a=1, b=3, diffusivity=[1, 0.1], bc="auto_periodic_neumann"): self.bc = bc # boundary condition def get_initial_state(self, grid): - """prepare a useful initial state""" + """Prepare a useful initial state.""" u = ScalarField(grid, self.a, label="Field $u$") v = self.b / self.a + 0.1 * ScalarField.random_normal(grid, label="Field $v$") return FieldCollection([u, v]) def evolution_rate(self, state, t=0): - """pure python implementation of the PDE""" + """Pure python implementation of the PDE.""" u, v = state rhs = state.copy() d0, d1 = self.diffusivity @@ -50,7 +50,7 @@ def evolution_rate(self, state, t=0): return rhs def _make_pde_rhs_numba(self, state): - """nunmba-compiled implementation of the PDE""" + """Numba-compiled implementation of the PDE.""" d0, d1 = self.diffusivity a, b = self.a, self.b laplace = state.grid.make_operator("laplace", bc=self.bc) diff --git a/examples/pde_coupled.py b/examples/pde_coupled.py index 72c41e83..217b9d11 100644 --- a/examples/pde_coupled.py +++ b/examples/pde_coupled.py @@ -20,7 +20,7 @@ class FitzhughNagumoPDE(PDEBase): - """FitzHugh–Nagumo model with diffusive coupling""" + """FitzHugh–Nagumo model with diffusive coupling.""" def __init__(self, stimulus=0.5, τ=10, a=0, b=0, bc="auto_periodic_neumann"): super().__init__() diff --git a/examples/pde_custom_class.py b/examples/pde_custom_class.py index 27507079..7e9ca974 100644 --- a/examples/pde_custom_class.py +++ b/examples/pde_custom_class.py @@ -15,10 +15,10 @@ class KuramotoSivashinskyPDE(PDEBase): - """Implementation of the normalized Kuramoto–Sivashinsky equation""" + """Implementation of the normalized Kuramoto–Sivashinsky equation.""" def evolution_rate(self, state, t=0): - """implement the python version of the evolution equation""" + """Implement the python version of the evolution equation.""" state_lap = state.laplace(bc="auto_periodic_neumann") state_lap2 = state_lap.laplace(bc="auto_periodic_neumann") state_grad = state.gradient(bc="auto_periodic_neumann") diff --git a/examples/pde_custom_numba.py b/examples/pde_custom_numba.py index f5df35fa..4e390089 100644 --- a/examples/pde_custom_numba.py +++ b/examples/pde_custom_numba.py @@ -17,21 +17,21 @@ class KuramotoSivashinskyPDE(PDEBase): - """Implementation of the normalized Kuramoto–Sivashinsky equation""" + """Implementation of the normalized Kuramoto–Sivashinsky equation.""" def __init__(self, bc="auto_periodic_neumann"): super().__init__() self.bc = bc def evolution_rate(self, state, t=0): - """implement the python version of the evolution equation""" + """Implement the python version of the evolution equation.""" state_lap = state.laplace(bc=self.bc) state_lap2 = state_lap.laplace(bc=self.bc) state_grad_sq = state.gradient_squared(bc=self.bc) return -state_grad_sq / 2 - state_lap - state_lap2 def _make_pde_rhs_numba(self, state): - """nunmba-compiled implementation of the PDE""" + """Numba-compiled implementation of the PDE.""" gradient_squared = state.grid.make_operator("gradient_squared", bc=self.bc) laplace = state.grid.make_operator("laplace", bc=self.bc) diff --git a/examples/pde_sir.py b/examples/pde_sir.py index a851c7cf..f504a1e0 100644 --- a/examples/pde_sir.py +++
b/examples/pde_sir.py @@ -21,7 +21,7 @@ class SIRPDE(PDEBase): - """SIR-model with diffusive mobility""" + """SIR-model with diffusive mobility.""" def __init__( self, beta=0.3, gamma=0.9, diffusivity=0.1, bc="auto_periodic_neumann" @@ -33,7 +33,7 @@ def __init__( self.bc = bc # boundary condition def get_state(self, s, i): - """generate a suitable initial state""" + """Generate a suitable initial state.""" norm = (s + i).data.max() # maximal density if norm > 1: s /= norm diff --git a/examples/py_modelrunner.py b/examples/py_modelrunner.py index 1878bd39..02c60140 100755 --- a/examples/py_modelrunner.py +++ b/examples/py_modelrunner.py @@ -21,7 +21,7 @@ def run(storage, diffusivity=0.1): - """function that runs the model + """Function that runs the model. Args: storage (:mod:`~modelrunner.storage.group.StorageGroup`): diff --git a/pde/__init__.py b/pde/__init__.py index 8a5d9b5f..b7310ce0 100644 --- a/pde/__init__.py +++ b/pde/__init__.py @@ -1,7 +1,5 @@ -""" -The py-pde package provides classes and methods for solving partial differential -equations. -""" +"""The py-pde package provides classes and methods for solving partial differential +equations.""" # determine the package version try: diff --git a/pde/fields/__init__.py b/pde/fields/__init__.py index 7e5507a8..e0815cfe 100644 --- a/pde/fields/__init__.py +++ b/pde/fields/__init__.py @@ -1,5 +1,4 @@ -""" -Defines fields, which contain the actual data stored on a discrete grid. +"""Defines fields, which contain the actual data stored on a discrete grid. .. autosummary:: :nosignatures: diff --git a/pde/fields/base.py b/pde/fields/base.py index 7e5a5459..ceec5149 100644 --- a/pde/fields/base.py +++ b/pde/fields/base.py @@ -1,5 +1,4 @@ -""" -Defines base class of fields or collections, which are discretized on grids +"""Defines base class of fields or collections, which are discretized on grids. .. codeauthor:: David Zwicker """ @@ -28,11 +27,11 @@ class RankError(TypeError): - """error indicating that the field has the wrong rank""" + """Error indicating that the field has the wrong rank.""" class FieldBase(metaclass=ABCMeta): - """abstract base class for describing (discretized) fields""" + """Abstract base class for describing (discretized) fields.""" _subclasses: dict[str, type[FieldBase]] = {} # all classes inheriting from this _grid: GridBase # the grid on which the field is defined @@ -63,7 +62,7 @@ def __init__( self._logger = logging.getLogger(self.__class__.__name__) def __init_subclass__(cls, **kwargs): # @NoSelf - """register all subclassess to reconstruct them later""" + """Register all subclasses to reconstruct them later.""" super().__init_subclass__(**kwargs) if cls is not FieldBase: @@ -83,12 +82,12 @@ def __setstate__(self, state): @property def data(self) -> np.ndarray: - """:class:`~numpy.ndarray`: discretized data at the support points""" + """:class:`~numpy.ndarray`: discretized data at the support points.""" return self._data_valid @data.setter def data(self, value: NumberOrArray) -> None: - """set the valid data of the field + """Set the valid data of the field. Args: value: @@ -110,12 +109,12 @@ def _idx_valid(self) -> tuple[slice, ...]: @property def _data_full(self) -> np.ndarray: - """:class:`~numpy.ndarray`: the full data including ghost cells""" + """:class:`~numpy.ndarray`: the full data including ghost cells.""" return self.__data_full @_data_full.setter def _data_full(self, value: NumberOrArray) -> None: - """set the full data including ghost cells + """Set the full data including ghost cells.
Args: value: @@ -147,7 +146,8 @@ def _data_full(self, value: NumberOrArray) -> None: @property def _data_flat(self) -> np.ndarray: - """:class:`~numpy.ndarray`: flat version of discretized data with ghost cells""" + """:class:`~numpy.ndarray`: flat version of discretized data with ghost + cells.""" # flatten the first dimension of the internal data by creating a view and then # setting the new shape. This disallows accidental copying of the data data_flat = self._data_full.view() @@ -156,7 +156,7 @@ def _data_full(self, value: NumberOrArray) -> None: @_data_flat.setter def _data_flat(self, value: np.ndarray) -> None: - """set the full data including ghost cells from a flattened array""" + """Set the full data including ghost cells from a flattened array.""" # simply set the data -> this might need to be overwritten self._data_full = value @@ -167,7 +167,7 @@ def writeable(self) -> bool: @writeable.setter def writeable(self, value: bool) -> None: - """set whether the field data can be changed or not""" + """Set whether the field data can be changed or not.""" self._data_full.flags.writeable = value self._data_valid.flags.writeable = value @@ -178,7 +178,7 @@ def label(self) -> str | None: @label.setter def label(self, value: str | None = None): - """set the new label of the field""" + """Set the new label of the field.""" if value is None or isinstance(value, str): self._label = value else: @@ -188,7 +188,7 @@ def from_state( cls, attributes: dict[str, Any], data: np.ndarray | None = None ) -> FieldBase: - """create a field from given state. + """Create a field from given state. Args: attributes (dict): @@ -210,7 +210,7 @@ def from_state( @classmethod def from_file(cls, filename: str) -> FieldBase: - """create field from data stored in a file + """Create field from data stored in a file. Field can be written to a file using :meth:`FieldBase.to_file`. @@ -255,7 +255,7 @@ def from_file(cls, filename: str) -> FieldBase: @classmethod def _from_hdf_dataset(cls, dataset) -> FieldBase: - """construct a field by reading data from an hdf5 dataset""" + """Construct a field by reading data from an hdf5 dataset.""" # copy attributes from hdf attributes = dict(dataset.attrs) @@ -269,11 +269,11 @@ def _from_hdf_dataset(cls, dataset) -> FieldBase: @property def grid(self) -> GridBase: - """:class:`~pde.grids.base,GridBase`: The grid on which the field is defined""" + """:class:`~pde.grids.base.GridBase`: The grid on which the field is defined.""" return self._grid def to_file(self, filename: str, **kwargs) -> None: - r"""store field in a file + r"""Store field in a file. The extension of the filename determines what format is being used. If it ends in `.h5` or `.hdf`, the Hierarchical Data Format is used. The other supported @@ -314,7 +314,7 @@ def to_file(self, filename: str, **kwargs) -> None: raise ValueError(f"Do not know how to save data to `*{extension}`") def _write_hdf_dataset(self, hdf_path, key: str = "data") -> None: - """write data to a given hdf5 path `hdf_path`""" + """Write data to a given hdf5 path `hdf_path`""" # write the data dataset = hdf_path.create_dataset(key, data=self.data) @@ -323,7 +323,7 @@ def _write_hdf_dataset(self, hdf_path, key: str = "data") -> None: dataset.attrs[key] = value def _write_to_image(self, filename: str, **kwargs): - """write data to image + """Write data to image.
Args: filename (str): The path to the image that will be created @@ -334,7 +334,7 @@ def _write_to_image(self, filename: str, **kwargs): def copy( self: TField, *, label: str | None = None, dtype: DTypeLike | None = None ) -> TField: - """return a new field with the data (but not the grid) copied + """Return a new field with the data (but not the grid) copied. Args: label (str, optional): @@ -350,7 +350,7 @@ def copy( def assert_field_compatible( self, other: FieldBase, accept_scalar: bool = False ) -> None: - """checks whether `other` is compatible with the current field + """Checks whether `other` is compatible with the current field. Args: other (FieldBase): @@ -373,7 +373,7 @@ def assert_field_compatible( @property def dtype(self) -> DTypeLike: - """:class:`~DTypeLike`: the numpy dtype of the underlying data""" + """:class:`~DTypeLike`: the numpy dtype of the underlying data.""" # this property is necessary to support np.iscomplexobj for DataFieldBases return self.data.dtype # type: ignore @@ -407,7 +407,7 @@ def attributes_serialized(self) -> dict[str, str]: @classmethod def unserialize_attributes(cls, attributes: dict[str, str]) -> dict[str, Any]: - """unserializes the given attributes + """Unserializes the given attributes. Args: attributes (dict): @@ -426,13 +426,13 @@ def unserialize_attributes(cls, attributes: dict[str, str]) -> dict[str, Any]: return cls._subclasses[class_name].unserialize_attributes(attributes) def __eq__(self, other): - """test fields for equality, ignoring the label""" + """Test fields for equality, ignoring the label.""" if not isinstance(other, self.__class__): return NotImplemented return self.grid == other.grid and np.array_equal(self.data, other.data) def _unary_operation(self: TField, op: Callable) -> TField: - """perform an unary operation on this field + """Perform a unary operation on this field. Args: op (callable): @@ -445,16 +445,16 @@ def _unary_operation(self: TField, op: Callable) -> TField: @property def real(self: TField) -> TField: - """:class:`FieldBase`: Real part of the field""" + """:class:`FieldBase`: Real part of the field.""" return self._unary_operation(np.real) @property def imag(self: TField) -> TField: - """:class:`FieldBase`: Imaginary part of the field""" + """:class:`FieldBase`: Imaginary part of the field.""" return self._unary_operation(np.imag) def conjugate(self: TField) -> TField: - """returns complex conjugate of the field + """Returns complex conjugate of the field. Returns: :class:`FieldBase`: the complex conjugated field @@ -462,7 +462,7 @@ def conjugate(self: TField) -> TField: return self._unary_operation(np.conjugate) def __neg__(self): - """return the negative of the current field + """Return the negative of the current field.
:class:`FieldBase`: The negative of the current field """ @@ -471,7 +471,7 @@ def __neg__(self): def _binary_operation( self, other, op: Callable, scalar_second: bool = True ) -> FieldBase: - """perform a binary operation between this field and `other` + """Perform a binary operation between this field and `other` Args: other (number of FieldBase): @@ -524,7 +524,7 @@ def _binary_operation( def _binary_operation_inplace( self: TField, other, op_inplace: Callable, scalar_second: bool = True ) -> TField: - """perform an in-place binary operation between this field and `other` + """Perform an in-place binary operation between this field and `other` Args: other (number of FieldBase): @@ -560,45 +560,45 @@ def _binary_operation_inplace( return self def __add__(self, other) -> FieldBase: - """add two fields""" + """Add two fields.""" return self._binary_operation(other, np.add, scalar_second=False) __radd__ = __add__ def __iadd__(self: TField, other) -> TField: - """add `other` to the current field""" + """Add `other` to the current field.""" return self._binary_operation_inplace(other, np.add, scalar_second=False) def __sub__(self, other) -> FieldBase: - """subtract two fields""" + """Subtract two fields.""" return self._binary_operation(other, np.subtract, scalar_second=False) def __rsub__(self, other) -> FieldBase: - """subtract two fields""" + """Subtract two fields.""" return self._binary_operation( other, lambda x, y, out: np.subtract(y, x, out=out), scalar_second=False ) def __isub__(self: TField, other) -> TField: - """add `other` to the current field""" + """Add `other` to the current field.""" return self._binary_operation_inplace(other, np.subtract, scalar_second=False) def __mul__(self, other) -> FieldBase: - """multiply field by value""" + """Multiply field by value.""" return self._binary_operation(other, np.multiply, scalar_second=False) __rmul__ = __mul__ def __imul__(self: TField, other) -> TField: - """multiply field by value""" + """Multiply field by value.""" return self._binary_operation_inplace(other, np.multiply, scalar_second=False) def __truediv__(self, other) -> FieldBase: - """divide field by value""" + """Divide field by value.""" return self._binary_operation(other, np.true_divide, scalar_second=True) def __rtruediv__(self, other) -> FieldBase: - """divide field by value""" + """Divide field by value.""" def rdivision(x, y, **kwargs): return np.true_divide(y, x, **kwargs) @@ -606,17 +606,17 @@ def rdivision(x, y, **kwargs): return self._binary_operation(other, rdivision, scalar_second=True) def __itruediv__(self: TField, other) -> TField: - """divide field by value""" + """Divide field by value.""" return self._binary_operation_inplace(other, np.true_divide, scalar_second=True) def __pow__(self, exponent: float) -> FieldBase: - """raise data of the field to a certain power""" + """Raise data of the field to a certain power.""" if not np.isscalar(exponent): raise NotImplementedError("Only scalar exponents are supported") return self._binary_operation(exponent, np.power, scalar_second=True) def __ipow__(self: TField, exponent: float) -> TField: - """raise data of the field to a certain power in-place""" + """Raise data of the field to a certain power in-place.""" if not np.isscalar(exponent): raise NotImplementedError("Only scalar exponents are supported") self.data **= exponent @@ -630,7 +630,7 @@ def apply( label: str | None = None, evaluate_args: dict[str, Any] | None = None, ) -> TField: - """applies a function/expression to the data and returns it as a field + 
"""Applies a function/expression to the data and returns it as a field. Args: func (callable or str): @@ -692,7 +692,7 @@ def apply( def get_line_data( self, scalar: str = "auto", extract: str = "auto" ) -> dict[str, Any]: - """return data for a line plot of the field + """Return data for a line plot of the field. Args: scalar (str or int): @@ -708,7 +708,7 @@ def get_line_data( @abstractmethod def get_image_data(self) -> dict[str, Any]: - r"""return data for plotting an image of the field + r"""Return data for plotting an image of the field. Args: scalar (str or int): @@ -725,16 +725,16 @@ def get_image_data(self) -> dict[str, Any]: @abstractmethod def plot(self, *args, **kwargs): - """visualize the field""" + """Visualize the field.""" @abstractmethod def _get_napari_data(self, **kwargs) -> dict[str, dict[str, Any]]: - """returns data for plotting this field using :mod:`napari`""" + """Returns data for plotting this field using :mod:`napari`""" def plot_interactive( self, viewer_args: dict[str, Any] | None = None, **kwargs ) -> None: - """create an interactive plot of the field using :mod:`napari` + """Create an interactive plot of the field using :mod:`napari` For a detailed description of the launched program, see the `napari webpage `_. @@ -759,7 +759,7 @@ def plot_interactive( def split_mpi( self: TField, decomposition: Literal["auto"] | int | list[int] = "auto" ) -> TField: - """splits the field onto subgrids in an MPI run + """Splits the field onto subgrids in an MPI run. In a normal serial simulation, the method simply returns the field itself. In contrast, in an MPI simulation, the field provided on the main node is split diff --git a/pde/fields/collection.py b/pde/fields/collection.py index 049f0d65..ac1f795d 100644 --- a/pde/fields/collection.py +++ b/pde/fields/collection.py @@ -1,5 +1,4 @@ -""" -Defines a collection of fields to represent multiple fields defined on a common grid. +"""Defines a collection of fields to represent multiple fields defined on a common grid. .. codeauthor:: David Zwicker """ @@ -34,7 +33,7 @@ class FieldCollection(FieldBase): - """Collection of fields defined on the same grid + """Collection of fields defined on the same grid. Note: All fields in a collection must have the same data type. This might lead to @@ -143,7 +142,7 @@ def __init__( self.labels = labels # type: ignore def __repr__(self): - """return instance as string""" + """Return instance as string.""" fields = [] for f in self.fields: name = f.__class__.__name__ @@ -154,11 +153,11 @@ def __repr__(self): return f"{self.__class__.__name__}({', '.join(fields)})" def __len__(self): - """return the number of stored fields""" + """Return the number of stored fields.""" return len(self.fields) def __iter__(self) -> Iterator[DataFieldBase]: - """return iterator over the actual fields""" + """Return iterator over the actual fields.""" return iter(self.fields) @overload @@ -168,7 +167,7 @@ def __getitem__(self, index: int | str) -> DataFieldBase: ... def __getitem__(self, index: slice) -> FieldCollection: ... def __getitem__(self, index: int | str | slice) -> DataFieldBase | FieldCollection: - """returns one or many fields from the collection + """Returns one or many fields from the collection. If `index` is an integer or string, the field at this position or with this label is returned, respectively. 
If `index` is a :class:`slice`, a collection is @@ -193,7 +192,7 @@ def __getitem__(self, index: int | str | slice) -> DataFieldBase | FieldCollecti raise TypeError(f"Unsupported index `{index}`") def __setitem__(self, index: int | str, value: NumberOrArray): - """set the value of a specific field + """Set the value of a specific field. Args: index (int or str): @@ -230,7 +229,7 @@ def fields(self) -> list[DataFieldBase]: @property def labels(self) -> _FieldLabels: - """:class:`_FieldLabels`: the labels of all fields + """:class:`_FieldLabels`: the labels of all fields. Note: The attribute returns a special class :class:`_FieldLabels` to allow @@ -242,14 +241,14 @@ def labels(self) -> _FieldLabels: @labels.setter def labels(self, values: list[str | None]): - """sets the labels of all fields""" + """Sets the labels of all fields.""" if len(values) != len(self): raise ValueError("Require a label for each field") for field, value in zip(self.fields, values): field.label = value def __eq__(self, other): - """test fields for equality, ignoring the label""" + """Test fields for equality, ignoring the label.""" if not isinstance(other, self.__class__): return NotImplemented return self.fields == other.fields @@ -258,7 +257,7 @@ def __eq__(self, other): def from_state( cls, attributes: dict[str, Any], data: np.ndarray | None = None ) -> FieldCollection: - """create a field collection from given state. + """Create a field collection from given state. Args: attributes (dict): @@ -296,7 +295,7 @@ def from_data( labels: list[str | None] | _FieldLabels | None = None, dtype: DTypeLike = None, ): - """create a field collection from classes and data + """Create a field collection from classes and data. Args: field_classes (list): @@ -336,7 +335,7 @@ def from_data( @classmethod def _from_hdf_dataset(cls, dataset) -> FieldCollection: - """construct the class by reading data from an hdf5 dataset""" + """Construct the class by reading data from an hdf5 dataset.""" # copy attributes from hdf attributes = dict(dataset.attrs) @@ -356,7 +355,7 @@ def _from_hdf_dataset(cls, dataset) -> FieldCollection: return cls(fields, **attributes) # type: ignore def _write_hdf_dataset(self, hdf_path): - """write data to a given hdf5 path `hdf_path`""" + """Write data to a given hdf5 path `hdf_path`""" # write attributes of the collection for key, value in self.attributes_serialized.items(): hdf_path.attrs[key] = value @@ -366,7 +365,7 @@ def _write_hdf_dataset(self, hdf_path): field._write_hdf_dataset(hdf_path, f"field_{i}") def assert_field_compatible(self, other: FieldBase, accept_scalar: bool = False): - """checks whether `other` is compatible with the current field + """Checks whether `other` is compatible with the current field. Args: other (FieldBase): Other field this is compared to @@ -394,7 +393,7 @@ def from_scalar_expressions( labels: Sequence[str] | None = None, dtype: DTypeLike = None, ) -> FieldCollection: - """create a field collection on a grid from given expressions + """Create a field collection on a grid from given expressions. 
Warning: {WARNING_EXEC} @@ -457,7 +456,7 @@ def scalar_random_uniform( labels: Sequence[str] | None = None, rng: np.random.Generator | None = None, ) -> FieldCollection: - """create scalar fields with random values between `vmin` and `vmax` + """Create scalar fields with random values between `vmin` and `vmax` Args: num_fields (int): @@ -515,7 +514,7 @@ def attributes_serialized(self) -> dict[str, str]: @classmethod def unserialize_attributes(cls, attributes: dict[str, str]) -> dict[str, Any]: - """unserializes the given attributes + """Unserializes the given attributes. Args: attributes (dict): @@ -541,7 +540,7 @@ def copy( label: str | None = None, dtype: DTypeLike = None, ) -> FieldCollection: - """return a copy of the data, but not of the grid + """Return a copy of the data, but not of the grid. Args: label (str, optional): @@ -562,7 +561,7 @@ def append( *fields: DataFieldBase | FieldCollection, label: str | None = None, ) -> FieldCollection: - """create new collection with appended field(s) + """Create new collection with appended field(s) Args: fields (`FieldCollection` or `DataFieldBase`): @@ -594,7 +593,7 @@ def append( ) def _unary_operation(self: FieldCollection, op: Callable) -> FieldCollection: - """perform an unary operation on this field collection + """Perform a unary operation on this field collection. Args: op (callable): @@ -613,7 +612,7 @@ def interpolate_to_grid( fill: Number | None = None, label: str | None = None, ) -> FieldCollection: - """interpolate the data of this field collection to another grid. + """Interpolate the data of this field collection to another grid. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -641,7 +640,7 @@ def smooth( out: FieldCollection | None = None, label: str | None = None, ) -> FieldCollection: - """applies Gaussian smoothing with the given standard deviation + """Applies Gaussian smoothing with the given standard deviation. This function respects periodic boundary conditions of the underlying grid, using reflection when no periodicity is specified. @@ -673,17 +672,17 @@ def smooth( @property def integrals(self) -> list: - """integrals of all fields""" + """Integrals of all fields.""" return [field.integral for field in self] @property def averages(self) -> list: - """averages of all fields""" + """Averages of all fields.""" return [field.average for field in self] @property def magnitudes(self) -> np.ndarray: - """:class:`~numpy.ndarray`: scalar magnitudes of all fields""" + """:class:`~numpy.ndarray`: scalar magnitudes of all fields.""" return np.array([field.magnitude for field in self]) def get_line_data( # type: ignore self, index: int = 0, scalar: str = "auto", extract: str = "auto", ) -> dict[str, Any]: - r"""return data for a line plot of the field + r"""Return data for a line plot of the field. Args: index (int): @@ -710,7 +709,7 @@ def get_line_data( # type: ignore return self[index].get_line_data(scalar=scalar, extract=extract) def get_image_data(self, index: int = 0, **kwargs) -> dict[str, Any]: - r"""return data for plotting an image of the field + r"""Return data for plotting an image of the field. Args: index (int): Index of the field whose data is returned @@ -732,7 +731,7 @@ def _get_merged_image_data( vmin: float | list[float | None] | None = None, vmax: float | list[float | None] | None = None, ) -> tuple[np.ndarray, dict[str, Any]]: - """obtain data required for a merged plot + """Obtain data required for a merged plot.
Args: colors (list): @@ -802,7 +801,7 @@ def _get_merged_image_data( return rgb_arr, field_data def _update_merged_image_plot(self, reference: PlotReference) -> None: - """update an merged image plot with the current field values + """Update a merged image plot with the current field values. Args: reference (:class:`PlotReference`): @@ -828,7 +827,7 @@ def _plot_merged_image( vmax: float | list[float | None] | None = None, **kwargs, ) -> PlotReference: - r"""visualize fields by mapping to different color chanels in a 2d density plot + r"""Visualize fields by mapping to different color channels in a 2d density plot. Args: ax (:class:`matplotlib.axes.Axes`): @@ -899,7 +898,7 @@ def _plot_rgb_image( vmax: float | list[float | None] | None = None, **kwargs, ) -> PlotReference: - r"""visualize fields by mapping to different color chanels in a 2d density plot + r"""Visualize fields by mapping to different color channels in a 2d density plot. Args: ax (:class:`matplotlib.axes.Axes`): @@ -935,7 +934,7 @@ def _plot_rgb_image( ) def _update_plot(self, reference: list[PlotReference]) -> None: - """update a plot collection with the current field values + """Update a plot collection with the current field values. Args: reference (list of :class:`PlotReference`): @@ -957,7 +956,7 @@ def plot( subplot_args=None, **kwargs, ) -> list[PlotReference]: - r"""visualize all the fields in the collection + r"""Visualize all the fields in the collection. Args: kind (str or list of str): @@ -1051,7 +1050,7 @@ def plot( return reference def _get_napari_data(self, **kwargs) -> dict[str, dict[str, Any]]: - r"""returns data for plotting all fields + r"""Returns data for plotting all fields. Args: \**kwargs: all arguments are forwarded to `_get_napari_layer_data` @@ -1067,7 +1066,7 @@ def _get_napari_data(self, **kwargs) -> dict[str, dict[str, Any]]: class _FieldLabels: - """helper class that allows manipulating all labels of field collections""" + """Helper class that allows manipulating all labels of field collections.""" def __init__(self, collection: FieldCollection): """ @@ -1094,7 +1093,7 @@ def __iter__(self) -> Iterator[str | None]: yield field.label def __getitem__(self, index: int | slice) -> str | None | list[str | None]: - """return one or many labels of a field in the collection""" + """Return one or many labels of a field in the collection.""" if isinstance(index, int): return self.collection[index].label elif isinstance(index, slice): @@ -1103,7 +1102,7 @@ def __getitem__(self, index: int | slice) -> str | None | list[str | None]: raise TypeError("Unsupported index type") def __setitem__(self, index: int | slice, value: None | str | list[str | None]): - """change one or many labels of a field in the collection""" + """Change one or many labels of a field in the collection.""" if isinstance(index, int): self.collection.fields[index].label = value # type: ignore elif isinstance(index, slice): @@ -1118,7 +1117,7 @@ def __setitem__(self, index: int | slice, value: None | str | list[str | None]): raise TypeError("Unsupported index type") def index(self, label: str) -> int: - """return the index in the field labels where a certain label is stored + """Return the index in the field labels where a certain label is stored.
Args: label (str): diff --git a/pde/fields/datafield_base.py b/pde/fields/datafield_base.py index 0fe1a3a3..16d6bb9a 100644 --- a/pde/fields/datafield_base.py +++ b/pde/fields/datafield_base.py @@ -1,5 +1,4 @@ -""" -Defines base class of single fields with arbitrary rank +"""Defines base class of single fields with arbitrary rank. .. codeauthor:: David Zwicker """ @@ -35,7 +34,7 @@ class DataFieldBase(FieldBase, metaclass=ABCMeta): - """abstract base class for describing fields of single entities""" + """Abstract base class for describing fields of single entities.""" rank: int # the rank of the tensor field @@ -114,7 +113,7 @@ def __init__( self.data = data_arr def __repr__(self) -> str: - """return instance as string""" + """Return instance as string.""" class_name = self.__class__.__name__ result = f"{class_name}(grid={self.grid!r}, data={self.data}" if self.label: @@ -124,7 +123,7 @@ def __repr__(self) -> str: return result + ")" def __str__(self) -> str: - """return instance as string""" + """Return instance as string.""" result = ( f"{self.__class__.__name__}(grid={self.grid}, " f"data=Array{self.data.shape}" @@ -146,7 +145,7 @@ def random_uniform( dtype: DTypeLike | None = None, rng: np.random.Generator | None = None, ) -> TDataField: - """create field with uniform distributed random values + """Create field with uniformly distributed random values. These values are uncorrelated in space. @@ -194,7 +193,7 @@ def random_normal( dtype: DTypeLike | None = None, rng: np.random.Generator | None = None, ) -> TDataField: - """create field with normal distributed random values + """Create field with normally distributed random values. These values are uncorrelated in space. A complex field is returned when either `mean` or `std` is a complex number. In this case, the real and imaginary parts @@ -260,7 +259,7 @@ def random_harmonic( dtype: DTypeLike | None = None, rng: np.random.Generator | None = None, ) -> TDataField: - r"""create a random field build from harmonics + r"""Create a random field built from harmonics. The resulting fields will be highly correlated in space and can thus serve for testing differential operators. @@ -335,7 +334,7 @@ def random_colored( dtype: DTypeLike | None = None, rng: np.random.Generator | None = None, ) -> TDataField: - r"""create a field of random values with colored noise + r"""Create a field of random values with colored noise. The spatially correlated values obey @@ -381,7 +380,7 @@ def random_colored( @classmethod def get_class_by_rank(cls, rank: int) -> type[DataFieldBase]: - """return a :class:`DataFieldBase` subclass describing a field with a given rank + """Return a :class:`DataFieldBase` subclass describing a field with a given + rank. Args: rank (int): The rank of the tensor field @@ -404,7 +404,7 @@ def from_state( attributes: dict[str, Any], data: np.ndarray | None = None, ) -> TDataField: - """create a field from given state. + """Create a field from given state. Args: attributes (dict): @@ -448,7 +448,7 @@ def data_shape(self) -> tuple[int, ...]: @classmethod def unserialize_attributes(cls, attributes: dict[str, str]) -> dict[str, Any]: - """unserializes the given attributes + """Unserializes the given attributes. Args: attributes (dict): @@ -466,7 +466,7 @@ def unserialize_attributes(cls, attributes: dict[str, str]) -> dict[str, Any]: return results def _write_to_image(self, filename: str, **kwargs) -> None: - r"""write data to image + r"""Write data to image.
Args: filename (str): @@ -499,7 +499,7 @@ def make_interpolator( fill: Number | None = None, with_ghost_cells: bool = False, ) -> Callable[[np.ndarray, np.ndarray], NumberOrArray]: - r"""returns a function that can be used to interpolate values. + r"""Returns a function that can be used to interpolate values. Args: fill (Number, optional): @@ -543,7 +543,7 @@ def make_interpolator( def interpolator( point: np.ndarray, data: np.ndarray | None = None ) -> np.ndarray: - """return the interpolated value at the position `point` + """Return the interpolated value at the position `point` Args: point (:class:`~numpy.ndarray`): @@ -590,7 +590,7 @@ def interpolate( bc: BoundariesData | None = None, fill: Number | None = None, ) -> np.ndarray: - r"""interpolate the field to points between support points + r"""Interpolate the field to points between support points. Args: point (:class:`~numpy.ndarray`): @@ -630,7 +630,7 @@ def interpolate_to_grid( fill: Number | None = None, label: str | None = None, ) -> TDataField: - """interpolate the data of this field to another grid. + """Interpolate the data of this field to another grid. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -654,7 +654,7 @@ def interpolate_to_grid( raise NotImplementedError(f"Cannot interpolate {self.__class__.__name__}") def insert(self, point: np.ndarray, amount: ArrayLike) -> None: - """adds an (integrated) value to the field at an interpolated position + """Adds an (integrated) value to the field at an interpolated position. Args: point (:class:`~numpy.ndarray`): @@ -710,7 +710,7 @@ def insert(self, point: np.ndarray, amount: ArrayLike) -> None: def get_boundary_values( self, axis: int, upper: bool, bc: BoundariesData | None = None ) -> NumberOrArray: - """get the field values directly on the specified boundary + """Get the field values directly on the specified boundary. Args: axis (int): @@ -744,7 +744,7 @@ def get_boundary_values( def set_ghost_cells( self, bc: BoundariesData, *, set_corners: bool = False, args=None ) -> None: - """set the boundary values on virtual points for all boundaries + """Set the boundary values on virtual points for all boundaries. Args: bc (str or list or tuple or dict): @@ -762,26 +762,26 @@ def set_ghost_cells( @property @abstractmethod def integral(self) -> NumberOrArray: - """integral of the scalar field over space""" + """Integral of the scalar field over space.""" @abstractmethod def to_scalar( self, scalar: str = "auto", *, label: str | None = None ) -> ScalarField: - """return scalar variant of the field""" + """Return scalar variant of the field.""" @property def average(self) -> NumberOrArray: - """float or :class:`~numpy.ndarray`: the average of data + """Float or :class:`~numpy.ndarray`: the average of data. - This is calculated by integrating each component of the field over space - and dividing by the grid volume + This is calculated by integrating each component of the field over space and + dividing by the grid volume """ return self.integral / self.grid.volume @property def fluctuations(self) -> NumberOrArray: - """float or :class:`~numpy.ndarray`: quantification of the average fluctuations + """Float or :class:`~numpy.ndarray`: quantification of the average fluctuations. The fluctuations are defined as the standard deviation of the data scaled by the cell volume. 
This definition makes the fluctuations independent of the @@ -824,7 +824,7 @@ def apply_operator( args: dict[str, Any] | None = None, **kwargs, ) -> DataFieldBase: - r"""apply a (differential) operator and return result as a field + r"""Apply a (differential) operator and return result as a field. Args: operator (str): @@ -871,7 +871,7 @@ def apply_operator( def make_dot_operator( self, backend: Literal["numpy", "numba"] = "numba", *, conjugate: bool = True ) -> Callable[[np.ndarray, np.ndarray, np.ndarray | None], np.ndarray]: - """return operator calculating the dot product between two fields + """Return operator calculating the dot product between two fields. This supports both products between two vectors as well as products between a vector and a tensor. @@ -892,13 +892,13 @@ def make_dot_operator( @register_jitable def maybe_conj(arr: np.ndarray) -> np.ndarray: - """helper function implementing optional conjugation""" + """Helper function implementing optional conjugation.""" return arr.conjugate() if conjugate else arr def dot( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """numpy implementation to calculate dot product between two fields""" + """Numpy implementation to calculate dot product between two fields.""" rank_a = a.ndim - num_axes rank_b = b.ndim - num_axes if rank_a < 1 or rank_b < 1: @@ -929,7 +929,7 @@ def dot( # overload `dot` and return a compiled version def get_rank(arr: nb.types.Type | nb.types.Optional) -> int: - """determine rank of field with type `arr`""" + """Determine rank of field with type `arr`""" arr_typ = arr.type if isinstance(arr, nb.types.Optional) else arr if not isinstance(arr_typ, (np.ndarray, nb.types.Array)): raise nb.errors.TypingError( @@ -947,7 +947,7 @@ def get_rank(arr: nb.types.Type | nb.types.Optional) -> int: def dot_ol( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """numba implementation to calculate dot product between two fields""" + """Numba implementation to calculate dot product between two fields.""" # get (and check) rank of the input arrays rank_a = get_rank(a) rank_b = get_rank(b) @@ -1002,7 +1002,7 @@ def calc(a: np.ndarray, b: np.ndarray, out: np.ndarray) -> None: def dot_impl( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """helper function allocating output array""" + """Helper function allocating output array.""" assert a.shape == a_shape assert b.shape == b_shape out = np.empty(out_shape, dtype=dtype) @@ -1015,7 +1015,7 @@ def dot_impl( def dot_impl( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """helper function without allocating output array""" + """Helper function without allocating output array.""" assert a.shape == a_shape assert b.shape == b_shape assert out.shape == out_shape # type: ignore @@ -1028,7 +1028,7 @@ def dot_impl( def dot_compiled( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """numba implementation to calculate dot product between two fields""" + """Numba implementation to calculate dot product between two fields.""" return dot(a, b, out) return dot_compiled # type: ignore @@ -1043,7 +1043,7 @@ def smooth( out: TDataField | None = None, label: str | None = None, ) -> TDataField: - """applies Gaussian smoothing with the given standard deviation + """Applies Gaussian smoothing with the given standard deviation. This function respects periodic boundary conditions of the underlying grid, using reflection when no periodicity is specified. 
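For orientation, the dot operator assembled in the hunks above can be used roughly as in the following sketch. This assumes only the public py-pde API; the grid size and the random fields are illustrative, and backend="numpy" selects the plain-numpy path rather than the numba-compiled one.

    import pde

    grid = pde.UnitGrid([8, 8])  # small 2d grid for illustration
    v1 = pde.VectorField.random_normal(grid)
    v2 = pde.VectorField.random_normal(grid)

    # the returned callable has the signature dot(a, b, out=None)
    dot = v1.make_dot_operator(backend="numpy")
    result = dot(v1.data, v2.data)  # vector . vector -> scalar data of shape grid.shape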
@@ -1125,7 +1125,7 @@ def get_image_data( return data def get_vector_data(self, transpose: bool = False, **kwargs) -> dict[str, Any]: - r"""return data for a vector plot of the field + r"""Return data for a vector plot of the field. Args: \**kwargs: Additional parameters are forwarded to @@ -1145,7 +1145,7 @@ def _plot_line( ylim: tuple[float, float] | None = None, **kwargs, ) -> PlotReference: - r"""visualize a field using a 1d line plot + r"""Visualize a field using a 1d line plot. Args: ax (:class:`matplotlib.axes.Axes`): @@ -1193,7 +1193,7 @@ def _plot_line( ) def _update_line_plot(self, reference: PlotReference) -> None: - """update a line plot with the current field values + """Update a line plot with the current field values. Args: reference (:class:`PlotReference`): @@ -1223,7 +1223,7 @@ def _plot_image( transpose: bool = False, **kwargs, ) -> PlotReference: - r"""visualize a field using a 2d density plot + r"""Visualize a field using a 2d density plot. Args: ax (:class:`matplotlib.axes.Axes`): @@ -1276,7 +1276,7 @@ def _plot_image( return PlotReference(ax, axes_image, parameters) def _update_image_plot(self, reference: PlotReference) -> None: - """update an image plot with the current field values + """Update an image plot with the current field values. Args: reference (:class:`PlotReference`): @@ -1304,7 +1304,7 @@ def _plot_vector( max_points: int | None = 16, **kwargs, ) -> PlotReference: - r"""visualize a field using a 2d vector plot + r"""Visualize a field using a 2d vector plot. Args: ax (:class:`matplotlib.axes.Axes`): @@ -1361,7 +1361,7 @@ def _plot_vector( return PlotReference(ax, element, parameters) def _update_vector_plot(self, reference: PlotReference) -> None: - """update a vector plot with the current field values + """Update a vector plot with the current field values. Args: reference (:class:`PlotReference`): @@ -1392,7 +1392,7 @@ def _update_vector_plot(self, reference: PlotReference) -> None: raise ValueError(f"Vector plot `{method}` is not supported.") def _update_plot(self, reference: PlotReference) -> None: - """update a plot with the current field values + """Update a plot with the current field values. Args: reference (:class:`PlotReference`): @@ -1413,7 +1413,7 @@ def _update_plot(self, reference: PlotReference) -> None: @plot_on_axes(update_method="_update_plot") def plot(self, kind: str = "auto", **kwargs) -> PlotReference: - r"""visualize the field + r"""Visualize the field. Args: kind (str): @@ -1456,7 +1456,6 @@ def plot(self, kind: str = "auto", **kwargs) -> PlotReference: - `max_points` Sets max. number of points along each axis in quiver plots - Additional arguments are passed to :func:`matplotlib.pyplot.quiver` or :func:`matplotlib.pyplot.streamplot`. - """ # determine the correct kind of plotting if kind == "auto": @@ -1498,7 +1497,7 @@ def plot(self, kind: str = "auto", **kwargs) -> PlotReference: def _get_napari_layer_data( self, scalar: str = "auto", args: dict[str, Any] | None = None ) -> dict[str, Any]: - """returns data for plotting on a single napari layer + """Returns data for plotting on a single napari layer. 
Args: scalar (str): @@ -1519,7 +1518,7 @@ def _get_napari_layer_data( return result def _get_napari_data(self, **kwargs) -> dict[str, dict[str, Any]]: - r"""returns data for plotting this field using :mod:`napari` + r"""Returns data for plotting this field using :mod:`napari` Args: \**kwargs: all arguments are forwarded to `_get_napari_layer_data` diff --git a/pde/fields/scalar.py b/pde/fields/scalar.py index e189d867..046a36b2 100644 --- a/pde/fields/scalar.py +++ b/pde/fields/scalar.py @@ -1,5 +1,4 @@ -""" -Defines a scalar field over a grid +"""Defines a scalar field over a grid. .. codeauthor:: David Zwicker """ @@ -27,7 +26,7 @@ class ScalarField(DataFieldBase): - """Scalar field discretized on a grid""" + """Scalar field discretized on a grid.""" rank = 0 @@ -43,7 +42,7 @@ def from_expression( label: str | None = None, dtype: DTypeLike | None = None, ) -> ScalarField: - """create a scalar field on a grid from a given expression + """Create a scalar field on a grid from a given expression. Warning: {WARNING_EXEC} @@ -103,7 +102,7 @@ def from_image( *, label: str | None = None, ) -> ScalarField: - """create a scalar field from an image + """Create a scalar field from an image. Args: path (:class:`Path` or str): @@ -144,11 +143,11 @@ def from_image( @DataFieldBase._data_flat.setter # type: ignore def _data_flat(self, value): - """set the data from a value from a collection""" + """Set the data from a value from a collection.""" self._data_full = value[0] def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - """support unary numpy ufuncs, like np.sin, but also np.multiply""" + """Support unary numpy ufuncs, like np.sin, but also np.multiply.""" if method == "__call__": # only support unary functions in simple calls @@ -190,7 +189,7 @@ def laplace( out: ScalarField | None = None, **kwargs, ) -> ScalarField: - """apply Laplace operator and return result as a field + """Apply Laplace operator and return result as a field. Args: bc: @@ -215,7 +214,7 @@ def gradient_squared( out: ScalarField | None = None, **kwargs, ) -> ScalarField: - r"""apply squared gradient operator and return result as a field + r"""Apply squared gradient operator and return result as a field. This evaluates :math:`|\nabla \phi|^2` for the scalar field :math:`\phi` @@ -242,7 +241,7 @@ def gradient( out: VectorField | None = None, **kwargs, ) -> VectorField: - """apply gradient operator and return result as a field + """Apply gradient operator and return result as a field. Args: bc: @@ -269,7 +268,7 @@ def project( method: Literal["integral", "average", "mean"] = "integral", label: str | None = None, ) -> ScalarField: - """project scalar field along given axes + """Project scalar field along given axes. Args: axes (list of str): @@ -325,7 +324,7 @@ def slice( method: Literal["nearest"] = "nearest", label: str | None = None, ) -> ScalarField: - """slice data at a given position + """Slice data at a given position. Note: This method should not be used to evaluate fields right at the boundary @@ -410,7 +409,7 @@ def slice( def to_scalar( self, scalar: str | Callable = "auto", *, label: str | None = None ) -> ScalarField: - """return a modified scalar field by applying method `scalar` + """Return a modified scalar field by applying method `scalar` Args: scalar (str or callable): @@ -453,7 +452,7 @@ def interpolate_to_grid( fill: Number | None = None, label: str | None = None, ) -> ScalarField: - """interpolate the data of this scalar field to another grid. 
+ """Interpolate the data of this scalar field to another grid. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -508,7 +507,7 @@ def get_boundary_field( *, label: str | None = None, ) -> ScalarField: - """get the field on the specified boundary + """Get the field on the specified boundary. Args: index (str or tuple): diff --git a/pde/fields/tensorial.py b/pde/fields/tensorial.py index dbaa7a9a..bb9052c0 100644 --- a/pde/fields/tensorial.py +++ b/pde/fields/tensorial.py @@ -1,5 +1,4 @@ -""" -Defines a tensorial field of rank 2 over a grid +"""Defines a tensorial field of rank 2 over a grid. .. codeauthor:: David Zwicker """ @@ -26,7 +25,7 @@ class Tensor2Field(DataFieldBase): - """Tensor field of rank 2 discretized on a grid + """Tensor field of rank 2 discretized on a grid. Warning: Components of the tensor field are given in the local basis. While the local @@ -49,7 +48,7 @@ def from_expression( label: str | None = None, dtype: DTypeLike | None = None, ) -> Tensor2Field: - """create a tensor field on a grid from given expressions + """Create a tensor field on a grid from given expressions. Warning: {WARNING_EXEC} @@ -111,7 +110,7 @@ def from_expression( return cls(grid=grid, data=data, label=label, dtype=dtype) def _get_axes_index(self, key: tuple[int | str, int | str]) -> tuple[int, int]: - """turns a general index of two axis into a tuple of two numeric indices""" + """Turns a general index of two axis into a tuple of two numeric indices.""" try: if len(key) != 2: raise IndexError("Index must be given as two integers") @@ -120,7 +119,7 @@ def _get_axes_index(self, key: tuple[int | str, int | str]) -> tuple[int, int]: return tuple(self.grid.get_axis_index(k) for k in key) # type: ignore def __getitem__(self, key: tuple[int | str, int | str]) -> ScalarField: - """extract a single component of the tensor field as a scalar field""" + """Extract a single component of the tensor field as a scalar field.""" return ScalarField( self.grid, data=self._data_full[self._get_axes_index(key)], @@ -132,7 +131,7 @@ def __setitem__( key: tuple[int | str, int | str], value: NumberOrArray | ScalarField, ): - """set a single component of the tensor field""" + """Set a single component of the tensor field.""" idx = self._get_axes_index(key) if isinstance(value, ScalarField): self.grid.assert_grid_compatible(value.grid) @@ -142,7 +141,7 @@ def __setitem__( @DataFieldBase._data_flat.setter # type: ignore def _data_flat(self, value): - """set the data from a value from a collection""" + """Set the data from a value from a collection.""" # create a view and reshape it to disallow copying data_full = value.view() dim = self.grid.dim @@ -164,7 +163,7 @@ def dot( conjugate: bool = True, label: str = "dot product", ) -> VectorField | Tensor2Field: - """calculate the dot product involving a tensor field + """Calculate the dot product involving a tensor field. This supports the dot product between two tensor fields as well as the product between a tensor and a vector. The resulting fields will be a @@ -211,7 +210,7 @@ def dot( def divergence( self, bc: BoundariesData | None, out: VectorField | None = None, **kwargs ) -> VectorField: - r"""apply tensor divergence and return result as a field + r"""Apply tensor divergence and return result as a field. 
The tensor divergence is a vector field :math:`v_\alpha` resulting from a contracting of the derivative of the tensor field :math:`t_{\alpha\beta}`: @@ -237,11 +236,11 @@ def divergence( @property def integral(self) -> np.ndarray: - """:class:`~numpy.ndarray`: integral of each component over space""" + """:class:`~numpy.ndarray`: integral of each component over space.""" return self.grid.integrate(self.data) # type: ignore def transpose(self, label: str = "transpose") -> Tensor2Field: - """return the transpose of the tensor field + """Return the transpose of the tensor field. Args: label (str, optional): Name of the returned field @@ -255,7 +254,7 @@ def transpose(self, label: str = "transpose") -> Tensor2Field: def symmetrize( self, make_traceless: bool = False, inplace: bool = False ) -> Tensor2Field: - """symmetrize the tensor field in place + """Symmetrize the tensor field in place. Args: make_traceless (bool): @@ -285,7 +284,7 @@ def symmetrize( def to_scalar( self, scalar: str = "auto", *, label: str | None = "scalar `{scalar}`" ) -> ScalarField: - r"""return scalar variant of the field + r"""Return scalar variant of the field. The invariants of the tensor field :math:`\boldsymbol{A}` are @@ -376,7 +375,7 @@ def to_scalar( return ScalarField(self.grid, data, label=label) def trace(self, label: str | None = "trace") -> ScalarField: - """return the trace of the tensor field as a scalar field + """Return the trace of the tensor field as a scalar field. Args: label (str, optional): Name of the returned field @@ -387,7 +386,7 @@ def trace(self, label: str | None = "trace") -> ScalarField: return self.to_scalar(scalar="trace", label=label) def _update_plot_components(self, reference: list[list[PlotReference]]) -> None: - """update a plot collection with the current field values + """Update a plot collection with the current field values. Args: reference (list of :class:`PlotReference`): @@ -404,7 +403,7 @@ def plot_components( fig=None, **kwargs, ) -> list[list[PlotReference]]: - r"""visualize all the components of this tensor field + r"""Visualize all the components of this tensor field. Args: kind (str or list of str): diff --git a/pde/fields/vectorial.py b/pde/fields/vectorial.py index 9512bac6..163b44e1 100644 --- a/pde/fields/vectorial.py +++ b/pde/fields/vectorial.py @@ -1,5 +1,4 @@ -""" -Defines a vectorial field over a grid +"""Defines a vectorial field over a grid. .. codeauthor:: David Zwicker """ @@ -29,7 +28,7 @@ class VectorField(DataFieldBase): - """Vector field discretized on a grid + """Vector field discretized on a grid. Warning: Components of the vector field are given in the local basis. While the local @@ -48,7 +47,7 @@ def from_scalars( label: str | None = None, dtype: DTypeLike | None = None, ) -> VectorField: - """create a vector field from a list of ScalarFields + """Create a vector field from a list of ScalarFields. Note that the data of the scalar fields is copied in the process @@ -92,7 +91,7 @@ def from_expression( label: str | None = None, dtype: DTypeLike | None = None, ) -> VectorField: - """create a vector field on a grid from given expressions + """Create a vector field on a grid from given expressions. 
Warning: {WARNING_EXEC} @@ -148,7 +147,7 @@ def from_expression( return cls(grid=grid, data=data, label=label, dtype=dtype) def __getitem__(self, key: int | str) -> ScalarField: - """extract a component of the VectorField""" + """Extract a component of the VectorField.""" axis = self.grid.get_axis_index(key) comp_name = self.grid.c.axes[axis] if self.label: @@ -160,7 +159,7 @@ def __getitem__(self, key: int | str) -> ScalarField: ) def __setitem__(self, key: int | str, value: NumberOrArray | ScalarField): - """set a component of the VectorField""" + """Set a component of the VectorField.""" idx = self.grid.get_axis_index(key) if isinstance(value, ScalarField): self.grid.assert_grid_compatible(value.grid) @@ -176,7 +175,7 @@ def dot( conjugate: bool = True, label: str = "dot product", ) -> ScalarField | VectorField: - """calculate the dot product involving a vector field + """Calculate the dot product involving a vector field. This supports the dot product between two vectors fields as well as the product between a vector and a tensor. The resulting fields will be a @@ -231,7 +230,7 @@ def outer_product( *, label: str | None = None, ) -> Tensor2Field: - """calculate the outer product of this vector field with another + """Calculate the outer product of this vector field with another. Args: other (:class:`~pde.fields.vectorial.VectorField`): @@ -263,7 +262,7 @@ def outer_product( def make_outer_prod_operator( self, backend: Literal["numpy", "numba"] = "numba" ) -> Callable[[np.ndarray, np.ndarray, np.ndarray | None], np.ndarray]: - """return operator calculating the outer product of two vector fields + """Return operator calculating the outer product of two vector fields. Warning: This function does not check types or dimensions. @@ -282,7 +281,7 @@ def make_outer_prod_operator( def outer( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """calculate the outer product using numpy""" + """Calculate the outer product using numpy.""" return np.einsum("i...,j...->ij...", a, b, out=out) if backend == "numpy": @@ -296,7 +295,7 @@ def outer( num_axes = self.grid.num_axes def check_rank(arr: nb.types.Type | nb.types.Optional) -> None: - """determine rank of field with type `arr`""" + """Determine rank of field with type `arr`""" arr_typ = arr.type if isinstance(arr, nb.types.Optional) else arr if not isinstance(arr_typ, (np.ndarray, nb.types.Array)): raise nb.errors.TypingError( @@ -307,7 +306,7 @@ def check_rank(arr: nb.types.Type | nb.types.Optional) -> None: # create the inner function calculating the outer product @register_jitable def calc(a: np.ndarray, b: np.ndarray, out: np.ndarray) -> np.ndarray: - """calculate outer product between fields `a` and `b`""" + """Calculate outer product between fields `a` and `b`""" for i in range(0, dim): for j in range(0, dim): out[i, j, :] = a[i] * b[j] @@ -317,7 +316,8 @@ def calc(a: np.ndarray, b: np.ndarray, out: np.ndarray) -> np.ndarray: def outer_ol( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """numba implementation to calculate outer product between two fields""" + """Numba implementation to calculate outer product between two + fields.""" # get (and check) rank of the input arrays check_rank(a) check_rank(b) @@ -331,7 +331,7 @@ def outer_ol( def outer_impl( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """helper function allocating output array""" + """Helper function allocating output array.""" assert a.shape == b.shape == in_shape out = 
np.empty(out_shape, dtype=dtype) calc(a, b, out) @@ -343,7 +343,7 @@ def outer_impl( def outer_impl( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """helper function without allocating output array""" + """Helper function without allocating output array.""" # check input assert a.shape == b.shape == in_shape assert out.shape == out_shape # type: ignore @@ -356,7 +356,8 @@ def outer_impl( def outer_compiled( a: np.ndarray, b: np.ndarray, out: np.ndarray | None = None ) -> np.ndarray: - """numba implementation to calculate outer product between two fields""" + """Numba implementation to calculate outer product between two + fields.""" return outer(a, b, out) return outer_compiled # type: ignore @@ -368,7 +369,7 @@ def outer_compiled( def divergence( self, bc: BoundariesData | None, out: ScalarField | None = None, **kwargs ) -> ScalarField: - """apply divergence operator and return result as a field + """Apply divergence operator and return result as a field. Args: bc: @@ -393,7 +394,7 @@ def gradient( out: Tensor2Field | None = None, **kwargs, ) -> Tensor2Field: - r"""apply vector gradient operator and return result as a field + r"""Apply vector gradient operator and return result as a field. The vector gradient field is a tensor field :math:`t_{\alpha\beta}` that specifies the derivatives of the vector field :math:`v_\alpha` with respect to @@ -420,7 +421,7 @@ def gradient( def laplace( self, bc: BoundariesData | None, out: VectorField | None = None, **kwargs ) -> VectorField: - r"""apply vector Laplace operator and return result as a field + r"""Apply vector Laplace operator and return result as a field. The vector Laplacian is a vector field :math:`L_\alpha` containing the second derivatives of the vector field :math:`v_\alpha` with respect to the coordinates @@ -448,7 +449,7 @@ def laplace( @property def integral(self) -> np.ndarray: - """:class:`~numpy.ndarray`: integral of each component over space""" + """:class:`~numpy.ndarray`: integral of each component over space.""" return self.grid.integrate(self.data) # type: ignore def to_scalar( @@ -457,7 +458,7 @@ def to_scalar( *, label: str | None = "scalar `{scalar}`", ) -> ScalarField: - """return scalar variant of the field + """Return scalar variant of the field. Args: scalar (str): @@ -509,7 +510,7 @@ def to_scalar( def get_vector_data( self, transpose: bool = False, max_points: int | None = None, **kwargs ) -> dict[str, Any]: - r"""return data for a vector plot of the field + r"""Return data for a vector plot of the field. Args: transpose (bool): @@ -569,7 +570,7 @@ def interpolate_to_grid( fill: Number | None = None, label: str | None = None, ) -> VectorField: - """interpolate the data of this vector field to another grid. + """Interpolate the data of this vector field to another grid. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -624,7 +625,7 @@ def interpolate_to_grid( def _get_napari_layer_data( # type: ignore self, max_points: int | None = None, args: dict[str, Any] | None = None ) -> dict[str, Any]: - """returns data for plotting on a single napari layer + """Returns data for plotting on a single napari layer. Args: max_points (int): diff --git a/pde/grids/__init__.py b/pde/grids/__init__.py index 37df18d5..3e6a79e4 100644 --- a/pde/grids/__init__.py +++ b/pde/grids/__init__.py @@ -1,5 +1,4 @@ -""" -Grids define the domains on which PDEs will be solved. In particular, symmetries, +"""Grids define the domains on which PDEs will be solved. 
In particular, symmetries, periodicities, and the discretizations are defined by the underlying grid. We only consider regular, orthogonal grids, which are constructed from orthogonal diff --git a/pde/grids/_mesh.py b/pde/grids/_mesh.py index 4f72c57d..66482724 100644 --- a/pde/grids/_mesh.py +++ b/pde/grids/_mesh.py @@ -1,5 +1,4 @@ -""" -Defines a class used for subdividing a grid for parallel execution using MPI +"""Defines a class used for subdividing a grid for parallel execution using MPI. .. codeauthor:: David Zwicker """ @@ -26,7 +25,7 @@ class MPIFlags(IntEnum): - """enum that contains flags for MPI communication""" + """Enum that contains flags for MPI communication.""" field_split = 1 # split full field onto nodes field_combine = 2 # combine full field from subfields on nodes @@ -35,7 +34,7 @@ @classmethod def boundary_lower(cls, my_id: int, other_id: int) -> int: - """flag for connection between my lower boundary and `other_id`""" + """Flag for connection between my lower boundary and `other_id`""" if my_id <= other_id: return 2 * my_id + cls._boundary_lower else: @@ -43,7 +42,7 @@ def boundary_lower(cls, my_id: int, other_id: int) -> int: @classmethod def boundary_upper(cls, my_id: int, other_id: int) -> int: - """flag for connection between my upper boundary and `other_id`""" + """Flag for connection between my upper boundary and `other_id`""" if my_id <= other_id: return 2 * my_id + cls._boundary_upper else: @@ -51,7 +50,7 @@ def boundary_upper(cls, my_id: int, other_id: int) -> int: def _get_optimal_decomposition(shape: Sequence[int], mpi_size: int) -> list[int]: - """determine optimal decomposition of a grid into several chunks + """Determine optimal decomposition of a grid into several chunks. Args: shape (list of int): @@ -88,7 +87,7 @@ def _get_optimal_decomposition(shape: Sequence[int], mpi_size: int) -> list[int] def _subdivide(num: int, chunks: int) -> np.ndarray: - r"""subdivide `num` intervals in `chunk` chunks + r"""Subdivide `num` intervals into `chunks` chunks. Args: num (int): @@ -105,7 +104,7 @@ def _subdivide(num: int, chunks: int) -> np.ndarray: def _subdivide_along_axis(grid: GridBase, axis: int, chunks: int) -> list[GridBase]: - """subdivide the grid along a given axis + """Subdivide the grid along a given axis. Args: axis (int): @@ -157,7 +156,7 @@ def replace_in_axis(arr, value): class GridMesh: - """handles a collection of subgrids arranged in a regular mesh + """Handles a collection of subgrids arranged in a regular mesh. This class provides methods for managing MPI simulations of multiple connected subgrids. Each subgrid is also called a cell and identified with a unique number. @@ -184,7 +183,7 @@ def __init__(self, basegrid: GridBase, subgrids: Sequence): def from_grid( cls, grid: GridBase, decomposition: Literal["auto"] | int | list[int] = "auto" ) -> GridMesh: - """subdivide the grid into subgrids + """Subdivide the grid into subgrids. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -266,7 +265,7 @@ def shape(self) -> tuple[int, ...]: return self.subgrids.shape def __len__(self) -> int: - """total number of subgrids""" + """Total number of subgrids.""" return self.subgrids.size @property @@ -275,7 +274,7 @@ def current_node(self) -> int: return mpi.rank def __getitem__(self, node_id: int | None) -> GridBase: - """extract one subgrid from the mesh + """Extract one subgrid from the mesh.
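For example, a minimal sketch of extracting a subgrid (assuming a small square grid split into two subgrids along the first axis; constructing the mesh itself does not require running MPI processes):

.. code-block:: python

    from pde import UnitGrid
    from pde.grids._mesh import GridMesh

    grid = UnitGrid([4, 4])
    mesh = GridMesh.from_grid(grid, decomposition=[2, 1])
    assert len(mesh) == 2
    subgrid = mesh[0]  # subgrid assigned to the first node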
Args: node_id (int): @@ -290,11 +289,11 @@ def __getitem__(self, node_id: int | None) -> GridBase: @property def current_grid(self) -> GridBase: - """:class:`~pde.grids.base.GridBase`:subgrid of current MPI node""" + """:class:`~pde.grids.base.GridBase`:subgrid of current MPI node.""" return self[self.current_node] def _id2idx(self, node_id: int) -> tuple[int, ...]: - """convert linear id into node index + """Convert linear id into node index. Args: node_id (int): @@ -306,7 +305,7 @@ def _id2idx(self, node_id: int) -> tuple[int, ...]: return np.unravel_index(node_id, self.shape) # type: ignore def _idx2id(self, node_idx: Sequence[int]) -> int: - """convert node index to linear index + """Convert node index to linear index. Args: node_idx (tuple): @@ -319,7 +318,7 @@ def _idx2id(self, node_idx: Sequence[int]) -> int: @cached_method() def _get_data_indices_1d(self, with_ghost_cells: bool = False) -> list[list[slice]]: - """indices to extract valid field data for each subgrid + """Indices to extract valid field data for each subgrid. Args: with_ghost_cells (bool): @@ -344,7 +343,7 @@ def _get_data_indices_1d(self, with_ghost_cells: bool = False) -> list[list[slic @cached_method() def _get_data_indices(self, with_ghost_cells: bool = False) -> np.ndarray: - """indices to extract valid field data for each subgrid + """Indices to extract valid field data for each subgrid. Args: with_ghost_cells (bool): @@ -363,7 +362,7 @@ def _get_data_indices(self, with_ghost_cells: bool = False) -> np.ndarray: return indices def get_boundary_flag(self, neighbor: int, upper: bool) -> int: - """get MPI flag indicating the boundary between this node and its neighbor + """Get MPI flag indicating the boundary between this node and its neighbor. Args: node_id (int): @@ -384,7 +383,7 @@ def get_boundary_flag(self, neighbor: int, upper: bool) -> int: def get_neighbor( self, axis: int, upper: bool, *, node_id: int | None = None ) -> int | None: - """get node id of the neighbor along the given axis and direction + """Get node id of the neighbor along the given axis and direction. Args: axis (int): @@ -433,7 +432,7 @@ def extract_field_data( *, with_ghost_cells: bool = False, ) -> np.ndarray: - """extract one subfield from a global one + """Extract one subfield from a global one. Args: field_data (:class:`~numpy.ndarray`): @@ -468,7 +467,7 @@ def extract_subfield( *, with_ghost_cells: bool = False, ) -> TField: - """extract one subfield from a global field + """Extract one subfield from a global field. Args: field (:class:`~pde.fields.base.DataFieldBase`): @@ -516,7 +515,7 @@ def extract_subfield( raise TypeError(f"Field type {field.__class__.__name__} unsupported") def extract_boundary_conditions(self, bcs_base: Boundaries) -> Boundaries: - """extract boundary conditions for current subgrid from global conditions + """Extract boundary conditions for current subgrid from global conditions. Args: bcs_base (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -545,7 +544,7 @@ def extract_boundary_conditions(self, bcs_base: Boundaries) -> Boundaries: def split_field_data_mpi( self, field_data: np.ndarray, *, with_ghost_cells: bool = False ) -> np.ndarray: - """extract one subfield from a global field + """Extract one subfield from a global field. 
Args: field (:class:`~pde.fields.base.DataFieldBase`): @@ -591,7 +590,7 @@ def split_field_data_mpi( return subfield_data def split_field_mpi(self: GridMesh, field: TField) -> TField: - """split a field onto the subgrids by communicating data via MPI + """Split a field onto the subgrids by communicating data via MPI. The ghost cells of the returned fields will be set according to the values of the original field. @@ -637,7 +636,7 @@ def combine_field_data( *, with_ghost_cells: bool = False, ) -> np.ndarray: - """combine data of multiple fields defined on subgrids + """Combine data of multiple fields defined on subgrids. Args: subfields (:class:`~numpy.ndarray`): @@ -678,7 +677,7 @@ def combine_field_data_mpi( *, with_ghost_cells: bool = False, ) -> np.ndarray | None: - """combine data of all subfields using MPI + """Combine data of all subfields using MPI. Args: subfield (:class:`~numpy.ndarray`): @@ -724,7 +723,7 @@ def combine_field_data_mpi( return None def broadcast(self, data: TData) -> TData: - """distribute a value from the main node to all nodes + """Distribute a value from the main node to all nodes. Args: data: @@ -738,7 +737,7 @@ def broadcast(self, data: TData) -> TData: return COMM_WORLD.bcast(data, root=0) # type: ignore def gather(self, data: TData) -> list[TData] | None: - """gather a value from all nodes + """Gather a value from all nodes. Args: data: @@ -753,7 +752,7 @@ def gather(self, data: TData) -> list[TData] | None: return COMM_WORLD.gather(data, root=0) def allgather(self, data: TData) -> list[TData]: - """gather a value from reach node and sends them to all nodes + """Gather a value from each node and send them to all nodes. Args: data: @@ -768,7 +767,7 @@ def allgather(self, data: TData) -> list[TData]: @plot_on_axes() def plot(self, ax, **kwargs) -> None: - r"""visualize the grid mesh + r"""Visualize the grid mesh. Args: {PLOT_ARGS} diff --git a/pde/grids/base.py b/pde/grids/base.py index 12cf74a0..3da7949f 100644 --- a/pde/grids/base.py +++ b/pde/grids/base.py @@ -1,5 +1,4 @@ -""" -Defines the base class for all grids +"""Defines the base class for all grids. .. codeauthor:: David Zwicker """ @@ -48,7 +47,7 @@ class OperatorInfo(NamedTuple): - """stores information about an operator""" + """Stores information about an operator.""" factory: OperatorFactory rank_in: int @@ -57,7 +56,7 @@ def _check_shape(shape: int | Sequence[int]) -> tuple[int, ...]: - """checks the consistency of shape tuples""" + """Checks the consistency of shape tuples.""" if hasattr(shape, "__iter__"): shape_list: Sequence[int] = shape # type: ignore else: @@ -79,7 +78,7 @@ def _check_shape(shape: int | Sequence[int]) -> tuple[int, ...]: def discretize_interval( x_min: float, x_max: float, num: int ) -> tuple[np.ndarray, float]: - r"""construct a list of equidistantly placed intervals + r"""Construct a list of equidistantly placed intervals.
The discretization is defined as @@ -105,15 +104,15 @@ class DomainError(ValueError): - """exception indicating that point lies outside domain""" + """Exception indicating that point lies outside domain.""" class PeriodicityError(RuntimeError): - """exception indicating that the grid periodicity is inconsistent""" + """Exception indicating that the grid periodicity is inconsistent.""" class GridBase(metaclass=ABCMeta): - """Base class for all grids defining common methods and interfaces""" + """Base class for all grids defining common methods and interfaces.""" # class properties _subclasses: dict[str, type[GridBase]] = {} # all classes inheriting from this @@ -121,7 +120,7 @@ class GridBase(metaclass=ABCMeta): # properties that are defined in subclasses c: CoordinatesBase - """:class:`~pde.grids.coordinates.CoordinatesBase`: Coordinates of the grid""" + """:class:`~pde.grids.coordinates.CoordinatesBase`: Coordinates of the grid.""" axes: list[str] """list: Names of all axes that are described by the grid""" axes_symmetric: list[str] = [] @@ -151,7 +150,7 @@ class GridBase(metaclass=ABCMeta): """ set: names of all operators defined for this grid """ def __init__(self) -> None: - """initialize the grid""" + """Initialize the grid.""" self._logger = logging.getLogger(self.__class__.__name__) self._mesh: GridMesh | None = None @@ -163,7 +162,7 @@ def __init__(self) -> None: self.axes_symmetric = [self.c.axes[i] for i in self.axes_symmetric] # type: ignore def __init_subclass__(cls, **kwargs) -> None: # @NoSelf - """register all subclassess to reconstruct them later""" + """Register all subclasses to reconstruct them later.""" super().__init_subclass__(**kwargs) if cls is not GridBase: if cls.__name__ in cls._subclasses: @@ -183,7 +182,7 @@ def __setstate__(self, state): @classmethod def from_state(cls, state: str | dict[str, Any]) -> GridBase: - """create a field from a stored `state`. + """Create a grid from a stored `state`. Args: state (`str` or `dict`): @@ -234,7 +233,7 @@ def axes_coords(self) -> tuple[np.ndarray, ...]: return self._axes_coords def get_axis_index(self, key: int | str, allow_symmetric: bool = True) -> int: - """return the index belonging to an axis + """Return the index belonging to an axis. Args: key (int or str): @@ -261,7 +260,7 @@ def get_axis_index(self, key: int | str, allow_symmetric: bool = True) -> int: raise IndexError("Index must be an integer or the name of an axes") def _get_boundary_index(self, index: str | tuple[int, bool]) -> tuple[int, bool]: - """return the index of a boundary belonging to an axis + """Return the index of a boundary belonging to an axis. Args: index (str or tuple): @@ -284,7 +283,7 @@ def _get_boundary_index(self, index: str | tuple[int, bool]) -> tuple[int, bool] @property def discretization(self) -> np.ndarray: - """:class:`numpy.array`: the linear size of a cell along each axis""" + """:class:`numpy.array`: the linear size of a cell along each axis.""" return self._discretization @property @@ -308,7 +307,7 @@ def _idx_valid(self) -> tuple[slice, ...]: return tuple(slice(1, s + 1) for s in self.shape) def _make_get_valid(self) -> Callable[[np.ndarray], np.ndarray]: - """create a function to extract the valid part of a full data array + """Create a function to extract the valid part of a full data array.
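For example, a minimal sketch (assuming a 1d grid, whose full data array carries one ghost cell on either side):

.. code-block:: python

    import numpy as np
    from pde import UnitGrid

    grid = UnitGrid([4])
    get_valid = grid._make_get_valid()
    data_full = np.zeros(6)  # 4 valid cells plus 2 ghost cells
    assert get_valid(data_full).shape == (4,)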
Returns: callable: Mapping a numpy array containing the full data of the grid to a @@ -318,7 +317,7 @@ def _make_get_valid(self) -> Callable[[np.ndarray], np.ndarray]: @jit def get_valid(data_full: np.ndarray) -> np.ndarray: - """return valid part of the data (without ghost cells) + """Return valid part of the data (without ghost cells) Args: data_full (:class:`~numpy.ndarray`): @@ -344,7 +343,7 @@ def _make_set_valid( ) -> Callable[[np.ndarray, np.ndarray, dict], None]: ... def _make_set_valid(self, bcs: Boundaries | None = None) -> Callable: - """create a function to set the valid part of a full data array + """Create a function to set the valid part of a full data array. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`, optional): @@ -362,7 +361,7 @@ def _make_set_valid(self, bcs: Boundaries | None = None) -> Callable: @jit def set_valid(data_full: np.ndarray, data_valid: np.ndarray) -> None: - """set valid part of the data (without ghost cells) + """Set valid part of the data (without ghost cells) Args: data_full (:class:`~numpy.ndarray`): @@ -390,7 +389,7 @@ def set_valid(data_full: np.ndarray, data_valid: np.ndarray) -> None: def set_valid_bcs( data_full: np.ndarray, data_valid: np.ndarray, args=None ) -> None: - """set valid part of the data and the ghost cells using BCs + """Set valid part of the data and the ghost cells using BCs. Args: data_full (:class:`~numpy.ndarray`): @@ -418,13 +417,15 @@ def state_serialized(self) -> str: return json.dumps(state) def copy(self) -> GridBase: - """return a copy of the grid""" + """Return a copy of the grid.""" return self.__class__.from_state(self.state) __copy__ = copy def __deepcopy__(self, memo: dict[int, Any]) -> GridBase: - """create a deep copy of the grid. This function is for instance called when + """Create a deep copy of the grid. + + This function is for instance called when a grid instance appears in another object that is copied using `copy.deepcopy` """ # this implementation assumes that a simple call to copy is sufficient @@ -433,7 +434,7 @@ def __deepcopy__(self, memo: dict[int, Any]) -> GridBase: return result def __repr__(self) -> str: - """return instance as string""" + """Return instance as string.""" args = ", ".join(str(k) + "=" + str(v) for k, v in self.state.items()) return f"{self.__class__.__name__}({args})" @@ -447,7 +448,7 @@ def __eq__(self, other) -> bool: ) def _cache_hash(self) -> int: - """returns a value to determine when a cache needs to be updated""" + """Returns a value to determine when a cache needs to be updated.""" return hash( ( self.__class__.__name__, @@ -458,7 +459,7 @@ def _cache_hash(self) -> int: ) def compatible_with(self, other: GridBase) -> bool: - """tests whether this grid is compatible with other grids. + """Tests whether this grid is compatible with other grids. Grids are compatible when they cover the same area with the same discretization. The difference to equality is that compatible grids do @@ -478,7 +479,7 @@ def compatible_with(self, other: GridBase) -> bool: ) def assert_grid_compatible(self, other: GridBase) -> None: - """checks whether `other` is compatible with the current grid + """Checks whether `other` is compatible with the current grid. 
Args: other (:class:`~pde.grids.base.GridBase`): @@ -502,12 +503,12 @@ def coordinate_arrays(self) -> tuple[np.ndarray, ...]: @cached_property() def cell_coords(self) -> np.ndarray: - """:class:`~numpy.ndarray`: coordinate values for all axes of each cell""" + """:class:`~numpy.ndarray`: coordinate values for all axes of each cell.""" return np.moveaxis(self.coordinate_arrays, 0, -1) @cached_property() def cell_volumes(self) -> np.ndarray: - """:class:`~numpy.ndarray`: volume of each cell""" + """:class:`~numpy.ndarray`: volume of each cell.""" if self.cell_volume_data is None: # use the self.c to calculate cell volumes d2 = self.discretization / 2 @@ -537,7 +538,7 @@ def _difference_vector( periodic: Sequence[bool], axes_bounds: tuple[tuple[float, float], ...] | None, ) -> np.ndarray: - """return Cartesian vector(s) pointing from p1 to p2 + """Return Cartesian vector(s) pointing from p1 to p2. In case of periodic boundary conditions, the shortest vector is returned. @@ -574,7 +575,7 @@ def _difference_vector( def difference_vector( self, p1: np.ndarray, p2: np.ndarray, *, coords: CoordsType = "grid" ) -> np.ndarray: - """return Cartesian vector(s) pointing from p1 to p2 + """Return Cartesian vector(s) pointing from p1 to p2. In case of periodic boundary conditions, the shortest vector is returned. @@ -606,7 +607,7 @@ def difference_vector_real(self, p1: np.ndarray, p2: np.ndarray) -> np.ndarray: def distance( self, p1: np.ndarray, p2: np.ndarray, *, coords: CoordsType = "grid" ) -> float: - """Calculate the distance between two points given in real coordinates + """Calculate the distance between two points given in real coordinates. This takes periodic boundary conditions into account if necessary. @@ -634,7 +635,7 @@ def distance_real(self, p1: np.ndarray, p2: np.ndarray) -> float: return self.distance(p1, p2) def _iter_boundaries(self) -> Iterator[tuple[int, bool]]: - """iterate over all boundaries of the grid + """Iterate over all boundaries of the grid. Yields: tuple: for each boundary, the generator returns a tuple indicating @@ -646,7 +647,7 @@ def _iter_boundaries(self) -> Iterator[tuple[int, bool]]: def _boundary_coordinates( self, axis: int, upper: bool, *, offset: float = 0 ) -> np.ndarray: - """get coordinates of points on the boundary + """Get coordinates of points on the boundary. Args: axis (int): @@ -687,7 +688,7 @@ def volume(self) -> float: def point_to_cartesian( self, points: np.ndarray, *, full: bool = False ) -> np.ndarray: - """convert coordinates of a point in grid coordinates to Cartesian coordinates + """Convert coordinates of a point in grid coordinates to Cartesian coordinates. Args: points (:class:`~numpy.ndarray`): @@ -711,7 +712,7 @@ def point_to_cartesian( def point_from_cartesian( self, points: np.ndarray, *, full: bool = False ) -> np.ndarray: - """convert points given in Cartesian coordinates to grid coordinates + """Convert points given in Cartesian coordinates to grid coordinates. Args: points (:class:`~numpy.ndarray`): @@ -736,7 +737,7 @@ def point_from_cartesian( def _vector_to_cartesian( self, points: ArrayLike, components: ArrayLike ) -> np.ndarray: - """convert the vectors at given points into a Cartesian basis + """Convert the vectors at given points into a Cartesian basis. 
Args: points (:class:`~numpy.ndarray`): @@ -767,7 +768,7 @@ def _vector_to_cartesian( def normalize_point( self, point: np.ndarray, *, reflect: bool = False ) -> np.ndarray: - """normalize grid coordinates by applying periodic boundary conditions + """Normalize grid coordinates by applying periodic boundary conditions. Here, points are assumed to be specified by the physical values along the non-symmetric axes of the grid, e.g., by grid coordinates. Normalizing points is @@ -829,7 +830,7 @@ def normalize_point( return point def _coords_symmetric(self, points: np.ndarray) -> np.ndarray: - """return only non-symmetric point coordinates + """Return only non-symmetric point coordinates. Args: points (:class:`~numpy.ndarray`): @@ -846,7 +847,7 @@ def _coords_symmetric(self, points: np.ndarray) -> np.ndarray: def _coords_full( self, points: np.ndarray, *, value: Literal["min", "max"] | float = 0.0 ) -> np.ndarray: - """specify point coordinates along symmetric axes on grids + """Specify point coordinates along symmetric axes on grids. Args: points (:class:`~numpy.ndarray`): @@ -859,7 +860,6 @@ def _coords_full( Returns: :class:`~numpy.ndarray`: The points with all `dim` coordinates - """ if self.num_axes == self.dim: return points @@ -889,7 +889,7 @@ def transform( *, full: bool = False, ) -> np.ndarray: - """converts coordinates from one coordinate system to another + """Converts coordinates from one coordinate system to another. Supported coordinate systems include the following: @@ -1007,7 +1007,7 @@ def contains_point( coords: Literal["cartesian", "cell", "grid"] = "cartesian", full: bool = False, ) -> np.ndarray: - """check whether the point is contained in the grid + """Check whether the point is contained in the grid. Args: point (:class:`~numpy.ndarray`): @@ -1027,7 +1027,7 @@ def contains_point( def iter_mirror_points( self, point: np.ndarray, with_self: bool = False, only_periodic: bool = True ) -> Generator: - """generates all mirror points corresponding to `point` + """Generates all mirror points corresponding to `point` Args: point (:class:`~numpy.ndarray`): @@ -1048,7 +1048,7 @@ def iter_mirror_points( def get_boundary_conditions( self, bc: BoundariesData = "auto_periodic_neumann", rank: int = 0 ) -> Boundaries: - """constructs boundary conditions from a flexible data format + """Constructs boundary conditions from a flexible data format. Args: bc (str or list or tuple or dict): @@ -1083,7 +1083,7 @@ def get_boundary_conditions( return bcs def get_line_data(self, data: np.ndarray, extract: str = "auto") -> dict[str, Any]: - """return a line cut through the grid + """Return a line cut through the grid. Args: data (:class:`~numpy.ndarray`): @@ -1099,7 +1099,7 @@ def get_line_data(self, data: np.ndarray, extract: str = "auto") -> dict[str, An raise NotImplementedError def get_image_data(self, data: np.ndarray) -> dict[str, Any]: - """return a 2d-image of the data + """Return a 2d-image of the data. Args: data (:class:`~numpy.ndarray`): @@ -1111,7 +1111,7 @@ def get_image_data(self, data: np.ndarray) -> dict[str, Any]: raise NotImplementedError def get_vector_data(self, data: np.ndarray, **kwargs) -> dict[str, Any]: - r"""return data to visualize vector field + r"""Return data to visualize vector field. Args: data (:class:`~numpy.ndarray`): @@ -1153,7 +1153,7 @@ def get_random_point( coords: CoordsType = "cartesian", rng: np.random.Generator | None = None, ) -> np.ndarray: - """return a random point within the grid + """Return a random point within the grid. 
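For example, a minimal sketch (assuming a square grid; the returned coordinates are Cartesian by default):

.. code-block:: python

    from pde import UnitGrid

    grid = UnitGrid([8, 8])
    point = grid.get_random_point(boundary_distance=1.0)
    assert grid.contains_point(point)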
Args: boundary_distance (float): @@ -1178,7 +1178,7 @@ def register_operator( rank_in: int = 0, rank_out: int = 0, ): - """register an operator for this grid + """Register an operator for this grid. Example: The method can either be used directly: @@ -1212,7 +1212,7 @@ def make_operator(grid: GridBase): """ def register_operator(factor_func_arg: OperatorFactory): - """helper function to register the operator""" + """Helper function to register the operator.""" cls._operators[name] = OperatorInfo( factory=factor_func_arg, rank_in=rank_in, rank_out=rank_out, name=name ) @@ -1258,7 +1258,7 @@ def operators(self) -> set[str]: return result def _get_operator_info(self, operator: str | OperatorInfo) -> OperatorInfo: - """return the operator defined on this grid + """Return the operator defined on this grid. Args: operator (str): @@ -1319,7 +1319,7 @@ def make_operator_no_bc( self, operator: str | OperatorInfo, **kwargs, ) -> OperatorType: - """return a compiled function applying an operator without boundary conditions + """Return a compiled function applying an operator without boundary conditions. A function that takes the discretized full data as an input and an array of valid data points to which the result of applying the operator is written. @@ -1351,7 +1351,7 @@ def make_operator_no_bc( def make_operator( self, operator: str | OperatorInfo, bc: BoundariesData, **kwargs ) -> Callable[..., np.ndarray]: - """return a compiled function applying an operator with boundary conditions + """Return a compiled function applying an operator with boundary conditions. The returned function takes the discretized data on the grid as an input and returns the data to which the operator `operator` has been applied. The function @@ -1395,7 +1395,7 @@ def make_operator( def apply_op( arr: np.ndarray, out: np.ndarray | None = None, args=None ) -> np.ndarray: - """set boundary conditions and apply operator""" + """Set boundary conditions and apply operator.""" assert arr.shape == shape_in_valid # ensure `out` array is allocated if out is None: @@ -1430,14 +1430,14 @@ def apply_op( def apply_op_ol( arr: np.ndarray, out: np.ndarray | None = None, args=None ) -> np.ndarray: - """make numba implementation of the operator""" + """Make numba implementation of the operator.""" if isinstance(out, (nb.types.NoneType, nb.types.Omitted)): # need to allocate memory for `out` def apply_op_impl( arr: np.ndarray, out: np.ndarray | None = None, args=None ) -> np.ndarray: - """allocates `out` and applies operator to the data""" + """Allocates `out` and applies operator to the data.""" assert arr.shape == shape_in_valid out = np.empty(shape_out, dtype=arr.dtype) @@ -1457,7 +1457,7 @@ def apply_op_impl( def apply_op_impl( arr: np.ndarray, out: np.ndarray | None = None, args=None ) -> np.ndarray: - """applies operator to the data wihtout allocating out""" + """Applies operator to the data without allocating out.""" assert arr.shape == shape_in_valid assert out.shape == shape_out # type: ignore @@ -1477,7 +1477,7 @@ def apply_op_impl( def apply_op_compiled( arr: np.ndarray, out: np.ndarray | None = None, args=None ) -> np.ndarray: - """set boundary conditions and apply operator""" + """Set boundary conditions and apply operator.""" return apply_op(arr, out, args) # return the compiled versions of the operator @@ -1488,7 +1488,7 @@ def apply_op_compiled( raise NotImplementedError(f"Undefined backend '{backend}'") def slice(self, indices: Sequence[int]) -> GridBase: - """return a subgrid of + """Return a subgrid of 
only the specified axes. Args: indices (list): @@ -1502,7 +1502,7 @@ def slice(self, indices: Sequence[int]) -> GridBase: ) def plot(self) -> None: - """visualize the grid""" + """Visualize the grid.""" raise NotImplementedError( f"Plotting is not implemented for class {self.__class__.__name__}" ) @@ -1515,7 +1515,7 @@ def typical_discretization(self) -> float: def integrate( self, data: NumberOrArray, axes: int | Sequence[int] | None = None ) -> NumberOrArray: - """Integrates the discretized data over the grid + """Integrates the discretized data over the grid. Args: data (:class:`~numpy.ndarray`): @@ -1585,7 +1585,7 @@ def integrate( def make_normalize_point_compiled( self, reflect: bool = True ) -> Callable[[np.ndarray], None]: - """return a compiled function that normalizes a point + """Return a compiled function that normalizes a point. Here, the point is assumed to be specified by the physical values along the non-symmetric axes of the grid. Normalizing points is useful to make sure @@ -1612,7 +1612,7 @@ def make_normalize_point_compiled( @jit def normalize_point(point: np.ndarray) -> None: - """helper function normalizing a single point""" + """Helper function normalizing a single point.""" assert point.ndim == 1 # only support single points for i in range(num_axes): if periodic[i]: @@ -1626,7 +1626,7 @@ def normalize_point(point: np.ndarray) -> None: @cached_method() def make_cell_volume_compiled(self, flat_index: bool = False) -> CellVolume: - """return a compiled function returning the volume of a grid cell + """Return a compiled function returning the volume of a grid cell. Args: flat_index (bool): @@ -1671,7 +1671,7 @@ def _make_interpolation_axis_data( with_ghost_cells: bool = False, cell_coords: bool = False, ) -> Callable[[float], tuple[int, int, float, float]]: - """factory for obtaining interpolation information + """Factory for obtaining interpolation information. Args: axis (int): @@ -1697,7 +1697,7 @@ def _make_interpolation_axis_data( @register_jitable def get_axis_data(coord: float) -> tuple[int, int, float, float]: - """determines data for interpolating along one axis""" + """Determines data for interpolating along one axis.""" # determine the index of the left cell and the fraction toward the right if cell_coords: c_l, d_l = divmod(coord, 1.0) @@ -1760,7 +1760,7 @@ def _make_interpolator_compiled( with_ghost_cells: bool = False, cell_coords: bool = False, ) -> Callable[[np.ndarray, np.ndarray], np.ndarray]: - """return a compiled function for linear interpolation on the grid + """Return a compiled function for linear interpolation on the grid. Args: fill (Number, optional): @@ -1791,7 +1791,7 @@ def _make_interpolator_compiled( def interpolate_single( data: np.ndarray, point: np.ndarray ) -> NumberOrArray: - """obtain interpolated value of data at a point + """Obtain interpolated value of data at a point. Args: data (:class:`~numpy.ndarray`): @@ -1823,7 +1823,7 @@ def interpolate_single( def interpolate_single( data: np.ndarray, point: np.ndarray ) -> NumberOrArray: - """obtain interpolated value of data at a point + """Obtain interpolated value of data at a point. Args: data (:class:`~numpy.ndarray`): @@ -1863,7 +1863,7 @@ def interpolate_single( def interpolate_single( data: np.ndarray, point: np.ndarray ) -> NumberOrArray: - """obtain interpolated value of data at a point + """Obtain interpolated value of data at a point. 
Args: data (:class:`~numpy.ndarray`): @@ -1908,7 +1908,7 @@ def interpolate_single( def make_inserter_compiled( self, *, with_ghost_cells: bool = False ) -> Callable[[np.ndarray, np.ndarray, NumberOrArray], None]: - """return a compiled function to insert values at interpolated positions + """Return a compiled function to insert values at interpolated positions. Args: with_ghost_cells (bool): @@ -1934,7 +1934,7 @@ def make_inserter_compiled( def insert( data: np.ndarray, point: np.ndarray, amount: NumberOrArray ) -> None: - """add an amount to a field at an interpolated position + """Add an amount to a field at an interpolated position. Args: data (:class:`~numpy.ndarray`): @@ -1969,7 +1969,7 @@ def insert( def insert( data: np.ndarray, point: np.ndarray, amount: NumberOrArray ) -> None: - """add an amount to a field at an interpolated position + """Add an amount to a field at an interpolated position. Args: data (:class:`~numpy.ndarray`): @@ -2016,7 +2016,7 @@ def insert( def insert( data: np.ndarray, point: np.ndarray, amount: NumberOrArray ) -> None: - """add an amount to a field at an interpolated position + """Add an amount to a field at an interpolated position. Args: data (:class:`~numpy.ndarray`): @@ -2066,7 +2066,8 @@ def insert( return insert # type: ignore def make_integrator(self) -> Callable[[np.ndarray], NumberOrArray]: - """return function that can be used to integrates discretized data over the grid + """Return function that can be used to integrate discretized data over the + grid. If this function is used in a multiprocessing run (using MPI), the integrals are performed on all subgrids and then accumulated. Each process then receives the @@ -2081,7 +2082,7 @@ def make_integrator(self) -> Callable[[np.ndarray], NumberOrArray]: get_cell_volume = self.make_cell_volume_compiled(flat_index=True) def integrate_local(arr: np.ndarray) -> NumberOrArray: - """integrates data over a grid using numpy""" + """Integrates data over a grid using numpy.""" amounts = arr * self.cell_volumes return amounts.sum(axis=tuple(range(-num_axes, 0, 1))) # type: ignore @@ -2089,13 +2090,13 @@ def ol_integrate_local( arr: np.ndarray, ) -> Callable[[np.ndarray], NumberOrArray]: - """integrates data over a grid using numba""" + """Integrates data over a grid using numba.""" if arr.ndim == num_axes: # `arr` is a scalar field grid_shape = self.shape def impl(arr: np.ndarray) -> Number: - """integrate a scalar field""" + """Integrate a scalar field.""" assert arr.shape == grid_shape total = 0 for i in range(arr.size): @@ -2108,7 +2109,7 @@ def impl(arr: np.ndarray) -> Number: data_shape = tensor_shape + self.shape def impl(arr: np.ndarray) -> np.ndarray: # type: ignore - """integrate a tensorial field""" + """Integrate a tensorial field.""" assert arr.shape == data_shape total = np.zeros(tensor_shape) for idx in np.ndindex(*tensor_shape): @@ -2124,7 +2125,7 @@ def impl(arr: np.ndarray) -> np.ndarray: # type: ignore # standard case of a single integral @jit def integrate_global(arr: np.ndarray) -> NumberOrArray: - """integrate data + """Integrate data. Args: arr (:class:`~numpy.ndarray`): discretized data on grid @@ -2138,7 +2139,7 @@ def integrate_global(arr: np.ndarray) -> NumberOrArray: @jit def integrate_global(arr: np.ndarray) -> NumberOrArray: - """integrate data over MPI parallelized grid + """Integrate data over MPI parallelized grid.
Args: arr (:class:`~numpy.ndarray`): discretized data on grid @@ -2150,7 +2151,7 @@ def registered_operators() -> dict[str, list[str]]: - """returns all operators that are currently defined + """Returns all operators that are currently defined. Returns: dict: a dictionary with the names of the operators defined for each grid class diff --git a/pde/grids/boundaries/__init__.py b/pde/grids/boundaries/__init__.py index 5fb71dea..e430407b 100644 --- a/pde/grids/boundaries/__init__.py +++ b/pde/grids/boundaries/__init__.py @@ -1,6 +1,4 @@ -r""" - -This package contains classes for handling the boundary conditions of fields. +r"""This package contains classes for handling the boundary conditions of fields. .. _documentation-boundaries: @@ -53,7 +51,7 @@ Inhomogeneous values can also be specified by directly supplying an array, whose shape needs to be compatible with the boundary, i.e., it needs to have the same shape as the -grid but with the dimension of the axis along which the boundary is specified removed. +grid but with the dimension of the axis along which the boundary is specified removed. The package also supports mixed boundary conditions (depending on both the value and the derivative of the field) and imposing a second derivative. An example is @@ -81,7 +79,7 @@ imposes a value of `2` on all sides of the grid. Finally, the special values 'auto_periodic_neumann' and 'auto_periodic_dirichlet' impose periodic boundary -conditions for periodic axis and a vanishing derivative or value otherwise. For example, +conditions for periodic axes and a vanishing derivative or value otherwise. For example, .. code-block:: python @@ -93,7 +91,7 @@ Note: Derivatives are given relative to the outward normal vector, such that positive - derivatives correspond to a function that increases across the boundary. + derivatives correspond to a function that increases across the boundary. Boundaries overview @@ -116,7 +114,7 @@ boundary given by an expression or a python function * :class:`~pde.grids.boundaries.local.MixedBC`: Imposing the derivative of the field in the outward normal direction proportional to - its value at the boundary + its value at the boundary * :class:`~pde.grids.boundaries.local.ExpressionMixedBC`: Imposing the derivative of the field in the outward normal direction proportional to its value at the boundary with coefficients given by expressions or python functions diff --git a/pde/grids/boundaries/axes.py b/pde/grids/boundaries/axes.py index 56933c4c..cd4e743d 100644 --- a/pde/grids/boundaries/axes.py +++ b/pde/grids/boundaries/axes.py @@ -25,13 +25,13 @@ class Boundaries(list): - """class that bundles all boundary conditions for all axes""" + """Class that bundles all boundary conditions for all axes.""" grid: GridBase - """:class:`~pde.grids.base.GridBase`: grid for which boundaries are defined """ + """:class:`~pde.grids.base.GridBase`: grid for which boundaries are defined.""" def __init__(self, boundaries): - """initialize with a list of boundaries""" + """Initialize with a list of boundaries.""" if len(boundaries) == 0: raise BCDataError("List of boundaries must not be empty") @@ -66,8 +66,7 @@ def __str__(self): @classmethod def from_data(cls, grid: GridBase, boundaries, rank: int = 0) -> Boundaries: - """ - Creates all boundaries from given data + """Creates all boundaries from given data.
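For example, a minimal sketch (assuming a square grid that is periodic along the first axis only; the accepted data formats are those described in :mod:`~pde.grids.boundaries`):

.. code-block:: python

    from pde import UnitGrid
    from pde.grids.boundaries.axes import Boundaries

    grid = UnitGrid([8, 8], periodic=[True, False])
    bcs = Boundaries.from_data(grid, ["periodic", {"value": 0}])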
Args: grid (:class:`~pde.grids.base.GridBase`): @@ -174,13 +173,13 @@ def __eq__(self, other): @property def boundaries(self) -> Iterator[BCBase]: - """iterator over all non-periodic boundaries""" + """Iterator over all non-periodic boundaries.""" for boundary_axis in self: # iterate all axes if not boundary_axis.periodic: # skip periodic axes yield from boundary_axis def check_value_rank(self, rank: int) -> None: - """check whether the values at the boundaries have the correct rank + """Check whether the values at the boundaries have the correct rank. Args: rank (int): @@ -194,7 +193,7 @@ def check_value_rank(self, rank: int) -> None: @classmethod def get_help(cls) -> str: - """Return information on how boundary conditions can be set""" + """Return information on how boundary conditions can be set.""" return ( "Boundary conditions for each axis are set using a list: [bc_x, bc_y, " "bc_z]. If the associated axis is periodic, the boundary condition needs " @@ -202,17 +201,17 @@ def get_help(cls) -> str: ) def copy(self) -> Boundaries: - """create a copy of the current boundaries""" + """Create a copy of the current boundaries.""" return self.__class__([bc.copy() for bc in self]) @property def periodic(self) -> list[bool]: - """:class:`~numpy.ndarray`: a boolean array indicating which dimensions - are periodic according to the boundary conditions""" + """:class:`~numpy.ndarray`: a boolean array indicating which dimensions are + periodic according to the boundary conditions.""" return self.grid.periodic def __getitem__(self, index): - """extract specific boundary conditions + """Extract specific boundary conditions. Args: index (int or str): @@ -234,7 +233,7 @@ def __getitem__(self, index): return super().__getitem__(index) def __setitem__(self, index, data) -> None: - """set specific boundary conditions + """Set specific boundary conditions. Args: index (int or str): @@ -268,7 +267,7 @@ def __setitem__(self, index, data) -> None: return super().__setitem__(index, data) def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" result = [] for b in self: try: @@ -282,7 +281,7 @@ def get_mathematical_representation(self, field_name: str = "C") -> str: def set_ghost_cells( self, data_full: np.ndarray, *, set_corners: bool = False, args=None ) -> None: - """set the ghost cells for all boundaries + """Set the ghost cells for all boundaries. 
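For example, a minimal sketch (assuming a full data array that carries a single layer of ghost cells along each axis):

.. code-block:: python

    import numpy as np
    from pde import UnitGrid

    grid = UnitGrid([4, 4])
    bcs = grid.get_boundary_conditions({"value": 1})
    data_full = np.zeros((6, 6))  # 4x4 valid cells plus ghost layers
    bcs.set_ghost_cells(data_full)  # fills the outermost layer in place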
Args: data_full (:class:`~numpy.ndarray`): @@ -323,7 +322,7 @@ def set_ghost_cells( ) def make_ghost_cell_setter(self) -> GhostCellSetter: - """return function that sets the ghost cells on a full array""" + """Return function that sets the ghost cells on a full array.""" ghost_cell_setters = tuple(b.make_ghost_cell_setter() for b in self) # TODO: use numba.literal_unroll @@ -341,7 +340,7 @@ def make_ghost_cell_setter(self) -> GhostCellSetter: def chain( fs: Sequence[GhostCellSetter], inner: GhostCellSetter | None = None ) -> GhostCellSetter: - """helper function composing setters of all axes recursively""" + """Helper function composing setters of all axes recursively.""" first, rest = fs[0], fs[1:] diff --git a/pde/grids/boundaries/axis.py b/pde/grids/boundaries/axis.py index 1d7d36ab..719b3520 100644 --- a/pde/grids/boundaries/axis.py +++ b/pde/grids/boundaries/axis.py @@ -32,12 +32,12 @@ class BoundaryAxisBase: - """base class for defining boundaries of a single axis in a grid""" + """Base class for defining boundaries of a single axis in a grid.""" low: BCBase - """:class:`~pde.grids.boundaries.local.BCBase`: Boundary condition at lower end """ + """:class:`~pde.grids.boundaries.local.BCBase`: Boundary condition at lower end.""" high: BCBase - """:class:`~pde.grids.boundaries.local.BCBase`: Boundary condition at upper end """ + """:class:`~pde.grids.boundaries.local.BCBase`: Boundary condition at upper end.""" def __init__(self, low: BCBase, high: BCBase): """ @@ -79,14 +79,14 @@ def __str__(self): @classmethod def get_help(cls) -> str: - """Return information on how boundary conditions can be set""" + """Return information on how boundary conditions can be set.""" return ( "Boundary conditions for each side can be set using a tuple: " f"(lower_bc, upper_bc). {BCBase.get_help()}" ) def copy(self) -> BoundaryAxisBase: - """return a copy of itself, but with a reference to the same grid""" + """Return a copy of itself, but with a reference to the same grid.""" return self.__class__(self.low.copy(), self.high.copy()) def __eq__(self, other): @@ -112,7 +112,7 @@ def __iter__(self): yield self.high def __getitem__(self, index) -> BCBase: - """returns one of the sides""" + """Returns one of the sides.""" if index == 0 or index is False: return self.low elif index == 1 or index is True: @@ -121,7 +121,7 @@ def __getitem__(self, index) -> BCBase: raise IndexError("Index must be 0/False or 1/True") def __setitem__(self, index, data) -> None: - """set one of the sides""" + """Set one of the sides.""" # determine which side was selected upper = {0: False, False: False, 1: True, True: True}[index] @@ -142,7 +142,7 @@ def __setitem__(self, index, data) -> None: @property def grid(self) -> GridBase: - """:class:`~pde.grids.base.GridBase`: Underlying grid""" + """:class:`~pde.grids.base.GridBase`: Underlying grid.""" return self.low.grid @property @@ -162,7 +162,7 @@ def rank(self) -> int: return self.low.rank def get_mathematical_representation(self, field_name: str = "C") -> tuple[str, str]: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" return ( self.low.get_mathematical_representation(field_name), self.high.get_mathematical_representation(field_name), @@ -171,7 +171,7 @@ def get_mathematical_representation(self, field_name: str = "C") -> tuple[str, s def get_sparse_matrix_data( self, idx: tuple[int, ...] 
) -> tuple[float, dict[int, float]]: - """sets the elements of the sparse representation of this condition + """Sets the elements of the sparse representation of this condition. Args: idx (tuple): @@ -193,7 +193,7 @@ def get_sparse_matrix_data( return 0, {axis_coord: 1} def set_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None: - """set the ghost cell values for all boundaries + """Set the ghost cell values for all boundaries. Args: data_full (:class:`~numpy.ndarray`): @@ -212,7 +212,7 @@ def set_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None: self.low.set_ghost_cells(data_full, args=args) def make_ghost_cell_setter(self) -> GhostCellSetter: - """return function that sets the ghost cells for this axis on a full array""" + """Return function that sets the ghost cells for this axis on a full array.""" # get the functions that handle the data ghost_cell_sender_low = self.low.make_ghost_cell_sender() ghost_cell_sender_high = self.high.make_ghost_cell_sender() @@ -221,7 +221,7 @@ def make_ghost_cell_setter(self) -> GhostCellSetter: @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper function setting the conditions on all axes.""" # send boundary information to other nodes if using MPI ghost_cell_sender_low(data_full, args=args) ghost_cell_sender_high(data_full, args=args) @@ -233,11 +233,11 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: class BoundaryPair(BoundaryAxisBase): - """represents the two boundaries of an axis along a single dimension""" + """Represents the two boundaries of an axis along a single dimension.""" @classmethod def from_data(cls, grid: GridBase, axis: int, data, rank: int = 0) -> BoundaryPair: - """create boundary pair from some data + """Create boundary pair from some data. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -307,7 +307,7 @@ def from_data(cls, grid: GridBase, axis: int, data, rank: int = 0) -> BoundaryPa return cls(low, high) def check_value_rank(self, rank: int) -> None: - """check whether the values at the boundaries have the correct rank + """Check whether the values at the boundaries have the correct rank. Args: rank (int): @@ -321,7 +321,7 @@ def check_value_rank(self, rank: int) -> None: class BoundaryPeriodic(BoundaryPair): - """represent a periodic axis""" + """Represent a periodic axis.""" def __init__(self, grid: GridBase, axis: int, flip_sign: bool = False): """ @@ -356,11 +356,11 @@ def __str__(self): return '"periodic"' def copy(self) -> BoundaryPeriodic: - """return a copy of itself, but with a reference to the same grid""" + """Return a copy of itself, but with a reference to the same grid.""" return self.__class__(grid=self.grid, axis=self.axis, flip_sign=self.flip_sign) def check_value_rank(self, rank: int) -> None: - """check whether the values at the boundaries have the correct rank + """Check whether the values at the boundaries have the correct rank. Args: rank (int): @@ -371,7 +371,7 @@ def check_value_rank(self, rank: int) -> None: def get_boundary_axis( grid: GridBase, axis: int, data, rank: int = 0 ) -> BoundaryAxisBase: - """return object representing the boundary condition for a single axis + """Return object representing the boundary condition for a single axis. 
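For example, a minimal sketch (assuming a 1d grid with vanishing derivatives imposed on both sides of the axis):

.. code-block:: python

    from pde import UnitGrid
    from pde.grids.boundaries.axis import get_boundary_axis

    grid = UnitGrid([8])
    bc_axis = get_boundary_axis(grid, 0, {"derivative": 0})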
Args: grid (:class:`~pde.grids.base.GridBase`): diff --git a/pde/grids/boundaries/local.py b/pde/grids/boundaries/local.py index dba86f11..dc378c49 100644 --- a/pde/grids/boundaries/local.py +++ b/pde/grids/boundaries/local.py @@ -90,11 +90,11 @@ class BCDataError(ValueError): - """exception that signals that incompatible data was supplied for the BC""" + """Exception that signals that incompatible data was supplied for the BC.""" def _get_arr_1d(arr, idx: tuple[int, ...], axis: int) -> tuple[np.ndarray, int, tuple]: - """extract the 1d array along axis at point idx + """Extract the 1d array along axis at point idx. Args: arr (:class:`~numpy.ndarray`): The full data array @@ -148,7 +148,7 @@ def _get_arr_1d(arr, idx: tuple[int, ...], axis: int) -> tuple[np.ndarray, int, def _make_get_arr_1d( dim: int, axis: int ) -> Callable[[np.ndarray, tuple[int, ...]], tuple[np.ndarray, int, tuple]]: - """create function that extracts a 1d array at a given position + """Create function that extracts a 1d array at a given position. Args: dim (int): @@ -172,7 +172,7 @@ def _make_get_arr_1d( if dim == 1: def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: - """extract the 1d array along axis at point idx""" + """Extract the 1d array along axis at point idx.""" i = idx[0] bc_idx: tuple = (...,) arr_1d = arr @@ -182,7 +182,7 @@ def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: if axis == 0: def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: - """extract the 1d array along axis at point idx""" + """Extract the 1d array along axis at point idx.""" i, y = idx bc_idx = (..., y) arr_1d = arr[..., :, y] @@ -191,7 +191,7 @@ def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: elif axis == 1: def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: - """extract the 1d array along axis at point idx""" + """Extract the 1d array along axis at point idx.""" x, i = idx bc_idx = (..., x) arr_1d = arr[..., x, :] @@ -201,7 +201,7 @@ def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: if axis == 0: def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: - """extract the 1d array along axis at point idx""" + """Extract the 1d array along axis at point idx.""" i, y, z = idx bc_idx = (..., y, z) arr_1d = arr[..., :, y, z] @@ -210,7 +210,7 @@ def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: elif axis == 1: def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: - """extract the 1d array along axis at point idx""" + """Extract the 1d array along axis at point idx.""" x, i, z = idx bc_idx = (..., x, z) arr_1d = arr[..., x, :, z] @@ -219,7 +219,7 @@ def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: elif axis == 2: def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: - """extract the 1d array along axis at point idx""" + """Extract the 1d array along axis at point idx.""" x, y, i = idx bc_idx = (..., x, y) arr_1d = arr[..., x, y, :] @@ -235,7 +235,7 @@ def get_arr_1d(arr: np.ndarray, idx: tuple[int, ...]) -> ResultType: class BCBase(metaclass=ABCMeta): - """represents a single boundary in an BoundaryPair instance""" + """Represents a single boundary in a BoundaryPair instance.""" names: list[str] """list: identifiers used to specify the given boundary class""" @@ -290,7 +290,7 @@ def __init__(self, grid: GridBase, axis: int, upper: bool, *, rank: int = 0): self._logger = logging.getLogger(self.__class__.__name__) def __init_subclass__(cls, **kwargs): # 
@NoSelf - """register all subclasses to reconstruct them later""" + """Register all subclasses to reconstruct them later.""" super().__init_subclass__(**kwargs) if cls is not BCBase: @@ -318,7 +318,7 @@ def axis_coord(self) -> float: return self.grid.axes_bounds[self.axis][0] def _field_repr(self, field_name: str) -> str: - """return representation of the field to which the condition is applied + """Return representation of the field to which the condition is applied. Args: field_name (str): Symbol of the field variable @@ -338,12 +338,12 @@ def _field_repr(self, field_name: str) -> str: return f"{field_name}" def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" raise NotImplementedError @classmethod def get_help(cls) -> str: - """Return information on how boundary conditions can be set""" + """Return information on how boundary conditions can be set.""" types = ", ".join( f"'{subclass.names[0]}'" for subclass in cls._subclasses.values() @@ -377,7 +377,7 @@ def __str__(self): return f"{self.__class__.__name__}({', '.join(args)})" def __eq__(self, other): - """checks for equality neglecting the `upper` property""" + """Checks for equality neglecting the `upper` property.""" if not isinstance(other, self.__class__): return NotImplemented return ( @@ -403,7 +403,7 @@ def from_str( rank: int = 0, **kwargs, ) -> BCBase: - r"""creates boundary from a given string identifier + r"""Creates boundary from a given string identifier. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -441,7 +441,7 @@ def from_dict( *, rank: int = 0, ) -> BCBase: - """create boundary from data given in dictionary + """Create boundary from data given in dictionary. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -486,7 +486,7 @@ def from_data( *, rank: int = 0, ) -> BCBase: - """create boundary from some data + """Create boundary from some data. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -540,7 +540,7 @@ def from_data( return bc def to_subgrid(self: TBC, subgrid: GridBase) -> TBC: - """converts this boundary condition to one valid for a given subgrid + """Converts this boundary condition to one valid for a given subgrid. Args: subgrid (:class:`GridBase`): @@ -552,7 +552,7 @@ def to_subgrid(self: TBC, subgrid: GridBase) -> TBC: raise NotImplementedError("Boundary condition cannot be transfered to subgrid") def check_value_rank(self, rank: int) -> None: - """check whether the values at the boundaries have the correct rank + """Check whether the values at the boundaries have the correct rank. Args: rank (int): @@ -579,7 +579,7 @@ def get_virtual_point(self, arr, idx: tuple[int, ...] | None = None) -> float: @abstractmethod def make_virtual_point_evaluator(self) -> VirtualPointEvaluator: - """returns a function evaluating the value at the virtual support point + """Returns a function evaluating the value at the virtual support point. Returns: function: A function that takes the data array and an index marking @@ -589,7 +589,7 @@ def make_virtual_point_evaluator(self) -> VirtualPointEvaluator: """ def make_adjacent_evaluator(self) -> AdjacentEvaluator: - """returns a function evaluating the value adjacent to a given point + """Returns a function evaluating the value adjacent to a given point. .. 
deprecated:: Since 2023-12-19 @@ -607,7 +607,7 @@ def make_adjacent_evaluator(self) -> AdjacentEvaluator: @abstractmethod def set_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None: - """set the ghost cell values for this boundary + """Set the ghost cell values for this boundary. Args: data_full (:class:`~numpy.ndarray`): @@ -622,16 +622,17 @@ def set_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None: """ def make_ghost_cell_sender(self) -> GhostCellSetter: - """return function that might mpi_send data to set ghost cells for this boundary""" + """Return function that might mpi_send data to set ghost cells for this + boundary.""" @register_jitable def noop(data_full: np.ndarray, args=None) -> None: - """no-operation as the default case""" + """No-operation as the default case.""" return noop # type: ignore def _get_value_cell_index(self, with_ghost_cells: bool) -> int: - """determine index of the cell from which field value is read + """Determine index of the cell from which field value is read. Args: with_ghost_cells (bool): @@ -650,7 +651,7 @@ def _get_value_cell_index(self, with_ghost_cells: bool) -> int: return 0 def make_ghost_cell_setter(self) -> GhostCellSetter: - """return function that sets the ghost cells for this boundary""" + """Return function that sets the ghost cells for this boundary.""" normal = self.normal axis = self.axis @@ -663,7 +664,7 @@ def make_ghost_cell_setter(self) -> GhostCellSetter: @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper function setting the conditions on all axes.""" data_valid = data_full[..., 1:-1] val = vp_value(data_valid, (np_idx,), args=args) if normal: @@ -677,7 +678,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper function setting the conditions on all axes.""" data_valid = data_full[..., 1:-1, 1:-1] for j in range(num_y): val = vp_value(data_valid, (np_idx, j), args=args) @@ -691,7 +692,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper function setting the conditions on all axes.""" data_valid = data_full[..., 1:-1, 1:-1] for i in range(num_x): val = vp_value(data_valid, (i, np_idx), args=args) @@ -706,7 +707,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper function setting the conditions on all axes.""" data_valid = data_full[..., 1:-1, 1:-1, 1:-1] for j in range(num_y): for k in range(num_z): @@ -721,7 +722,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper function setting the conditions on all axes.""" data_valid = data_full[..., 1:-1, 1:-1, 1:-1] for i in range(num_x): for k in range(num_z): @@ -736,7 +737,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper 
function setting the conditions on all axes.""" data_valid = data_full[..., 1:-1, 1:-1, 1:-1] for i in range(num_x): for j in range(num_y): @@ -753,7 +754,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: class _MPIBC(BCBase): - """represents a boundary that is exchanged with another MPI process""" + """Represents a boundary that is exchanged with another MPI process.""" homogeneous = False @@ -799,7 +800,7 @@ def _repr_value(self): return [f"neighbor={self._neighbor_id}"] def __eq__(self, other): - """checks for equality neglecting the `upper` property""" + """Checks for equality neglecting the `upper` property.""" if not isinstance(other, self.__class__): return NotImplemented return ( @@ -811,12 +812,12 @@ def __eq__(self, other): ) def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" axis_name = self.grid.axes[self.axis] return f"MPI @ {axis_name}={self.axis_coord}" def send_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None: - """mpi_send the ghost cell values for this boundary + """mpi_send the ghost cell values for this boundary. Args: data_full (:class:`~numpy.ndarray`): @@ -835,7 +836,7 @@ def make_virtual_point_evaluator(self) -> VirtualPointEvaluator: raise NotImplementedError def make_ghost_cell_sender(self) -> GhostCellSetter: - """return function that sends data to set ghost cells for other boundaries""" + """Return function that sends data to set ghost cells for other boundaries.""" from ...tools.mpi import mpi_send cell = self._neighbor_id @@ -882,7 +883,7 @@ def ghost_cell_sender(data_full: np.ndarray, args=None) -> None: return register_jitable(ghost_cell_sender) # type: ignore def make_ghost_cell_setter(self) -> GhostCellSetter: - """return function that sets the ghost cells for this boundary""" + """Return function that sets the ghost cells for this boundary.""" from ...tools.mpi import mpi_recv cell = self._neighbor_id @@ -937,7 +938,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: class UserBC(BCBase): - """represents a boundary whose virtual point are set by the user. + """Represents a boundary whose virtual points are set by the user. Boundary conditions will only be set when a dictionary :code:`{TARGET: value}` is supplied as argument `args` to :meth:`set_ghost_cells` or the numba equivalent. @@ -955,12 +956,12 @@ class UserBC(BCBase): names = ["user"] def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" axis_name = self.grid.axes[self.axis] return f"user-controlled @ {axis_name}={self.axis_coord}" def copy(self: TBC, upper: bool | None = None, rank: int | None = None) -> TBC: - """return a copy of itself, but with a reference to the same grid""" + """Return a copy of itself, but with a reference to the same grid.""" return self.__class__( grid=self.grid, axis=self.axis, @@ -969,7 +970,7 @@ def copy(self: TBC, upper: bool | None = None, rank: int | None = None) -> TBC: ) def to_subgrid(self: TBC, subgrid: GridBase) -> TBC: - """converts this boundary condition to one valid for a given subgrid + """Converts this boundary condition to one valid for a given subgrid. 
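As the UserBC docstring above explains, the condition only acts when an `args` dictionary is supplied. A minimal sketch, assuming the field-level `set_ghost_cells` method forwards `args` to the boundary (the concrete value is made up):

import pde

grid = pde.UnitGrid([8])
field = pde.ScalarField(grid, 1.0)
# enforce the boundary value 2 through the user-controlled condition;
# without the args dictionary the ghost cells stay untouched
field.set_ghost_cells("user", args={"value": 2.0})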
Args: subgrid (:class:`GridBase`): @@ -1029,7 +1030,7 @@ def make_virtual_point_evaluator(self) -> VirtualPointEvaluator: dx = self.grid.discretization[self.axis] def extract_value(values, arr: np.ndarray, idx: tuple[int, ...]): - """helper function that extracts the correct value from supplied ones""" + """Helper function that extracts the correct value from supplied ones.""" if isinstance(values, (nb.types.Number, Number)): # scalar was supplied => simply return it return values @@ -1042,7 +1043,7 @@ def extract_value(values, arr: np.ndarray, idx: tuple[int, ...]): @overload(extract_value) def ol_extract_value(values, arr: np.ndarray, idx: tuple[int, ...]): - """helper function that extracts the correct value from supplied ones""" + """Helper function that extracts the correct value from supplied ones.""" if isinstance(values, (nb.types.Number, Number)): # scalar was supplied => simply return it def impl(values, arr: np.ndarray, idx: tuple[int, ...]): @@ -1062,7 +1063,7 @@ def impl(values, arr: np.ndarray, idx: tuple[int, ...]): @register_jitable def virtual_point(arr: np.ndarray, idx: tuple[int, ...], args): - """evaluate the virtual point at `idx`""" + """Evaluate the virtual point at `idx`""" if "virtual_point" in args: # set the virtual point directly return extract_value(args["virtual_point"], arr, idx) @@ -1084,12 +1085,12 @@ def virtual_point(arr: np.ndarray, idx: tuple[int, ...], args): return virtual_point # type: ignore def make_ghost_cell_setter(self) -> GhostCellSetter: - """return function that sets the ghost cells for this boundary""" + """Return function that sets the ghost cells for this boundary.""" ghost_cell_setter_inner = super().make_ghost_cell_setter() @register_jitable def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: - """helper function setting the conditions on all axes""" + """Helper function setting the conditions on all axes.""" if args is None: return # no-op when no specific arguments are given @@ -1105,7 +1106,7 @@ def ghost_cell_setter(data_full: np.ndarray, args=None) -> None: class ExpressionBC(BCBase): - """represents a boundary whose virtual point is calculated from an expression + """Represents a boundary whose virtual point is calculated from an expression. The expression is given as a string and will be parsed by :mod:`sympy` or a function that is optionally compiled with :mod:`numba`. The expression can contain typical @@ -1245,7 +1246,7 @@ def _test_values(self) -> tuple[float, ...]: return tuple(test_values) def _prepare_function(self, func: Callable | float, do_jit: bool) -> Callable: - """helper function that compiles a single function given as a parameter""" + """Helper function that compiles a single function given as a parameter.""" if not callable(func): # the function is just a number, which we also support func_value = float(func) # TODO: support complex numbers @@ -1285,7 +1286,8 @@ def value_func(*args): return value_func # type: ignore def _get_function_from_userfunc(self, do_jit: bool) -> Callable: - """returns function from user function evaluating the value of the virtual point + """Returns function from user function evaluating the value of the virtual + point. 
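Since ExpressionBC accepts strings that are parsed by sympy, a short sketch of the corresponding boundary specification (the expressions are made up; the `value_expression` and `derivative_expression` keys are assumptions based on the documented py-pde boundary format):

import pde

grid = pde.CartesianGrid([[0, 1]], 32)
field = pde.ScalarField.from_expression(grid, "x")
# boundary value and outward derivative given as sympy-parsed expressions
bc = [{"value_expression": "x**2"}, {"derivative_expression": "-cos(x)"}]
result = field.laplace(bc=bc)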
Args: do_jit (bool): @@ -1336,7 +1338,7 @@ def virtual_from_mixed(adjacent_value, dx, *args): raise ValueError(f"Unknown target `{target}` for expression") def _get_function_from_expression(self, do_jit: bool) -> Callable: - """returns function from expression evaluating the value of the virtual point + """Returns function from expression evaluating the value of the virtual point. Args: do_jit (bool): @@ -1400,7 +1402,7 @@ def value_func(grid_value, dx, x, y, z, t): @cached_method() def _func(self, do_jit: bool) -> Callable: - """returns function that evaluates the value of the virtual point + """Returns function that evaluates the value of the virtual point. Args: do_jit (bool): @@ -1428,7 +1430,7 @@ def _repr_value(self): return res def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" axis_name = self.grid.axes[self.axis] target = self._input["target"] @@ -1456,7 +1458,7 @@ def get_mathematical_representation(self, field_name: str = "C") -> str: raise NotImplementedError(f"Unsupported target `{target}`") def __eq__(self, other): - """checks for equality neglecting the `upper` property""" + """Checks for equality neglecting the `upper` property.""" if not isinstance(other, self.__class__): return NotImplemented return ( @@ -1468,7 +1470,7 @@ def __eq__(self, other): def copy( self: ExpressionBC, upper: bool | None = None, rank: int | None = None ) -> ExpressionBC: - """return a copy of itself, but with a reference to the same grid""" + """Return a copy of itself, but with a reference to the same grid.""" return self.__class__( grid=self.grid, axis=self.axis, @@ -1482,7 +1484,7 @@ def copy( ) def to_subgrid(self: ExpressionBC, subgrid: GridBase) -> ExpressionBC: - """converts this boundary condition to one valid for a given subgrid + """Converts this boundary condition to one valid for a given subgrid. Args: subgrid (:class:`GridBase`): @@ -1595,7 +1597,7 @@ def make_virtual_point_evaluator(self) -> VirtualPointEvaluator: @jit def virtual_point(arr: np.ndarray, idx: tuple[int, ...], args=None) -> float: - """evaluate the virtual point at `idx`""" + """Evaluate the virtual point at `idx`""" _, _, bc_idx = get_arr_1d(arr, idx) grid_value = arr[idx] coords = bc_coords[bc_idx] @@ -1629,7 +1631,7 @@ def virtual_point(arr: np.ndarray, idx: tuple[int, ...], args=None) -> float: class ExpressionValueBC(ExpressionBC): - """represents a boundary whose value is calculated from an expression + """Represents a boundary whose value is calculated from an expression. The expression is given as a string and will be parsed by :mod:`sympy`. The expression can contain typical mathematical operators and may depend on the value @@ -1666,7 +1668,7 @@ def __init__( class ExpressionDerivativeBC(ExpressionBC): - """represents a boundary whose outward derivative is calculated from an expression + """Represents a boundary whose outward derivative is calculated from an expression. The expression is given as a string and will be parsed by :mod:`sympy`. The expression can contain typical mathematical operators and may depend on the value @@ -1703,7 +1705,7 @@ def __init__( class ExpressionMixedBC(ExpressionBC): - """represents a boundary whose outward derivative is calculated from an expression + """Represents a boundary whose outward derivative is calculated from an expression. The expression is given as a string and will be parsed by :mod:`sympy`. 
The expression can contain typical mathematical operators and may depend on the value @@ -1742,7 +1744,7 @@ def __init__( class ConstBCBase(BCBase): - """base class representing a boundary whose virtual point is set from constants""" + """Base class representing a boundary whose virtual point is set from constants.""" _value: np.ndarray @@ -1792,7 +1794,7 @@ def __init__( self.value = value # type: ignore def __eq__(self, other): - """checks for equality neglecting the `upper` property""" + """Checks for equality neglecting the `upper` property.""" if not isinstance(other, self.__class__): return NotImplemented return super().__eq__(other) and np.array_equal(self.value, other.value) @@ -1804,7 +1806,7 @@ def value(self) -> np.ndarray: @value.setter @fill_in_docstring def value(self, value: float | np.ndarray | str = 0): - """set the value of this boundary condition + """Set the value of this boundary condition. Warning: {WARNING_EXEC} @@ -1854,7 +1856,7 @@ def __str__(self): @fill_in_docstring def _parse_value(self, value: float | np.ndarray | str) -> np.ndarray: - """parses a boundary value + """Parses a boundary value. Warning: {WARNING_EXEC} @@ -1953,7 +1955,7 @@ def _parse_value(self, value: float | np.ndarray | str) -> np.ndarray: return result def link_value(self, value: np.ndarray): - """link value of this boundary condition to external array""" + """Link value of this boundary condition to external array.""" assert value.data.c_contiguous shape = self._shape_tensor + self._shape_boundary @@ -1972,7 +1974,7 @@ def copy( rank: int | None = None, value: float | np.ndarray | str | None = None, ) -> ConstBCBase: - """return a copy of itself, but with a reference to the same grid""" + """Return a copy of itself, but with a reference to the same grid.""" obj = self.__class__( grid=self.grid, axis=self.axis, @@ -1985,7 +1987,7 @@ def copy( return obj def to_subgrid(self: ConstBCBase, subgrid: GridBase) -> ConstBCBase: - """converts this boundary condition to one valid for a given subgrid + """Converts this boundary condition to one valid for a given subgrid. Args: subgrid (:class:`GridBase`): @@ -2009,7 +2011,7 @@ def to_subgrid(self: ConstBCBase, subgrid: GridBase) -> ConstBCBase: ) def _make_value_getter(self) -> Callable[[], np.ndarray]: - """return a (compiled) function for obtaining the value. + """Return a (compiled) function for obtaining the value. Note: This should only be used in numba compiled functions that need to @@ -2033,7 +2035,7 @@ def _make_value_getter(self) -> Callable[[], np.ndarray]: @nb.njit(nb.typeof(self._value)(), inline="always") def get_value() -> np.ndarray: - """helper function returning the linked array""" + """Helper function returning the linked array.""" return nb.carray(address_as_void_pointer(mem_addr), shape, dtype) # type: ignore # keep a reference to the array to prevent garbage collection @@ -2043,11 +2045,11 @@ def get_value() -> np.ndarray: class ConstBC1stOrderBase(ConstBCBase): - """represents a single boundary in an BoundaryPair instance""" + """Represents a single boundary in a BoundaryPair instance.""" @abstractmethod def get_virtual_point_data(self, compiled: bool = False) -> tuple[Any, Any, int]: - """return data suitable for calculating virtual points + """Return data suitable for calculating virtual points. Args: compiled (bool): @@ -2062,7 +2064,7 @@ def get_virtual_point_data(self, compiled: bool = False) -> tuple[Any, Any, int] def get_sparse_matrix_data( self, idx: tuple[int, ...] 
) -> tuple[float, dict[int, float]]: - """sets the elements of the sparse representation of this condition + """Sets the elements of the sparse representation of this condition. Args: idx (tuple): @@ -2087,7 +2089,7 @@ def get_sparse_matrix_data( return const, {data[2]: factor} def get_virtual_point(self, arr, idx: tuple[int, ...] | None = None) -> float: - """calculate the value of the virtual point outside the boundary + """Calculate the value of the virtual point outside the boundary. Args: arr (array): @@ -2134,7 +2136,7 @@ def make_virtual_point_evaluator(self) -> VirtualPointEvaluator: def virtual_point( arr: np.ndarray, idx: tuple[int, ...], args=None ) -> float: - """evaluate the virtual point at `idx`""" + """Evaluate the virtual point at `idx`""" arr_1d, _, _ = get_arr_1d(arr, idx) if normal: val_field = arr_1d[..., axis, index] @@ -2148,7 +2150,7 @@ def virtual_point( def virtual_point( arr: np.ndarray, idx: tuple[int, ...], args=None ) -> float: - """evaluate the virtual point at `idx`""" + """Evaluate the virtual point at `idx`""" arr_1d, _, bc_idx = get_arr_1d(arr, idx) if normal: val_field = arr_1d[..., axis, index] @@ -2181,7 +2183,7 @@ def make_adjacent_evaluator(self) -> AdjacentEvaluator: def adjacent_point( arr_1d: np.ndarray, i_point: int, bc_idx: tuple[int, ...] ) -> FloatNumerical: - """evaluate the value adjacent to the current point""" + """Evaluate the value adjacent to the current point.""" # determine the parameters for evaluating adjacent point. Note # that defining the variables c and f for the interior points # seems needless, but it turns out that this results in a 10x @@ -2204,7 +2206,7 @@ def adjacent_point( @register_jitable(inline="always") def adjacent_point(arr_1d, i_point, bc_idx) -> float: - """evaluate the value adjacent to the current point""" + """Evaluate the value adjacent to the current point.""" # determine the parameters for evaluating adjacent point. Note # that defining the variables c and f for the interior points # seems needless, but it turns out that this results in a 10x @@ -2250,7 +2252,7 @@ def set_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None: class _PeriodicBC(ConstBC1stOrderBase): - """represents one part of a boundary condition""" + """Represents one part of a boundary condition.""" def __init__( self, @@ -2280,7 +2282,7 @@ def __str__(self): return '"periodic"' def copy(self: _PeriodicBC, upper: bool | None = None) -> _PeriodicBC: # type: ignore - """return a copy of itself, but with a reference to the same grid""" + """Return a copy of itself, but with a reference to the same grid.""" return self.__class__( grid=self.grid, axis=self.axis, @@ -2289,7 +2291,7 @@ def copy(self: _PeriodicBC, upper: bool | None = None) -> _PeriodicBC: # type: ) def to_subgrid(self: _PeriodicBC, subgrid: GridBase) -> _PeriodicBC: - """converts this boundary condition to one valid for a given subgrid + """Converts this boundary condition to one valid for a given subgrid. 
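The first-order virtual-point data assembled here follows from linear interpolation across the cell boundary; spelled out for a Dirichlet condition with boundary value b (plain arithmetic, not library code):

# The boundary sits halfway between the last valid cell u_n and the ghost
# cell u_g, so imposing b = (u_n + u_g) / 2 yields
#     u_g = 2 * b - u_n,
# i.e. a constant 2*b, a factor of -1, and the index of the last valid cell,
# matching the (const, factor, index) triple returned by get_virtual_point_data().
b, u_n = 5.0, 4.0
u_g = 2 * b - u_n  # -> 6.0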
Args: subgrid (:class:`GridBase`): @@ -2309,7 +2311,7 @@ def to_subgrid(self: _PeriodicBC, subgrid: GridBase) -> _PeriodicBC: ) def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" if self.upper: other_coord = self.grid.axes_bounds[self.axis][0] else: @@ -2343,12 +2345,12 @@ def factor_func(): class DirichletBC(ConstBC1stOrderBase): - """represents a boundary condition imposing the value""" + """Represents a boundary condition imposing the value.""" names = ["value", "dirichlet"] # identifiers for this boundary condition def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" axis_name = self.grid.axes[self.axis] field = self._field_repr(field_name) return f"{field} = {self.value} @ {axis_name}={self.axis_coord}" @@ -2390,13 +2392,13 @@ def factor_func(): class NeumannBC(ConstBC1stOrderBase): - """represents a boundary condition imposing the derivative in the outward - normal direction of the boundary""" + """Represents a boundary condition imposing the derivative in the outward normal + direction of the boundary.""" names = ["derivative", "neumann"] # identifiers for this boundary condition def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" sign = " " if self.upper else "-" axis_name = self.grid.axes[self.axis] deriv = f"∂{self._field_repr(field_name)}/∂{axis_name}" @@ -2505,7 +2507,7 @@ def __init__( self.const = self._parse_value(const) def __eq__(self, other): - """checks for equality neglecting the `upper` property""" + """Checks for equality neglecting the `upper` property.""" if not isinstance(other, self.__class__): return NotImplemented return super().__eq__(other) and self.const == other.const @@ -2517,7 +2519,7 @@ def copy( value: float | np.ndarray | str | None = None, const: float | np.ndarray | str | None = None, ) -> MixedBC: - """return a copy of itself, but with a reference to the same grid""" + """Return a copy of itself, but with a reference to the same grid.""" obj = self.__class__( grid=self.grid, axis=self.axis, @@ -2531,7 +2533,7 @@ def copy( return obj def to_subgrid(self: MixedBC, subgrid: GridBase) -> MixedBC: - """converts this boundary condition to one valid for a given subgrid + """Converts this boundary condition to one valid for a given subgrid. Args: subgrid (:class:`GridBase`): @@ -2556,7 +2558,7 @@ def to_subgrid(self: MixedBC, subgrid: GridBase) -> MixedBC: ) def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" sign = "" if self.upper else "-" axis_name = self.grid.axes[self.axis] field_repr = self._field_repr(field_name) @@ -2627,11 +2629,11 @@ def factor_func(): class ConstBC2ndOrderBase(ConstBCBase): - """abstract base class for boundary conditions of 2nd order""" + """Abstract base class for boundary conditions of 2nd order.""" @abstractmethod def get_virtual_point_data(self) -> tuple[Any, Any, int, Any, int]: - """return data suitable for calculating virtual points + """Return data suitable for calculating virtual points. 
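The Dirichlet, Neumann, and mixed conditions above are selected through the identifiers in their `names` attributes. A brief sketch combining two of them (grid and coefficients are illustrative; the dictionary form of the mixed condition is taken from the documented py-pde boundary format):

import pde

grid = pde.CartesianGrid([[0, 1]], 32)
field = pde.ScalarField.from_expression(grid, "x")
# Dirichlet below, Robin-type mixed condition du/dn = -2*u + 1 above
bc = [{"value": 0}, {"type": "mixed", "value": -2, "const": 1}]
result = field.laplace(bc=bc)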
Returns: tuple: the data structure associated with this virtual point @@ -2640,7 +2642,7 @@ def get_virtual_point_data(self) -> tuple[Any, Any, int, Any, int]: def get_sparse_matrix_data( self, idx: tuple[int, ...] ) -> tuple[float, dict[int, float]]: - """sets the elements of the sparse representation of this condition + """Sets the elements of the sparse representation of this condition. Args: idx (tuple): @@ -2668,7 +2670,7 @@ def get_sparse_matrix_data( return const, {data[2]: factor1, data[4]: factor2} def get_virtual_point(self, arr, idx: tuple[int, ...] | None = None) -> float: - """calculate the value of the virtual point outside the boundary + """Calculate the value of the virtual point outside the boundary. Args: arr (array): @@ -2727,7 +2729,7 @@ def make_virtual_point_evaluator(self) -> VirtualPointEvaluator: @register_jitable def virtual_point(arr: np.ndarray, idx: tuple[int, ...], args=None): - """evaluate the virtual point at `idx`""" + """Evaluate the virtual point at `idx`""" arr_1d, _, _ = get_arr_1d(arr, idx) if normal: val1 = arr_1d[..., axis, data[2]] @@ -2741,7 +2743,7 @@ def virtual_point(arr: np.ndarray, idx: tuple[int, ...], args=None): @register_jitable def virtual_point(arr: np.ndarray, idx: tuple[int, ...], args=None): - """evaluate the virtual point at `idx`""" + """Evaluate the virtual point at `idx`""" arr_1d, _, bc_idx = get_arr_1d(arr, idx) if normal: val1 = arr_1d[..., axis, data[2]] @@ -2784,7 +2786,7 @@ def make_adjacent_evaluator(self) -> AdjacentEvaluator: def adjacent_point( arr_1d: np.ndarray, i_point: int, bc_idx: tuple[int, ...] ) -> float: - """evaluate the value adjacent to the current point""" + """Evaluate the value adjacent to the current point.""" # determine the parameters for evaluating adjacent point if i_point == i_bndry: data = data_vp @@ -2805,7 +2807,7 @@ def adjacent_point( def adjacent_point( arr_1d: np.ndarray, i_point: int, bc_idx: tuple[int, ...] ) -> float: - """evaluate the value adjacent to the current point""" + """Evaluate the value adjacent to the current point.""" # determine the parameters for evaluating adjacent point if i_point == i_bndry: data = data_vp @@ -2863,13 +2865,13 @@ def set_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None: class CurvatureBC(ConstBC2ndOrderBase): - """represents a boundary condition imposing the 2nd normal derivative at the - boundary""" + """Represents a boundary condition imposing the 2nd normal derivative at the + boundary.""" names = ["curvature", "second_derivative", "extrapolate"] # identifiers for this BC def get_mathematical_representation(self, field_name: str = "C") -> str: - """return mathematical representation of the boundary condition""" + """Return mathematical representation of the boundary condition.""" sign = " " if self.upper else "-" axis_name = self.grid.axes[self.axis] deriv = f"∂²{self._field_repr(field_name)}/∂{axis_name}²" @@ -2878,7 +2880,7 @@ def get_mathematical_representation(self, field_name: str = "C") -> str: def get_virtual_point_data( self, ) -> tuple[np.ndarray, np.ndarray, int, np.ndarray, int]: - """return data suitable for calculating virtual points + """Return data suitable for calculating virtual points. 
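A matching sketch for the second-order condition defined next, using the "curvature" identifier from its `names` list (grid and value are illustrative):

import pde

grid = pde.CartesianGrid([[0, 1]], 32)
field = pde.ScalarField.from_expression(grid, "x**2")
# impose a vanishing second derivative at both ends ("extrapolate")
result = field.laplace(bc=[{"curvature": 0}, {"curvature": 0}])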
Returns: tuple: the data structure associated with this virtual point @@ -2902,15 +2904,15 @@ def get_virtual_point_data( class NormalDirichletBC(DirichletBC): - """represents a boundary condition imposing the value on normal components""" + """Represents a boundary condition imposing the value on normal components.""" names = ["normal_value", "normal_dirichlet", "dirichlet_normal"] normal = True class NormalNeumannBC(NeumannBC): - """represents a boundary condition imposing the derivative of normal components - in the outward normal direction of the boundary""" + """Represents a boundary condition imposing the derivative of normal components in + the outward normal direction of the boundary.""" names = ["normal_derivative", "normal_neumann", "neumann_normal"] normal = True @@ -2946,15 +2948,15 @@ class NormalMixedBC(MixedBC): class NormalCurvatureBC(CurvatureBC): - """represents a boundary condition imposing the 2nd normal derivative onto the - normal components at the boundary""" + """Represents a boundary condition imposing the 2nd normal derivative onto the + normal components at the boundary.""" names = ["normal_curvature"] # identifiers for this boundary condition normal = True def registered_boundary_condition_classes() -> dict[str, type[BCBase]]: - """returns all boundary condition classes that are currently defined + """Returns all boundary condition classes that are currently defined. Returns: dict: a dictionary with the names of the boundary condition classes @@ -2967,7 +2969,7 @@ def registered_boundary_condition_classes() -> dict[str, type[BCBase]]: def registered_boundary_condition_names() -> dict[str, type[BCBase]]: - """returns all named boundary conditions that are currently defined + """Returns all named boundary conditions that are currently defined. Returns: dict: a dictionary with the names of the boundary conditions that can be used diff --git a/pde/grids/cartesian.py b/pde/grids/cartesian.py index e92f3b10..bf01acc1 100644 --- a/pde/grids/cartesian.py +++ b/pde/grids/cartesian.py @@ -1,5 +1,4 @@ -""" -Cartesian grids of arbitrary dimension. +"""Cartesian grids of arbitrary dimension. .. codeauthor:: David Zwicker """ @@ -28,7 +27,7 @@ class CartesianGrid(GridBase): - r""" d-dimensional Cartesian grid with uniform discretization for each axis + r"""D-dimensional Cartesian grid with uniform discretization for each axis. The grids can be thought of as a collection of n-dimensional boxes, called cells, of equal length in each dimension. The bounds then defined the total volume covered by @@ -48,7 +47,7 @@ class CartesianGrid(GridBase): \Delta x^{(k)} &= \frac{x^{(k)}_\mathrm{max} - x^{(k)}_\mathrm{min}}{N^{(k)}} - where :math:`N^{(k)}` is the number of cells along this dimension. Consequently, + where :math:`N^{(k)}` is the number of cells along this dimension. Consequently, cells have dimension :math:`\Delta x^{(k)}` and cover the interval :math:`[x^{(k)}_\mathrm{min}, x^{(k)}_\mathrm{max}]`. """ @@ -150,7 +149,7 @@ def state(self) -> dict[str, Any]: @classmethod def from_state(cls, state: dict[str, Any]) -> CartesianGrid: # type: ignore - """create a field from a stored `state`. + """Create a field from a stored `state`. 
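Since `state` and `from_state` form a round trip, a quick sketch (bounds, shape, and periodicity are arbitrary):

import pde

grid = pde.CartesianGrid([[0, 2], [0, 1]], [8, 4], periodic=[True, False])
restored = pde.CartesianGrid.from_state(grid.state)
assert restored.shape == grid.shape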
Args: state (dict): @@ -202,13 +201,13 @@ def volume(self) -> float: @property def cell_volume_data(self): - """size associated with each cell""" + """Size associated with each cell.""" return tuple(self.discretization) def iter_mirror_points( self, point: np.ndarray, with_self: bool = False, only_periodic: bool = True ) -> Generator: - """generates all mirror points corresponding to `point` + """Generates all mirror points corresponding to `point` Args: point (:class:`~numpy.ndarray`): @@ -244,7 +243,7 @@ def get_random_point( coords: CoordsType = "cartesian", rng: np.random.Generator | None = None, ) -> np.ndarray: - """return a random point within the grid + """Return a random point within the grid. Args: boundary_distance (float): @@ -287,7 +286,7 @@ def difference_vector( ) def get_line_data(self, data: np.ndarray, extract: str = "auto") -> dict[str, Any]: - """return a line cut through the given data + """Return a line cut through the given data. Args: data (:class:`~numpy.ndarray`): @@ -314,7 +313,7 @@ def get_line_data(self, data: np.ndarray, extract: str = "auto") -> dict[str, An ) def _get_axis(axis): - """determine the axis from a given specifier""" + """Determine the axis from a given specifier.""" try: axis = int(axis) except ValueError: @@ -410,7 +409,7 @@ def get_vector_data(self, data: np.ndarray, **kwargs) -> dict[str, Any]: @plot_on_axes() def plot(self, ax, **kwargs): - r"""visualize the grid + r"""Visualize the grid. Args: {PLOT_ARGS} @@ -439,7 +438,7 @@ def plot(self, ax, **kwargs): ax.set_aspect(1) def slice(self, indices: Sequence[int]) -> CartesianGrid: - """return a subgrid of only the specified axes + """Return a subgrid of only the specified axes. Args: indices (list): @@ -458,7 +457,7 @@ def slice(self, indices: Sequence[int]) -> CartesianGrid: class UnitGrid(CartesianGrid): - r"""d-dimensional Cartesian grid with unit discretization in all directions + r"""D-dimensional Cartesian grid with unit discretization in all directions. The grids can be thought of as a collection of d-dimensional cells of unit length. The `shape` parameter determines how many boxes there are in each direction. The @@ -499,7 +498,7 @@ def state(self) -> dict[str, Any]: @classmethod def from_state(cls, state: dict[str, Any]) -> UnitGrid: # type: ignore - """create a field from a stored `state`. + """Create a field from a stored `state`. Args: state (dict): @@ -512,7 +511,7 @@ def from_state(cls, state: dict[str, Any]) -> UnitGrid: # type: ignore return obj def to_cartesian(self) -> CartesianGrid: - """convert unit grid to :class:`CartesianGrid` + """Convert unit grid to :class:`CartesianGrid` Returns: :class:`CartesianGrid`: The equivalent cartesian grid @@ -522,7 +521,7 @@ def to_cartesian(self) -> CartesianGrid: ) def slice(self, indices: Sequence[int]) -> UnitGrid: - """return a subgrid of only the specified axes + """Return a subgrid of only the specified axes. Args: indices (list): diff --git a/pde/grids/coordinates/__init__.py b/pde/grids/coordinates/__init__.py index 16bf7add..91f6388a 100644 --- a/pde/grids/coordinates/__init__.py +++ b/pde/grids/coordinates/__init__.py @@ -1,5 +1,4 @@ -""" -Package collecting classes representing orthonormal coordinate systems +"""Package collecting classes representing orthonormal coordinate systems. .. 
autosummary:: :nosignatures: diff --git a/pde/grids/coordinates/base.py b/pde/grids/coordinates/base.py index 2a4d127e..a16fb230 100644 --- a/pde/grids/coordinates/base.py +++ b/pde/grids/coordinates/base.py @@ -12,11 +12,11 @@ class DimensionError(ValueError): - """exception indicating that dimensions were inconsistent""" + """Exception indicating that dimensions were inconsistent.""" class CoordinatesBase: - """Base class for orthonormal coordinate systems""" + """Base class for orthonormal coordinate systems.""" # properties that are defined in subclasses dim: int @@ -45,7 +45,7 @@ def _pos_to_cart(self, points: np.ndarray) -> np.ndarray: raise NotImplementedError def pos_to_cart(self, points: np.ndarray) -> np.ndarray: - """convert coordinates to Cartesian coordinates + """Convert coordinates to Cartesian coordinates. Args: points (:class:`~numpy.ndarray`): @@ -64,7 +64,7 @@ def _pos_from_cart(self, points: np.ndarray) -> np.ndarray: raise NotImplementedError def pos_from_cart(self, points: np.ndarray) -> np.ndarray: - """convert Cartesian coordinates to coordinates in this system + """Convert Cartesian coordinates to coordinates in this system. Args: points (:class:`~numpy.ndarray`): @@ -79,7 +79,7 @@ def pos_from_cart(self, points: np.ndarray) -> np.ndarray: return self._pos_from_cart(points) def pos_diff(self, p1: np.ndarray, p2: np.ndarray) -> np.ndarray: - """return Cartesian vector(s) pointing from p1 to p2 + """Return Cartesian vector(s) pointing from p1 to p2. Args: p1 (:class:`~numpy.ndarray`): @@ -98,7 +98,7 @@ def pos_diff(self, p1: np.ndarray, p2: np.ndarray) -> np.ndarray: return self.pos_to_cart(p2) - self.pos_to_cart(p1) # type: ignore def distance(self, p1: np.ndarray, p2: np.ndarray) -> float: - """Calculate the distance between two points + """Calculate the distance between two points. Args: p1 (:class:`~numpy.ndarray`): @@ -118,7 +118,7 @@ def _scale_factors(self, points: np.ndarray) -> np.ndarray: return np.diag(self.metric(points)) ** 2 def scale_factors(self, points: np.ndarray) -> np.ndarray: - """calculate the scale factors at various points + """Calculate the scale factors at various points. Args: points (:class:`~numpy.ndarray`): @@ -144,7 +144,7 @@ def _mapping_jacobian(self, points: np.ndarray) -> np.ndarray: return jac def mapping_jacobian(self, points: np.ndarray) -> np.ndarray: - """returns the Jacobian matrix of the coordinate mapping + """Returns the Jacobian matrix of the coordinate mapping. Args: points (:class:`~numpy.ndarray`): @@ -163,7 +163,7 @@ def _volume_factor(self, points: np.ndarray) -> ArrayLike: return np.prod(self._scale_factors(points), axis=0) # type: ignore def volume_factor(self, points: np.ndarray) -> ArrayLike: - """calculate the volume factors at various points + """Calculate the volume factors at various points. Args: points (:class:`~numpy.ndarray`): @@ -188,7 +188,7 @@ def _cell_volume(self, c_low: np.ndarray, c_high: np.ndarray) -> np.ndarray: return cell_volumes def cell_volume(self, c_low: np.ndarray, c_high: np.ndarray) -> np.ndarray: - """calculate the volume between coordinate lines + """Calculate the volume between coordinate lines. Args: c_low (:class:`~numpy.ndarray`): @@ -208,7 +208,7 @@ def cell_volume(self, c_low: np.ndarray, c_high: np.ndarray) -> np.ndarray: return self._cell_volume(c_low, c_high) def metric(self, points: np.ndarray) -> np.ndarray: - """calculate the metric tensor at coordinate points + """Calculate the metric tensor at coordinate points. 
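The conversion and metric helpers above compose naturally; a minimal sketch with polar coordinates, whose scale factors are (1, r) (assuming PolarCoordinates is importable from this package, as the layout below suggests):

import numpy as np
from pde.grids.coordinates import PolarCoordinates

c = PolarCoordinates()
p = np.array([2.0, np.pi / 2])       # (r, φ)
c.pos_to_cart(p)                     # -> approximately (0, 2)
c.scale_factors(p)                   # -> (1, 2), i.e. (1, r)
c.distance(np.array([2.0, 0.0]), p)  # chord length between the two points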
Args: points (:class:`~numpy.ndarray`): @@ -227,7 +227,7 @@ def _basis_rotation(self, points: np.ndarray) -> np.ndarray: raise NotImplementedError def basis_rotation(self, points: np.ndarray) -> np.ndarray: - """returns rotation matrix rotating basis vectors to Cartesian coordinates + """Returns rotation matrix rotating basis vectors to Cartesian coordinates. Args: points (:class:`~numpy.ndarray`): @@ -244,7 +244,7 @@ def basis_rotation(self, points: np.ndarray) -> np.ndarray: return self._basis_rotation(points) def vec_to_cart(self, points: np.ndarray, components: np.ndarray) -> np.ndarray: - """convert the vectors at given points to a Cartesian basis + """Convert the vectors at given points to a Cartesian basis. Args: points (:class:`~numpy.ndarray`): diff --git a/pde/grids/coordinates/bipolar.py b/pde/grids/coordinates/bipolar.py index b77932a9..e79fbb1b 100644 --- a/pde/grids/coordinates/bipolar.py +++ b/pde/grids/coordinates/bipolar.py @@ -11,7 +11,7 @@ class BipolarCoordinates(CoordinatesBase): - """2-dimensional bipolar coordinates""" + """2-dimensional bipolar coordinates.""" dim = 2 axes = ["σ", "τ"] @@ -25,7 +25,7 @@ def __init__(self, scale_parameter: float = 1): self.scale_parameter = scale_parameter def __repr__(self) -> str: - """return instance as string""" + """Return instance as string.""" return f"{self.__class__.__name__}(scale_parameter={self.scale_parameter})" def __eq__(self, other): diff --git a/pde/grids/coordinates/bispherical.py b/pde/grids/coordinates/bispherical.py index bcf706e7..50f00f1d 100644 --- a/pde/grids/coordinates/bispherical.py +++ b/pde/grids/coordinates/bispherical.py @@ -11,7 +11,7 @@ class BisphericalCoordinates(CoordinatesBase): - """3-dimensional bispherical coordinates""" + """3-dimensional bispherical coordinates.""" dim = 3 axes = ["σ", "τ", "φ"] @@ -25,7 +25,7 @@ def __init__(self, scale_parameter: float = 1): self.scale_parameter = scale_parameter def __repr__(self) -> str: - """return instance as string""" + """Return instance as string.""" return f"{self.__class__.__name__}(scale_parameter={self.scale_parameter})" def __eq__(self, other): diff --git a/pde/grids/coordinates/cartesian.py b/pde/grids/coordinates/cartesian.py index 8d2058b5..b8e4675a 100644 --- a/pde/grids/coordinates/cartesian.py +++ b/pde/grids/coordinates/cartesian.py @@ -11,7 +11,7 @@ class CartesianCoordinates(CoordinatesBase): - """n-dimensional Cartesian coordinates""" + """N-dimensional Cartesian coordinates.""" _objs: dict[int, CartesianCoordinates] = {} @@ -40,7 +40,7 @@ def __init__(self, dim: int): self.coordinate_limits = [(-np.inf, np.inf)] * self.dim def __repr__(self) -> str: - """return instance as string""" + """Return instance as string.""" return f"{self.__class__.__name__}(dim={self.dim})" def __eq__(self, other): diff --git a/pde/grids/coordinates/cylindrical.py b/pde/grids/coordinates/cylindrical.py index 741edadd..22dfa3a0 100644 --- a/pde/grids/coordinates/cylindrical.py +++ b/pde/grids/coordinates/cylindrical.py @@ -11,7 +11,7 @@ class CylindricalCoordinates(CoordinatesBase): - """n-dimensional Cartesian coordinates""" + """3-dimensional cylindrical coordinates.""" _singleton: CylindricalCoordinates | None = None dim = 3 diff --git a/pde/grids/coordinates/polar.py b/pde/grids/coordinates/polar.py index 1d69979d..b6959433 100644 --- a/pde/grids/coordinates/polar.py +++ b/pde/grids/coordinates/polar.py @@ -11,7 +11,7 @@ class PolarCoordinates(CoordinatesBase): - """2-dimensional polar coordinates""" + """2-dimensional polar coordinates.""" dim = 2 axes = 
["r", "φ"] @@ -27,7 +27,7 @@ def __new__(cls): return cls._singleton def __repr__(self) -> str: - """return instance as string""" + """Return instance as string.""" return f"{self.__class__.__name__}()" def __eq__(self, other): diff --git a/pde/grids/coordinates/spherical.py b/pde/grids/coordinates/spherical.py index bdfbb3a6..35d711e6 100644 --- a/pde/grids/coordinates/spherical.py +++ b/pde/grids/coordinates/spherical.py @@ -11,7 +11,7 @@ class SphericalCoordinates(CoordinatesBase): - """3-dimensional spherical coordinates""" + """3-dimensional spherical coordinates.""" dim = 3 axes = ["r", "θ", "φ"] @@ -28,7 +28,7 @@ def __new__(cls): return cls._singleton def __repr__(self) -> str: - """return instance as string""" + """Return instance as string.""" return f"{self.__class__.__name__}()" def __eq__(self, other): diff --git a/pde/grids/cylindrical.py b/pde/grids/cylindrical.py index 296b3b9b..b58a77ba 100644 --- a/pde/grids/cylindrical.py +++ b/pde/grids/cylindrical.py @@ -1,5 +1,4 @@ -""" -Cylindrical grids with azimuthal symmetry +"""Cylindrical grids with azimuthal symmetry. .. codeauthor:: David Zwicker """ @@ -28,7 +27,7 @@ class CylindricalSymGrid(GridBase): - r"""3-dimensional cylindrical grid assuming polar symmetry + r"""3-dimensional cylindrical grid assuming polar symmetry. The polar symmetry implies that states only depend on the radial and axial coordinates :math:`r` and :math:`z`, respectively. These are discretized uniformly as @@ -143,7 +142,7 @@ def state(self) -> dict[str, Any]: @classmethod def from_state(cls, state: dict[str, Any]) -> CylindricalSymGrid: # type: ignore - """create a field from a stored `state`. + """Create a field from a stored `state`. Args: state (dict): @@ -221,7 +220,7 @@ def get_random_point( coords: CoordsType = "cartesian", rng: np.random.Generator | None = None, ) -> np.ndarray: - """return a random point within the grid + """Return a random point within the grid. Args: boundary_distance (float): @@ -275,7 +274,7 @@ def difference_vector( ) def get_line_data(self, data: np.ndarray, extract: str = "auto") -> dict[str, Any]: - """return a line cut for the cylindrical grid + """Return a line cut for the cylindrical grid. Args: data (:class:`~numpy.ndarray`): @@ -328,7 +327,7 @@ def get_line_data(self, data: np.ndarray, extract: str = "auto") -> dict[str, An } def get_image_data(self, data: np.ndarray) -> dict[str, Any]: - """return a 2d-image of the data + """Return a 2d-image of the data. Args: data (:class:`~numpy.ndarray`): @@ -359,7 +358,7 @@ def get_image_data(self, data: np.ndarray) -> dict[str, Any]: def iter_mirror_points( self, point: np.ndarray, with_self: bool = False, only_periodic: bool = True ) -> Generator: - """generates all mirror points corresponding to `point` + """Generates all mirror points corresponding to `point` Args: point (:class:`~numpy.ndarray`): @@ -383,7 +382,7 @@ def iter_mirror_points( @cached_property() def cell_volume_data(self) -> tuple[np.ndarray, float]: - """:class:`~numpy.ndarray`: the volumes of all cells""" + """:class:`~numpy.ndarray`: the volumes of all cells.""" dr, dz = self.discretization rs = self.axes_coords[0] r_vols = 2 * np.pi * dr * rs @@ -393,7 +392,7 @@ def cell_volume_data(self) -> tuple[np.ndarray, float]: def get_cartesian_grid( self, mode: Literal["valid", "full"] = "valid" ) -> CartesianGrid: - """return a Cartesian grid for this Cylindrical one + """Return a Cartesian grid for this Cylindrical one. 
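A short sketch of the Cartesian embedding described here (radius, bounds, and shape are illustrative):

import pde

grid = pde.CylindricalSymGrid(radius=2, bounds_z=[0, 1], shape=(8, 4))
cart = grid.get_cartesian_grid(mode="valid")  # Cartesian grid inscribed in the cylinder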
Args: mode (str): @@ -421,7 +420,7 @@ def get_cartesian_grid( return CartesianGrid(grid_bounds, grid_shape) def slice(self, indices: Sequence[int]) -> CartesianGrid | PolarSymGrid: - """return a subgrid of only the specified axes + """Return a subgrid of only the specified axes. Args: indices (list): diff --git a/pde/grids/operators/__init__.py b/pde/grids/operators/__init__.py index ec884dfb..1a5f2b50 100644 --- a/pde/grids/operators/__init__.py +++ b/pde/grids/operators/__init__.py @@ -1,5 +1,4 @@ -""" -Package collecting modules defining discretized operators for different grids. +"""Package collecting modules defining discretized operators for different grids. These operators can either be used directly or they are imported by the respective methods defined on fields and grids. diff --git a/pde/grids/operators/cartesian.py b/pde/grids/operators/cartesian.py index aee1709f..e5a4907f 100644 --- a/pde/grids/operators/cartesian.py +++ b/pde/grids/operators/cartesian.py @@ -1,5 +1,4 @@ -""" -This module implements differential operators on Cartesian grids +"""This module implements differential operators on Cartesian grids. .. autosummary:: :nosignatures: @@ -12,7 +11,7 @@ make_tensor_divergence make_poisson_solver -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -38,7 +37,7 @@ def _get_laplace_matrix_1d(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: - """get sparse matrix for Laplace operator on a 1d Cartesian grid + """Get sparse matrix for Laplace operator on a 1d Cartesian grid. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -80,7 +79,7 @@ def _get_laplace_matrix_1d(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: def _get_laplace_matrix_2d(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: - """get sparse matrix for Laplace operator on a 2d Cartesian grid + """Get sparse matrix for Laplace operator on a 2d Cartesian grid. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -100,7 +99,7 @@ def _get_laplace_matrix_2d(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: scale_x, scale_y = bcs.grid.discretization**-2 def i(x, y): - """helper function for flattening the index + """Helper function for flattening the index. This is equivalent to np.ravel_multi_index((x, y), (dim_x, dim_y)) """ @@ -149,7 +148,7 @@ def i(x, y): def _get_laplace_matrix_3d(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: - """get sparse matrix for Laplace operator on a 3d Cartesian grid + """Get sparse matrix for Laplace operator on a 3d Cartesian grid. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -169,7 +168,7 @@ def _get_laplace_matrix_3d(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: scale_x, scale_y, scale_z = bcs.grid.discretization**-2 def i(x, y, z): - """helper function for flattening the index + """Helper function for flattening the index. This is equivalent to np.ravel_multi_index((x, y, z), (dim_x, dim_y, dim_z)) """ @@ -236,7 +235,7 @@ def i(x, y, z): def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: - """get sparse matrix for Laplace operator on a Cartesian grid + """Get sparse matrix for Laplace operator on a Cartesian grid. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -261,7 +260,7 @@ def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: def _make_laplace_scipy_nd(grid: CartesianGrid) -> OperatorType: - """make a Laplace operator using the scipy module + """Make a Laplace operator using the scipy module. 
This only supports uniform discretizations. @@ -277,7 +276,7 @@ def _make_laplace_scipy_nd(grid: CartesianGrid) -> OperatorType: scaling = uniform_discretization(grid) ** -2 def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" assert arr.shape == grid._shape_full valid = (...,) + (slice(1, -1),) * grid.dim with np.errstate(all="ignore"): @@ -288,7 +287,7 @@ def laplace(arr: np.ndarray, out: np.ndarray) -> None: def _make_laplace_numba_1d(grid: CartesianGrid) -> OperatorType: - """make a 1d Laplace operator using numba compilation + """Make a 1d Laplace operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -302,7 +301,7 @@ def _make_laplace_numba_1d(grid: CartesianGrid) -> OperatorType: @jit def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" for i in range(1, dim_x + 1): out[i - 1] = (arr[i - 1] - 2 * arr[i] + arr[i + 1]) * scale @@ -310,7 +309,7 @@ def laplace(arr: np.ndarray, out: np.ndarray) -> None: def _make_laplace_numba_2d(grid: CartesianGrid) -> OperatorType: - """make a 2d Laplace operator using numba compilation + """Make a 2d Laplace operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -327,7 +326,7 @@ def _make_laplace_numba_2d(grid: CartesianGrid) -> OperatorType: @jit(parallel=parallel) def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): lap_x = (arr[i - 1, j] - 2 * arr[i, j] + arr[i + 1, j]) * scale_x @@ -338,7 +337,7 @@ def laplace(arr: np.ndarray, out: np.ndarray) -> None: def _make_laplace_numba_3d(grid: CartesianGrid) -> OperatorType: - """make a 3d Laplace operator using numba compilation + """Make a 3d Laplace operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -355,7 +354,7 @@ def _make_laplace_numba_3d(grid: CartesianGrid) -> OperatorType: @jit(parallel=parallel) def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): for k in range(1, dim_z + 1): @@ -369,7 +368,7 @@ def laplace(arr: np.ndarray, out: np.ndarray) -> None: def _make_laplace_numba_spectral_1d(grid: CartesianGrid) -> OperatorType: - """make a 1d spectral Laplace operator using numba compilation + """Make a 1d spectral Laplace operator using numba compilation. 
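To see these factories in action end to end, one can compile an operator through the grid interface and compare against the analytic Laplacian; a sketch (the loose tolerance accounts for the coarse discretization):

import numpy as np
import pde

grid = pde.CartesianGrid([[0, 2 * np.pi]], 64, periodic=True)
laplace = grid.make_operator("laplace", bc="periodic")
field = pde.ScalarField.from_expression(grid, "sin(x)")
assert np.allclose(laplace(field.data), -field.data, atol=1e-2)  # ∇²sin(x) = -sin(x)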
Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -387,17 +386,17 @@ def _make_laplace_numba_spectral_1d(grid: CartesianGrid) -> OperatorType: @register_jitable def laplace_impl(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" out[:] = fft.ifft(factor * fft.fft(arr[1:-1])) @overload(laplace_impl) def ol_laplace(arr: np.ndarray, out: np.ndarray): - """integrates data over a grid using numba""" + """Numba overload selecting the implementation for real or complex data.""" if np.isrealobj(arr): # special case of a real array def laplace_real(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" out[:] = fft.ifft(factor * fft.fft(arr[1:-1])).real return laplace_real @@ -407,14 +406,14 @@ def laplace_real(arr: np.ndarray, out: np.ndarray) -> None: @jit def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" laplace_impl(arr, out) return laplace # type: ignore def _make_laplace_numba_spectral_2d(grid: CartesianGrid) -> OperatorType: - """make a 2d spectral Laplace operator using numba compilation + """Make a 2d spectral Laplace operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -432,17 +431,17 @@ def _make_laplace_numba_spectral_2d(grid: CartesianGrid) -> OperatorType: @register_jitable def laplace_impl(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" out[:] = fft.ifft2(factor * fft.fft2(arr[1:-1, 1:-1])) @overload(laplace_impl) def ol_laplace(arr: np.ndarray, out: np.ndarray): - """integrates data over a grid using numba""" + """Numba overload selecting the implementation for real or complex data.""" if np.isrealobj(arr): # special case of a real array def laplace_real(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" out[:] = fft.ifft2(factor * fft.fft2(arr[1:-1, 1:-1])).real return laplace_real @@ -452,7 +451,7 @@ def laplace_real(arr: np.ndarray, out: np.ndarray) -> None: @jit def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply Laplace operator to array `arr`""" + """Apply Laplace operator to array `arr`""" laplace_impl(arr, out) return laplace # type: ignore @@ -464,7 +463,7 @@ def make_laplace( *, backend: Literal["auto", "numba", "numba-spectral", "scipy"] = "auto", ) -> OperatorType: - """make a Laplace operator on a Cartesian grid + """Make a Laplace operator on a Cartesian grid. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -519,7 +518,7 @@ def make_laplace( def _make_gradient_scipy_nd( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a gradient operator using the scipy module + """Make a gradient operator using the scipy module.
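The spectral variants above apply the Laplacian by multiplying the Fourier modes with a precomputed `factor`, which is the symbol -k² of the operator; a self-contained sketch of the idea for periodic 1d data (grid parameters are illustrative):

```python
import numpy as np

n, length = 64, 2 * np.pi
x = np.linspace(0, length, n, endpoint=False)
k = 2 * np.pi * np.fft.fftfreq(n, d=length / n)  # angular wavenumbers
lap = np.fft.ifft(-(k**2) * np.fft.fft(np.sin(x))).real
assert np.allclose(lap, -np.sin(x))  # d2/dx2 sin = -sin, exact to round-off
```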
Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -547,7 +546,7 @@ def _make_gradient_scipy_nd( raise ValueError(f"Unknown derivative type `{method}`") def gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" assert arr.shape == grid._shape_full if out is None: out = np.empty(shape_out) @@ -566,7 +565,7 @@ def gradient(arr: np.ndarray, out: np.ndarray) -> None: def _make_gradient_numba_1d( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a 1d gradient operator using numba compilation + """Make a 1d gradient operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -586,7 +585,7 @@ def _make_gradient_numba_1d( @jit def gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in range(1, dim_x + 1): if method == "central": out[0, i - 1] = (arr[i + 1] - arr[i - 1]) / (2 * dx) @@ -601,7 +600,7 @@ def gradient(arr: np.ndarray, out: np.ndarray) -> None: def _make_gradient_numba_2d( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a 2d gradient operator using numba compilation + """Make a 2d gradient operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -626,7 +625,7 @@ def _make_gradient_numba_2d( @jit(parallel=parallel) def gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): if method == "central": @@ -645,7 +644,7 @@ def gradient(arr: np.ndarray, out: np.ndarray) -> None: def _make_gradient_numba_3d( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a 3d gradient operator using numba compilation + """Make a 3d gradient operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -670,7 +669,7 @@ def _make_gradient_numba_3d( @jit(parallel=parallel) def gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): for k in range(1, dim_z + 1): @@ -715,7 +714,7 @@ def make_gradient( backend: Literal["auto", "numba", "scipy"] = "auto", method: Literal["central", "forward", "backward"] = "central", ) -> OperatorType: - """make a gradient operator on a Cartesian grid + """Make a gradient operator on a Cartesian grid. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -763,7 +762,7 @@ def make_gradient( def _make_gradient_squared_numba_1d( grid: CartesianGrid, central: bool = True ) -> OperatorType: - """make a 1d squared gradient operator using numba compilation + """Make a 1d squared gradient operator using numba compilation. 
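The `method` argument above switches between central, forward, and backward differences; a quick numpy comparison of their accuracy (the test function is arbitrary):

```python
import numpy as np

dx = 1e-2
x = np.arange(0, 1 + dx, dx)
f = np.exp(x)                          # derivative is exp(x) itself
central = (f[2:] - f[:-2]) / (2 * dx)  # error O(dx**2)
forward = (f[1:] - f[:-1]) / dx        # error O(dx)
err_c = np.abs(central - np.exp(x[1:-1])).max()
err_f = np.abs(forward - np.exp(x[:-1])).max()
assert err_c < err_f                   # central wins at equal resolution
```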
Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -785,7 +784,7 @@ def _make_gradient_squared_numba_1d( @jit def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in range(1, dim_x + 1): out[i - 1] = (arr[i + 1] - arr[i - 1]) ** 2 * scale @@ -795,7 +794,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @jit def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in range(1, dim_x + 1): diff_l = (arr[i + 1] - arr[i]) ** 2 diff_r = (arr[i] - arr[i - 1]) ** 2 @@ -807,7 +806,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: def _make_gradient_squared_numba_2d( grid: CartesianGrid, central: bool = True ) -> OperatorType: - """make a 2d squared gradient operator using numba compilation + """Make a 2d squared gradient operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -832,7 +831,7 @@ def _make_gradient_squared_numba_2d( @jit(parallel=parallel) def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): term_x = (arr[i + 1, j] - arr[i - 1, j]) ** 2 * scale_x @@ -845,7 +844,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @jit(parallel=parallel) def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): term_x = ( @@ -864,7 +863,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: def _make_gradient_squared_numba_3d( grid: CartesianGrid, central: bool = True ) -> OperatorType: - """make a 3d squared gradient operator using numba compilation + """Make a 3d squared gradient operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -889,7 +888,7 @@ def _make_gradient_squared_numba_3d( @jit(parallel=parallel) def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): for k in range(1, dim_z + 1): @@ -904,7 +903,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @jit(parallel=parallel) def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): for k in range(1, dim_z + 1): @@ -927,7 +926,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @CartesianGrid.register_operator("gradient_squared", rank_in=0, rank_out=0) def make_gradient_squared(grid: CartesianGrid, *, central: bool = True) -> OperatorType: - """make a gradient operator on a Cartesian grid + """Make a squared gradient operator on a Cartesian grid.
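The `central` flag above matters because the purely central variant is blind to grid-scale oscillations, which the averaged one-sided variant detects; a small demonstration with unit spacing:

```python
import numpy as np

arr = np.array([0.0, 1.0, 0.0, 1.0, 0.0])  # checkerboard mode, dx = 1
central = ((arr[2:] - arr[:-2]) / 2.0) ** 2
one_sided = 0.5 * ((arr[2:] - arr[1:-1]) ** 2 + (arr[1:-1] - arr[:-2]) ** 2)
print(central)    # [0. 0. 0.]: misses the oscillation entirely
print(one_sided)  # [1. 1. 1.]: resolves it
```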
Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -960,7 +959,7 @@ def make_gradient_squared(grid: CartesianGrid, *, central: bool = True) -> Opera def _make_divergence_scipy_nd( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a divergence operator using the scipy module + """Make a divergence operator using the scipy module. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -986,7 +985,7 @@ def _make_divergence_scipy_nd( raise ValueError(f"Unknown derivative type `{method}`") def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply divergence operator to array `arr`""" + """Apply divergence operator to array `arr`""" assert arr.shape[0] == len(data_shape) and arr.shape[1:] == data_shape # need to initialize with zeros since data is added later @@ -1007,7 +1006,7 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None: def _make_divergence_numba_1d( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a 1d divergence operator using numba compilation + """Make a 1d divergence operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -1026,7 +1025,7 @@ def _make_divergence_numba_1d( @jit def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply divergence operator to array `arr`""" for i in range(1, dim_x + 1): if method == "central": out[i - 1] = (arr[0, i + 1] - arr[0, i - 1]) / (2 * dx) @@ -1041,7 +1040,7 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None: def _make_divergence_numba_2d( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a 2d divergence operator using numba compilation + """Make a 2d divergence operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -1066,7 +1065,7 @@ def _make_divergence_numba_2d( @jit(parallel=parallel) def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply divergence operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): if method == "central": @@ -1086,7 +1085,7 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None: def _make_divergence_numba_3d( grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a 3d divergence operator using numba compilation + """Make a 3d divergence operator using numba compilation. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -1111,7 +1110,7 @@ def _make_divergence_numba_3d( @jit(parallel=parallel) def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply divergence operator to array `arr`""" for i in nb.prange(1, dim_x + 1): for j in range(1, dim_y + 1): for k in range(1, dim_z + 1): @@ -1139,7 +1138,7 @@ def make_divergence( backend: Literal["auto", "numba", "scipy"] = "auto", method: Literal["central", "forward", "backward"] = "central", ) -> OperatorType: - """make a divergence operator on a Cartesian grid + """Make a divergence operator on a Cartesian grid.
Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -1191,7 +1190,7 @@ def _vectorize_operator( backend: Literal["auto", "numba", "scipy"] = "numba", **kwargs, ) -> OperatorType: - """apply an operator to on all dimensions of a vector + """Apply an operator to all dimensions of a vector. Args: make_operator (callable): @@ -1208,7 +1207,7 @@ def _vectorize_operator( operator = make_operator(grid, backend=backend, **kwargs) def vectorized_operator(arr: np.ndarray, out: np.ndarray) -> None: - """apply vector gradient operator to array `arr`""" + """Apply the vectorized operator to array `arr`""" for i in range(dim): operator(arr[i], out[i]) @@ -1225,7 +1224,7 @@ def make_vector_gradient( grid: CartesianGrid, *, backend: Literal["auto", "numba", "scipy"] = "numba", method: Literal["central", "forward", "backward"] = "central", ) -> OperatorType: - """make a vector gradient operator on a Cartesian grid + """Make a vector gradient operator on a Cartesian grid. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -1246,7 +1245,7 @@ def make_vector_gradient( def make_vector_laplace( grid: CartesianGrid, *, backend: Literal["auto", "numba", "scipy"] = "numba" ) -> OperatorType: - """make a vector Laplacian on a Cartesian grid + """Make a vector Laplacian on a Cartesian grid. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -1267,7 +1266,7 @@ def make_tensor_divergence( grid: CartesianGrid, *, backend: Literal["auto", "numba", "scipy"] = "numba", method: Literal["central", "forward", "backward"] = "central", ) -> OperatorType: - """make a tensor divergence operator on a Cartesian grid + """Make a tensor divergence operator on a Cartesian grid. Args: grid (:class:`~pde.grids.cartesian.CartesianGrid`): @@ -1288,7 +1287,7 @@ def make_tensor_divergence( def make_poisson_solver( bcs: Boundaries, *, method: Literal["auto", "scipy"] = "auto" ) -> OperatorType: - """make a operator that solves Poisson's equation + """Make an operator that solves Poisson's equation. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): diff --git a/pde/grids/operators/common.py index b2ba3bec..7b180a27 100644 --- a/pde/grids/operators/common.py +++ b/pde/grids/operators/common.py @@ -1,5 +1,4 @@ -""" -Common functions that are used by many operators +"""Common functions that are used by many operators. .. codeauthor:: David Zwicker """ @@ -24,7 +23,7 @@ def make_derivative( axis: int = 0, method: Literal["central", "forward", "backward"] = "central", ) -> OperatorType: - """make a derivative operator along a single axis using numba compilation + """Make a derivative operator along a single axis using numba compilation.
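`_vectorize_operator` above lifts a scalar operator to vector fields simply by looping over components; a stripped-down sketch of that strategy (the doubling `op` is an illustrative stand-in for a real operator):

```python
import numpy as np

def vectorize(op, dim):
    """Apply the scalar operator `op` to each of the `dim` components."""
    def vectorized(arr, out):
        for i in range(dim):  # arr has shape (dim, ...) for a vector field
            op(arr[i], out[i])
    return vectorized

op = lambda a, o: np.copyto(o, 2 * a)  # stand-in scalar operator
arr, out = np.ones((3, 5)), np.empty((3, 5))
vectorize(op, 3)(arr, out)
assert np.all(out == 2)
```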
Args: grid (:class:`~pde.grids.base.GridBase`): @@ -60,7 +59,7 @@ def make_derivative( @jit def diff(arr: np.ndarray, out: np.ndarray) -> None: - """calculate derivative of 1d array `arr`""" + """Calculate derivative of 1d array `arr`""" for i in range(1, shape[0] + 1): if method == "central": out[i - 1] = (arr[i + 1] - arr[i - 1]) / (2 * dx) @@ -73,7 +72,7 @@ def diff(arr: np.ndarray, out: np.ndarray) -> None: @jit def diff(arr: np.ndarray, out: np.ndarray) -> None: - """calculate derivative of 2d array `arr`""" + """Calculate derivative of 2d array `arr`""" for i in range(1, shape[0] + 1): for j in range(1, shape[1] + 1): arr_l = arr[i - di, j - dj] @@ -89,7 +88,7 @@ def diff(arr: np.ndarray, out: np.ndarray) -> None: @jit def diff(arr: np.ndarray, out: np.ndarray) -> None: - """calculate derivative of 3d array `arr`""" + """Calculate derivative of 3d array `arr`""" for i in range(1, shape[0] + 1): for j in range(1, shape[1] + 1): for k in range(1, shape[2] + 1): @@ -111,7 +110,7 @@ def diff(arr: np.ndarray, out: np.ndarray) -> None: def make_derivative2(grid: GridBase, axis: int = 0) -> OperatorType: - """make a second-order derivative operator along a single axis + """Make a second-order derivative operator along a single axis. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -141,7 +140,7 @@ def make_derivative2(grid: GridBase, axis: int = 0) -> OperatorType: @jit def diff(arr: np.ndarray, out: np.ndarray) -> None: - """calculate derivative of 1d array `arr`""" + """Calculate derivative of 1d array `arr`""" for i in range(1, shape[0] + 1): out[i - 1] = (arr[i + 1] - 2 * arr[i] + arr[i - 1]) * scale @@ -149,7 +148,7 @@ def diff(arr: np.ndarray, out: np.ndarray) -> None: @jit def diff(arr: np.ndarray, out: np.ndarray) -> None: - """calculate derivative of 2d array `arr`""" + """Calculate derivative of 2d array `arr`""" for i in range(1, shape[0] + 1): for j in range(1, shape[1] + 1): arr_l = arr[i - di, j - dj] @@ -160,7 +159,7 @@ def diff(arr: np.ndarray, out: np.ndarray) -> None: @jit def diff(arr: np.ndarray, out: np.ndarray) -> None: - """calculate derivative of 3d array `arr`""" + """Calculate derivative of 3d array `arr`""" for i in range(1, shape[0] + 1): for j in range(1, shape[1] + 1): for k in range(1, shape[2] + 1): @@ -179,7 +178,7 @@ def diff(arr: np.ndarray, out: np.ndarray) -> None: def uniform_discretization(grid: GridBase) -> float: - """returns the uniform discretization or raises RuntimeError + """Returns the uniform discretization or raises RuntimeError. Args: grid (:class:`~pde.grids.base.GridBase`): @@ -201,7 +200,7 @@ def uniform_discretization(grid: GridBase) -> float: def make_laplace_from_matrix( matrix, vector ) -> Callable[[np.ndarray, np.ndarray | None], np.ndarray]: - """make a Laplace operator using matrix vector products + """Make a Laplace operator using matrix vector products. Args: matrix: @@ -217,7 +216,7 @@ def make_laplace_from_matrix( vec = vector.toarray()[:, 0] def laplace(arr: np.ndarray, out: np.ndarray | None = None) -> np.ndarray: - """apply the laplace operator to `arr`""" + """Apply the laplace operator to `arr`""" result = mat.dot(arr.flat) + vec if out is None: out = result.reshape(arr.shape) @@ -231,7 +230,7 @@ def laplace(arr: np.ndarray, out: np.ndarray | None = None) -> np.ndarray: def make_general_poisson_solver( matrix, vector, method: Literal["auto", "scipy"] = "auto" ) -> OperatorType: - """make an operator that solves Poisson's problem + """Make an operator that solves Poisson's problem. 
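`make_laplace_from_matrix` above reduces the operator to a sparse matrix-vector product plus a boundary vector; a minimal sketch for a 1d grid, assuming dx = 1 and homogeneous Dirichlet conditions:

```python
import numpy as np
from scipy.sparse import diags

n = 5
matrix = diags([1, -2, 1], [-1, 0, 1], shape=(n, n)).tocsc()
vector = np.zeros(n)  # boundary contributions would enter here

def laplace(arr):
    return matrix.dot(arr.ravel()) + vector

print(laplace(np.arange(n, dtype=float)))  # interior entries vanish for linear data
```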
Args: matrix: @@ -261,7 +260,7 @@ def make_general_poisson_solver( vec = vector.toarray()[:, 0] def solve_poisson(arr: np.ndarray, out: np.ndarray) -> None: - """solves Poisson's equation using sparse linear algebra""" + """Solves Poisson's equation using sparse linear algebra.""" # prepare the right hand side vector rhs = np.ravel(arr) - vec diff --git a/pde/grids/operators/cylindrical_sym.py index 80ba3479..0594fe80 100644 --- a/pde/grids/operators/cylindrical_sym.py +++ b/pde/grids/operators/cylindrical_sym.py @@ -1,5 +1,4 @@ -r""" -This module implements differential operators on cylindrical grids +r"""This module implements differential operators on cylindrical grids. .. autosummary:: :nosignatures: @@ -32,7 +31,7 @@ def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: - """get sparse matrix for Laplace operator on a cylindrical grid + """Get sparse matrix for Laplace operator on a cylindrical grid. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -55,7 +54,7 @@ def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: factor_r = 1 / (2 * grid.axes_coords[0] * grid.discretization[0]) def i(r, z): - """helper function for flattening the inder + """Helper function for flattening the index. This is equivalent to np.ravel_multi_inder((r, z), (dim_r, dim_z)) """ @@ -106,7 +105,7 @@ def i(r, z): @CylindricalSymGrid.register_operator("laplace", rank_in=0, rank_out=0) @fill_in_docstring def make_laplace(grid: CylindricalSymGrid) -> OperatorType: - """make a discretized laplace operator for a cylindrical grid + """Make a discretized laplace operator for a cylindrical grid. {DESCR_CYLINDRICAL_GRID} @@ -128,7 +127,7 @@ def make_laplace(grid: CylindricalSymGrid) -> OperatorType: @jit(parallel=parallel) def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply laplace operator to array `arr`""" + """Apply laplace operator to array `arr`""" for i in nb.prange(1, dim_r + 1): # iterate radial points for j in range(1, dim_z + 1): # iterate axial points arr_z_l, arr_z_h = arr[i, j - 1], arr[i, j + 1] @@ -145,7 +144,7 @@ def laplace(arr: np.ndarray, out: np.ndarray) -> None: @CylindricalSymGrid.register_operator("gradient", rank_in=0, rank_out=1) @fill_in_docstring def make_gradient(grid: CylindricalSymGrid) -> OperatorType: - """make a discretized gradient operator for a cylindrical grid + """Make a discretized gradient operator for a cylindrical grid. {DESCR_CYLINDRICAL_GRID} @@ -165,7 +164,7 @@ def make_gradient(grid: CylindricalSymGrid) -> OperatorType: @jit(parallel=parallel) def gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in nb.prange(1, dim_r + 1): # iterate radial points for j in range(1, dim_z + 1): # iterate axial points out[0, i - 1, j - 1] = (arr[i + 1, j] - arr[i - 1, j]) * scale_r @@ -180,7 +179,7 @@ def gradient(arr: np.ndarray, out: np.ndarray) -> None: def make_gradient_squared( grid: CylindricalSymGrid, central: bool = True ) -> OperatorType: - """make a discretized gradient squared operator for a cylindrical grid + """Make a discretized gradient squared operator for a cylindrical grid.
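`make_general_poisson_solver` above inverts the same sparse system instead of applying it; a compact sketch solving the Poisson problem u'' = -1 on the unit interval with implicit Dirichlet boundaries (analytic solution u = x(1 - x)/2):

```python
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve

n = 50
dx = 1 / (n + 1)
matrix = diags([1, -2, 1], [-1, 0, 1], shape=(n, n)).tocsc() / dx**2
rhs = np.full(n, -1.0)    # constant source term
u = spsolve(matrix, rhs)  # u = 0 at both ends is built into the matrix
print(u.max())            # approx. 0.125, the maximum of x * (1 - x) / 2
```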
{DESCR_CYLINDRICAL_GRID} @@ -206,7 +205,7 @@ def make_gradient_squared( @jit(parallel=parallel) def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in nb.prange(1, dim_r + 1): # iterate radial points for j in range(1, dim_z + 1): # iterate axial points term_r = (arr[i + 1, j] - arr[i - 1, j]) ** 2 @@ -219,7 +218,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @jit(parallel=parallel) def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in nb.prange(1, dim_r + 1): # iterate radial points for j in range(1, dim_z + 1): # iterate axial points arr_z_l, arr_c, arr_z_h = arr[i, j - 1], arr[i, j], arr[i, j + 1] @@ -233,7 +232,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @CylindricalSymGrid.register_operator("divergence", rank_in=1, rank_out=0) @fill_in_docstring def make_divergence(grid: CylindricalSymGrid) -> OperatorType: - """make a discretized divergence operator for a cylindrical grid + """Make a discretized divergence operator for a cylindrical grid. {DESCR_CYLINDRICAL_GRID} @@ -254,7 +253,7 @@ def make_divergence(grid: CylindricalSymGrid) -> OperatorType: @jit(parallel=parallel) def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply divergence operator to array `arr`""" + """Apply divergence operator to array `arr`""" arr_r, arr_z = arr[0], arr[1] for i in nb.prange(1, dim_r + 1): # iterate radial points @@ -271,7 +270,7 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None: @CylindricalSymGrid.register_operator("vector_gradient", rank_in=1, rank_out=2) @fill_in_docstring def make_vector_gradient(grid: CylindricalSymGrid) -> OperatorType: - """make a discretized vector gradient operator for a cylindrical grid + """Make a discretized vector gradient operator for a cylindrical grid. {DESCR_CYLINDRICAL_GRID} @@ -292,7 +291,7 @@ def make_vector_gradient(grid: CylindricalSymGrid) -> OperatorType: @jit(parallel=parallel) def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" # assign aliases arr_r, arr_z, arr_φ = arr out_rr, out_rz, out_rφ = out[0, 0], out[0, 1], out[0, 2] @@ -319,7 +318,7 @@ def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None: @CylindricalSymGrid.register_operator("vector_laplace", rank_in=1, rank_out=1) @fill_in_docstring def make_vector_laplace(grid: CylindricalSymGrid) -> OperatorType: - """make a discretized vector laplace operator for a cylindrical grid + """Make a discretized vector laplace operator for a cylindrical grid. 
{DESCR_CYLINDRICAL_GRID} @@ -343,7 +342,7 @@ def make_vector_laplace(grid: CylindricalSymGrid) -> OperatorType: @jit(parallel=parallel) def vector_laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply vector laplace operator to array `arr`""" + """Apply vector laplace operator to array `arr`""" # assign aliases arr_r, arr_z, arr_φ = arr out_r, out_z, out_φ = out @@ -379,7 +378,7 @@ def vector_laplace(arr: np.ndarray, out: np.ndarray) -> None: @CylindricalSymGrid.register_operator("tensor_divergence", rank_in=2, rank_out=1) @fill_in_docstring def make_tensor_divergence(grid: CylindricalSymGrid) -> OperatorType: - """make a discretized tensor divergence operator for a cylindrical grid + """Make a discretized tensor divergence operator for a cylindrical grid. {DESCR_CYLINDRICAL_GRID} @@ -400,7 +399,7 @@ def make_tensor_divergence(grid: CylindricalSymGrid) -> OperatorType: @jit(parallel=parallel) def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply tensor divergence operator to array `arr`""" + """Apply tensor divergence operator to array `arr`""" # assign aliases arr_rr, arr_rz, arr_rφ = arr[0, 0], arr[0, 1], arr[0, 2] arr_zr, arr_zz, _ = arr[1, 0], arr[1, 1], arr[1, 2] @@ -435,7 +434,7 @@ def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: def make_poisson_solver( bcs: Boundaries, *, method: Literal["auto", "scipy"] = "auto" ) -> OperatorType: - """make a operator that solves Poisson's equation + """Make an operator that solves Poisson's equation. {DESCR_CYLINDRICAL_GRID} diff --git a/pde/grids/operators/polar_sym.py index 1d250e16..1a25ee15 100644 --- a/pde/grids/operators/polar_sym.py +++ b/pde/grids/operators/polar_sym.py @@ -1,5 +1,4 @@ -r""" -This module implements differential operators on polar grids +r"""This module implements differential operators on polar grids. .. autosummary:: :nosignatures: @@ -31,7 +30,7 @@ @PolarSymGrid.register_operator("laplace", rank_in=0, rank_out=0) @fill_in_docstring def make_laplace(grid: PolarSymGrid) -> OperatorType: - """make a discretized laplace operator for a polar grid + """Make a discretized laplace operator for a polar grid. {DESCR_POLAR_GRID} @@ -52,7 +51,7 @@ def make_laplace(grid: PolarSymGrid) -> OperatorType: @jit def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply laplace operator to array `arr`""" + """Apply laplace operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points out[i - 1] = (arr[i + 1] - 2 * arr[i] + arr[i - 1]) * dr_2 out[i - 1] += (arr[i + 1] - arr[i - 1]) * factor_r[i - 1] @@ -65,7 +64,7 @@ def laplace(arr: np.ndarray, out: np.ndarray) -> None: def make_gradient( grid: PolarSymGrid, *, method: Literal["central", "forward", "backward"] = "central" ) -> OperatorType: - """make a discretized gradient operator for a polar grid + """Make a discretized gradient operator for a polar grid.
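For radially symmetric fields, the polar Laplacian above reduces to f'' + f'/r; a numpy check with f(r) = r^2, whose 2d Laplacian is exactly 4 (grid parameters are illustrative):

```python
import numpy as np

dr = 1e-3
r = np.arange(dr / 2, 1, dr)                         # cell-centred radii
rg = np.concatenate(([r[0] - dr], r, [r[-1] + dr]))  # one virtual point per side
f = rg**2
lap = (f[2:] - 2 * f[1:-1] + f[:-2]) / dr**2 + (f[2:] - f[:-2]) / (2 * dr * r)
assert np.allclose(lap, 4.0)
```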
{DESCR_POLAR_GRID} @@ -92,7 +91,7 @@ def make_gradient( @jit def gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points if method == "central": out[0, i - 1] = (arr[i + 1] - arr[i - 1]) * scale_r @@ -108,7 +107,7 @@ def gradient(arr: np.ndarray, out: np.ndarray) -> None: @PolarSymGrid.register_operator("gradient_squared", rank_in=0, rank_out=0) @fill_in_docstring def make_gradient_squared(grid: PolarSymGrid, *, central: bool = True) -> OperatorType: - """make a discretized gradient squared operator for a polar grid + """Make a discretized gradient squared operator for a polar grid. {DESCR_POLAR_GRID} @@ -136,7 +135,7 @@ def make_gradient_squared(grid: PolarSymGrid, *, central: bool = True) -> Operat @jit def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points out[i - 1] = (arr[i + 1] - arr[i - 1]) ** 2 * scale @@ -146,7 +145,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @jit def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points term = (arr[i + 1] - arr[i]) ** 2 + (arr[i] - arr[i - 1]) ** 2 out[i - 1] = term * scale @@ -157,7 +156,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @PolarSymGrid.register_operator("divergence", rank_in=1, rank_out=0) @fill_in_docstring def make_divergence(grid: PolarSymGrid) -> OperatorType: - """make a discretized divergence operator for a polar grid + """Make a discretized divergence operator for a polar grid. {DESCR_POLAR_GRID} @@ -178,7 +177,7 @@ def make_divergence(grid: PolarSymGrid) -> OperatorType: @jit def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply divergence operator to array `arr`""" + """Apply divergence operator to array `arr`""" # inner radial boundary condition for i in range(1, dim_r + 1): # iterate radial points out[i - 1] = (arr[0, i + 1] - arr[0, i - 1]) * scale_r @@ -190,7 +189,7 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None: @PolarSymGrid.register_operator("vector_gradient", rank_in=1, rank_out=2) @fill_in_docstring def make_vector_gradient(grid: PolarSymGrid) -> OperatorType: - """make a discretized vector gradient operator for a polar grid + """Make a discretized vector gradient operator for a polar grid. {DESCR_POLAR_GRID} @@ -211,7 +210,7 @@ def make_vector_gradient(grid: PolarSymGrid) -> OperatorType: @jit def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply vector gradient operator to array `arr`""" + """Apply vector gradient operator to array `arr`""" # assign aliases arr_r, arr_φ = arr out_rr, out_rφ = out[0, 0, :], out[0, 1, :] @@ -229,7 +228,7 @@ def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None: @PolarSymGrid.register_operator("tensor_divergence", rank_in=2, rank_out=1) @fill_in_docstring def make_tensor_divergence(grid: PolarSymGrid) -> OperatorType: - """make a discretized tensor divergence operator for a polar grid + """Make a discretized tensor divergence operator for a polar grid. 
{DESCR_POLAR_GRID} @@ -250,7 +249,7 @@ def make_tensor_divergence(grid: PolarSymGrid) -> OperatorType: @jit def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply tensor divergence operator to array `arr`""" + """Apply tensor divergence operator to array `arr`""" # assign aliases arr_rr, arr_rφ = arr[0, 0, :], arr[0, 1, :] arr_φr, arr_φφ = arr[1, 0, :], arr[1, 1, :] @@ -268,7 +267,7 @@ def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: @fill_in_docstring def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: - """get sparse matrix for laplace operator on a polar grid + """Get sparse matrix for laplace operator on a polar grid. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -329,7 +328,7 @@ def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: def make_poisson_solver( bcs: Boundaries, *, method: Literal["auto", "scipy"] = "auto" ) -> OperatorType: - """make a operator that solves Poisson's equation + """Make an operator that solves Poisson's equation. {DESCR_POLAR_GRID} diff --git a/pde/grids/operators/spherical_sym.py index b718958a..f5f10b1b 100644 --- a/pde/grids/operators/spherical_sym.py +++ b/pde/grids/operators/spherical_sym.py @@ -1,5 +1,4 @@ -r""" -This module implements differential operators on spherical grids +r"""This module implements differential operators on spherical grids. .. autosummary:: :nosignatures: @@ -31,7 +30,7 @@ @SphericalSymGrid.register_operator("laplace", rank_in=0, rank_out=0) @fill_in_docstring def make_laplace(grid: SphericalSymGrid, *, conservative: bool = True) -> OperatorType: - """make a discretized laplace operator for a spherical grid + """Make a discretized laplace operator for a spherical grid. {DESCR_SPHERICAL_GRID} @@ -65,7 +64,7 @@ def make_laplace(grid: SphericalSymGrid, *, conservative: bool = True) -> Operat @jit def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply laplace operator to array `arr`""" + """Apply laplace operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points term_h = factor_h[i - 1] * (arr[i + 1] - arr[i]) term_l = factor_l[i - 1] * (arr[i] - arr[i - 1]) @@ -76,7 +75,7 @@ def laplace(arr: np.ndarray, out: np.ndarray) -> None: @jit def laplace(arr: np.ndarray, out: np.ndarray) -> None: - """apply laplace operator to array `arr`""" + """Apply laplace operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points diff_2 = (arr[i + 1] - 2 * arr[i] + arr[i - 1]) * dr2 diff_1 = (arr[i + 1] - arr[i - 1]) / (rs[i - 1] * dr) @@ -92,7 +91,7 @@ def make_gradient( *, method: Literal["central", "forward", "backward"] = "central", ) -> OperatorType: - """make a discretized gradient operator for a spherical grid + """Make a discretized gradient operator for a spherical grid. {DESCR_SPHERICAL_GRID} @@ -119,7 +118,7 @@ def make_gradient( @jit def gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply gradient operator to array `arr`""" + """Apply gradient operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points if method == "central": out[0, i - 1] = (arr[i + 1] - arr[i - 1]) * scale_r @@ -137,7 +136,7 @@ def gradient(arr: np.ndarray, out: np.ndarray) -> None: def make_gradient_squared( grid: SphericalSymGrid, *, central: bool = True ) -> OperatorType: - """make a discretized gradient squared operator for a spherical grid + """Make a discretized gradient squared operator for a spherical grid.
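The `conservative` flag above writes the spherical Laplacian as a difference of fluxes through shell surfaces, so the volume-weighted sum telescopes to the boundary fluxes and the total "mass" is conserved exactly; a sketch assuming no-flux boundaries:

```python
import numpy as np

dr = 0.1
r = np.arange(dr / 2, 1, dr)                  # cell-centred radii
arr = np.random.default_rng(0).normal(size=r.size)

area = 4 * np.pi * (r[:-1] + dr / 2) ** 2     # shared shell surfaces
volume = 4 / 3 * np.pi * ((r + dr / 2) ** 3 - (r - dr / 2) ** 3)
flux = np.zeros(r.size + 1)                   # zero flux at both boundaries
flux[1:-1] = area * (arr[1:] - arr[:-1]) / dr
laplace = (flux[1:] - flux[:-1]) / volume
print(laplace @ volume)                       # ~1e-15: conserved up to round-off
```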
{DESCR_SPHERICAL_GRID} @@ -165,7 +164,7 @@ def make_gradient_squared( @jit def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points out[i - 1] = (arr[i + 1] - arr[i - 1]) ** 2 * scale @@ -175,7 +174,7 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: @jit def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None: - """apply squared gradient operator to array `arr`""" + """Apply squared gradient operator to array `arr`""" for i in range(1, dim_r + 1): # iterate inner radial points term = (arr[i + 1] - arr[i]) ** 2 + (arr[i] - arr[i - 1]) ** 2 out[i - 1] = term * scale @@ -192,7 +191,7 @@ def make_divergence( conservative: bool = True, method: Literal["central", "forward", "backward"] = "central", ) -> OperatorType: - """make a discretized divergence operator for a spherical grid + """Make a discretized divergence operator for a spherical grid. {DESCR_SPHERICAL_GRID} @@ -234,7 +233,7 @@ def make_divergence( @jit def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply divergence operator to array `arr`""" + """Apply divergence operator to array `arr`""" if safe: # the θ-component of the vector field are required to be zero. If this # was not the case the scale field resulting from the divergence would @@ -261,7 +260,7 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None: @jit def divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply divergence operator to array `arr`""" + """Apply divergence operator to array `arr`""" if safe: # the θ-component of the vector field are required to be zero. If this # was not the case the scale field resulting from the divergence would @@ -290,7 +289,7 @@ def make_vector_gradient( method: Literal["central", "forward", "backward"] = "central", safe: bool = True, ) -> OperatorType: - """make a discretized vector gradient operator for a spherical grid + """Make a discretized vector gradient operator for a spherical grid. Warning: This operator ignores the two angular components of the field when calculating @@ -325,7 +324,7 @@ def make_vector_gradient( @jit def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None: - """apply vector gradient operator to array `arr`""" + """Apply vector gradient operator to array `arr`""" if safe: # the θ- and φ-components are required to be zero. If this was not the case # the tensor field resulting from the gradient would contain components that @@ -365,7 +364,7 @@ def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None: def make_tensor_divergence( grid: SphericalSymGrid, *, safe: bool = True, conservative: bool = False ) -> OperatorType: - """make a discretized tensor divergence operator for a spherical grid + """Make a discretized tensor divergence operator for a spherical grid. 
{DESCR_SPHERICAL_GRID} @@ -400,7 +399,7 @@ def make_tensor_divergence( @jit def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply tensor divergence operator to array `arr`""" + """Apply tensor divergence operator to array `arr`""" # assign aliases arr_rr, arr_rθ, arr_rφ = arr[0, 0, :], arr[0, 1, :], arr[0, 2, :] arr_θr, arr_θθ, arr_θφ = arr[1, 0, :], arr[1, 1, :], arr[1, 2, :] @@ -433,7 +432,7 @@ def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: @jit def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply tensor divergence operator to array `arr`""" + """Apply tensor divergence operator to array `arr`""" # assign aliases arr_rr, arr_rθ, arr_rφ = arr[0, 0, :], arr[0, 1, :], arr[0, 2, :] arr_θr, arr_θθ, arr_θφ = arr[1, 0, :], arr[1, 1, :], arr[1, 2, :] @@ -468,7 +467,7 @@ def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None: def make_tensor_double_divergence( grid: SphericalSymGrid, *, safe: bool = True, conservative: bool = True ) -> OperatorType: - """make a discretized tensor double divergence operator for a spherical grid + """Make a discretized tensor double divergence operator for a spherical grid. {DESCR_SPHERICAL_GRID} @@ -506,7 +505,7 @@ def make_tensor_double_divergence( @jit def tensor_double_divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply double divergence operator to tensor array `arr`""" + """Apply double divergence operator to tensor array `arr`""" # assign aliases arr_rr, arr_rθ, ______ = arr[0, 0, :], arr[0, 1, :], arr[0, 2, :] arr_θr, arr_θθ, ______ = arr[1, 0, :], arr[1, 1, :], arr[1, 2, :] @@ -545,7 +544,7 @@ def tensor_double_divergence(arr: np.ndarray, out: np.ndarray) -> None: @jit def tensor_double_divergence(arr: np.ndarray, out: np.ndarray) -> None: - """apply double divergence operator to tensor array `arr`""" + """Apply double divergence operator to tensor array `arr`""" # assign aliases arr_rr, arr_rθ, ______ = arr[0, 0, :], arr[0, 1, :], arr[0, 2, :] arr_θr, arr_θθ, ______ = arr[1, 0, :], arr[1, 1, :], arr[1, 2, :] @@ -578,7 +577,7 @@ def tensor_double_divergence(arr: np.ndarray, out: np.ndarray) -> None: @fill_in_docstring def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: - """get sparse matrix for laplace operator on a polar grid + """Get sparse matrix for laplace operator on a spherical grid. Args: bcs (:class:`~pde.grids.boundaries.axes.Boundaries`): @@ -643,7 +642,7 @@ def _get_laplace_matrix(bcs: Boundaries) -> tuple[np.ndarray, np.ndarray]: def make_poisson_solver( bcs: Boundaries, *, method: Literal["auto", "scipy"] = "auto" ) -> OperatorType: - """make a operator that solves Poisson's equation + """Make an operator that solves Poisson's equation. {DESCR_POLAR_GRID} diff --git a/pde/grids/spherical.py index adcae31b..f1a5fdb9 100644 --- a/pde/grids/spherical.py +++ b/pde/grids/spherical.py @@ -1,9 +1,8 @@ -""" -Spherically-symmetric grids in 2 and 3 dimensions. These are grids that only discretize -the radial direction, assuming symmetry with respect to all angles. This choice implies -that differential operators might not be applicable to all fields. For instance, the -divergence of a vector field on a spherical grid can only be represented as a scalar -field on the same grid if the θ-component of the vector field vanishes. +"""Spherically-symmetric grids in 2 and 3 dimensions. These are grids that only +discretize the radial direction, assuming symmetry with respect to all angles.
This +choice implies that differential operators might not be applicable to all fields. For +instance, the divergence of a vector field on a spherical grid can only be represented +as a scalar field on the same grid if the θ-component of the vector field vanishes. .. codeauthor:: David Zwicker """ @@ -29,7 +28,7 @@ def volume_from_radius(radius: TNumArr, dim: int) -> TNumArr: - """Return the volume of a sphere with a given radius + """Return the volume of a sphere with a given radius. Args: radius (float or :class:`~numpy.ndarray`): @@ -51,7 +50,7 @@ def volume_from_radius(radius: TNumArr, dim: int) -> TNumArr: class SphericalSymGridBase(GridBase, metaclass=ABCMeta): - r"""Base class for d-dimensional spherical grids with angular symmetry + r"""Base class for d-dimensional spherical grids with angular symmetry. The angular symmetry implies that states only depend on the radial coordinate :math:`r`, which is discretized uniformly as @@ -111,7 +110,7 @@ def state(self) -> dict[str, Any]: @classmethod def from_state(cls, state: dict[str, Any]) -> SphericalSymGridBase: # type: ignore - """create a field from a stored `state`. + """Create a field from a stored `state`. Args: state (dict): @@ -148,7 +147,7 @@ def from_bounds( # type: ignore @property def has_hole(self) -> bool: - """returns whether the inner radius is larger than zero""" + """Returns whether the inner radius is larger than zero.""" return self.axes_bounds[0][0] > 0 @property @@ -171,7 +170,7 @@ def volume(self) -> float: @cached_property() def cell_volume_data(self) -> tuple[np.ndarray]: - """tuple of :class:`~numpy.ndarray`: the volumes of all cells""" + """Tuple of :class:`~numpy.ndarray`: the volumes of all cells.""" dr = self.discretization[0] rs = self.axes_coords[0] volumes_h = volume_from_radius(rs + 0.5 * dr, dim=self.dim) @@ -186,7 +185,7 @@ def get_random_point( coords: CoordsType = "cartesian", rng: np.random.Generator | None = None, ) -> np.ndarray: - """return a random point within the grid + """Return a random point within the grid. Note that these points will be uniformly distributed in the volume, implying they are not uniformly distributed on the radial axis. @@ -243,7 +242,7 @@ def get_random_point( raise ValueError(f"Unknown coordinate system `{coords}`") def get_line_data(self, data: np.ndarray, extract: str = "auto") -> dict[str, Any]: - """return a line cut along the radial axis + """Return a line cut along the radial axis. Args: data (:class:`~numpy.ndarray`): @@ -274,7 +273,7 @@ def get_image_data( fill_value: float = 0, masked: bool = True, ) -> dict[str, Any]: - """return a 2d-image of the data + """Return a 2d-image of the data. Args: data (:class:`~numpy.ndarray`): @@ -348,7 +347,7 @@ def get_cartesian_grid( mode: Literal["valid", "inscribed", "full", "circumscribed"] = "valid", num: int | None = None, ) -> CartesianGrid: - """return a Cartesian grid for this spherical one + """Return a Cartesian grid for this spherical one. Args: mode (str): @@ -383,7 +382,7 @@ def get_cartesian_grid( @plot_on_axes() def plot(self, ax, **kwargs): - r"""visualize the spherically symmetric grid in two dimensions + r"""Visualize the spherically symmetric grid in two dimensions. Args: {PLOT_ARGS} @@ -416,7 +415,7 @@ def plot(self, ax, **kwargs): class PolarSymGrid(SphericalSymGridBase): - r"""2-dimensional polar grid assuming angular symmetry + r"""2-dimensional polar grid assuming angular symmetry. 
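`get_random_point` above draws points uniformly in the volume rather than along the radius; the standard trick is to transform a uniform variate as r proportional to u**(1/d), sketched here for d = 3:

```python
import numpy as np

rng = np.random.default_rng(0)
dim, r_max = 3, 2.0
r = r_max * rng.uniform(size=100_000) ** (1 / dim)
# uniform in the ball: the fraction within half the radius is (1/2)**dim
print(np.mean(r < r_max / 2))  # approx. 0.125
```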
The angular symmetry implies that states only depend on the radial coordinate :math:`r`, which is discretized uniformly as @@ -438,7 +437,7 @@ class PolarSymGrid(SphericalSymGridBase): class SphericalSymGrid(SphericalSymGridBase): - r"""3-dimensional spherical grid assuming spherical symmetry + r"""3-dimensional spherical grid assuming spherical symmetry. The symmetry implies that states only depend on the radial coordinate :math:`r`, which is discretized as follows: diff --git a/pde/pdes/__init__.py index fb669a00..c32ee655 100644 --- a/pde/pdes/__init__.py +++ b/pde/pdes/__init__.py @@ -1,5 +1,4 @@ -""" -Package that defines PDEs describing physical systems. +"""Package that defines PDEs describing physical systems. The examples in this package are often simple version of classical PDEs to demonstrate various aspects of the `py-pde` package. Clearly, not all extensions @@ -33,7 +32,7 @@ ~laplace.solve_poisson_equation -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from .allen_cahn import AllenCahnPDE diff --git a/pde/pdes/allen_cahn.py index 4908f764..2b4d0106 100644 --- a/pde/pdes/allen_cahn.py +++ b/pde/pdes/allen_cahn.py @@ -1,7 +1,6 @@ -""" -A Allen-Cahn equation +"""An Allen-Cahn equation. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -19,7 +18,7 @@ class AllenCahnPDE(PDEBase): - r"""A simple Allen-Cahn equation + r"""A simple Allen-Cahn equation. The mathematical definition is @@ -71,7 +70,7 @@ def evolution_rate( # type: ignore state: ScalarField, t: float = 0, ) -> ScalarField: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -92,7 +91,7 @@ def evolution_rate( # type: ignore def _make_pde_rhs_numba( # type: ignore self, state: ScalarField ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -113,7 +112,7 @@ def _make_pde_rhs_numba( # type: ignore @jit(signature) def pde_rhs(state_data: np.ndarray, t: float) -> np.ndarray: - """compiled helper function evaluating right hand side""" + """Compiled helper function evaluating right hand side.""" return mobility * ( # type: ignore interface_width * laplace(state_data, args={"t": t}) - state_data**3 diff --git a/pde/pdes/base.py index fe0b5b1e..b836c405 100644 --- a/pde/pdes/base.py +++ b/pde/pdes/base.py @@ -1,7 +1,6 @@ -""" -Base class for defining partial differential equations +"""Base class for defining partial differential equations. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -29,7 +28,7 @@ class PDEBase(metaclass=ABCMeta): - """base class for defining partial differential equations (PDEs) + """Base class for defining partial differential equations (PDEs) Custom PDEs can be implemented by subclassing :class:`PDEBase` to specify the evolution rate. In the simple case of deterministic PDEs, the methods @@ -112,7 +111,7 @@ def is_sde(self) -> bool: return hasattr(self, "noise") and np.any(self.noise != 0) # type: ignore def make_modify_after_step(self, state: FieldBase) -> Callable[[np.ndarray], float]: - """returns a function that can be called to modify a state + """Returns a function that can be called to modify a state.
This function is applied to the state after each integration step when an explicit stepper is used. The default behavior is to not change the state. @@ -128,14 +127,14 @@ def make_modify_after_step(self, state: FieldBase) -> Callable[[np.ndarray], flo """ def modify_after_step(state_data: np.ndarray) -> float: - """no-op function""" + """No-op function.""" return 0 return modify_after_step @abstractmethod def evolution_rate(self, state: TState, t: float = 0) -> TState: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -151,7 +150,7 @@ def evolution_rate(self, state: TState, t: float = 0) -> TState: def _make_pde_rhs_numba( self, state: FieldBase, **kwargs ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function for evaluating the right hand side""" + """Create a compiled function for evaluating the right hand side.""" raise NotImplementedError("No backend `numba`") def check_rhs_consistency( @@ -163,7 +162,7 @@ def check_rhs_consistency( rhs_numba: Callable | None = None, **kwargs, ) -> None: - """check the numba compiled right hand side versus the numpy variant + """Check the numba compiled right hand side versus the numpy variant. Args: state (:class:`~pde.fields.FieldBase`): @@ -208,7 +207,7 @@ def check_rhs_consistency( def _make_pde_rhs_numba_cached( self, state: TState, **kwargs ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function for evaluating the right hand side + """Create a compiled function for evaluating the right hand side. This method implements caching and checking of the actual method, which is defined by overwriting the method `_make_pde_rhs_numba`. @@ -255,7 +254,7 @@ def make_pde_rhs( backend: Literal["auto", "numpy", "numba"] = "auto", **kwargs, ) -> Callable[[np.ndarray, float], np.ndarray]: - """return a function for evaluating the right hand side of the PDE + """Return a function for evaluating the right hand side of the PDE. Args: state (:class:`~pde.fields.FieldBase`): @@ -282,7 +281,7 @@ def make_pde_rhs( state = state.copy() # save this exact state for the closure def evolution_rate_numpy(state_data: np.ndarray, t: float) -> np.ndarray: - """evaluate the rhs given only a state without the grid""" + """Evaluate the rhs given only a state without the grid.""" state.data = state_data return self.evolution_rate(state, t, **kwargs).data @@ -305,7 +304,7 @@ def evolution_rate_numpy(state_data: np.ndarray, t: float) -> np.ndarray: def noise_realization( self, state: TState, t: float = 0, *, label: str = "Noise realization" ) -> TState: - """returns a realization for the noise + """Returns a realization for the noise. Args: state (:class:`~pde.fields.ScalarField`): @@ -363,7 +362,7 @@ def noise_realization( def _make_noise_realization_numba( self, state: TState, **kwargs ) -> Callable[[np.ndarray, float], np.ndarray]: - """return a function for evaluating the noise term of the PDE + """Return a function for evaluating the noise term of the PDE. 
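The numpy backend of `make_pde_rhs` above works by closing over a private copy of the state and swapping its data in and out; a toy sketch of the same pattern (the `Field` class is a hypothetical stand-in for py-pde's fields):

```python
import numpy as np

class Field:
    """Hypothetical stand-in for a py-pde field."""
    def __init__(self, data):
        self.data = np.asarray(data, dtype=float)

def make_pde_rhs(evolution_rate, state):
    state = Field(state.data.copy())  # private copy for the closure
    def rhs(state_data, t):
        state.data = state_data
        return evolution_rate(state, t).data
    return rhs

rate = lambda s, t: Field(-s.data)  # toy exponential-decay "PDE"
rhs = make_pde_rhs(rate, Field([1.0, 2.0]))
print(rhs(np.array([1.0, 2.0]), 0.0))  # [-1. -2.]
```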
Args: state (:class:`~pde.fields.FieldBase`): @@ -390,7 +389,7 @@ def _make_noise_realization_numba( @jit def noise_realization(state_data: np.ndarray, t: float) -> np.ndarray: - """helper function returning a noise realization""" + """Helper function returning a noise realization.""" out = np.empty(data_shape) for n in range(len(state_data)): if noises_var[n] == 0: @@ -407,7 +406,7 @@ def noise_realization(state_data: np.ndarray, t: float) -> np.ndarray: @jit def noise_realization(state_data: np.ndarray, t: float) -> np.ndarray: - """helper function returning a noise realization""" + """Helper function returning a noise realization.""" out = np.empty(state_data.shape) for i in range(state_data.size): scale = noise_var / cell_volume(i) @@ -418,7 +417,7 @@ def noise_realization(state_data: np.ndarray, t: float) -> np.ndarray: @jit def noise_realization(state_data: np.ndarray, t: float) -> None: - """helper function returning a noise realization""" + """Helper function returning a noise realization.""" return None return noise_realization # type: ignore @@ -426,7 +425,7 @@ def noise_realization(state_data: np.ndarray, t: float) -> None: def _make_sde_rhs_numba( self, state: TState, **kwargs ) -> Callable[[np.ndarray, float], tuple[np.ndarray, np.ndarray]]: - """return a function for evaluating the noise term of the PDE + """Return a function for evaluating the noise term of the PDE. Args: state (:class:`~pde.fields.FieldBase`): @@ -441,7 +440,7 @@ def _make_sde_rhs_numba( @jit def sde_rhs(state_data: np.ndarray, t: float) -> tuple[np.ndarray, np.ndarray]: - """compiled helper function returning a noise realization""" + """Compiled helper function returning a noise realization.""" return (evolution_rate(state_data, t), noise_realization(state_data, t)) return sde_rhs # type: ignore @@ -449,7 +448,7 @@ def sde_rhs(state_data: np.ndarray, t: float) -> tuple[np.ndarray, np.ndarray]: def _make_sde_rhs_numba_cached( self, state: TState, **kwargs ) -> Callable[[np.ndarray, float], tuple[np.ndarray, np.ndarray]]: - """create a compiled function for evaluating the noise term of the PDE + """Create a compiled function for evaluating the noise term of the PDE. Args: state (:class:`~pde.fields.FieldBase`): @@ -483,7 +482,7 @@ def make_sde_rhs( backend: Literal["auto", "numpy", "numba"] = "auto", **kwargs, ) -> Callable[[np.ndarray, float], tuple[np.ndarray, np.ndarray]]: - """return a function for evaluating the right hand side of the SDE + """Return a function for evaluating the right hand side of the SDE. Args: state (:class:`~pde.fields.FieldBase`): @@ -515,7 +514,7 @@ def make_sde_rhs( def sde_rhs( state_data: np.ndarray, t: float ) -> tuple[np.ndarray, np.ndarray]: - """evaluate the rhs given only a state without the grid""" + """Evaluate the rhs given only a state without the grid.""" state.data = state_data return ( self.evolution_rate(state, t, **kwargs).data, @@ -540,7 +539,7 @@ def solve( ret_info: bool = False, **kwargs, ) -> None | TState | tuple[TState | None, dict[str, Any]]: - """solves the partial differential equation + """Solves the partial differential equation. The method constructs a suitable solver (:class:`~pde.solvers.base.SolverBase`) and controller (:class:`~pde.controller.Controller`) to advance the state over @@ -636,7 +635,7 @@ def solve( def expr_prod(factor: float, expression: str) -> str: - """helper function for building an expression with an (optional) pre-factor + """Helper function for building an expression with an (optional) pre-factor. 
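The `cell_volume` scaling in the compiled noise above keeps the noise statistics independent of the discretization: grid white noise is scaled by the inverse square root of the cell volume. A quick check of the resulting variance (the numbers are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
noise_var, cell_volume = 0.5, 0.1**2  # e.g. 2d cells of side 0.1
noise = np.sqrt(noise_var / cell_volume) * rng.normal(size=100_000)
print(noise.var())  # approx. noise_var / cell_volume = 50
```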
Args: factor (float): The value of the prefactor diff --git a/pde/pdes/cahn_hilliard.py b/pde/pdes/cahn_hilliard.py index f45b4b5b..8fd870a9 100644 --- a/pde/pdes/cahn_hilliard.py +++ b/pde/pdes/cahn_hilliard.py @@ -1,7 +1,6 @@ -""" -A Cahn-Hilliard equation +"""A Cahn-Hilliard equation. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -19,7 +18,7 @@ class CahnHilliardPDE(PDEBase): - r"""A simple Cahn-Hilliard equation + r"""A simple Cahn-Hilliard equation. The mathematical definition is @@ -68,7 +67,7 @@ def evolution_rate( # type: ignore state: ScalarField, t: float = 0, ) -> ScalarField: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -88,7 +87,7 @@ def evolution_rate( # type: ignore def _make_pde_rhs_numba( # type: ignore self, state: ScalarField ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -108,7 +107,7 @@ def _make_pde_rhs_numba( # type: ignore @jit(signature) def pde_rhs(state_data: np.ndarray, t: float): - """compiled helper function evaluating right hand side""" + """Compiled helper function evaluating right hand side.""" mu = ( state_data**3 - state_data diff --git a/pde/pdes/diffusion.py b/pde/pdes/diffusion.py index 795ca372..a3032448 100644 --- a/pde/pdes/diffusion.py +++ b/pde/pdes/diffusion.py @@ -1,7 +1,6 @@ -""" -A simple diffusion equation +"""A simple diffusion equation. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -19,7 +18,7 @@ class DiffusionPDE(PDEBase): - r"""A simple diffusion equation + r"""A simple diffusion equation. The mathematical definition is @@ -72,7 +71,7 @@ def evolution_rate( # type: ignore state: ScalarField, t: float = 0, ) -> ScalarField: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -92,7 +91,7 @@ def evolution_rate( # type: ignore def _make_pde_rhs_numba( # type: ignore self, state: ScalarField ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -112,7 +111,7 @@ def _make_pde_rhs_numba( # type: ignore @jit(signature) def pde_rhs(state_data: np.ndarray, t: float): - """compiled helper function evaluating right hand side""" + """Compiled helper function evaluating right hand side.""" return diffusivity_value * laplace(state_data, args={"t": t}) return pde_rhs # type: ignore diff --git a/pde/pdes/kpz_interface.py b/pde/pdes/kpz_interface.py index 89b5f77d..6d1197c2 100644 --- a/pde/pdes/kpz_interface.py +++ b/pde/pdes/kpz_interface.py @@ -1,7 +1,6 @@ -""" -The Kardar–Parisi–Zhang (KPZ) equation describing the evolution of an interface +"""The Kardar–Parisi–Zhang (KPZ) equation describing the evolution of an interface. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -19,7 +18,7 @@ class KPZInterfacePDE(PDEBase): - r"""The Kardar–Parisi–Zhang (KPZ) equation + r"""The Kardar–Parisi–Zhang (KPZ) equation. 
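For orientation, the Cahn-Hilliard right-hand side compiled above amounts to two nested Laplacians; a plain-numpy sketch on a periodic 1d grid, with generic parameter names standing in for the class attributes:

```python
import numpy as np

def laplace(a, dx):
    """Periodic three-point Laplacian standing in for the grid operator."""
    return (np.roll(a, -1) - 2 * a + np.roll(a, 1)) / dx**2

def cahn_hilliard_rhs(c, dx, kappa=1.0, mobility=1.0):
    mu = c**3 - c - kappa * laplace(c, dx)  # chemical potential
    return mobility * laplace(mu, dx)
```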
The mathematical definition is @@ -80,7 +79,7 @@ def evolution_rate( # type: ignore state: ScalarField, t: float = 0, ) -> ScalarField: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -102,7 +101,7 @@ def evolution_rate( # type: ignore def _make_pde_rhs_numba( # type: ignore self, state: ScalarField ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -123,7 +122,7 @@ def _make_pde_rhs_numba( # type: ignore @jit(signature) def pde_rhs(state_data: np.ndarray, t: float): - """compiled helper function evaluating right hand side""" + """Compiled helper function evaluating right hand side.""" result = nu_value * laplace(state_data, args={"t": t}) result += lambda_value * gradient_squared(state_data, args={"t": t}) return result diff --git a/pde/pdes/kuramoto_sivashinsky.py b/pde/pdes/kuramoto_sivashinsky.py index 859e3415..700411c6 100644 --- a/pde/pdes/kuramoto_sivashinsky.py +++ b/pde/pdes/kuramoto_sivashinsky.py @@ -1,7 +1,6 @@ -""" -The Kardar–Parisi–Zhang (KPZ) equation describing the evolution of an interface +"""The Kuramoto-Sivashinsky equation describing the evolution of an interface. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -19,7 +18,7 @@ class KuramotoSivashinskyPDE(PDEBase): - r"""The Kuramoto-Sivashinsky equation + r"""The Kuramoto-Sivashinsky equation. The mathematical definition is @@ -83,7 +82,7 @@ def evolution_rate( # type: ignore state: ScalarField, t: float = 0, ) -> ScalarField: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -109,7 +108,7 @@ def evolution_rate( # type: ignore def _make_pde_rhs_numba( # type: ignore self, state: ScalarField ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -131,7 +130,7 @@ def _make_pde_rhs_numba( # type: ignore @jit(signature) def pde_rhs(state_data: np.ndarray, t: float): - """compiled helper function evaluating right hand side""" + """Compiled helper function evaluating right hand side.""" result = -laplace(state_data, args={"t": t}) result += nu_value * laplace2(result, args={"t": t}) result -= 0.5 * gradient_sq(state_data, args={"t": t}) diff --git a/pde/pdes/laplace.py b/pde/pdes/laplace.py index 579295c4..e8d58837 100644 --- a/pde/pdes/laplace.py +++ b/pde/pdes/laplace.py @@ -1,5 +1,4 @@ -""" -Solvers for Poisson's and Laplace's equation +"""Solvers for Poisson's and Laplace's equation. .. codeauthor:: David Zwicker """ @@ -19,7 +18,7 @@ def solve_poisson_equation( label: str = "Solution to Poisson's equation", **kwargs, ) -> ScalarField: - r"""Solve Laplace's equation on a given grid + r"""Solve Laplace's equation on a given grid. Denoting the current field by :math:`u`, we thus solve for :math:`f`, defined by the equation diff --git a/pde/pdes/pde.py b/pde/pdes/pde.py index 79f53d90..9345509e 100644 --- a/pde/pdes/pde.py +++ b/pde/pdes/pde.py @@ -1,7 +1,6 @@ -""" -Defines a PDE class whose right hand side is given as a string +"""Defines a PDE class whose right hand side is given as a string.
-.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -52,7 +51,7 @@ class PDE(PDEBase): - """PDE defined by mathematical expressions + """PDE defined by mathematical expressions. Attributes: variables (tuple): @@ -240,7 +239,7 @@ def __init__( @property def expressions(self) -> dict[str, str]: - """show the expressions of the PDE""" + """Show the expressions of the PDE.""" return {k: v.expression for k, v in self._rhs_expr.items()} def _compile_rhs_single( @@ -250,7 +249,7 @@ def _compile_rhs_single( state: FieldBase, backend: Literal["numpy", "numba"] = "numpy", ): - """compile a function determining the right hand side for one variable + """Compile a function determining the right hand side for one variable. Args: var (str): @@ -359,7 +358,7 @@ def _compile_rhs_single( raise ValueError(f"Unsupported backend {backend}") def rhs_func(*args) -> np.ndarray: - """wrapper that inserts the extra arguments and initialized bc_args""" + """Wrapper that inserts the extra arguments and initializes bc_args.""" bc_args = NumbaDict() # args for differential operators bc_args["t"] = args[-1] # pass time to differential operators return func_inner(*args, None, bc_args, *extra_args) # type: ignore @@ -369,7 +368,7 @@ def rhs_func(*args) -> np.ndarray: def _prepare_cache( self, state: TState, backend: Literal["numpy", "numba"] = "numpy" ) -> dict[str, Any]: - """prepare the expression by setting internal variables in the cache + """Prepare the expression by setting internal variables in the cache. Note that the expensive calculations in this method are only carried out if the state attributes change. @@ -473,7 +472,7 @@ def _prepare_cache( stops: tuple[int, ...] = tuple(slc.stop for slc in state._slices) def get_data_tuple(state_data: np.ndarray) -> tuple[np.ndarray, ...]: - """helper for turning state_data into a tuple of field data""" + """Helper for turning state_data into a tuple of field data.""" return tuple( ( state_data[starts[i]] @@ -493,7 +492,7 @@ def get_data_tuple(state_data: np.ndarray) -> tuple[np.ndarray, ...]: return cache def evolution_rate(self, state: TState, t: float = 0.0) -> TState: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.FieldBase`): @@ -529,7 +528,7 @@ def evolution_rate(self, state: TState, t: float = 0.0) -> TState: def _make_pde_rhs_numba_coll( self, state: FieldCollection, cache: dict[str, Any] ) -> Callable[[np.ndarray, float], np.ndarray]: - """create the compiled rhs if `state` is a field collection + """Create the compiled rhs if `state` is a field collection. Args: state (:class:`~pde.fields.FieldCollection`): @@ -556,7 +555,7 @@ def chain( i: int = 0, inner: Callable[[np.ndarray, float, np.ndarray], None] | None = None, ) -> Callable[[np.ndarray, float], np.ndarray]: - """recursive helper function for applying all rhs""" + """Recursive helper function for applying all rhs.""" # run through all functions rhs = rhs_list[i] @@ -594,7 +593,7 @@ def evolution_rate(state_data: np.ndarray, t: float = 0) -> np.ndarray: def _make_pde_rhs_numba( # type: ignore self, state: TState, **kwargs ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE.
Args: state (:class:`~pde.fields.FieldBase`): @@ -626,7 +625,7 @@ def _jacobian_spectral( wave_vector: str | sympy.Symbol = "q", check_steady_state: bool = True, ) -> sympy.Matrix: - """calculate the Jacobian in spectral representation + """Calculate the Jacobian in spectral representation. Note: This method currently only supports scalar fields, so that inner and outer @@ -723,7 +722,7 @@ def _dispersion_relation( *, t: float = 0, ) -> tuple[np.ndarray, np.ndarray]: - """evaluate the dispersion relation + """Evaluate the dispersion relation. Args: state_hom (list or dict): diff --git a/pde/pdes/swift_hohenberg.py b/pde/pdes/swift_hohenberg.py index 18e99c29..c14fb1b6 100644 --- a/pde/pdes/swift_hohenberg.py +++ b/pde/pdes/swift_hohenberg.py @@ -1,7 +1,6 @@ -""" -The Swift-Hohenberg equation +"""The Swift-Hohenberg equation. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -19,7 +18,7 @@ class SwiftHohenbergPDE(PDEBase): - r"""The Swift-Hohenberg equation + r"""The Swift-Hohenberg equation. The mathematical definition is @@ -82,7 +81,7 @@ def evolution_rate( # type: ignore state: ScalarField, t: float = 0, ) -> ScalarField: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -112,7 +111,7 @@ def evolution_rate( # type: ignore def _make_pde_rhs_numba( # type: ignore self, state: ScalarField ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE. Args: state (:class:`~pde.fields.ScalarField`): @@ -136,7 +135,7 @@ def _make_pde_rhs_numba( # type: ignore @jit(signature) def pde_rhs(state_data: np.ndarray, t: float): - """compiled helper function evaluating right hand side""" + """Compiled helper function evaluating right hand side.""" state_laplace = laplace(state_data, args={"t": t}) state_laplace2 = laplace2(state_laplace, args={"t": t}) diff --git a/pde/pdes/wave.py b/pde/pdes/wave.py index edf0d31f..2f986bc6 100644 --- a/pde/pdes/wave.py +++ b/pde/pdes/wave.py @@ -1,7 +1,6 @@ -""" -A simple wave equation +"""A simple wave equation. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -19,7 +18,7 @@ class WavePDE(PDEBase): - r"""A simple wave equation + r"""A simple wave equation. The mathematical definition, :math:`\partial_t^2 u = c^2 \nabla^2 u`, is implemented as two first-order equations, @@ -52,7 +51,7 @@ def __init__(self, speed: float = 1, bc: BoundariesData = "auto_periodic_neumann self.bc = bc def get_initial_condition(self, u: ScalarField, v: ScalarField | None = None): - """create a suitable initial condition + """Create a suitable initial condition. Args: u (:class:`~pde.fields.ScalarField`): @@ -79,7 +78,7 @@ def evolution_rate( # type: ignore state: FieldCollection, t: float = 0, ) -> FieldCollection: - """evaluate the right hand side of the PDE + """Evaluate the right hand side of the PDE. Args: state (:class:`~pde.fields.FieldCollection`): @@ -103,7 +102,7 @@ def evolution_rate( # type: ignore def _make_pde_rhs_numba( # type: ignore self, state: FieldCollection ) -> Callable[[np.ndarray, float], np.ndarray]: - """create a compiled function evaluating the right hand side of the PDE + """Create a compiled function evaluating the right hand side of the PDE. 
Args: state (:class:`~pde.fields.FieldCollection`): @@ -122,7 +121,7 @@ def _make_pde_rhs_numba( # type: ignore @jit(signature) def pde_rhs(state_data: np.ndarray, t: float): - """compiled helper function evaluating right hand side""" + """Compiled helper function evaluating right hand side.""" rate = np.empty_like(state_data) rate[0] = state_data[1] rate[1][:] = laplace(state_data[0], args={"t": t}) diff --git a/pde/solvers/__init__.py b/pde/solvers/__init__.py index 232cf872..44308a88 100644 --- a/pde/solvers/__init__.py +++ b/pde/solvers/__init__.py @@ -1,5 +1,4 @@ -""" -Solvers define how a PDE is solved, i.e., how the initial state is advanced in time. +"""Solvers define how a PDE is solved, i.e., how the initial state is advanced in time. .. autosummary:: :nosignatures: @@ -13,7 +12,7 @@ ~scipy.ScipySolver ~registered_solvers -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from .adams_bashforth import AdamsBashforthSolver @@ -31,7 +30,7 @@ def registered_solvers() -> list[str]: - """returns all solvers that are currently registered + """Returns all solvers that are currently registered. Returns: list of str: List with the names of the solvers diff --git a/pde/solvers/adams_bashforth.py b/pde/solvers/adams_bashforth.py index 32de0807..95d13452 100644 --- a/pde/solvers/adams_bashforth.py +++ b/pde/solvers/adams_bashforth.py @@ -1,7 +1,6 @@ -""" -Defines an explicit Adams-Bashforth solver +"""Defines an explicit Adams-Bashforth solver. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -17,14 +16,14 @@ class AdamsBashforthSolver(SolverBase): - """explicit Adams-Bashforth multi-step solver""" + """Explicit Adams-Bashforth multi-step solver.""" name = "adams–bashforth" def _make_fixed_stepper( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float, int], tuple[float, float]]: - """return a stepper function using an explicit scheme with fixed time steps + """Return a stepper function using an explicit scheme with fixed time steps. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -43,7 +42,7 @@ def _make_fixed_stepper( def single_step( state_data: np.ndarray, t: float, state_prev: np.ndarray ) -> None: - """perform a single Adams-Bashforth step""" + """Perform a single Adams-Bashforth step.""" rhs_prev = rhs_pde(state_prev, t - dt).copy() rhs_cur = rhs_pde(state_data, t) state_prev[:] = state_data # save the previous state @@ -60,7 +59,7 @@ def single_step( def fixed_stepper( state_data: np.ndarray, t_start: float, steps: int ) -> tuple[float, float]: - """perform `steps` steps with fixed time steps""" + """Perform `steps` steps with fixed time steps.""" nonlocal state_prev, init_state_prev if init_state_prev: diff --git a/pde/solvers/base.py b/pde/solvers/base.py index 4361b043..46d751a5 100644 --- a/pde/solvers/base.py +++ b/pde/solvers/base.py @@ -1,10 +1,9 @@ -""" -Package that contains base classes for solvers. +"""Package that contains base classes for solvers. Beside the abstract base class defining the interfaces, we also provide :class:`AdaptiveSolverBase`, which contains methods for implementing adaptive solvers. -.. codeauthor:: David Zwicker +.. 
codeauthor:: David Zwicker """ from __future__ import annotations @@ -28,11 +27,11 @@ class ConvergenceError(RuntimeError): - """indicates that an implicit step did not converge""" + """Indicates that an implicit step did not converge.""" class SolverBase(metaclass=ABCMeta): - """base class for PDE solvers""" + """Base class for PDE solvers.""" dt_default: float = 1e-3 """float: default time step used if no time step was specified""" @@ -61,7 +60,7 @@ def __init__(self, pde: PDEBase, *, backend: BackendType = "auto"): self._logger = logging.getLogger(self.__class__.__name__) def __init_subclass__(cls, **kwargs): # @NoSelf - """register all subclassess to reconstruct them later""" + """Register all subclasses to reconstruct them later.""" super().__init_subclass__(**kwargs) if not isabstract(cls): if cls.__name__ in cls._subclasses: @@ -74,7 +73,7 @@ def __init_subclass__(cls, **kwargs): # @NoSelf @classmethod def from_name(cls, name: str, pde: PDEBase, **kwargs) -> SolverBase: - r"""create solver class based on its name + r"""Create solver class based on its name. Solver classes are automatically registered when they inherit from :class:`SolverBase`. Note that this also requires that the respective python @@ -123,7 +122,7 @@ def _compiled(self) -> bool: def _make_modify_after_step( self, state: FieldBase ) -> Callable[[np.ndarray], float]: - """create a function that modifies a state after each step + """Create a function that modifies a state after each step. A noop function will be returned if `_modify_state_after_step` is `False`, @@ -149,7 +148,7 @@ def modify_after_step(state_data: np.ndarray) -> float: def _make_pde_rhs( self, state: FieldBase, backend: BackendType = "auto" ) -> Callable[[np.ndarray, float], np.ndarray]: - """obtain a function for evaluating the right hand side + """Obtain a function for evaluating the right hand side. Args: state (:class:`~pde.fields.FieldBase`): @@ -188,7 +187,7 @@ def _make_pde_rhs( def _make_sde_rhs( self, state: FieldBase, backend: str = "auto" ) -> Callable[[np.ndarray, float], tuple[np.ndarray, np.ndarray]]: - """obtain a function for evaluating the right hand side + """Obtain a function for evaluating the right hand side. Args: state (:class:`~pde.fields.FieldBase`): @@ -222,7 +221,7 @@ def _make_sde_rhs( def _make_single_step_fixed_dt( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """return a function doing a single step with a fixed time step + """Return a function doing a single step with a fixed time step. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -236,7 +235,7 @@ def _make_single_step_fixed_dt( def _make_fixed_stepper( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float, int], tuple[float, float]]: - """return a stepper function using an explicit scheme with fixed time steps + """Return a stepper function using an explicit scheme with fixed time steps. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -256,7 +255,7 @@ def _make_fixed_stepper( def fixed_stepper( state_data: np.ndarray, t_start: float, steps: int ) -> tuple[float, float]: - """perform `steps` steps with fixed time steps""" + """Perform `steps` steps with fixed time steps.""" modifications = 0.0 for i in range(steps): # calculate the right hand side @@ -276,7 +275,7 @@ def fixed_stepper( def make_stepper( self, state: FieldBase, dt: float | None = None ) -> Callable[[FieldBase, float, float], float]: - """return a stepper function using an explicit scheme + """Return a stepper function using an explicit scheme.
Args: state (:class:`~pde.fields.base.FieldBase`): @@ -312,7 +311,7 @@ def make_stepper( fixed_stepper = self._make_fixed_stepper(state, dt_float) def wrapped_stepper(state: FieldBase, t_start: float, t_end: float) -> float: - """advance `state` from `t_start` to `t_end` using fixed steps""" + """Advance `state` from `t_start` to `t_end` using fixed steps.""" # calculate number of steps (which is at least 1) steps = max(1, int(np.ceil((t_end - t_start) / dt_float))) t_last, modifications = fixed_stepper(state.data, t_start, steps) @@ -324,7 +323,7 @@ def wrapped_stepper(state: FieldBase, t_start: float, t_end: float) -> float: class AdaptiveSolverBase(SolverBase): - """base class for adaptive time steppers""" + """Base class for adaptive time steppers.""" dt_min: float = 1e-10 """float: minimal time step that the adaptive solver will use""" @@ -360,7 +359,8 @@ def __init__( self.tolerance = tolerance def _make_error_synchronizer(self) -> Callable[[float], float]: - """return helper function that synchronizes errors between multiple processes""" + """Return helper function that synchronizes errors between multiple + processes.""" @register_jitable def synchronize_errors(error: float) -> float: @@ -369,14 +369,14 @@ def synchronize_errors(error: float) -> float: return synchronize_errors # type: ignore def _make_dt_adjuster(self) -> Callable[[float, float], float]: - """return a function that can be used to adjust time steps""" + """Return a function that can be used to adjust time steps.""" dt_min = self.dt_min dt_min_nan_err = f"Encountered NaN even though dt < {dt_min}" dt_min_err = f"Time step below {dt_min}" dt_max = self.dt_max def adjust_dt(dt: float, error_rel: float) -> float: - """helper function that adjust the time step + """Helper function that adjusts the time step. The goal is to keep the relative error `error_rel` close to 1. @@ -419,7 +419,7 @@ def adjust_dt(dt: float, error_rel: float) -> float: def _make_single_step_variable_dt( self, state: FieldBase ) -> Callable[[np.ndarray, float, float], np.ndarray]: - """return a function doing a single step with a variable time step + """Return a function doing a single step with a variable time step. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -434,7 +434,7 @@ def _make_single_step_variable_dt( rhs_pde = self._make_pde_rhs(state, backend=self.backend) def single_step(state_data: np.ndarray, t: float, dt: float) -> np.ndarray: - """basic implementation of Euler scheme""" + """Basic implementation of Euler scheme.""" return state_data + dt * rhs_pde(state_data, t) # type: ignore return single_step def _make_single_step_error_estimate( self, state: FieldBase ) -> Callable[[np.ndarray, float, float], tuple[np.ndarray, float]]: - """make a stepper that also estimates the error + """Make a stepper that also estimates the error. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -460,7 +460,7 @@ def _make_single_step_error_estimate( def single_step_error_estimate( state_data: np.ndarray, t: float, dt: float ) -> tuple[np.ndarray, float]: - """basic stepper to estimate error""" + """Basic stepper to estimate error.""" # single step with dt k1 = single_step(state_data, t, dt) @@ -479,7 +479,7 @@ def _make_adaptive_stepper(self, state: FieldBase) -> Callable[ [np.ndarray, float, float, float, OnlineStatistics | None], tuple[float, float, int, float], ]: - """make an adaptive Euler stepper + """Make an adaptive Euler stepper.
Args: state (:class:`~pde.fields.base.FieldBase`): @@ -514,7 +514,7 @@ def adaptive_stepper( dt_init: float, dt_stats: OnlineStatistics | None = None, ) -> tuple[float, float, int, float]: - """adaptive stepper that advances the state in time""" + """Adaptive stepper that advances the state in time.""" modifications = 0.0 dt_opt = dt_init t = t_start @@ -565,7 +565,7 @@ def adaptive_stepper( def make_stepper( self, state: FieldBase, dt: float | None = None ) -> Callable[[FieldBase, float, float], float]: - """return a stepper function using an explicit scheme + """Return a stepper function using an explicit scheme. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -602,7 +602,7 @@ def make_stepper( adaptive_stepper = self._make_adaptive_stepper(state) def wrapped_stepper(state: FieldBase, t_start: float, t_end: float) -> float: - """advance `state` from `t_start` to `t_end` using adaptive steps""" + """Advance `state` from `t_start` to `t_end` using adaptive steps.""" nonlocal dt_float # `dt_float` stores value for the next call t_last, dt_float, steps, modifications = adaptive_stepper( diff --git a/pde/solvers/controller.py b/pde/solvers/controller.py index 6ab1ce57..11dd9d94 100644 --- a/pde/solvers/controller.py +++ b/pde/solvers/controller.py @@ -1,5 +1,4 @@ -""" -Defines a class controlling the simulations of PDEs +"""Defines a class controlling the simulations of PDEs. .. codeauthor:: David Zwicker """ @@ -30,7 +29,7 @@ class Controller: - """class controlling a simulation + """Class controlling a simulation. The controller calls a solver to advance the simulation into the future and it takes care of trackers that analyze and modify the state periodically. The controller also @@ -94,7 +93,7 @@ def t_range(self) -> tuple[float, float]: @t_range.setter def t_range(self, value: TRangeType): - """set start and end time of the simulation + """Set start and end time of the simulation. Args: value (float or tuple): @@ -114,10 +113,10 @@ def t_range(self, value: TRangeType): ) def _get_stop_handler(self) -> Callable[[Exception, float], tuple[int, str]]: - """return function that handles messaging""" + """Return function that handles messaging.""" def _handle_stop_iteration(err: Exception, t: float) -> tuple[int, str]: - """helper function for handling interrupts raised by trackers""" + """Helper function for handling interrupts raised by trackers.""" if isinstance(err, FinishedSimulation): # tracker determined that the simulation finished self.info["successful"] = True @@ -145,7 +144,7 @@ def _handle_stop_iteration(err: Exception, t: float) -> tuple[int, str]: return _handle_stop_iteration def _run_single(self, state: TState, dt: float | None = None) -> None: - """run the simulation + """Run the simulation. Diagnostic information about the solver procedure are available in the `diagnostics` property of the instance after this function has been called. @@ -279,7 +278,7 @@ def _run_single(self, state: TState, dt: float | None = None) -> None: ) def _run_mpi_client(self, state: TState, dt: float | None = None) -> None: - """loop for run the simulation on client nodes during an MPI run + """Loop for running the simulation on client nodes during an MPI run. This function just loops the stepper advancing the sub field of the current node in time. All other logic, including trackers, are done in the main node.
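For orientation, the controller/solver split documented in these hunks is driven from user code roughly like this (a minimal sketch using the public py-pde API; the diffusion setup is just an illustrative choice)::

    from pde import DiffusionPDE, ScalarField, UnitGrid
    from pde.solvers import Controller, ExplicitSolver

    grid = UnitGrid([64, 64])                 # simple square grid
    state = ScalarField.random_uniform(grid)  # initial condition
    solver = ExplicitSolver(DiffusionPDE(diffusivity=0.5))
    controller = Controller(solver, t_range=1.0)
    final = controller.run(state, dt=1e-3)    # trackers and interrupts are handled here

In an MPI run, only the main node executes this logic; client nodes fall into the stepping loop of `_run_mpi_client` shown above.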
@@ -310,7 +309,7 @@ def _run_mpi_client(self, state: TState, dt: float | None = None) -> None: t = stepper(state, t, t_end) def run(self, initial_state: TState, dt: float | None = None) -> TState | None: - """run the simulation + """Run the simulation. Diagnostic information about the solver are available in the :attr:`~Controller.diagnostics` property after this function has been called. diff --git a/pde/solvers/crank_nicolson.py b/pde/solvers/crank_nicolson.py index 4720b6df..acbee2a3 100644 --- a/pde/solvers/crank_nicolson.py +++ b/pde/solvers/crank_nicolson.py @@ -1,7 +1,6 @@ -""" -Defines a Crank-Nicolson solver +"""Defines a Crank-Nicolson solver. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -18,7 +17,7 @@ class CrankNicolsonSolver(SolverBase): - """Crank-Nicolson solver""" + """Crank-Nicolson solver.""" name = "crank-nicolson" @@ -57,7 +56,7 @@ def __init__( def _make_single_step_fixed_dt( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """return a function doing a single step with an implicit Euler scheme + """Return a function doing a single step with an implicit Euler scheme. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -81,7 +80,7 @@ def _make_single_step_fixed_dt( # handle deterministic version of the pde def crank_nicolson_step(state_data: np.ndarray, t: float) -> None: - """compiled inner loop for speed""" + """Compiled inner loop for speed.""" nfev = 0 # count function evaluations # keep values at the current time t point used in iteration diff --git a/pde/solvers/explicit.py b/pde/solvers/explicit.py index ac529978..6612886d 100644 --- a/pde/solvers/explicit.py +++ b/pde/solvers/explicit.py @@ -1,7 +1,6 @@ -""" -Defines an explicit solver supporting various methods - -.. codeauthor:: David Zwicker +"""Defines an explicit solver supporting various methods. + +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -20,7 +19,7 @@ class ExplicitSolver(AdaptiveSolverBase): - """various explicit PDE solvers""" + """Various explicit PDE solvers.""" name = "explicit" @@ -58,7 +57,7 @@ def __init__( def _make_single_step_fixed_euler( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """make a simple Euler stepper with fixed time step + """Make a simple Euler stepper with fixed time step. 
Args: state (:class:`~pde.fields.base.FieldBase`): @@ -78,7 +77,7 @@ def _make_single_step_fixed_euler( rhs_sde = self._make_sde_rhs(state, backend=self.backend) def stepper(state_data: np.ndarray, t: float) -> None: - """perform a single Euler-Maruyama step""" + """Perform a single Euler-Maruyama step.""" evolution_rate, noise_realization = rhs_sde(state_data, t) state_data += dt * evolution_rate if noise_realization is not None: @@ -92,7 +91,7 @@ def stepper(state_data: np.ndarray, t: float) -> None: rhs_pde = self._make_pde_rhs(state, backend=self.backend) def stepper(state_data: np.ndarray, t: float) -> None: - """perform a single Euler step""" + """Perform a single Euler step.""" state_data += dt * rhs_pde(state_data, t) self._logger.info("Init explicit Euler stepper with dt=%g", dt) @@ -102,7 +101,7 @@ def stepper(state_data: np.ndarray, t: float) -> None: def _make_single_step_rk45( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """make function doing a single explicit Runge-Kutta step of order 5(4) + """Make function doing a single explicit Runge-Kutta step of order 5(4) Args: state (:class:`~pde.fields.base.FieldBase`): @@ -123,7 +122,7 @@ def _make_single_step_rk45( rhs = self._make_pde_rhs(state, backend=self.backend) def stepper(state_data: np.ndarray, t: float) -> None: - """compiled inner loop for speed""" + """Compiled inner loop for speed.""" # calculate the intermediate values in Runge-Kutta k1 = dt * rhs(state_data, t) k2 = dt * rhs(state_data + 0.5 * k1, t + 0.5 * dt) @@ -138,7 +137,7 @@ def stepper(state_data: np.ndarray, t: float) -> None: def _make_single_step_fixed_dt( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """return a function doing a single step with a fixed time step + """Return a function doing a single step with a fixed time step. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -159,7 +158,7 @@ def _make_adaptive_euler_stepper(self, state: FieldBase) -> Callable[ [np.ndarray, float, float, float, OnlineStatistics | None], tuple[float, float, int, float], ]: - """make an adaptive Euler stepper + """Make an adaptive Euler stepper. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -195,7 +194,7 @@ def adaptive_stepper( dt_init: float, dt_stats: OnlineStatistics | None = None, ) -> tuple[float, float, int, float]: - """adaptive stepper that advances the state in time""" + """Adaptive stepper that advances the state in time.""" modifications = 0.0 dt_opt = dt_init rate = rhs_pde(state_data, t_start) # calculate initial rate @@ -270,7 +269,7 @@ def adaptive_stepper( def _make_single_step_error_estimate_rkf( self, state: FieldBase ) -> Callable[[np.ndarray, float, float], tuple[np.ndarray, float]]: - """make an adaptive stepper using the explicit Runge-Kutta-Fehlberg method + """Make an adaptive stepper using the explicit Runge-Kutta-Fehlberg method. 
Args: state (:class:`~pde.fields.base.FieldBase`): @@ -328,7 +327,7 @@ def _make_single_step_error_estimate_rkf( def stepper( state_data: np.ndarray, t: float, dt: float ) -> tuple[np.ndarray, float]: - """basic stepper to estimate error""" + """Basic stepper to estimate error.""" # do the six intermediate steps k1 = dt * rhs(state_data, t) k2 = dt * rhs(state_data + b21 * k1, t + a2 * dt) @@ -356,7 +355,7 @@ def stepper( def _make_single_step_error_estimate( self, state: FieldBase ) -> Callable[[np.ndarray, float, float], tuple[np.ndarray, float]]: - """make a stepper that also estimates the error + """Make a stepper that also estimates the error. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -375,7 +374,7 @@ def _make_adaptive_stepper(self, state: FieldBase) -> Callable[ [np.ndarray, float, float, float, OnlineStatistics | None], tuple[float, float, int, float], ]: - """return a stepper function using an explicit scheme with fixed time steps + """Return a stepper function using an explicit scheme with adaptive time steps. Args: state (:class:`~pde.fields.base.FieldBase`): diff --git a/pde/solvers/explicit_mpi.py b/pde/solvers/explicit_mpi.py index 9cbceeb8..7d218df9 100644 --- a/pde/solvers/explicit_mpi.py +++ b/pde/solvers/explicit_mpi.py @@ -1,7 +1,6 @@ -""" -Defines an explicit solver using multiprocessing via MPI +"""Defines an explicit solver using multiprocessing via MPI. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -21,7 +20,7 @@ class ExplicitMPISolver(ExplicitSolver): - """various explicit PDE solve using MPI + """Various explicit PDE solver using MPI. Warning: This solver can only be used if MPI is properly installed. In particular, python @@ -118,7 +117,7 @@ def __init__( self.decomposition = decomposition def _make_error_synchronizer(self) -> Callable[[float], float]: - """return helper function that synchronizes errors between multiple processes""" + """Return helper function that synchronizes errors between multiple + processes.""" if mpi.parallel_run: # in a parallel run, we need to return the maximal error from ..tools.mpi import Operator, mpi_allreduce @@ -127,7 +127,7 @@ @register_jitable def synchronize_errors(error: float) -> float: - """return maximal error accross all cores""" + """Return maximal error across all cores.""" return mpi_allreduce(error, operator_max_id) # type: ignore return synchronize_errors # type: ignore @@ -137,7 +137,7 @@ def synchronize_errors(error: float) -> float: def make_stepper( self, state: FieldBase, dt=None ) -> Callable[[FieldBase, float, float], float]: - """return a stepper function using an explicit scheme + """Return a stepper function using an explicit scheme.
Args: state (:class:`~pde.fields.base.FieldBase`): @@ -190,7 +190,7 @@ def make_stepper( def wrapped_stepper( state: FieldBase, t_start: float, t_end: float ) -> float: - """advance `state` from `t_start` to `t_end` using adaptive steps""" + """Advance `state` from `t_start` to `t_end` using adaptive steps.""" nonlocal dt # `dt` stores value for the next call # distribute the end time and the field to all nodes @@ -224,7 +224,7 @@ def wrapped_stepper( def wrapped_stepper( state: FieldBase, t_start: float, t_end: float ) -> float: - """advance `state` from `t_start` to `t_end` using fixed steps""" + """Advance `state` from `t_start` to `t_end` using fixed steps.""" # calculate number of steps (which is at least 1) steps = max(1, int(np.ceil((t_end - t_start) / dt))) diff --git a/pde/solvers/implicit.py b/pde/solvers/implicit.py index 0eb10277..364e493b 100644 --- a/pde/solvers/implicit.py +++ b/pde/solvers/implicit.py @@ -1,7 +1,6 @@ -""" -Defines an implicit Euler solver +"""Defines an implicit Euler solver. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -18,7 +17,7 @@ class ImplicitSolver(SolverBase): - """implicit (backward) Euler PDE solver""" + """Implicit (backward) Euler PDE solver.""" name = "implicit" @@ -49,7 +48,7 @@ def __init__( def _make_single_step_fixed_dt_deterministic( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """return a function doing a deterministic step with an implicit Euler scheme + """Return a function doing a deterministic step with an implicit Euler scheme. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -72,7 +71,7 @@ def _make_single_step_fixed_dt_deterministic( # handle deterministic version of the pde def implicit_step(state_data: np.ndarray, t: float) -> None: - """compiled inner loop for speed""" + """Compiled inner loop for speed.""" nfev = 0 # count function evaluations # save state at current time point t for stepping @@ -116,7 +115,7 @@ def implicit_step(state_data: np.ndarray, t: float) -> None: def _make_single_step_fixed_dt_stochastic( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """return a function doing a step for a SDE with an implicit Euler scheme + """Return a function doing a step for a SDE with an implicit Euler scheme. Args: state (:class:`~pde.fields.base.FieldBase`): @@ -137,7 +136,7 @@ def _make_single_step_fixed_dt_stochastic( # handle deterministic version of the pde def implicit_step(state_data: np.ndarray, t: float) -> None: - """compiled inner loop for speed""" + """Compiled inner loop for speed.""" nfev = 0 # count function evaluations # save state at current time point t for stepping @@ -188,7 +187,7 @@ def implicit_step(state_data: np.ndarray, t: float) -> None: def _make_single_step_fixed_dt( self, state: FieldBase, dt: float ) -> Callable[[np.ndarray, float], None]: - """return a function doing a single step with an implicit Euler scheme + """Return a function doing a single step with an implicit Euler scheme. Args: state (:class:`~pde.fields.base.FieldBase`): diff --git a/pde/solvers/scipy.py b/pde/solvers/scipy.py index 5745fd6a..60921a11 100644 --- a/pde/solvers/scipy.py +++ b/pde/solvers/scipy.py @@ -1,7 +1,6 @@ -""" -Defines a solver using :mod:`scipy.integrate` - -.. codeauthor:: David Zwicker +"""Defines a solver using :mod:`scipy.integrate` + +.. 
codeauthor:: David Zwicker """ from __future__ import annotations @@ -44,7 +43,7 @@ def __init__(self, pde: PDEBase, backend: BackendType = "auto", **kwargs): def make_stepper( self, state: FieldBase, dt: float | None = None ) -> Callable[[FieldBase, float, float], float]: - """return a stepper function + """Return a stepper function. Args: state (:class:`~pde.fields.FieldBase`): @@ -72,7 +71,7 @@ def make_stepper( rhs = self._make_pde_rhs(state, backend=self.backend) def rhs_helper(t: float, state_flat: np.ndarray) -> np.ndarray: - """helper function to provide the correct call convention""" + """Helper function to provide the correct call convention.""" rhs_value = rhs(state_flat.reshape(shape), t) y = np.broadcast_to(rhs_value, shape).flat if np.any(np.isnan(y)): @@ -83,7 +82,8 @@ def rhs_helper(t: float, state_flat: np.ndarray) -> np.ndarray: return y # type: ignore def stepper(state: FieldBase, t_start: float, t_end: float) -> float: - """use scipy.integrate.odeint to advance `state` from `t_start` to `t_end`""" + """Use scipy.integrate.odeint to advance `state` from `t_start` to + `t_end`""" if dt is not None: self.solver_params["first_step"] = min(t_end - t_start, dt) diff --git a/pde/storage/__init__.py b/pde/storage/__init__.py index c3f29ea2..ef2c57d8 100644 --- a/pde/storage/__init__.py +++ b/pde/storage/__init__.py @@ -1,5 +1,4 @@ -""" -Module defining classes for storing simulation data. +"""Module defining classes for storing simulation data. .. autosummary:: :nosignatures: @@ -10,7 +9,7 @@ ~file.FileStorage ~movie.MovieStorage -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from .file import FileStorage diff --git a/pde/storage/base.py b/pde/storage/base.py index e26ddd00..fc5d6497 100644 --- a/pde/storage/base.py +++ b/pde/storage/base.py @@ -1,7 +1,6 @@ -""" -Base classes for storing data +"""Base classes for storing data. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -37,7 +36,7 @@ class StorageBase(metaclass=ABCMeta): - """base class for storing time series of discretized fields + """Base class for storing time series of discretized fields. These classes store time series of :class:`~pde.fields.base.FieldBase`, i.e., they store the values of the fields at particular time points. Iterating of the storage @@ -75,7 +74,7 @@ def __init__( @property def data_shape(self) -> tuple[int, ...]: - """the current data shape. + """The current data shape. Raises: RuntimeError: if data_shape was not set @@ -87,7 +86,7 @@ def data_shape(self) -> tuple[int, ...]: @property def dtype(self) -> DTypeLike: - """the current data type. + """The current data type. Raises: RuntimeError: if data_type was not set @@ -99,7 +98,7 @@ def dtype(self) -> DTypeLike: @abstractmethod def _append_data(self, data: np.ndarray, time: float) -> None: - """append a new data set + """Append a new data set. Args: data (:class:`~numpy.ndarray`): The actual data @@ -107,7 +106,7 @@ def _append_data(self, data: np.ndarray, time: float) -> None: """ def append(self, field: FieldBase, time: float | None = None) -> None: - """add field to the storage + """Add field to the storage. Args: field (:class:`~pde.fields.base.FieldBase`): @@ -125,7 +124,7 @@ def append(self, field: FieldBase, time: float | None = None) -> None: return self._append_data(field.data, time) def clear(self, clear_data_shape: bool = False) -> None: - """truncate the storage by removing all stored data. + """Truncate the storage by removing all stored data. 
Args: clear_data_shape (bool): @@ -136,12 +135,12 @@ def clear(self, clear_data_shape: bool = False) -> None: self._dtype = None def __len__(self): - """return the number of stored items, i.e., time steps""" + """Return the number of stored items, i.e., time steps.""" return len(self.times) @property def shape(self) -> tuple[int, ...] | None: - """the shape of the stored data""" + """The shape of the stored data.""" if self._data_shape: return (len(self),) + self._data_shape else: @@ -186,7 +185,7 @@ def grid(self) -> GridBase | None: return self._grid def _init_field(self) -> None: - """initialize internal field variable""" + """Initialize internal field variable.""" if self.grid is None: raise RuntimeError( "Could not load grid from data. Please set the `_grid` attribute " @@ -225,7 +224,7 @@ def _init_field(self) -> None: ) def _get_field(self, t_index: int) -> FieldBase: - """return the field corresponding to the given time index + """Return the field corresponding to the given time index. Load the data given an index, i.e., the data at time `self.times[t_index]`. @@ -253,7 +252,7 @@ def _get_field(self, t_index: int) -> FieldBase: return field def __getitem__(self, key: int | slice) -> FieldBase | list[FieldBase]: - """return field at given index or a list of fields for a slice""" + """Return field at given index or a list of fields for a slice.""" if isinstance(key, int): return self._get_field(key) elif isinstance(key, slice): @@ -262,12 +261,12 @@ def __getitem__(self, key: int | slice) -> FieldBase | list[FieldBase]: raise TypeError("Unknown key type") def __iter__(self) -> Iterator[FieldBase]: - """iterate over all stored fields""" + """Iterate over all stored fields.""" for i in range(len(self)): yield self[i] # type: ignore def items(self) -> Iterator[tuple[float, FieldBase]]: - """iterate over all times and stored fields, returning pairs""" + """Iterate over all times and stored fields, returning pairs.""" for i in range(len(self)): yield self.times[i], self[i] # type: ignore @@ -279,7 +278,7 @@ def tracker( transformation: Callable[[FieldBase, float], FieldBase] | None = None, interval=None, ) -> StorageTracker: - """create object that can be used as a tracker to fill this storage + """Create object that can be used as a tracker to fill this storage. Args: interrupts: @@ -320,7 +319,7 @@ def add_to_state(state): ) def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: - """initialize the storage for writing data + """Initialize the storage for writing data. Args: field (:class:`~pde.fields.FieldBase`): @@ -345,10 +344,10 @@ def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: self.info["field_attributes"] = field.attributes_serialized def end_writing(self) -> None: - """finalize the storage after writing""" + """Finalize the storage after writing.""" def view_field(self, field_id: int | str) -> StorageView: - """returns a view into this storage focusing on a particular field + """Returns a view into this storage focusing on a particular field. Note: Modifying data returned by the view will modify the underlying storage @@ -368,7 +367,7 @@ def view_field(self, field_id: int | str) -> StorageView: def extract_field( self, field_id: int | str, label: str | None = None ) -> MemoryStorage: - """extract the time course of a single field from a collection + """Extract the time course of a single field from a collection. This method makes a copy of the underlying data. 
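The tracker plumbing documented in the hunks above is typically exercised like this (a sketch; only names appearing in these hunks are used, and the concrete PDE is an arbitrary example)::

    from pde import DiffusionPDE, MemoryStorage, ScalarField, UnitGrid

    storage = MemoryStorage()
    state = ScalarField.random_uniform(UnitGrid([32]))
    eq = DiffusionPDE()
    # store a snapshot every 0.1 time units via the storage's tracker
    eq.solve(state, t_range=1.0, dt=1e-3, tracker=storage.tracker(0.1))
    for t, field in storage.items():  # iterate (time, field) pairs
        print(t, field.data.mean())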
@@ -421,7 +420,7 @@ def extract_field( def extract_time_range( self, t_range: float | tuple[float, float] | None = None ) -> MemoryStorage: - """extract a particular time interval + """Extract a particular time interval. Note: This might return a view into the original data, so modifying the returned @@ -466,7 +465,7 @@ def apply( *, progress: bool = False, ) -> StorageBase: - """applies function to each field in a storage + """Applies function to each field in a storage. Args: func (callable): @@ -526,7 +525,7 @@ def apply( def copy( self, out: StorageBase | None = None, *, progress: bool = False ) -> StorageBase: - """copies all fields in a storage to a new one + """Copies all fields in a storage to a new one. Args: out (:class:`~pde.storage.base.StorageBase`): @@ -544,7 +543,7 @@ def copy( class StorageTracker(TrackerBase): - """Tracker that stores data in special storage classes + """Tracker that stores data in special storage classes. Attributes: storage (:class:`~pde.storage.base.StorageBase`): @@ -581,7 +580,7 @@ def __init__( self.transformation = transformation def _transform(self, field: FieldBase, t: float) -> FieldBase: - """transforms the field according to the defined transformation""" + """Transforms the field according to the defined transformation.""" if self.transformation is None: return field elif self.transformation.__code__.co_argcount == 1: @@ -605,7 +604,7 @@ def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: return t_first def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -615,7 +614,7 @@ def handle(self, field: FieldBase, t: float) -> None: self.storage.append(self._transform(field, t), time=t) def finalize(self, info: InfoDict | None = None) -> None: - """finalize the tracker, supplying additional information + """Finalize the tracker, supplying additional information. Args: info (dict): @@ -626,7 +625,7 @@ def finalize(self, info: InfoDict | None = None) -> None: class StorageView: - """represents a view into a storage that extracts a particular field""" + """Represents a view into a storage that extracts a particular field.""" has_collection: bool = False @@ -662,15 +661,15 @@ def __len__(self): return len(self.storage) def __getitem__(self, key: int) -> DataFieldBase: - """return field at given index or a list of fields for a slice""" + """Return field at given index or a list of fields for a slice.""" return self.storage[key][self.field_index] # type: ignore def __iter__(self) -> Iterator[DataFieldBase]: - """iterate over all stored fields""" + """Iterate over all stored fields.""" for fields in self.storage: yield fields[self.field_index] # type: ignore def items(self) -> Iterator[tuple[float, DataFieldBase]]: - """iterate over all times and stored fields, returning pairs""" + """Iterate over all times and stored fields, returning pairs.""" for k, v in self.storage.items(): yield k, v[self.field_index] # type: ignore diff --git a/pde/storage/file.py b/pde/storage/file.py index a2fd7dc4..619f3ddd 100644 --- a/pde/storage/file.py +++ b/pde/storage/file.py @@ -1,7 +1,7 @@ -""" -Defines a class storing data on the file system using the hierarchical data format (hdf) +"""Defines a class storing data on the file system using the hierarchical data format +(hdf) -.. codeauthor:: David Zwicker +.. 
codeauthor:: David Zwicker """ from __future__ import annotations @@ -20,7 +20,7 @@ class FileStorage(StorageBase): - """store discretized fields in a hdf5 file""" + """Store discretized fields in a hdf5 file.""" def __init__( self, @@ -100,7 +100,7 @@ def _file_state(self) -> str: raise NotImplementedError(f"Do not understand mode `{self._file.mode}") def close(self) -> None: - """close the currently opened file""" + """Close the currently opened file.""" if self._file is not None: self._logger.info(f"Close file `{self.filename}`") self._file.close() @@ -119,7 +119,7 @@ def _create_hdf_dataset( shape: tuple[int, ...] = tuple(), dtype: DTypeLike = np.double, ): - """create a hdf5 dataset with the given name and data_shape + """Create a hdf5 dataset with the given name and data_shape. Args: name (str): Identifier of the hdf5 dataset @@ -148,7 +148,7 @@ def _open( mode: Literal["reading", "appending", "writing", "closed"] = "reading", info: InfoDict | None = None, ) -> None: - """open the hdf file in a particular mode + """Open the hdf file in a particular mode. Args: mode (str): @@ -248,7 +248,7 @@ def _open( raise RuntimeError(f"Mode `{mode}` not implemented") def __len__(self): - """return the number of stored items, i.e., time steps""" + """Return the number of stored items, i.e., time steps.""" # determine size of data in HDF5 file try: length = len(self.times) @@ -263,18 +263,18 @@ def __len__(self): @property def times(self): - """:class:`~numpy.ndarray`: The times at which data is available""" + """:class:`~numpy.ndarray`: The times at which data is available.""" self._open("reading") return self._times @property def data(self): - """:class:`~numpy.ndarray`: The actual data for all time""" + """:class:`~numpy.ndarray`: The actual data for all time.""" self._open("reading") return self._data def clear(self, clear_data_shape: bool = False): - """truncate the storage by removing all stored data. + """Truncate the storage by removing all stored data. Args: clear_data_shape (bool): @@ -311,7 +311,7 @@ def clear(self, clear_data_shape: bool = False): super().clear(clear_data_shape=clear_data_shape) def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: - """initialize the storage for writing data + """Initialize the storage for writing data. Args: field (:class:`~pde.fields.FieldBase`): @@ -359,7 +359,7 @@ def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: self._is_writing = True def _append_data(self, data: np.ndarray, time: float) -> None: - """append a new data set + """Append a new data set. Args: data (:class:`~numpy.ndarray`): The actual data @@ -398,10 +398,10 @@ def _append_data(self, data: np.ndarray, time: float) -> None: self.close() def end_writing(self) -> None: - """finalize the storage after writing. + """Finalize the storage after writing. - This makes sure the data is actually written to a file when - self.keep_opened == False + This makes sure the data is actually written to a file when self.keep_opened == + False """ if not self._is_writing: return # writing mode was already ended diff --git a/pde/storage/memory.py b/pde/storage/memory.py index cbe74633..7444e240 100644 --- a/pde/storage/memory.py +++ b/pde/storage/memory.py @@ -1,7 +1,6 @@ -""" -Defines a class storing data in memory. - -.. codeauthor:: David Zwicker +"""Defines a class storing data in memory. + +.. 
codeauthor:: David Zwicker """ from __future__ import annotations @@ -17,7 +16,7 @@ class MemoryStorage(StorageBase): - """store discretized fields in memory""" + """Store discretized fields in memory.""" def __init__( self, @@ -71,7 +70,7 @@ def from_fields( info: InfoDict | None = None, write_mode: WriteModeType = "truncate_once", ) -> MemoryStorage: - """create MemoryStorage from a list of fields + """Create MemoryStorage from a list of fields. Args: times (:class:`~numpy.ndarray`): @@ -111,7 +110,7 @@ def from_collection( rtol: float = 1.0e-5, atol: float = 1.0e-8, ) -> MemoryStorage: - """combine multiple memory storages into one + """Combine multiple memory storages into one. This method can be used to combine multiple time series of different fields into a single representation. This requires that all time series contain data at the @@ -152,7 +151,7 @@ def from_collection( return cls.from_fields(times, fields=fields) def clear(self, clear_data_shape: bool = False) -> None: - """truncate the storage by removing all stored data. + """Truncate the storage by removing all stored data. Args: clear_data_shape (bool): @@ -163,7 +162,7 @@ def clear(self, clear_data_shape: bool = False) -> None: super().clear(clear_data_shape=clear_data_shape) def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: - """initialize the storage for writing data + """Initialize the storage for writing data. Args: field (:class:`~pde.fields.FieldBase`): @@ -196,7 +195,7 @@ def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: ) def _append_data(self, data: np.ndarray, time: float) -> None: - """append a new data set + """Append a new data set. Args: data (:class:`~numpy.ndarray`): The actual data @@ -210,7 +209,7 @@ def _append_data(self, data: np.ndarray, time: float) -> None: @contextmanager def get_memory_storage(field: FieldBase, info: InfoDict | None = None): - """a context manager that can be used to create a MemoryStorage + """A context manager that can be used to create a MemoryStorage. Example: This can be used to quickly store data:: diff --git a/pde/storage/modelrunner.py b/pde/storage/modelrunner.py index af93ce66..5d9e0f90 100644 --- a/pde/storage/modelrunner.py +++ b/pde/storage/modelrunner.py @@ -1,7 +1,6 @@ -""" -Defines a class storing data using :mod:`modelrunner`. +"""Defines a class storing data using :mod:`modelrunner`. -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -14,7 +13,7 @@ class ModelrunnerStorage(StorageBase): - """store discretized fields in a :mod:`modelrunner` storage + """Store discretized fields in a :mod:`modelrunner` storage. This storage class acts as a wrapper for the :mod:`~modelrunner.storage.trajectory` module, which allows handling time-dependent data in :mod:`modelrunner` storages. 
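For the in-memory variant above, the context-manager helper offers the shortest write path (a minimal sketch following the usage pattern from the docstring)::

    from pde import ScalarField, UnitGrid
    from pde.storage.memory import get_memory_storage

    field = ScalarField(UnitGrid([8]))
    with get_memory_storage(field) as storage:
        for t in range(3):
            field.data += 1.0                   # stand-in for an actual update
            storage.append(field, time=float(t))
    print(storage.times)  # stored time points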
@@ -29,7 +28,6 @@ class ModelrunnerStorage(StorageBase): r = Result.from_file("data.hdf5") r.result.plot() # plots the final state r.storage["trajectory"] # allows accessing the stored trajectory - """ def __init__( @@ -62,7 +60,7 @@ def __init__( self._reader: mr.storage.Trajectory | None = None def close(self) -> None: - """close the currently opened trajectory writer""" + """Close the currently opened trajectory writer.""" if self._writer is not None: self._writer.close() self._writer = None @@ -74,12 +72,12 @@ def __exit__(self, exc_type, exc_value, exc_traceback): self.close() def __len__(self): - """return the number of stored items, i.e., time steps""" + """Return the number of stored items, i.e., time steps.""" return len(self.times) @property def _io(self) -> mr.storage.TrajectoryWriter | mr.storage.Trajectory: - """:class:`~modelrunner.storage.group.StorageGroup`: Group with all data""" + """:class:`~modelrunner.storage.group.StorageGroup`: Group with all data.""" if self._writer is not None: return self._writer if self._reader is None: @@ -88,16 +86,16 @@ def _io(self) -> mr.storage.TrajectoryWriter | mr.storage.Trajectory: @property def times(self): - """:class:`~numpy.ndarray`: The times at which data is available""" + """:class:`~numpy.ndarray`: The times at which data is available.""" return self._io.times @property def data(self): - """:class:`~numpy.ndarray`: The actual data for all time""" + """:class:`~numpy.ndarray`: The actual data for all time.""" return self._io._storage.read_array(self._io._loc + ["data"]) def clear(self, clear_data_shape: bool = False): - """truncate the storage by removing all stored data. + """Truncate the storage by removing all stored data. Args: clear_data_shape (bool): @@ -108,7 +106,7 @@ def clear(self, clear_data_shape: bool = False): super().clear(clear_data_shape=clear_data_shape) def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: - """initialize the storage for writing data + """Initialize the storage for writing data. Args: field (:class:`~pde.fields.FieldBase`): @@ -149,7 +147,7 @@ def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: ) def _append_data(self, data: np.ndarray, time: float) -> None: - """append a new data set + """Append a new data set. Args: data (:class:`~numpy.ndarray`): The actual data @@ -159,9 +157,9 @@ def _append_data(self, data: np.ndarray, time: float) -> None: self._writer.append(data, float(time)) def end_writing(self) -> None: - """finalize the storage after writing. + """Finalize the storage after writing. - This makes sure the data is actually written to a file when - self.keep_opened == False + This makes sure the data is actually written to a file when self.keep_opened == + False """ self.close() diff --git a/pde/storage/movie.py b/pde/storage/movie.py index f35ec4a3..cb9a5ca8 100644 --- a/pde/storage/movie.py +++ b/pde/storage/movie.py @@ -1,10 +1,9 @@ -""" -Defines a class storing data on the file system as a compressed movie +"""Defines a class storing data on the file system as a compressed movie. This package requires the optional :mod:`ffmpeg-python` package to use FFmpeg for reading and writing movies. -.. codeauthor:: David Zwicker +.. 
codeauthor:: David Zwicker """ from __future__ import annotations @@ -34,7 +33,7 @@ def _get_limits(value: float | ArrayLike, dim: int) -> np.ndarray: - """helper function creating sequence of length `dim` from input""" + """Helper function creating sequence of length `dim` from input.""" if np.isscalar(value): return np.full(dim, value, dtype=float) else: @@ -42,7 +41,7 @@ def _get_limits(value: float | ArrayLike, dim: int) -> np.ndarray: class MovieStorage(StorageBase): - """store discretized fields in a movie file + """Store discretized fields in a movie file. This storage only works when the `ffmpeg` program and :mod:`ffmpeg` is installed. The default codec is `FFV1 `_, which supports @@ -143,7 +142,7 @@ def __del__(self): self.close() # ensure open files are closed when the FileStorage is deleted def close(self) -> None: - """close the currently opened file""" + """Close the currently opened file.""" if self._ffmpeg is not None: self._logger.info(f"Close movie file `{self.filename}`") if self._state == "writing": @@ -167,12 +166,12 @@ def __exit__(self, exc_type, exc_value, exc_traceback): self.close() def clear(self): - """truncate the storage by removing all stored data.""" + """Truncate the storage by removing all stored data.""" if self.filename.exists(): self.filename.unlink() def _get_metadata(self) -> str: - """obtain metadata stored in the video""" + """Obtain metadata stored in the video.""" info = self.info.copy() info["version"] = 1 info["vmin"] = self.vmin @@ -181,7 +180,7 @@ def _get_metadata(self) -> str: return json.dumps(info) def _read_metadata(self) -> None: - """read metadata from video and store it in :attr:`info`""" + """Read metadata from video and store it in :attr:`info`""" import ffmpeg # lazy loading so it's not a hard dependence path = Path(self.filename) @@ -244,7 +243,7 @@ def _read_metadata(self) -> None: ) def _init_normalization(self, field: FieldBase) -> None: - """initialize the normalizations of the color information + """Initialize the normalizations of the color information. Args: field (:class:`~pde.fields.base.FieldBase): @@ -272,7 +271,7 @@ def _filename_times(self) -> Path: return self.filename.with_suffix(self.filename.suffix + ".times") def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: - """initialize the storage for writing data + """Initialize the storage for writing data. Args: field (:class:`~pde.fields.FieldBase`): @@ -366,7 +365,7 @@ def start_writing(self, field: FieldBase, info: InfoDict | None = None) -> None: self._state = "writing" def _append_data(self, data: np.ndarray, time: float) -> None: - """append a new data set + """Append a new data set. 
Args: data (:class:`~numpy.ndarray`): The actual data @@ -434,7 +433,7 @@ def _append_data(self, data: np.ndarray, time: float) -> None: self.info["num_frames"] += 1 def end_writing(self) -> None: - """finalize the storage after writing""" + """Finalize the storage after writing.""" if not self._state == "writing": self._logger.warning("Writing was already terminated") return # writing mode was already ended @@ -442,14 +441,14 @@ def end_writing(self) -> None: self.close() def __len__(self): - """return the number of stored items, i.e., time steps""" + """Return the number of stored items, i.e., time steps.""" if "num_frames" not in self.info: self._read_metadata() return self.info["num_frames"] @cached_property() def times(self): - """:class:`~numpy.ndarray`: The times at which data is available""" + """:class:`~numpy.ndarray`: The times at which data is available.""" times = None if "video_format" not in self.info: self._read_metadata() @@ -475,7 +474,7 @@ def times(self): return times def _iter_data(self) -> Iterator[np.ndarray]: - """iterate over all stored fields""" + """Iterate over all stored fields.""" import ffmpeg # lazy loading so it's not a hard dependence if "width" not in self.info: @@ -510,7 +509,7 @@ def _iter_data(self) -> Iterator[np.ndarray]: @property def data(self): - """:class:`~numpy.ndarray`: The actual data for all times""" + """:class:`~numpy.ndarray`: The actual data for all times.""" it = self._iter_data() # get the iterater of all data first_frame = next(it) # get the first frame to obtain necessary information # allocate memory for all data @@ -521,7 +520,7 @@ def data(self): return data def _get_field(self, t_index: int) -> FieldBase: - """return the field corresponding to the given time index + """Return the field corresponding to the given time index. Load the data given an index, i.e., the data at time `self.times[t_index]`. @@ -574,7 +573,7 @@ def _get_field(self, t_index: int) -> FieldBase: return field def __iter__(self) -> Iterator[FieldBase]: - """iterate over all stored fields""" + """Iterate over all stored fields.""" for data in self._iter_data(): # create the field with the data of the given index assert self._field is not None @@ -583,7 +582,7 @@ def __iter__(self) -> Iterator[FieldBase]: yield field def items(self) -> Iterator[tuple[float, FieldBase]]: - """iterate over all times and stored fields, returning pairs""" + """Iterate over all times and stored fields, returning pairs.""" yield from zip(self.times, self) @fill_in_docstring @@ -593,7 +592,7 @@ def tracker( # type: ignore *, transformation: Callable[[FieldBase, float], FieldBase] | None = None, ) -> StorageTracker: - """create object that can be used as a tracker to fill this storage + """Create object that can be used as a tracker to fill this storage. Args: interrupts: diff --git a/pde/tools/__init__.py b/pde/tools/__init__.py index 33e08a37..f06271ae 100644 --- a/pde/tools/__init__.py +++ b/pde/tools/__init__.py @@ -1,5 +1,4 @@ -""" -Package containing several tools required in py-pde +"""Package containing several tools required in py-pde. .. autosummary:: :nosignatures: @@ -21,5 +20,5 @@ spectral typing -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ diff --git a/pde/tools/cache.py b/pde/tools/cache.py index 89b1e3b2..a3597b0b 100644 --- a/pde/tools/cache.py +++ b/pde/tools/cache.py @@ -1,5 +1,4 @@ -""" -Functions, classes, and decorators for managing caches +"""Functions, classes, and decorators for managing caches. .. 
autosummary::
   :nosignatures:

@@ -33,7 +32,7 @@


 def objects_equal(a, b) -> bool:
-    """compares two objects to see whether they are equal
+    """Compares two objects to see whether they are equal.

     In particular, this uses :func:`numpy.array_equal` to check for numpy arrays

@@ -69,12 +68,12 @@ def objects_equal(a, b) -> bool:


 def _hash_iter(it: Iterable) -> int:
-    """get hash of an iterable but turning it into a tuple first"""
+    """Get hash of an iterable by turning it into a tuple first."""
     return hash(tuple(it))


 def hash_mutable(obj) -> int:
-    """return hash also for (nested) mutable objects.
+    """Return a hash also for (nested) mutable objects.

     Notes:
         This function might be a bit slow, since it iterates over all containers and
@@ -136,7 +135,7 @@ def hash_mutable(obj) -> int:


 def hash_readable(obj) -> str:
-    """return human readable hash also for (nested) mutable objects.
+    """Return a human-readable hash also for (nested) mutable objects.

     This function returns a JSON-like representation of the object. The function might
     be a bit slow, since it iterates over all containers and hashes objects recursively.
@@ -205,8 +204,8 @@ def hash_readable(obj) -> str:


 def make_serializer(method: SerializerMethod) -> Callable:
-    """returns a function that serialize data with the given method. Note that
-    some of the methods destroy information and cannot be reverted.
+    """Returns a function that serializes data with the given method. Note that some of
+    the methods destroy information and cannot be reverted.

     Args:
         method (str):
@@ -249,7 +248,7 @@ def make_serializer(method: SerializerMethod) -> Callable:


 def make_unserializer(method: SerializerMethod) -> Callable:
-    """returns a function that unserialize data with the given method
+    """Returns a function that unserializes data with the given method.

     This is the inverse function of :func:`make_serializer`.

@@ -259,7 +258,6 @@ def make_unserializer(method: SerializerMethod) -> Callable:

     Returns:
         callable: A function that unserializes objects
-
     """
     if callable(method):
         return method
@@ -291,7 +289,7 @@ def make_unserializer(method: SerializerMethod) -> Callable:


 class DictFiniteCapacity(collections.OrderedDict):
-    """cache with a limited number of items"""
+    """Cache with a limited number of items."""

     default_capacity: int = 100

@@ -300,7 +298,7 @@ def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

     def check_length(self):
-        """ensures that the dictionary does not grow beyond its capacity"""
+        """Ensures that the dictionary does not grow beyond its capacity."""
         while len(self) > self.capacity:
             self.popitem(last=False)

@@ -320,7 +318,7 @@ def update(self, values):


 class SerializedDict(collections.abc.MutableMapping):
-    """a key value database which is stored on the disk
+    """A key-value database which is stored on the disk.

     This class provides hooks for converting arbitrary keys and values to
     strings, which are then stored in the database.
@@ -332,7 +330,7 @@ def __init__(
         self,
         key_serialization: SerializerMethod = "pickle",
         value_serialization: SerializerMethod = "pickle",
         storage_dict: dict | None = None,
     ):
-        """provides a dictionary whose keys and values are serialized
+        """Provides a dictionary whose keys and values are serialized.
Args: key_serialization (str): @@ -393,7 +391,7 @@ def __iter__(self): class _class_cache: - """class handling the caching of results of methods and properties""" + """Class handling the caching of results of methods and properties.""" def __init__( self, @@ -404,8 +402,8 @@ def __init__( doc=None, name=None, ): - r"""decorator that caches calls in a dictionary attached to the instances. This - can be used with most classes + r"""Decorator that caches calls in a dictionary attached to the instances. This + can be used with most classes. Example: An example for using the class is:: @@ -495,11 +493,11 @@ def foo(self): self.factory = factory def _get_clear_cache_method(self): - """return a method that can be attached to classes to clear the cache - of the wrapped method""" + """Return a method that can be attached to classes to clear the cache of the + wrapped method.""" def clear_cache(obj) -> None: - """clears the cache associated with this method""" + """Clears the cache associated with this method.""" try: # try getting an initialized cache cache = obj._cache_methods[self.name] @@ -520,7 +518,7 @@ def clear_cache(obj) -> None: return clear_cache def _get_wrapped_function(self, func: TFunc) -> TFunc: - """return the wrapped method, which implements the cache""" + """Return the wrapped method, which implements the cache.""" if self.name is None: self.name = func.__name__ @@ -586,7 +584,7 @@ def wrapper(obj, *args, **kwargs): class cached_property(_class_cache): - r"""Decorator to use a method as a cached property + r"""Decorator to use a method as a cached property. The function is only called the first time and each successive call returns the cached result of the first call. @@ -614,7 +612,7 @@ def bar(self): """ def __call__(self, method): - """apply the cache decorator to the property""" + """Apply the cache decorator to the property.""" # save name, e.g., to be able to delete cache later self._cache_name = self.name self.clear_cache_of_obj = self._get_clear_cache_method() @@ -626,12 +624,12 @@ def __call__(self, method): return self def __get__(self, obj, owner): - """call the method to obtain the result for this property""" + """Call the method to obtain the result for this property.""" return self.func(obj) class cached_method(_class_cache): - r"""Decorator to enable caching of a method + r"""Decorator to enable caching of a method. The function is only called the first time and each successive call returns the cached result of the first call. @@ -657,7 +655,7 @@ def bar(self): """ def __call__(self, method: TFunc) -> TFunc: - """apply the cache decorator to the method""" + """Apply the cache decorator to the method.""" wrapper = self._get_wrapped_function(method) diff --git a/pde/tools/config.py b/pde/tools/config.py index 87e2e4ed..d4fbacae 100644 --- a/pde/tools/config.py +++ b/pde/tools/config.py @@ -1,5 +1,4 @@ -""" -Handles configuration variables of the package +"""Handles configuration variables of the package. .. 
autosummary:: :nosignatures: @@ -64,7 +63,7 @@ class Config(collections.UserDict): - """class handling the package configuration""" + """Class handling the package configuration.""" def __init__(self, items: dict[str, Any] | None = None, mode: str = "update"): """ @@ -89,7 +88,7 @@ def __init__(self, items: dict[str, Any] | None = None, mode: str = "update"): self.mode = mode def __getitem__(self, key: str): - """retrieve item `key`""" + """Retrieve item `key`""" parameter = self.data[key] if isinstance(parameter, Parameter): return parameter.convert() @@ -97,7 +96,7 @@ def __getitem__(self, key: str): return parameter def __setitem__(self, key: str, value): - """update item `key` with `value`""" + """Update item `key` with `value`""" if self.mode == "insert": self.data[key] = value @@ -117,14 +116,14 @@ def __setitem__(self, key: str, value): raise ValueError(f"Unsupported configuration mode `{self.mode}`") def __delitem__(self, key: str): - """removes item `key`""" + """Removes item `key`""" if self.mode == "insert": del self.data[key] else: raise RuntimeError("Configuration is not in `insert` mode") def to_dict(self) -> dict[str, Any]: - """convert the configuration to a simple dictionary + """Convert the configuration to a simple dictionary. Returns: dict: A representation of the configuration in a normal :class:`dict`. @@ -132,12 +131,12 @@ def to_dict(self) -> dict[str, Any]: return {k: v for k, v in self.items()} def __repr__(self) -> str: - """represent the configuration as a string""" + """Represent the configuration as a string.""" return f"{self.__class__.__name__}({repr(self.to_dict())})" @contextlib.contextmanager def __call__(self, values: dict[str, Any] | None = None, **kwargs): - """context manager temporarily changing the configuration + """Context manager temporarily changing the configuration. Args: values (dict): New configuration parameters @@ -156,7 +155,7 @@ def __call__(self, values: dict[str, Any] | None = None, **kwargs): def get_package_versions( packages: list[str], *, na_str="not available" ) -> dict[str, str]: - """tries to load certain python packages and returns their version + """Tries to load certain python packages and returns their version. Args: packages (list): The names of all packages @@ -177,7 +176,7 @@ def get_package_versions( def parse_version_str(ver_str: str) -> list[int]: - """helper function converting a version string into a list of integers""" + """Helper function converting a version string into a list of integers.""" result = [] for token in ver_str.split(".")[:3]: try: @@ -188,7 +187,7 @@ def parse_version_str(ver_str: str) -> list[int]: def check_package_version(package_name: str, min_version: str): - """checks whether a package has a sufficient version""" + """Checks whether a package has a sufficient version.""" msg = f"`{package_name}` version {min_version} required for py-pde" try: @@ -205,7 +204,7 @@ def check_package_version(package_name: str, min_version: str): def packages_from_requirements(requirements_file: Path | str) -> list[str]: - """read package names from a requirements file + """Read package names from a requirements file. Args: requirements_file (str or :class:`~pathlib.Path`): @@ -231,7 +230,7 @@ def packages_from_requirements(requirements_file: Path | str) -> list[str]: def environment() -> dict[str, Any]: - """obtain information about the compute environment + """Obtain information about the compute environment. 
Returns: dict: information about the python installation and packages diff --git a/pde/tools/cuboid.py b/pde/tools/cuboid.py index f08b986f..933b30e8 100644 --- a/pde/tools/cuboid.py +++ b/pde/tools/cuboid.py @@ -1,5 +1,4 @@ -""" -An n-dimensional, axes-aligned cuboid +"""An n-dimensional, axes-aligned cuboid. This module defines the :class:`Cuboid` class, which represents an n-dimensional cuboid that is aligned with the axes of a Cartesian coordinate system. @@ -18,10 +17,10 @@ class Cuboid: - """class that represents a cuboid in :math:`n` dimensions""" + """Class that represents a cuboid in :math:`n` dimensions.""" def __init__(self, pos, size, mutable: bool = True): - """defines a cuboid from a position and a size vector + """Defines a cuboid from a position and a size vector. Args: pos (list): @@ -59,7 +58,7 @@ def size(self, value: FloatNumerical): @property def corners(self) -> tuple[np.ndarray, np.ndarray]: - """return coordinates of two extreme corners defining the cuboid""" + """Return coordinates of two extreme corners defining the cuboid.""" return np.copy(self.pos), self.pos + self.size @property @@ -74,7 +73,7 @@ def mutable(self, value: bool): @classmethod def from_points(cls, p1: np.ndarray, p2: np.ndarray, **kwargs) -> Cuboid: - """create cuboid from two points + """Create cuboid from two points. Args: p1 (list): Coordinates of first corner point @@ -89,7 +88,7 @@ def from_points(cls, p1: np.ndarray, p2: np.ndarray, **kwargs) -> Cuboid: @classmethod def from_bounds(cls, bounds: np.ndarray, **kwargs) -> Cuboid: - """create cuboid from bounds + """Create cuboid from bounds. Args: bounds (list): Two dimensional array of axes bounds @@ -104,7 +103,7 @@ def from_bounds(cls, bounds: np.ndarray, **kwargs) -> Cuboid: def from_centerpoint( cls, centerpoint: np.ndarray, size: np.ndarray, **kwargs ) -> Cuboid: - """create cuboid from two points + """Create cuboid from two points. Args: centerpoint (list): Coordinates of the center @@ -126,7 +125,7 @@ def __repr__(self): ) def __add__(self, other: Cuboid) -> Cuboid: - """The sum of two cuboids is the minimal cuboid enclosing both""" + """The sum of two cuboids is the minimal cuboid enclosing both.""" if isinstance(other, Cuboid): if self.dim != other.dim: raise RuntimeError("Incompatible dimensions") @@ -138,7 +137,7 @@ def __add__(self, other: Cuboid) -> Cuboid: return NotImplemented def __eq__(self, other) -> bool: - """override the default equality test""" + """Override the default equality test.""" if not isinstance(other, self.__class__): return NotImplemented return np.all(self.pos == other.pos) and np.all(self.size == other.size) # type: ignore @@ -153,17 +152,17 @@ def bounds(self) -> tuple[tuple[float, float], ...]: @property def vertices(self) -> list[list[float]]: - """return the coordinates of all the corners""" + """Return the coordinates of all the corners.""" return list(itertools.product(*self.bounds)) # type: ignore @property def diagonal(self) -> float: - """returns the length of the diagonal""" + """Returns the length of the diagonal.""" return np.linalg.norm(self.size) # type: ignore @property def surface_area(self) -> float: - """surface area of a cuboid in :math:`n` dimensions. + """Surface area of a cuboid in :math:`n` dimensions. 
The surface area is the volume of the (:math:`n-1`)-dimensional hypercubes that bound the current cuboid: @@ -195,7 +194,7 @@ def volume(self) -> float: return np.prod(self.size) # type: ignore def buffer(self, amount: FloatNumerical = 0, inplace=False) -> Cuboid: - """dilate the cuboid by a certain amount in all directions""" + """Dilate the cuboid by a certain amount in all directions.""" amount = np.asarray(amount) if inplace: self.pos -= amount @@ -205,7 +204,7 @@ def buffer(self, amount: FloatNumerical = 0, inplace=False) -> Cuboid: return self.__class__(self.pos - amount, self.size + 2 * amount) def contains_point(self, points: np.ndarray) -> np.ndarray: - """returns a True when `points` are within the Cuboid + """Returns a True when `points` are within the Cuboid. Args: points (:class:`~numpy.ndarray`): List of point coordinates @@ -228,7 +227,7 @@ def contains_point(self, points: np.ndarray) -> np.ndarray: def asanyarray_flags(data: np.ndarray, dtype: DTypeLike = None, writeable: bool = True): - """turns data into an array and sets the respective flags. + """Turns data into an array and sets the respective flags. A copy is only made if necessary diff --git a/pde/tools/docstrings.py b/pde/tools/docstrings.py index f1e36841..888a5fc1 100644 --- a/pde/tools/docstrings.py +++ b/pde/tools/docstrings.py @@ -1,5 +1,4 @@ -""" -Methods for automatic transformation of docstrings +"""Methods for automatic transformation of docstrings. .. autosummary:: :nosignatures: @@ -109,7 +108,7 @@ def get_text_block(identifier: str) -> str: - """return a single text block + """Return a single text block. Args: identifier (str): The name of the text block @@ -127,7 +126,7 @@ def get_text_block(identifier: str) -> str: def replace_in_docstring( f: TFunc, token: str, value: str, docstring: str | None = None ) -> TFunc: - """replace a text in a docstring using the correct indentation + """Replace a text in a docstring using the correct indentation. Args: f (callable): The function with the docstring to handle @@ -141,7 +140,7 @@ def replace_in_docstring( # initialize textwrapper for formatting docstring def repl(matchobj) -> str: - """helper function replacing token in docstring""" + """Helper function replacing token in docstring.""" bare_text = textwrap.dedent(value).strip() return textwrap.indent(bare_text, matchobj.group(1)) @@ -160,7 +159,7 @@ def repl(matchobj) -> str: def fill_in_docstring(f: TFunc) -> TFunc: - """decorator that replaces text in the docstring of a function""" + """Decorator that replaces text in the docstring of a function.""" tw = textwrap.TextWrapper( width=80, expand_tabs=True, replace_whitespace=True, drop_whitespace=True ) @@ -168,7 +167,7 @@ def fill_in_docstring(f: TFunc) -> TFunc: for name, value in DOCSTRING_REPLACEMENTS.items(): def repl(matchobj) -> str: - """helper function replacing token in docstring""" + """Helper function replacing token in docstring.""" tw.initial_indent = tw.subsequent_indent = matchobj.group(1) return tw.fill(textwrap.dedent(value)) diff --git a/pde/tools/expressions.py b/pde/tools/expressions.py index ceed8aff..57e7dcc4 100644 --- a/pde/tools/expressions.py +++ b/pde/tools/expressions.py @@ -1,7 +1,6 @@ -""" -Handling mathematical expressions with sympy +"""Handling mathematical expressions with sympy. 
-This module provides classes representing expressions that can be provided as +This module provides classes representing expressions that can be provided as human-readable strings and are converted to :mod:`numpy` and :mod:`numba` representations using :mod:`sympy`. @@ -13,7 +12,7 @@ TensorExpression evaluate -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -57,7 +56,7 @@ def parse_number( expression: str | Number, variables: Mapping[str, Number] | None = None ) -> Number: - r"""return a number compiled from an expression + r"""Return a number compiled from an expression. Warning: {WARNING_EXEC} @@ -90,7 +89,7 @@ def parse_number( @vectorize() def _heaviside_implemention_ufunc(x1, x2): - """ufunc implementation of the Heaviside function used for numba and sympy + """Ufunc implementation of the Heaviside function used for numba and sympy. Args: x1 (float): Argument of the function @@ -110,7 +109,7 @@ def _heaviside_implemention_ufunc(x1, x2): def _heaviside_implemention(x1, x2): - """normal implementation of the Heaviside function used for numba and sympy + """Normal implementation of the Heaviside function used for numba and sympy. Args: x1 (float): Argument of the function @@ -126,7 +125,7 @@ def _heaviside_implemention(x1, x2): @overload(np.heaviside) def np_heaviside(x1, x2): - """numba implementation of the Heaviside function""" + """Numba implementation of the Heaviside function.""" return _heaviside_implemention @@ -136,7 +135,7 @@ def np_heaviside(x1, x2): class ListArrayPrinter(PythonCodePrinter): - """special sympy printer returning arrays as lists""" + """Special sympy printer returning arrays as lists.""" def _print_ImmutableDenseNDimArray(self, arr): arrays = ", ".join(f"{self._print(expr)}" for expr in arr) @@ -144,7 +143,7 @@ def _print_ImmutableDenseNDimArray(self, arr): class NumpyArrayPrinter(PythonCodePrinter): - """special sympy printer returning numpy arrays""" + """Special sympy printer returning numpy arrays.""" def _print_ImmutableDenseNDimArray(self, arr): arrays = ", ".join(f"asarray({self._print(expr)})" for expr in arr) @@ -152,7 +151,7 @@ def _print_ImmutableDenseNDimArray(self, arr): def parse_expr_guarded(expression: str, symbols=None, functions=None) -> basic.Basic: - """parse an expression using sympy with extra guards + """Parse an expression using sympy with extra guards. Args: expression (str): @@ -174,7 +173,7 @@ def parse_expr_guarded(expression: str, symbols=None, functions=None) -> basic.B local_dict = {} def fill_locals(element, sympy_cls): - """recursive function for obtaining all symbols""" + """Recursive function for obtaining all symbols.""" if isinstance(element, str): local_dict[element] = sympy_cls(element) elif hasattr(element, "__iter__"): @@ -190,7 +189,7 @@ def fill_locals(element, sympy_cls): # upper-case Heaviside, which is directly recognized by sympy. 
Down the line, this # allows easier handling of special cases def substitude(expr): - """helper function substituting expressions""" + """Helper function substituting expressions.""" if isinstance(expr, list): return [substitude(e) for e in expr] else: @@ -203,7 +202,7 @@ def substitude(expr): class ExpressionBase(metaclass=ABCMeta): - """abstract base class for handling expressions""" + """Abstract base class for handling expressions.""" @fill_in_docstring def __init__( @@ -268,7 +267,7 @@ def __repr__(self): ) def __eq__(self, other): - """compare this expression to another one""" + """Compare this expression to another one.""" if not isinstance(other, self.__class__): return NotImplemented # compare what the expressions depend on @@ -288,7 +287,7 @@ def __eq__(self, other): @property def _free_symbols(self) -> set: - """return symbols that appear in the expression and are not in self.consts""" + """Return symbols that appear in the expression and are not in self.consts.""" return { sym for sym in self._sympy_expr.free_symbols if sym.name not in self.consts } @@ -309,7 +308,7 @@ def shape(self) -> tuple[int, ...]: """tuple: the shape of the tensor""" def _check_signature(self, signature: Sequence[str | list[str]] | None = None): - """validate the variables of the expression against the signature""" + """Validate the variables of the expression against the signature.""" # get arguments of the expressions if self.constant: # constant expression do not depend on any variables @@ -370,7 +369,7 @@ def rank(self) -> int: return len(self.shape) def depends_on(self, variable: str) -> bool: - """determine whether the expression depends on `variable` + """Determine whether the expression depends on `variable` Args: variable (str): the name of the variable to check for @@ -389,7 +388,7 @@ def _get_function( user_funcs: dict[str, Callable] | None = None, prepare_compilation: bool = False, ) -> Callable[..., NumberOrArray]: - """return function evaluating expression + """Return function evaluating expression. Args: single_arg (bool): @@ -471,7 +470,7 @@ def result(*args): def _get_function_cached( self, single_arg: bool = False, prepare_compilation: bool = False ) -> Callable[..., NumberOrArray]: - """return function evaluating expression + """Return function evaluating expression. Args: single_arg (bool): @@ -487,12 +486,12 @@ def _get_function_cached( return self._get_function(single_arg, prepare_compilation=prepare_compilation) def __call__(self, *args, **kwargs) -> NumberOrArray: - """return the value of the expression for the given values""" + """Return the value of the expression for the given values.""" return self._get_function_cached(single_arg=False)(*args, **kwargs) @cached_method() def get_compiled(self, single_arg: bool = False) -> Callable[..., NumberOrArray]: - """return numba function evaluating expression + """Return numba function evaluating expression. Args: single_arg (bool): @@ -510,7 +509,7 @@ def get_compiled(self, single_arg: bool = False) -> Callable[..., NumberOrArray] class ScalarExpression(ExpressionBase): - """describes a mathematical expression of a scalar quantity""" + """Describes a mathematical expression of a scalar quantity.""" shape: tuple[int, ...] 
= tuple() @@ -606,7 +605,7 @@ def __init__( ) def copy(self) -> ScalarExpression: - """return a copy of the current expression""" + """Return a copy of the current expression.""" # __init__ copies all relevant attributes return self.__class__(self) @@ -639,7 +638,7 @@ def is_zero(self) -> bool: return self.constant and self.value == 0 def __bool__(self) -> bool: - """tests whether the expression is nonzero""" + """Tests whether the expression is nonzero.""" return not self.constant or self.value != 0 def __eq__(self, other): @@ -648,7 +647,7 @@ def __eq__(self, other): return super().__eq__(other) and self.allow_indexed == other.allow_indexed def _prepare_expression(self, expression: str) -> str: - """replace indexed variables, if allowed + """Replace indexed variables, if allowed. Args: expression (str): @@ -662,7 +661,7 @@ def _prepare_expression(self, expression: str) -> str: return expression def _var_indexed(self, var: str) -> bool: - """checks whether the variable `var` is used in an indexed form""" + """Checks whether the variable `var` is used in an indexed form.""" from sympy.tensor.indexed import Indexed return any( @@ -670,7 +669,7 @@ def _var_indexed(self, var: str) -> bool: ) def differentiate(self, var: str) -> ScalarExpression: - """return the expression differentiated with respect to var""" + """Return the expression differentiated with respect to var.""" if self.constant: # return empty expression return ScalarExpression( @@ -698,7 +697,7 @@ def differentiate(self, var: str) -> ScalarExpression: @cached_property() def derivatives(self) -> TensorExpression: - """differentiate the expression with respect to all variables""" + """Differentiate the expression with respect to all variables.""" if self.constant: # return empty expression dim = len(self.vars) @@ -718,7 +717,7 @@ def derivatives(self) -> TensorExpression: class TensorExpression(ExpressionBase): - """describes a mathematical expression of a tensorial quantity""" + """Describes a mathematical expression of a tensorial quantity.""" @fill_in_docstring def __init__( @@ -823,7 +822,7 @@ def __getitem__(self, index): @property def value(self): - """the value for a constant expression""" + """The value for a constant expression.""" if self.constant: try: # try simply evaluating the expression as a number @@ -845,7 +844,7 @@ def value(self): raise TypeError("Only constant expressions have a defined value") def differentiate(self, var: str) -> TensorExpression: - """return the expression differentiated with respect to var""" + """Return the expression differentiated with respect to var.""" if self.constant: derivative = np.zeros(self.shape) else: @@ -854,7 +853,7 @@ def differentiate(self, var: str) -> TensorExpression: @cached_property() def derivatives(self) -> TensorExpression: - """differentiate the expression with respect to all variables""" + """Differentiate the expression with respect to all variables.""" shape = (len(self.vars),) + self.shape if self.constant: @@ -870,7 +869,7 @@ def derivatives(self) -> TensorExpression: def get_compiled_array( self, single_arg: bool = True ) -> Callable[[np.ndarray, np.ndarray | None], np.ndarray]: - """compile the tensor expression such that a numpy array is returned + """Compile the tensor expression such that a numpy array is returned. Args: single_arg (bool): @@ -946,7 +945,7 @@ def evaluate( consts: dict[str, NumberOrArray] | None = None, label: str | None = None, ) -> DataFieldBase: - """evaluate an expression involving fields + """Evaluate an expression involving fields. 
    Warning:
        {WARNING_EXEC}
diff --git a/pde/tools/ffmpeg.py b/pde/tools/ffmpeg.py
index 1fcf36cd..1416cba0 100644
--- a/pde/tools/ffmpeg.py
+++ b/pde/tools/ffmpeg.py
@@ -1,5 +1,4 @@
-"""
-Functions for interacting with FFmpeg
+"""Functions for interacting with FFmpeg.

 .. autosummary::
    :nosignatures:

@@ -20,7 +19,7 @@

 @dataclass
 class FFmpegFormat:
-    """defines a FFmpeg format used for storing field data in a video
+    """Defines an FFmpeg format used for storing field data in a video.

     Note:
         All pixel formats supported by FFmpeg can be obtained by running
@@ -39,29 +38,29 @@ class FFmpegFormat:
     bits_per_channel: int
     """int: number of bits per color channel in this pixel format"""
     dtype: DTypeLike
-    """numpy dtype corresponding to the data of a single channel"""
+    """Numpy dtype corresponding to the data of a single channel."""
     codec: str = "ffv1"
     """str: name of the codec that supports this pixel format"""

     @property
     def bytes_per_channel(self) -> int:
-        """int:number of bytes per color channel"""
+        """int: number of bytes per color channel."""
         return self.bits_per_channel // 8

     @property
     def max_value(self) -> Union[float, int]:
-        """maximal value stored in a color channel"""
+        """Maximal value stored in a color channel."""
         if np.issubdtype(self.dtype, np.integer):
             return 2**self.bits_per_channel - 1  # type: ignore
         else:
             return 1.0

     def data_to_frame(self, normalized_data: np.ndarray) -> np.ndarray:
-        """converts normalized data to data being stored in a color channel"""
+        """Converts normalized data to data being stored in a color channel."""
         return np.ascontiguousarray(normalized_data * self.max_value, dtype=self.dtype)

     def data_from_frame(self, frame_data: np.ndarray):
-        """converts data stored in a color channel to normalized data"""
+        """Converts data stored in a color channel to normalized data."""
         return frame_data.astype(float) / self.max_value

@@ -135,11 +134,11 @@ def data_from_frame(self, frame_data: np.ndarray):
     # dtype=np.dtype(" Optional[str]:
-    """find a defined FFmpegFormat that satisifies the requirements
+    """Find a defined FFmpegFormat that satisfies the requirements.

     Args:
         channels (int):
diff --git a/pde/tools/math.py b/pde/tools/math.py
index 0c315d7b..bbbc5eb7 100644
--- a/pde/tools/math.py
+++ b/pde/tools/math.py
@@ -1,5 +1,4 @@
-"""
-Auxiliary mathematical functions
+"""Auxiliary mathematical functions.

 .. codeauthor:: David Zwicker
 """
@@ -17,7 +16,7 @@


 class SmoothData1D:
-    """allows smoothing data in 1d using a Gaussian kernel of defined width
+    """Allows smoothing data in 1d using a Gaussian kernel of defined width.

     The data is given as pairs of `x` and `y`, the assumption being that there is an
     underlying relation `y = f(x)`.
@@ -27,7 +26,7 @@ class SmoothData1D:
     """float: scale for setting automatic values for sigma"""

     def __init__(self, x, y, sigma: float | None = None):
-        """initialize with data
+        """Initialize with data.
        Args:
            x:
@@ -56,15 +55,15 @@ def __init__(self, x, y, sigma: float | None = None):

     @property
     def bounds(self) -> tuple[float, float]:
-        """return minimal and maximal `x` values"""
+        """Return minimal and maximal `x` values."""
         return float(self.x.min()), float(self.x.max())

     def __contains__(self, x: float) -> bool:
-        """checks whether the value `x` is contain in the range of x-values"""
+        """Checks whether the value `x` is contained in the range of x-values."""
         return self.x.min() <= x <= self.x.max()  # type: ignore

     def __call__(self, xs: ArrayLike) -> np.ndarray:
-        """return smoothed y values for the positions given in `xs`
+        """Return smoothed y values for the positions given in `xs`

         Args:
             xs (list of :class:`~numpy.ndarray`): the x-values
@@ -88,7 +87,7 @@ def __call__(self, xs: ArrayLike) -> np.ndarray:
         return result.reshape(shape)  # type: ignore

     def derivative(self, xs: ArrayLike) -> np.ndarray:
-        """return the derivative of the smoothed values for the positions `xs`
+        """Return the derivative of the smoothed values for the positions `xs`

         Note that this value
@@ -127,7 +126,7 @@
     ]
 )
 class OnlineStatistics:
-    """class for using an online algorithm for calculating statistics"""
+    """Class for using an online algorithm for calculating statistics."""

     mean: float
     """float: recorded mean"""
@@ -156,7 +155,7 @@ def std(self) -> float:
         return np.sqrt(self.var)  # type: ignore

     def add(self, value: float) -> None:
-        """add a value to the accumulator
+        """Add a value to the accumulator.

         Args:
             value (float): The value to add
@@ -169,7 +168,7 @@ def add(self, value: float) -> None:
         self._mean2 += delta * (value - self.mean)

     def to_dict(self) -> dict[str, Any]:
-        """return the information as a dictionary"""
+        """Return the information as a dictionary."""
         return {
             "min": self.min,
             "max": self.max,
diff --git a/pde/tools/misc.py b/pde/tools/misc.py
index d8a988ce..e405ded5 100644
--- a/pde/tools/misc.py
+++ b/pde/tools/misc.py
@@ -1,5 +1,4 @@
-"""
-Miscellaneous python functions
+"""Miscellaneous python functions.

 .. autosummary::
    :nosignatures:

@@ -42,7 +41,7 @@


 def module_available(module_name: str) -> bool:
-    """check whether a python module is available
+    """Check whether a python module is available.

     Args:
         module_name (str): The name of the module
@@ -59,7 +58,7 @@ def module_available(module_name: str) -> bool:


 def ensure_directory_exists(folder: str | Path):
-    """creates a folder if it not already exists
+    """Creates a folder if it does not already exist.

     Args:
         folder (str): path of the new folder
@@ -75,7 +74,7 @@ def ensure_directory_exists(folder: str | Path):


 def preserve_scalars(method: TFunc) -> TFunc:
-    """decorator that makes vectorized methods work with scalars
+    """Decorator that makes vectorized methods work with scalars.

     This decorator allows functions that are written to work on numpy arrays
     to also accept python scalars, like `int` and `float`. Essentially,
@@ -128,7 +127,7 @@ def new_decorator(*args, **kwargs):


 def skipUnlessModule(module_names: str | Sequence[str]) -> Callable[[TFunc], TFunc]:
-    """decorator that skips a test when a module is not available
+    """Decorator that skips a test when a module is not available.

     Args:
         module_names (str): The name of the required module(s)
@@ -159,7 +158,7 @@ def wrapper(f: TFunc) -> TFunc:


 def import_class(identifier: str):
-    """import a class or module given an identifier
+    """Import a class or module given an identifier.
Args: identifier (str): @@ -178,7 +177,7 @@ def import_class(identifier: str): class classproperty(property): - """decorator that can be used to define read-only properties for classes. + """Decorator that can be used to define read-only properties for classes. This is inspired by the implementation of :mod:`astropy`, see `astropy.org `_. @@ -243,9 +242,8 @@ def fget(obj): class hybridmethod: - """ - descriptor that can be used as a decorator to allow calling a method both - as a classmethod and an instance method + """Descriptor that can be used as a decorator to allow calling a method both as a + classmethod and an instance method. Adapted from https://stackoverflow.com/a/28238047 """ @@ -274,7 +272,7 @@ def __get__(self, instance, cls): def estimate_computation_speed(func: Callable, *args, **kwargs) -> float: - """estimates the computation speed of a function + """Estimates the computation speed of a function. Args: func (callable): The function to call @@ -309,7 +307,7 @@ def hdf_write_attributes( attributes: dict[str, Any] | None = None, raise_serialization_error: bool = False, ) -> None: - """write (JSON-serialized) attributes to a hdf file + """Write (JSON-serialized) attributes to a hdf file. Args: hdf_path: @@ -334,7 +332,7 @@ def hdf_write_attributes( def number(value: Number | str) -> Number: - """convert a value into a float or complex number + """Convert a value into a float or complex number. Args: value (Number or str): @@ -348,7 +346,7 @@ def number(value: Number | str) -> Number: def get_common_dtype(*args): - r"""returns a dtype in which all arguments can be represented + r"""Returns a dtype in which all arguments can be represented. Args: *args: All items (arrays, scalars, etc) to be checked @@ -364,7 +362,7 @@ def get_common_dtype(*args): def number_array( data: ArrayLike, dtype: DTypeLike = None, copy: bool | None = None ) -> np.ndarray: - """convert data into an array, assuming float numbers if no dtype is given + """Convert data into an array, assuming float numbers if no dtype is given. Args: data (:class:`~numpy.ndarray`): diff --git a/pde/tools/modelrunner.py b/pde/tools/modelrunner.py index 623926ee..00dea853 100644 --- a/pde/tools/modelrunner.py +++ b/pde/tools/modelrunner.py @@ -1,5 +1,4 @@ -""" -Establishes hooks for the interplay between :mod:`pde` and :mod:`modelrunner` +"""Establishes hooks for the interplay between :mod:`pde` and :mod:`modelrunner` This package is usually loaded automatically during import if :mod:`modelrunner` is available. In this case, grids and fields of :mod:`pde` can be directly written to @@ -19,7 +18,7 @@ # these actions are inherited by all subclasses by default def load_grid(storage: StorageBase, loc: Sequence[str]) -> GridBase: - """function loading a grid from a modelrunner storage + """Function loading a grid from a modelrunner storage. Args: storage (:class:`~modelrunner.storage.group.StorageGroup`): @@ -40,7 +39,7 @@ def load_grid(storage: StorageBase, loc: Sequence[str]) -> GridBase: def save_grid(storage: StorageBase, loc: Sequence[str], grid: GridBase) -> None: - """function saving a grid to a modelrunner storage + """Function saving a grid to a modelrunner storage. 
    Args:
        storage (:class:`~modelrunner.storage.group.StorageGroup`):
@@ -58,7 +57,7 @@ def save_grid(storage: StorageBase, loc: Sequence[str], grid: GridBase) -> None:

 # these actions are inherited by all subclasses by default
 def load_field(storage: StorageBase, loc: Sequence[str]) -> FieldBase:
-    """function loading a field from a modelrunner storage
+    """Function loading a field from a modelrunner storage.

     Args:
         storage (:class:`~modelrunner.storage.group.StorageGroup`):
@@ -79,7 +78,7 @@ def load_field(storage: StorageBase, loc: Sequence[str]) -> FieldBase:


 def save_field(storage: StorageBase, loc: Sequence[str], field: FieldBase) -> None:
-    """function saving a field to a modelrunner storage
+    """Function saving a field to a modelrunner storage.

     Args:
         storage (:class:`~modelrunner.storage.group.StorageGroup`):
diff --git a/pde/tools/mpi.py b/pde/tools/mpi.py
index eb6e37fe..dc9bb509 100644
--- a/pde/tools/mpi.py
+++ b/pde/tools/mpi.py
@@ -1,5 +1,4 @@
-"""
-Auxillary functions and variables for dealing with MPI multiprocessing
+"""Auxiliary functions and variables for dealing with MPI multiprocessing.

 Warning:
     These functions are mostly no-ops unless MPI is properly installed and python code
@@ -62,7 +61,7 @@
 rank = MPI.COMM_WORLD.rank


 class _OperatorRegistry:
-    """collection of operators that MPI supports"""
+    """Collection of operators that MPI supports."""

     _name_ids: dict[str, int]
     _ids_operators: dict[int, MPI.Op]
@@ -106,7 +105,7 @@ def __getattr__(self, name: str):


 def mpi_send(data, dest: int, tag: int) -> None:
-    """send data to another MPI node
+    """Send data to another MPI node.

     Args:
         data: The data being sent
         dest (int): The ID of the receiving node
         tag (int): A numeric tag identifying the message
@@ -118,11 +117,11 @@ def mpi_send(data, dest: int, tag: int) -> None:

 @overload(mpi_send)
 def ol_mpi_send(data, dest: int, tag: int):
-    """overload the `mpi_send` function"""
+    """Overload the `mpi_send` function."""
     import numba_mpi

     def impl(data, dest: int, tag: int) -> None:
-        """reduce a single number across all cores"""
+        """Send data to another MPI node."""
         status = numba_mpi.send(data, dest, tag)
         assert status == 0

@@ -130,30 +129,28 @@ def impl(data, dest: int, tag: int) -> None:


 def mpi_recv(data, source, tag) -> None:
-    """receive data from another MPI node
+    """Receive data from another MPI node.

     Args:
         data: A buffer into which the received data is written
         source (int): The ID of the sending node
         tag (int): A numeric tag identifying the message
-
     """
     data[...] = MPI.COMM_WORLD.recv(source=source, tag=tag)


 @overload(mpi_recv)
 def ol_mpi_recv(data, source: int, tag: int):
-    """overload the `mpi_recv` function"""
+    """Overload the `mpi_recv` function."""
     import numba_mpi

     def impl(data, source: int, tag: int) -> None:
-        """receive data from another MPI node
+        """Receive data from another MPI node.

         Args:
             data: A buffer into which the received data is written
             source (int): The ID of the sending node
             tag (int): A numeric tag identifying the message
-
         """
         status = numba_mpi.recv(data, source, tag)
         assert status == 0

@@ -162,7 +159,7 @@ def impl(data, source: int, tag: int) -> None:


 def mpi_allreduce(data, operator: int | str | None = None):
-    """combines data from all MPI nodes
+    """Combines data from all MPI nodes.

     Note that complex datatypes and user-defined functions are not properly supported.
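The wrappers above are written so that the same call site works both in plain Python
and inside jit-compiled code. Purely as an illustrative sketch (it assumes the script
is launched via ``mpirun`` and that the operator name ``"SUM"`` is registered in
``_OperatorRegistry``; neither is guaranteed by this diff)::

    import numpy as np

    from pde.tools import mpi

    # rank-dependent payload; `rank` is defined in this module (0 on the main node)
    data = np.full(3, mpi.rank, dtype=float)

    # element-wise combination of the arrays from all MPI nodes
    total = mpi.mpi_allreduce(data, operator="SUM")

    if mpi.rank == 0:
        print(total)  # with two nodes this prints [1. 1. 1.]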
@@ -184,7 +181,7 @@ def mpi_allreduce(data, operator: int | str | None = None): @overload(mpi_allreduce) def ol_mpi_allreduce(data, operator: int | str | None = None): - """overload the `mpi_allreduce` function""" + """Overload the `mpi_allreduce` function.""" import numba_mpi if operator is None or isinstance(operator, nb.types.NoneType): @@ -204,7 +201,7 @@ def ol_mpi_allreduce(data, operator: int | str | None = None): @register_jitable def _allreduce(sendobj, recvobj, operator: int | str | None = None) -> int: - """helper function that calls `numba_mpi.allreduce`""" + """Helper function that calls `numba_mpi.allreduce`""" if operator is None: return numba_mpi.allreduce(sendobj, recvobj) # type: ignore elif op_id is None: @@ -215,7 +212,7 @@ def _allreduce(sendobj, recvobj, operator: int | str | None = None) -> int: if isinstance(data, types.Number): def impl(data, operator: int | str | None = None): - """reduce a single number across all cores""" + """Reduce a single number across all cores.""" sendobj = np.array([data]) recvobj = np.empty((1,), sendobj.dtype) status = _allreduce(sendobj, recvobj, operator) @@ -225,7 +222,7 @@ def impl(data, operator: int | str | None = None): elif isinstance(data, types.Array): def impl(data, operator: int | str | None = None): - """reduce an array across all cores""" + """Reduce an array across all cores.""" recvobj = np.empty(data.shape, data.dtype) status = _allreduce(data, recvobj, operator) assert status == 0 diff --git a/pde/tools/numba.py b/pde/tools/numba.py index 8c218b0f..82dca624 100644 --- a/pde/tools/numba.py +++ b/pde/tools/numba.py @@ -1,5 +1,4 @@ -""" -Helper functions for just-in-time compilation with numba +"""Helper functions for just-in-time compilation with numba. .. codeauthor:: David Zwicker """ @@ -29,7 +28,7 @@ # for earlier version of numba, we need to define the function def is_jitted(function: Callable) -> bool: - """determine whether a function has already been jitted""" + """Determine whether a function has already been jitted.""" try: from numba.core.dispatcher import Dispatcher except ImportError: @@ -43,7 +42,7 @@ def is_jitted(function: Callable) -> bool: class Counter: - """helper class for implementing JIT_COUNT + """Helper class for implementing JIT_COUNT. We cannot use a simple integer for this, since integers are immutable, so if one imports JIT_COUNT from this module it would always stay at the fixed value it had @@ -82,7 +81,7 @@ def __repr__(self): def numba_environment() -> dict[str, Any]: - """return information about the numba setup used + """Return information about the numba setup used. Returns: (dict) information about the numba setup @@ -141,7 +140,7 @@ def f(): def flat_idx(arr: np.ndarray, i: int) -> Number: - """helper function allowing indexing of scalars as if they arrays + """Helper function allowing indexing of scalars as if they arrays. Args: arr @@ -154,7 +153,7 @@ def flat_idx(arr: np.ndarray, i: int) -> Number: @overload(flat_idx) def ol_flat_idx(arr, i): - """helper function allowing indexing of scalars as if they arrays""" + """Helper function allowing indexing of scalars as if they arrays.""" if isinstance(arr, nb.types.Number): return lambda arr, i: arr else: @@ -163,7 +162,7 @@ def ol_flat_idx(arr, i): @decorator_arguments def jit(function: TFunc, signature=None, parallel: bool = False, **kwargs) -> TFunc: - """apply nb.jit with predefined arguments + """Apply nb.jit with predefined arguments. 
    Args:
        function: The function which is jitted
@@ -207,7 +206,7 @@ def jit(function: TFunc, signature=None, parallel: bool = False, **kwargs) -> TF
 if nb.config.DISABLE_JIT:  # @UndefinedVariable
     # dummy function that creates a ctypes pointer
     def address_as_void_pointer(addr):
-        """returns a void pointer from a given memory address
+        """Returns a void pointer from a given memory address.

         Example:
             This can for instance be used together with `numba.carray`:
@@ -229,7 +228,7 @@ def address_as_void_pointer(addr):

     # actually useful function that creates a numba pointer
     @nb.extending.intrinsic
     def address_as_void_pointer(typingctx, src):
-        """returns a void pointer from a given memory address
+        """Returns a void pointer from a given memory address.

         Example:
             This can for instance be used together with `numba.carray`:
@@ -254,7 +253,7 @@ def codegen(cgctx, builder, sig, args):


 def make_array_constructor(arr: np.ndarray) -> Callable[[], np.ndarray]:
-    """returns an array within a jitted function using basic information
+    """Returns an array within a jitted function using basic information.

     Args:
         arr (:class:`~numpy.ndarray`): The array that should be accessible within jit
@@ -271,7 +270,7 @@ def make_array_constructor(arr: np.ndarray) -> Callable[[], np.ndarray]:

     @register_jitable
     def array_constructor() -> np.ndarray:
-        """helper that reconstructs the array from the pointer and structural info"""
+        """Helper that reconstructs the array from the pointer and structural info."""
         data: np.ndarray = nb.carray(address_as_void_pointer(data_addr), shape, dtype)
         if strides is not None:
             data = np.lib.stride_tricks.as_strided(data, shape, strides)
@@ -281,7 +280,7 @@ def array_constructor() -> np.ndarray:


 def numba_dict(data: dict[str, Any] | None = None) -> NumbaDict | None:
-    """converts a python dictionary to a numba typed dictionary"""
+    """Converts a python dictionary to a numba typed dictionary."""
     if data is None:
         return None
     nb_dict = NumbaDict()
@@ -291,7 +290,7 @@ def numba_dict(data: dict[str, Any] | None = None) -> NumbaDict | None:


 def get_common_numba_dtype(*args):
-    r"""returns a numba numerical type in which all arrays can be represented
+    r"""Returns a numba numerical type in which all arrays can be represented.

     Args:
         *args: All items to be tested
@@ -311,12 +310,12 @@ def get_common_numba_dtype(*args):

 @jit(nopython=True, nogil=True)
 def _random_seed_compiled(seed: int) -> None:
-    """sets the seed of the random number generator of numba"""
+    """Sets the seed of the random number generator of numba."""
     np.random.seed(seed)


 def random_seed(seed: int = 0) -> None:
-    """sets the seed of the random number generator of numpy and numba
+    """Sets the seed of the random number generator of numpy and numba.

     Args:
         seed (int): Sets random seed
diff --git a/pde/tools/output.py b/pde/tools/output.py
index 84c9d0dd..110c4c12 100644
--- a/pde/tools/output.py
+++ b/pde/tools/output.py
@@ -1,5 +1,4 @@
-"""
-Python functions for handling output
+"""Python functions for handling output.

 .. autosummary::
    :nosignatures:

@@ -22,7 +21,7 @@


 def get_progress_bar_class(fancy: bool = True):
-    """returns a class that behaves as progress bar.
+    """Returns a class that behaves as a progress bar.

     This either uses classes from the optional `tqdm` package or a simple version that
     writes dots to stderr, if that package is not available.
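To make the two entry points above concrete, a short usage sketch (the signatures are
taken from the hunks in this diff; the loop body itself is hypothetical)::

    from pde.tools.output import display_progress, get_progress_bar_class

    pbar_cls = get_progress_bar_class(fancy=False)  # plain dots-to-stderr variant

    total = 0
    for value in display_progress(range(10_000), total=10_000):
        total += value  # a tqdm-style bar is shown if available, a fallback otherwise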
@@ -51,8 +50,7 @@ def get_progress_bar_class(fancy: bool = True): def display_progress(iterator, total=None, enabled=True, **kwargs): - r""" - displays a progress bar when iterating + r"""Displays a progress bar when iterating. Args: iterator (iter): The iterator @@ -71,11 +69,11 @@ def display_progress(iterator, total=None, enabled=True, **kwargs): class OutputBase(metaclass=ABCMeta): - """base class for output management""" + """Base class for output management.""" @abstractmethod def __call__(self, line: str): - """add a line of text + """Add a line of text. Args: line (str): The text line @@ -83,11 +81,11 @@ def __call__(self, line: str): @abstractmethod def show(self): - """shows the actual text""" + """Shows the actual text.""" class BasicOutput(OutputBase): - """class that writes text line to stdout""" + """Class that writes text line to stdout.""" def __init__(self, stream=sys.stdout): """ @@ -104,7 +102,7 @@ def show(self): class JupyterOutput(OutputBase): - """class that writes text lines as html in a jupyter cell""" + """Class that writes text lines as html in a jupyter cell.""" def __init__(self, header: str = "", footer: str = ""): """ @@ -130,7 +128,7 @@ def show(self): def in_jupyter_notebook() -> bool: - """checks whether we are in a jupyter notebook""" + """Checks whether we are in a jupyter notebook.""" try: from IPython import display, get_ipython # @UnusedImport except ImportError: diff --git a/pde/tools/parameters.py b/pde/tools/parameters.py index 958d2197..d903d257 100644 --- a/pde/tools/parameters.py +++ b/pde/tools/parameters.py @@ -1,5 +1,4 @@ -""" -Infrastructure for managing classes with parameters +"""Infrastructure for managing classes with parameters. One aim is to allow easy management of inheritance of parameters. @@ -29,7 +28,7 @@ class Parameter: - """class representing a single parameter""" + """Class representing a single parameter.""" def __init__( self, @@ -40,7 +39,7 @@ def __init__( hidden: bool = False, extra: dict[str, Any] | None = None, ): - """initialize a parameter + """Initialize a parameter. Args: name (str): @@ -115,8 +114,8 @@ def __setstate__(self, state): self.__dict__.update(state) def convert(self, value=None): - """converts a `value` into the correct type for this parameter. If - `value` is not given, the default value is converted. + """Converts a `value` into the correct type for this parameter. If `value` is + not given, the default value is converted. Note that this does not make a copy of the values, which could lead to unexpected effects where the default value is changed by an instance. @@ -143,11 +142,11 @@ def convert(self, value=None): class DeprecatedParameter(Parameter): - """a parameter that can still be used normally but is deprecated""" + """A parameter that can still be used normally but is deprecated.""" class HideParameter: - """a helper class that allows hiding parameters of the parent classes""" + """A helper class that allows hiding parameters of the parent classes.""" def __init__(self, name: str): """ @@ -162,13 +161,13 @@ def __init__(self, name: str): class Parameterized: - """a mixin that manages the parameters of a class""" + """A mixin that manages the parameters of a class.""" parameters_default: ParameterListType = [] _subclasses: dict[str, type[Parameterized]] = {} def __init__(self, parameters: dict[str, Any] | None = None): - """initialize the parameters of the object + """Initialize the parameters of the object. 
Args: parameters (dict): @@ -188,7 +187,7 @@ def __init__(self, parameters: dict[str, Any] | None = None): ) def __init_subclass__(cls, **kwargs): # @NoSelf - """register all subclasses to reconstruct them later""" + """Register all subclasses to reconstruct them later.""" # normalize the parameters_default attribute if hasattr(cls, "parameters_default") and isinstance( cls.parameters_default, dict @@ -212,7 +211,7 @@ def get_parameters( include_deprecated: bool = False, sort: bool = True, ) -> dict[str, Parameter]: - """return a dictionary of parameters that the class supports + """Return a dictionary of parameters that the class supports. Args: include_hidden (bool): Include hidden parameters @@ -239,7 +238,7 @@ def get_parameters( # filter parameters based on hidden and deprecated flags def show(p): - """helper function to decide whether parameter will be shown""" + """Helper function to decide whether parameter will be shown.""" # show based on hidden flag? show1 = include_hidden or not p.hidden # show based on deprecated flag? @@ -263,7 +262,7 @@ def _parse_parameters( allow_hidden: bool = True, include_deprecated: bool = False, ) -> dict[str, Any]: - """parse parameters + """Parse parameters. Args: parameters (dict): @@ -307,7 +306,7 @@ def _parse_parameters( return result def get_parameter_default(self, name): - """return the default value for the parameter with `name` + """Return the default value for the parameter with `name` Args: name (str): The parameter name @@ -329,7 +328,7 @@ def _show_parameters( show_deprecated: bool = False, parameter_values: dict[str, Any] | None = None, ): - """private method showing all parameters in human readable format + """Private method showing all parameters in human readable format. Args: description (bool): @@ -409,7 +408,7 @@ def show_parameters( # @NoSelf show_hidden: bool = False, show_deprecated: bool = False, ): - """show all parameters in human readable format + """Show all parameters in human readable format. Args: description (bool): @@ -436,7 +435,7 @@ def show_parameters( show_deprecated: bool = False, default_value: bool = False, ): - """show all parameters in human readable format + """Show all parameters in human readable format. Args: description (bool): @@ -465,7 +464,7 @@ def show_parameters( def get_all_parameters(data: str = "name") -> dict[str, Any]: - """get a dictionary with all parameters of all registered classes + """Get a dictionary with all parameters of all registered classes. Args: data (str): @@ -493,7 +492,7 @@ def get_all_parameters(data: str = "name") -> dict[str, Any]: def sphinx_display_parameters(app, what, name, obj, options, lines): - """helper function to display parameters in sphinx documentation + """Helper function to display parameters in sphinx documentation. Example: This function should be connected to the 'autodoc-process-docstring' diff --git a/pde/tools/parse_duration.py b/pde/tools/parse_duration.py index 13fe90d1..83dc9a1a 100644 --- a/pde/tools/parse_duration.py +++ b/pde/tools/parse_duration.py @@ -1,5 +1,4 @@ -""" -Parsing time durations from strings +"""Parsing time durations from strings. This module provides a function that parses time durations from strings. 
It has been copied from the django software, which comes with the following notes: diff --git a/pde/tools/plotting.py b/pde/tools/plotting.py index bed61d57..441d449f 100644 --- a/pde/tools/plotting.py +++ b/pde/tools/plotting.py @@ -1,5 +1,4 @@ -""" -Tools for plotting and controlling plot output using context managers +"""Tools for plotting and controlling plot output using context managers. .. autosummary:: :nosignatures: @@ -45,7 +44,7 @@ def add_scaled_colorbar( label: str = "", **kwargs, ): - """add a vertical color bar to an image plot + """Add a vertical color bar to an image plot. The height of the colorbar is now adjusted to the plot, so that the width determined by `aspect` is now given relative to the height. Moreover, the @@ -76,10 +75,8 @@ def add_scaled_colorbar( from mpl_toolkits import axes_grid1 class _AxesXY(axes_grid1.axes_size._Base): - """ - Scaled size whose relative part corresponds to the maximum of the data width and - data height of the *axes* multiplied by the *aspect*. - """ + """Scaled size whose relative part corresponds to the maximum of the data width + and data height of the *axes* multiplied by the *aspect*.""" def __init__(self, axes, aspect=1.0): self._axes = axes @@ -127,7 +124,7 @@ def get_size(self, renderer): class nested_plotting_check: - """context manager that checks whether it is the root plotting call + """Context manager that checks whether it is the root plotting call. Example: The context manager can be used in plotting calls to check for nested @@ -137,7 +134,6 @@ class nested_plotting_check: make_plot(...) # could potentially call other plotting methods if is_outermost_plot_call: plt.show() - """ _is_plotting = False # class variable keeping track of nesting @@ -157,7 +153,7 @@ def __exit__(self, *exc): @contextlib.contextmanager def disable_interactive(): - """context manager disabling the interactive mode of matplotlib + """Context manager disabling the interactive mode of matplotlib. This context manager restores the previous state after it is done. Details of the interactive mode are described in :func:`matplotlib.interactive`. @@ -176,7 +172,7 @@ def disable_interactive(): class PlotReference: - """contains all information to update a plot element""" + """Contains all information to update a plot element.""" __slots__ = ["ax", "element", "parameters"] @@ -196,7 +192,7 @@ def __init__(self, ax, element: Any, parameters: dict[str, Any] | None = None): def plot_on_axes(wrapped=None, update_method=None): - """decorator for a plot method or function that uses a single axes + """Decorator for a plot method or function that uses a single axes. This decorator adds typical options for creating plots that fill a single axes. These options are available via keyword arguments. To avoid redundancy in describing @@ -251,9 +247,9 @@ def wrapper( ax=None, **kwargs, ): - """ - title (str): - Title of the plot. If omitted, the title might be chosen automatically. + """Title (str): + + Title of the plot. If omitted, the title might be chosen automatically. filename (str, optional): If given, the plot is written to the specified file. action (str): @@ -392,7 +388,7 @@ def wrapper( def plot_on_figure(wrapped=None, update_method=None): - """decorator for a plot method or function that fills an entire figure + """Decorator for a plot method or function that fills an entire figure. This decorator adds typical options for creating plots that fill an entire figure. This decorator adds typical options for creating plots that fill a single axes. 
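Before the wrapper internals continue below, a sketch of how user code might adopt
:func:`plot_on_axes` (a hypothetical example; it assumes the decorator injects the
``ax`` argument and accepts the ``title`` and ``action`` keywords documented in the
wrapper above)::

    import numpy as np

    from pde.tools.plotting import plot_on_axes

    @plot_on_axes
    def plot_wave(ax, wavenumber: float = 1.0):
        """Plot a sine wave onto the supplied axes."""
        xs = np.linspace(0, 2 * np.pi, 101)
        ax.plot(xs, np.sin(wavenumber * xs))

    plot_wave(wavenumber=2.0, title="Sine wave", action="show")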
@@ -455,9 +451,9 @@ def wrapper( fig=None, **kwargs, ): - """ - title (str): - Title of the plot. If omitted, the title might be chosen automatically. + """title (str): + + Title of the plot. If omitted, the title might be chosen automatically. This is shown above all panels. constrained_layout (bool): Whether to use `constrained_layout` in :func:`matplotlib.pyplot.figure` call @@ -563,7 +559,7 @@ def wrapper( class PlottingContextBase: - """base class of the plotting contexts + """Base class of the plotting contexts. Example: The context wraps calls to the :mod:`matplotlib.pyplot` interface:: @@ -575,8 +571,8 @@ class PlottingContextBase: """ supports_update: bool = True - """ flag indicating whether the context supports that plots can be updated - with out redrawing the entire plot """ + """Flag indicating whether the context supports that plots can be updated without + redrawing the entire plot.""" fig: mpl_figure.Figure | None @@ -629,7 +625,7 @@ def __exit__(self, *exc): self._title.set_text(self.title) def close(self): - """close the plot""" + """Close the plot.""" # close matplotlib figure if self.fig is not None: import matplotlib.pyplot as plt @@ -638,7 +634,7 @@ def close(self): class BasicPlottingContext(PlottingContextBase): - """basic plotting using just matplotlib""" + """Basic plotting using just matplotlib.""" def __init__(self, fig_or_ax=None, title: str | None = None, show: bool = True): """ @@ -676,12 +672,15 @@ def __exit__(self, *exc): class JupyterPlottingContext(PlottingContextBase): - """plotting in a jupyter widget using the `inline` backend""" + """Plotting in a jupyter widget using the `inline` backend.""" supports_update = False - """ flag indicating whether the context supports that plots can be updated - with out redrawing the entire plot. The jupyter backend (`inline`) requires - replotting of the entire figure, so an update is not supported.""" + """Flag indicating whether the context supports that plots can be updated without + redrawing the entire plot. + + The jupyter backend (`inline`) requires + replotting of the entire figure, so an update is not supported. + """ def __enter__(self): from IPython.display import display @@ -722,7 +721,7 @@ def __exit__(self, *exc): plt.close(self.fig) def close(self): - """close the plot""" + """Close the plot.""" super().close() # close ipython output try: @@ -734,7 +733,7 @@ def close(self): def get_plotting_context( context=None, title: str | None = None, show: bool = True ) -> PlottingContextBase: - """returns a suitable plotting context + """Returns a suitable plotting context. Args: context: @@ -789,7 +788,7 @@ def get_plotting_context( def in_ipython() -> bool: - """try to detect whether we are in an ipython shell, e.g., a jupyter notebook""" + """Try to detect whether we are in an ipython shell, e.g., a jupyter notebook.""" ipy_module = sys.modules.get("IPython") if ipy_module: return bool(ipy_module.get_ipython()) @@ -801,7 +800,7 @@ def in_ipython() -> bool: def napari_viewer( grid: GridBase, run: bool | None = None, close: bool = False, **kwargs ) -> Generator[napari.viewer.Viewer, None, None]: - """creates an napari viewer for interactive plotting + """Creates a napari viewer for interactive plotting.
Args: grid (:class:`pde.grids.base.GridBase`): diff --git a/pde/tools/spectral.py b/pde/tools/spectral.py index be7a6775..dd44b67c 100644 --- a/pde/tools/spectral.py +++ b/pde/tools/spectral.py @@ -1,5 +1,4 @@ -""" -Functions making use of spectral decompositions +"""Functions making use of spectral decompositions. .. autosummary:: :nosignatures: @@ -30,7 +29,7 @@ def make_colored_noise( scale: float = 1, rng: np.random.Generator | None = None, ) -> Callable[[], np.ndarray]: - r"""Return a function creating an array of random values that obey + r"""Return a function creating an array of random values that obey. .. math:: \langle c(\boldsymbol k) c(\boldsymbol k') \rangle = @@ -67,7 +66,7 @@ def make_colored_noise( if exponent == 0: # fast case of white noise def noise_normal(): - """return array of colored noise""" + """Return array of colored noise.""" return scale * rng.normal(size=shape) return noise_normal @@ -89,7 +88,7 @@ def noise_normal(): scaling.flat[0] = 0 def noise_colored() -> np.ndarray: - """return array of colored noise""" + """Return array of colored noise.""" # random field arr: np.ndarray = rng.normal(size=shape) diff --git a/pde/tools/typing.py b/pde/tools/typing.py index ce8f93b6..7ea328ed 100644 --- a/pde/tools/typing.py +++ b/pde/tools/typing.py @@ -1,5 +1,4 @@ -""" -Provides support for mypy type checking of the package +"""Provides support for mypy type checking of the package. .. codeauthor:: David Zwicker """ @@ -22,36 +21,36 @@ class OperatorType(Protocol): - """an operator that acts on an array""" + """An operator that acts on an array.""" def __call__(self, arr: np.ndarray, out: np.ndarray) -> None: - """evaluate the operator""" + """Evaluate the operator.""" class OperatorFactory(Protocol): - """a factory function that creates an operator for a particular grid""" + """A factory function that creates an operator for a particular grid.""" def __call__(self, grid: GridBase, **kwargs) -> OperatorType: - """create the operator""" + """Create the operator.""" class CellVolume(Protocol): def __call__(self, *args: int) -> float: - """calculate the volume of the cell at the given position""" + """Calculate the volume of the cell at the given position.""" class VirtualPointEvaluator(Protocol): def __call__(self, arr: np.ndarray, idx: tuple[int, ...], args=None) -> float: - """evaluate the virtual point at the given position""" + """Evaluate the virtual point at the given position.""" class AdjacentEvaluator(Protocol): def __call__( self, arr_1d: np.ndarray, i_point: int, bc_idx: tuple[int, ...] ) -> float: - """evaluate the values at adjecent points""" + """Evaluate the values at adjacent points.""" class GhostCellSetter(Protocol): def __call__(self, data_full: np.ndarray, args=None) -> None: - """set the ghost cells""" + """Set the ghost cells.""" diff --git a/pde/trackers/__init__.py b/pde/trackers/__init__.py index aaec6551..5651827c 100644 --- a/pde/trackers/__init__.py +++ b/pde/trackers/__init__.py @@ -1,5 +1,4 @@ -""" -Classes for tracking simulation results in controlled interrupts +"""Classes for tracking simulation results in controlled interrupts. Trackers are classes that periodically receive the state of the simulation to analyze, store, or output it. The trackers defined in this module are: diff --git a/pde/trackers/base.py b/pde/trackers/base.py index e63a2da1..838b5d11 100644 --- a/pde/trackers/base.py +++ b/pde/trackers/base.py @@ -1,5 +1,4 @@ -""" -Base classes for trackers +"""Base classes for trackers. ..
codeauthor:: David Zwicker """ @@ -25,11 +24,11 @@ class FinishedSimulation(StopIteration): - """exception for signaling that simulation finished successfully""" + """Exception for signaling that simulation finished successfully.""" class TrackerBase(metaclass=ABCMeta): - """base class for implementing trackers""" + """Base class for implementing trackers.""" _subclasses: dict[str, type[TrackerBase]] = {} # all inheriting classes @@ -51,7 +50,7 @@ def __init__(self, interrupts: InterruptData = 1, *, interval=None): self._logger = logging.getLogger(self.__class__.__name__) def __init_subclass__(cls, **kwargs): # @NoSelf - """register all subclassess to reconstruct them later""" + """Register all subclasses to reconstruct them later.""" super().__init_subclass__(**kwargs) if hasattr(cls, "name"): assert cls.name != "auto" @@ -59,7 +58,7 @@ def __init_subclass__(cls, **kwargs): # @NoSelf @classmethod def from_data(cls, data: TrackerDataType, **kwargs) -> TrackerBase: - """create tracker class from given data + """Create tracker class from given data. Args: data (str or TrackerBase): Data describing the tracker @@ -80,7 +79,7 @@ def from_data(cls, data: TrackerDataType, **kwargs) -> TrackerBase: raise ValueError(f"Unsupported tracker format: `{data}`.") def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: - """initialize the tracker with information about the simulation + """Initialize the tracker with information about the simulation. Args: field (:class:`~pde.fields.FieldBase`): @@ -99,7 +98,7 @@ def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: @abstractmethod def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -109,7 +108,7 @@ def handle(self, field: FieldBase, t: float) -> None: """ def finalize(self, info: InfoDict | None = None) -> None: - """finalize the tracker, supplying additional information + """Finalize the tracker, supplying additional information. Args: info (dict): @@ -121,7 +120,7 @@ def finalize(self, info: InfoDict | None = None) -> None: class TrackerCollection: - """List of trackers providing methods to handle them efficiently + """List of trackers providing methods to handle them efficiently. Attributes: trackers (list): @@ -150,12 +149,12 @@ def __init__(self, trackers: list[TrackerBase] | None = None): self.time_next_action = math.inf def __len__(self) -> int: - """returns the number of trackers in the collection""" + """Returns the number of trackers in the collection.""" return len(self.trackers) @classmethod def from_data(cls, data: TrackerCollectionDataType, **kwargs) -> TrackerCollection: - """create tracker collection from given data + """Create tracker collection from given data. Args: data: Data describing the tracker collection @@ -196,7 +195,7 @@ def from_data(cls, data: TrackerCollectionDataType, **kwargs) -> TrackerCollecti return cls(trackers) def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: - """initialize the tracker with information about the simulation + """Initialize the tracker with information about the simulation. Args: field (:class:`~pde.fields.FieldBase`): @@ -221,7 +220,7 @@ def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: return self.time_next_action def handle(self, state: FieldBase, t: float, atol: float = 1.0e-8) -> float: - """handle all trackers + """Handle all trackers.
Args: state (:class:`~pde.fields.FieldBase`): @@ -260,7 +259,7 @@ def handle(self, state: FieldBase, t: float, atol: float = 1.0e-8) -> float: return self.time_next_action def finalize(self, info: InfoDict | None = None) -> None: - """finalize the tracker, supplying additional information + """Finalize the tracker, supplying additional information. Args: info (dict): @@ -271,7 +270,7 @@ def finalize(self, info: InfoDict | None = None) -> None: def get_named_trackers() -> dict[str, type[TrackerBase]]: - """returns all named trackers + """Returns all named trackers. Returns: dict: a mapping of names to the actual tracker classes. diff --git a/pde/trackers/interactive.py b/pde/trackers/interactive.py index bf531ec5..68b7bbaa 100644 --- a/pde/trackers/interactive.py +++ b/pde/trackers/interactive.py @@ -1,5 +1,5 @@ -""" -Special module for defining an interactive tracker that uses napari to display fields +"""Special module for defining an interactive tracker that uses napari to display +fields. .. codeauthor:: David Zwicker """ @@ -77,7 +77,7 @@ def napari_process( label = None def check_signal(msg: str | None): - """helper function that processes messages by the listener thread""" + """Helper function that processes messages by the listener thread.""" if msg is None: return # do nothing elif msg == "close": @@ -87,7 +87,7 @@ def check_signal(msg: str | None): @thread_worker(connect={"yielded": check_signal}) def update_listener(): - """helper thread that listens to the data_channel""" + """Helper thread that listens to the data_channel.""" logger.info("Start napari thread to receive data") # infinite loop waiting for events in the queue @@ -132,7 +132,7 @@ def update_listener(): class NapariViewer: - """allows viewing and updating data in a separate napari process""" + """Allows viewing and updating data in a separate napari process.""" def __init__(self, state: FieldBase, t_initial: float | None = None): """ @@ -180,7 +180,7 @@ def __init__(self, state: FieldBase, t_initial: float | None = None): self._logger.exception("Could not launch napari process") def update(self, state: FieldBase, t: float): - """update the state in the napari viewer + """Update the state in the napari viewer. Args: state (:class:`pde.fields.base.FieldBase`): The new state @@ -199,7 +199,7 @@ def update(self, state: FieldBase, t: float): pass def close(self, force: bool = True): - """closes the napari process + """Closes the napari process. Args: force (bool): @@ -221,7 +221,7 @@ def close(self, force: bool = True): class InteractivePlotTracker(TrackerBase): - """Tracker showing the state interactively in napari + """Tracker showing the state interactively in napari. Note: The interactive tracker uses the python :mod:`multiprocessing` module to run @@ -242,7 +242,6 @@ def main(): The last two lines ensure that the `main` function is only called when the module is run initially and not again when it is re-imported. - """ name = "interactive" @@ -272,7 +271,7 @@ def __init__( self.show_time = show_time def initialize(self, state: FieldBase, info: InfoDict | None = None) -> float: - """initialize the tracker with information about the simulation + """Initialize the tracker with information about the simulation. 
Args: state (:class:`~pde.fields.FieldBase`): @@ -292,7 +291,7 @@ def initialize(self, state: FieldBase, info: InfoDict | None = None) -> float: return super().initialize(state, info=info) def handle(self, state: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: state (:class:`~pde.fields.FieldBase`): @@ -303,7 +302,7 @@ def handle(self, state: FieldBase, t: float) -> None: self._viewer.update(state, t) def finalize(self, info: InfoDict | None = None) -> None: - """finalize the tracker, supplying additional information + """Finalize the tracker, supplying additional information. Args: info (dict): diff --git a/pde/trackers/interrupts.py b/pde/trackers/interrupts.py index 6b2fec84..2d2ed1ff 100644 --- a/pde/trackers/interrupts.py +++ b/pde/trackers/interrupts.py @@ -1,5 +1,4 @@ -""" -Module defining classes for time interrupts for trackers +"""Module defining classes for time interrupts for trackers. The provided interrupt classes are: @@ -11,7 +10,7 @@ LogarithmicInterrupts RealtimeInterrupts -.. codeauthor:: David Zwicker +.. codeauthor:: David Zwicker """ from __future__ import annotations @@ -32,18 +31,18 @@ class InterruptsBase(metaclass=ABCMeta): - """base class for implementing interrupts""" + """Base class for implementing interrupts.""" dt: float """float: current time difference between interrupts""" @abstractmethod def copy(self: TInterrupt) -> TInterrupt: - """return a copy of this instance""" + """Return a copy of this instance.""" @abstractmethod def initialize(self, t: float) -> float: - """initialize the interrupt class + """Initialize the interrupt class. Args: t (float): The starting time of the simulation @@ -54,7 +53,7 @@ def initialize(self, t: float) -> float: @abstractmethod def next(self, t: float) -> float: - """computes the next time point + """Computes the next time point. Args: t (float): @@ -67,7 +66,7 @@ def next(self, t: float) -> float: class FixedInterrupts(InterruptsBase): - """class representing a list of interrupt times""" + """Class representing a list of interrupt times.""" def __init__(self, interrupts: np.ndarray | Sequence[float]): self.interrupts = np.atleast_1d(interrupts) @@ -109,7 +108,7 @@ def next(self, t: float) -> float: class ConstantInterrupts(InterruptsBase): - """class representing equidistantly spaced time interrupts""" + """Class representing equidistantly spaced time interrupts.""" def __init__(self, dt: float = 1, t_start: float | None = None): """ @@ -156,7 +155,7 @@ def next(self, t: float) -> float: class LogarithmicInterrupts(ConstantInterrupts): - """class representing logarithmically spaced time interrupts""" + """Class representing logarithmically spaced time interrupts.""" def __init__( self, dt_initial: float = 1, factor: float = 1, t_start: float | None = None @@ -190,7 +189,7 @@ def next(self, t: float) -> float: class RealtimeInterrupts(ConstantInterrupts): - """class representing time interrupts spaced equidistantly in real time + """Class representing time interrupts spaced equidistantly in real time. This spacing is only achieved approximately and depends on the initial value set by `dt_initial` and the actual variation in computation speed. @@ -248,7 +247,7 @@ def next(self, t: float) -> float: def parse_interrupt(data: InterruptData) -> InterruptsBase: - """create interrupt class from various data formats + """Create interrupt class from various data formats. 
Args: data (str or number or :class:`InterruptsBase`): diff --git a/pde/trackers/trackers.py b/pde/trackers/trackers.py index cb8730ae..0fae0065 100644 --- a/pde/trackers/trackers.py +++ b/pde/trackers/trackers.py @@ -1,5 +1,4 @@ -""" -Module defining classes for tracking results from simulations. +"""Module defining classes for tracking results from simulations. The trackers defined in this module are: @@ -49,7 +48,7 @@ class CallbackTracker(TrackerBase): - """Tracker calling a function periodically + """Tracker calling a function periodically. Example: The callback tracker can be used to check for conditions during the simulation: @@ -99,7 +98,7 @@ def __init__( ) def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -114,7 +113,7 @@ def handle(self, field: FieldBase, t: float) -> None: class ProgressTracker(TrackerBase): - """Tracker showing the progress of the simulation""" + """Tracker showing the progress of the simulation.""" name = "progress" @@ -152,7 +151,7 @@ def __init__( self.leave = leave def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: - """initialize the tracker with information about the simulation + """Initialize the tracker with information about the simulation. Args: field (:class:`~pde.fields.FieldBase`): @@ -180,7 +179,7 @@ def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: return result def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -197,7 +196,7 @@ def handle(self, field: FieldBase, t: float) -> None: self.progress_bar.set_description("") def finalize(self, info: InfoDict | None = None) -> None: - """finalize the tracker, supplying additional information + """Finalize the tracker, supplying additional information. Args: info (dict): @@ -262,7 +261,7 @@ def __init__( self.stream = stream def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -277,7 +276,7 @@ def handle(self, field: FieldBase, t: float) -> None: class PlotTracker(TrackerBase): - """Tracker plotting data on screen, to files, or writes a movie + """Tracker plotting data on screen, to files, or writes a movie. This tracker can be used to create movies from simulations or to simply update a single image file on the fly (i.e. to monitor simulations running on a cluster). The @@ -401,7 +400,7 @@ def __init__( self.show = show def initialize(self, state: FieldBase, info: InfoDict | None = None) -> float: - """initialize the tracker with information about the simulation + """Initialize the tracker with information about the simulation. Args: state (:class:`~pde.fields.FieldBase`): @@ -455,7 +454,7 @@ def initialize(self, state: FieldBase, info: InfoDict | None = None) -> float: return super().initialize(state, info=info) def handle(self, state: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. 
Args: state (:class:`~pde.fields.FieldBase`): @@ -512,7 +511,7 @@ def handle(self, state: FieldBase, t: float) -> None: self._last_update = time.monotonic() def finalize(self, info: InfoDict | None = None) -> None: - """finalize the tracker, supplying additional information + """Finalize the tracker, supplying additional information. Args: info (dict): @@ -530,7 +529,7 @@ def finalize(self, info: InfoDict | None = None) -> None: class LivePlotTracker(PlotTracker): - """PlotTracker with defaults for live plotting + """PlotTracker with defaults for live plotting. The only difference to :class:`PlotTracker` are the changed default values, where output is by default shown on screen and the `interval` is set something more @@ -591,7 +590,7 @@ def __init__( class DataTracker(CallbackTracker): - """Tracker storing custom data obtained by calling a function + """Tracker storing custom data obtained by calling a function. Example: The data tracker can be used to gather statistics during the run @@ -653,7 +652,7 @@ def __init__( self.data: list[Any] = [] def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -668,7 +667,7 @@ def handle(self, field: FieldBase, t: float) -> None: self.data.append(self._callback(field, t)) def finalize(self, info: InfoDict | None = None) -> None: - """finalize the tracker, supplying additional information + """Finalize the tracker, supplying additional information. Args: info (dict): @@ -680,7 +679,7 @@ def finalize(self, info: InfoDict | None = None) -> None: @property def dataframe(self) -> pandas.DataFrame: - """:class:`pandas.DataFrame`: the data in a dataframe + """:class:`pandas.DataFrame`: the data in a dataframe. If `func` returns a dictionary, the keys are used as column names. Otherwise, the returned data is enumerated starting with '0'. In any @@ -695,7 +694,7 @@ def dataframe(self) -> pandas.DataFrame: return df def to_file(self, filename: str, **kwargs): - r"""store data in a file + r"""Store data in a file. The extension of the filename determines what format is being used. For instance, '.pickle' indicates a python pickle file storing a tuple @@ -726,7 +725,7 @@ def to_file(self, filename: str, **kwargs): class SteadyStateTracker(TrackerBase): - """Tracker aborting the simulation once steady state is reached + """Tracker aborting the simulation once steady state is reached. Steady state is obtained when the state does not change anymore, i.e., when the evolution rate is close to zero. If the argument `evolution_rate` is specified, it @@ -742,7 +741,7 @@ class SteadyStateTracker(TrackerBase): progress_bar_format = ( "Convergence: {percentage:3.0f}%|{bar}| [{elapsed}<{remaining}]" ) - """ determines the format of the progress bar shown when `progress = True` """ + """Determines the format of the progress bar shown when `progress = True`""" @fill_in_docstring def __init__( @@ -789,7 +788,7 @@ def __init__( self._best_rate_max: np.ndarray | None = None def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. 
Args: field (:class:`~pde.fields.FieldBase`): @@ -855,7 +854,7 @@ def handle(self, field: FieldBase, t: float) -> None: class RuntimeTracker(TrackerBase): - """Tracker interrupting the simulation once a duration has passed""" + """Tracker interrupting the simulation once a duration has passed.""" @fill_in_docstring def __init__( self, @@ -894,7 +893,7 @@ def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: return super().initialize(field, info) def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -908,7 +907,7 @@ def handle(self, field: FieldBase, t: float) -> None: class ConsistencyTracker(TrackerBase): - """Tracker interrupting the simulation when the state is not finite""" + """Tracker interrupting the simulation when the state is not finite.""" name = "consistency" @@ -926,7 +925,7 @@ def __init__(self, interrupts: InterruptData | None = None, *, interval=None): super().__init__(interrupts=interrupts, interval=interval) def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): @@ -939,7 +938,7 @@ def handle(self, field: FieldBase, t: float) -> None: class MaterialConservationTracker(TrackerBase): - """Tracking interrupting the simulation when material conservation is broken""" + """Tracker interrupting the simulation when material conservation is broken.""" name = "material_conservation" @@ -984,7 +983,7 @@ def initialize(self, field: FieldBase, info: InfoDict | None = None) -> float: return super().initialize(field, info) def handle(self, field: FieldBase, t: float) -> None: - """handle data supplied to this tracker + """Handle data supplied to this tracker. Args: field (:class:`~pde.fields.FieldBase`): diff --git a/pde/visualization/__init__.py b/pde/visualization/__init__.py index 5cc5a953..4700b804 100644 --- a/pde/visualization/__init__.py +++ b/pde/visualization/__init__.py @@ -1,6 +1,4 @@ -""" -Functions and classes for visualizing simulations. - +"""Functions and classes for visualizing simulations. .. autosummary:: :nosignatures: diff --git a/pde/visualization/movies.py b/pde/visualization/movies.py index 3437c6fe..f6db4850 100644 --- a/pde/visualization/movies.py +++ b/pde/visualization/movies.py @@ -1,5 +1,4 @@ -""" -Functions for creating movies of simulation results +"""Functions for creating movies of simulation results. .. autosummary:: :nosignatures: @@ -24,7 +23,7 @@ class Movie: - """Class for creating movies from matplotlib figures using ffmpeg + """Class for creating movies from matplotlib figures using ffmpeg. Note: Internally, this class uses :class:`matplotlib.animation.FFMpegWriter`. @@ -86,7 +85,7 @@ def __init__( @classmethod def is_available(cls) -> bool: - """check whether the movie infrastructure is available + """Check whether the movie infrastructure is available. Returns: bool: True if movies can be created @@ -103,13 +102,13 @@ def __exit__(self, exc_type, exc_value, exc_tb): return False def _end(self): - """clear up temporary things if necessary""" + """Clear up temporary things if necessary.""" if self._writer is not None: self._writer.finish() self._writer = None def add_figure(self, fig=None): - """adds the figure `fig` as a frame to the current movie + """Adds the figure `fig` as a frame to the current movie.
Args: fig (:class:`~matplotlib.figures.Figure`): @@ -137,7 +136,7 @@ def add_figure(self, fig=None): self._writer.grab_frame(facecolor="white") def save(self): - """convert the recorded images to a movie using ffmpeg""" + """Convert the recorded images to a movie using ffmpeg.""" self._end() @@ -151,7 +150,7 @@ def movie_scalar( tight: bool = False, show: bool = True, ) -> None: - """produce a movie for a simulation of a scalar field + """Produce a movie for a simulation of a scalar field. Args: storage (:class:`~pde.storage.base.StorageBase`): @@ -193,7 +192,7 @@ def movie_multiple( scale: ScaleData = "automatic", progress: bool = True, ) -> None: - """produce a movie for a simulation with n components + """Produce a movie for a simulation with n components. Args: storage (:class:`~pde.storage.base.StorageBase`): @@ -221,7 +220,7 @@ def movie( plot_args: dict[str, Any] | None = None, movie_args: dict[str, Any] | None = None, ) -> None: - """produce a movie by simply plotting each frame + """Produce a movie by simply plotting each frame. Args: storage (:class:`~pde.storage.base.StorageBase`): diff --git a/pde/visualization/plotting.py b/pde/visualization/plotting.py index 0bad80e7..42f09ad1 100644 --- a/pde/visualization/plotting.py +++ b/pde/visualization/plotting.py @@ -1,5 +1,4 @@ -""" -Functions and classes for plotting simulation data +"""Functions and classes for plotting simulation data. .. autosummary:: :nosignatures: @@ -43,7 +42,7 @@ def _add_horizontal_colorbar(im, ax, num_loc: int = 5) -> None: - """adds a horizontal colorbar for image `im` to the axis `ax` + """Adds a horizontal colorbar for image `im` to the axis `ax` Args: im: The result of calling :func:`matplotlib.pyplot.imshow` @@ -113,7 +112,7 @@ def extract_field( class ScalarFieldPlot: - """class managing compound plots of scalar fields""" + """Class managing compound plots of scalar fields.""" @fill_in_docstring def __init__( @@ -186,7 +185,7 @@ def from_storage( tight: bool = False, show: bool = True, ) -> ScalarFieldPlot: - """create ScalarFieldPlot from storage + """Create ScalarFieldPlot from storage. Args: storage (:class:`~pde.storage.base.StorageBase`): @@ -232,7 +231,7 @@ def from_storage( def _prepare_quantities( fields: FieldBase, quantities, scale: ScaleData = "automatic" ) -> list[list[dict[str, Any]]]: - """internal method to prepare quantities + """Internal method to prepare quantities. Args: fields (:class:`~pde.fields.base.FieldBase`): @@ -288,7 +287,7 @@ def _initialize( title: str | None = None, tight: bool = False, ): - """initialize the plot creating the figure and the axes + """Initialize the plot creating the figure and the axes. Args: fields (:class:`~pde.fields.base.FieldBase`): @@ -385,7 +384,7 @@ def _initialize( self.fig.tight_layout(rect=(0, 0.03, 1, 0.95)) def _update_data(self, fields: FieldBase, title: str | None = None) -> None: - """update the fields in the current plot + """Update the fields in the current plot. 
Args: fields (:class:`~pde.fields.base.FieldBase`): @@ -419,7 +418,7 @@ def _update_data(self, fields: FieldBase, title: str | None = None) -> None: img.set_clim(vmin, vmax) def _show(self): - """show the updated plot""" + """Show the updated plot.""" if self._ipython_out: # seems to be in an ipython instance => update widget from IPython.display import clear_output, display @@ -441,7 +440,7 @@ def _show(self): plt.pause(0.01) def update(self, fields: FieldBase, title: str | None = None) -> None: - """update the plot with the given fields + """Update the plot with the given fields. Args: fields: @@ -456,7 +455,7 @@ def update(self, fields: FieldBase, title: str | None = None) -> None: self._show() def savefig(self, path: str, **kwargs): - """save plot to file + """Save plot to file. Args: path (str): @@ -471,7 +470,7 @@ def savefig(self, path: str, **kwargs): def make_movie( self, storage: StorageBase, filename: str, progress: bool = True ) -> None: - """make a movie from the data stored in storage + """Make a movie from the data stored in storage. Args: storage (:class:`~pde.storage.base.StorageBase`): @@ -503,7 +502,7 @@ def make_movie( def plot_magnitudes( storage: StorageBase, quantities=None, ax=None, **kwargs ) -> PlotReference: - r"""plot spatially averaged quantities as a function of time + r"""Plot spatially averaged quantities as a function of time. For scalar fields, the default is to plot the average value while the averaged norm is plotted for vector fields. @@ -594,7 +593,7 @@ def _plot_kymograph( transpose: bool = False, **kwargs, ) -> PlotReference: - r"""plots a simple kymograph from given data + r"""Plots a simple kymograph from given data. Args: img_data (dict): @@ -660,7 +659,7 @@ def plot_kymograph( ax=None, **kwargs, ) -> PlotReference: - r"""plots a single kymograph from stored data + r"""Plots a single kymograph from stored data. The kymograph shows line data stacked along time. Consequently, the resulting image shows space along the horizontal axis and time along the @@ -735,7 +734,7 @@ def plot_kymographs( fig=None, **kwargs, ) -> list[PlotReference]: - r"""plots kymographs for all fields stored in `storage` + r"""Plots kymographs for all fields stored in `storage` The kymograph shows line data stacked along time. Consequently, the resulting image shows space along the horizontal axis and time along the diff --git a/scripts/create_requirements.py b/scripts/create_requirements.py index b8b4348e..52460cff 100755 --- a/scripts/create_requirements.py +++ b/scripts/create_requirements.py @@ -1,7 +1,5 @@ #!/usr/bin/env python3 -""" -This script creates the requirements files in the project -""" +"""This script creates the requirements files in the project.""" from __future__ import annotations @@ -18,7 +16,7 @@ @dataclass class Requirement: - """simple class collecting data for a single required python package""" + """Simple class collecting data for a single required python package.""" name: str # name of the python package version_min: str # minimal version @@ -42,7 +40,7 @@ def short_version(self) -> str: return version def line(self, relation: str = ">=") -> str: - """create a line for a requirements file + """Create a line for a requirements file. 
Args: relation (str): @@ -166,6 +164,7 @@ def line(self, relation: str = ">=") -> str: name="jupyter_contrib_nbextensions", version_min="0.5", tests_only=True ), Requirement(name="black", version_min="24", tests_only=True), + Requirement(name="docformatter", version_min="1.7", tests_only=True), Requirement(name="importlib-metadata", version_min="5", tests_only=True), Requirement(name="isort", version_min="5.1", tests_only=True), Requirement(name="mypy", version_min="1.8", tests_only=True), @@ -191,7 +190,7 @@ def write_requirements_txt( ref_base: bool = False, comment: str = None, ): - """write requirements to a requirements.txt file + """Write requirements to a requirements.txt file. Args: path (:class:`Path`): The path where the requirements are written @@ -216,7 +215,7 @@ def write_requirements_txt( def write_requirements_csv( path: Path, requirements: list[Requirement], *, incl_version: bool = True ): - """write requirements to a CSV file + """Write requirements to a CSV file. Args: path (:class:`Path`): The path where the requirements are written @@ -237,7 +236,7 @@ def write_requirements_csv( def write_requirements_py(path: Path, requirements: list[Requirement]): - """write requirements check into a python module + """Write requirements check into a python module. Args: path (:class:`Path`): The path where the requirements are written @@ -274,7 +273,7 @@ def write_from_template( fix_format: bool = False, add_warning: bool = True, ): - """write file based on a template + """Write file based on a template. Args: path (:class:`Path`): The path where the requirements are written @@ -322,7 +321,7 @@ def write_from_template( def main(): - """main function creating all the requirements""" + """Main function creating all the requirements.""" root = Path(PACKAGE_PATH) # write basic requirements diff --git a/scripts/create_storage_test_resources.py b/scripts/create_storage_test_resources.py index fc300d4a..5c8fe9c1 100755 --- a/scripts/create_storage_test_resources.py +++ b/scripts/create_storage_test_resources.py @@ -1,7 +1,5 @@ #!/usr/bin/env python3 -""" -This script creates storage files for backwards compatibility tests -""" +"""This script creates storage files for backwards compatibility tests.""" from __future__ import annotations @@ -15,7 +13,7 @@ def create_storage_test_resources(path, num): - """test storing scalar field as movie""" + """Test storing scalar field as movie.""" grid = pde.CylindricalSymGrid(3, [1, 2], [2, 2]) field = pde.ScalarField(grid, [[1, 3], [2, 4]]) eq = pde.DiffusionPDE() @@ -39,7 +37,7 @@ def create_storage_test_resources(path, num): def main(): - """main function creating all the requirements""" + """Main function creating all the requirements.""" root = Path(PACKAGE_PATH) create_storage_test_resources(root / "tests" / "storage" / "resources", 2) diff --git a/scripts/format_code.sh b/scripts/format_code.sh index 2366dc0b..0d91fc60 100755 --- a/scripts/format_code.sh +++ b/scripts/format_code.sh @@ -9,5 +9,8 @@ popd > /dev/null echo "Formating import statements..." isort .. +echo "Formating docstrings..." +docformatter --in-place --black --recursive .. + echo "Formating source code..." black .. 
\ No newline at end of file diff --git a/scripts/performance_boundaries.py b/scripts/performance_boundaries.py index cacbb9d5..817e6123 100755 --- a/scripts/performance_boundaries.py +++ b/scripts/performance_boundaries.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 -""" -This script tests the performance of the implementation of different boundary conditions -""" +"""This script tests the performance of the implementation of different boundary +conditions.""" import sys from pathlib import Path @@ -17,7 +16,7 @@ def main(): - """main routine testing the performance""" + """Main routine testing the performance.""" print("Reports calls-per-second (larger is better)\n") # Cartesian grid with different shapes and boundary conditions diff --git a/scripts/performance_laplace.py b/scripts/performance_laplace.py index a36bb4a0..6b0759a2 100755 --- a/scripts/performance_laplace.py +++ b/scripts/performance_laplace.py @@ -1,8 +1,6 @@ #!/usr/bin/env python3 -""" -This script tests the performance of the implementation of the laplace operator as a -primary example for the differential operators supplied by `py-pde`. -""" +"""This script tests the performance of the implementation of the laplace operator as a +primary example for the differential operators supplied by `py-pde`.""" import sys from pathlib import Path @@ -22,14 +20,14 @@ def custom_laplace_2d_periodic(shape, dx=1): - """make laplace operator with periodic boundary conditions""" + """Make laplace operator with periodic boundary conditions.""" dx_2 = 1 / dx**2 dim_x, dim_y = shape parallel = dim_x * dim_y >= config["numba.multithreading_threshold"] @jit(parallel=parallel) def laplace(arr, out=None): - """apply laplace operator to array `arr`""" + """Apply laplace operator to array `arr`""" if out is None: out = np.empty((dim_x, dim_y)) @@ -60,14 +58,14 @@ def laplace(arr, out=None): def custom_laplace_2d_neumann(shape, dx=1): - """make laplace operator with Neumann boundary conditions""" + """Make laplace operator with Neumann boundary conditions.""" dx_2 = 1 / dx**2 dim_x, dim_y = shape parallel = dim_x * dim_y >= config["numba.multithreading_threshold"] @jit(parallel=parallel) def laplace(arr, out=None): - """apply laplace operator to array `arr`""" + """Apply laplace operator to array `arr`""" if out is None: out = np.empty((dim_x, dim_y)) @@ -87,7 +85,7 @@ def laplace(arr, out=None): def custom_laplace_2d(shape, periodic, dx=1): - """make laplace operator with Neumann or periodic boundary conditions""" + """Make laplace operator with Neumann or periodic boundary conditions.""" if periodic: return custom_laplace_2d_periodic(shape, dx=dx) else: @@ -95,14 +93,14 @@ def custom_laplace_2d(shape, periodic, dx=1): def optimized_laplace_2d(bcs): - """make laplace operator with flexible boundary conditions""" + """Make laplace operator with flexible boundary conditions.""" set_ghost_cells = bcs.make_ghost_cell_setter() apply_laplace = bcs.grid.make_operator_no_bc("laplace") shape = bcs.grid.shape @jit def laplace(arr): - """apply laplace operator to array `arr`""" + """Apply laplace operator to array `arr`""" set_ghost_cells(arr) out = np.empty(shape) apply_laplace(arr, out) @@ -112,14 +110,14 @@ def laplace(arr): def custom_laplace_cyl_neumann(shape, dr=1, dz=1): - """make laplace operator with Neumann boundary conditions""" + """Make laplace operator with Neumann boundary conditions.""" dim_r, dim_z = shape dr_2 = 1 / dr**2 dz_2 = 1 / dz**2 @jit def laplace(arr, out=None): - """apply laplace operator to array `arr`""" + """Apply laplace operator 
to array `arr`""" if out is None: out = np.empty((dim_r, dim_z)) @@ -154,7 +152,7 @@ def laplace(arr, out=None): def main(): - """main routine testing the performance""" + """Main routine testing the performance.""" print("Reports calls-per-second (larger is better)") print(" The `CUSTOM` method implemented by hand is the baseline case.") print(" The `OPTIMIZED` uses some infrastructure form the py-pde package.") diff --git a/scripts/performance_solvers.py b/scripts/performance_solvers.py index 3f2d9baa..a31aa314 100755 --- a/scripts/performance_solvers.py +++ b/scripts/performance_solvers.py @@ -1,7 +1,5 @@ #!/usr/bin/env python3 -""" -This script tests the performance of different solvers -""" +"""This script tests the performance of different solvers.""" import sys from pathlib import Path @@ -27,7 +25,7 @@ def main( t_range: float = 100, size: int = 32, ): - """main routine testing the performance + """Main routine testing the performance. Args: equation (str): diff --git a/scripts/profile_import.py b/scripts/profile_import.py index 497a7265..c4e4c642 100755 --- a/scripts/profile_import.py +++ b/scripts/profile_import.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 -""" -This scripts measures the total time it takes to import the module. The total time -should ideally be below 1 second. +"""This script measures the total time it takes to import the module. + +The total time should ideally be below 1 second. """ import sys diff --git a/scripts/run_tests.py b/scripts/run_tests.py index 9393e991..c4f65a69 100755 --- a/scripts/run_tests.py +++ b/scripts/run_tests.py @@ -14,7 +14,7 @@ def _most_severe_exit_code(retcodes: Sequence[int]) -> int: - """returns the most severe exit code of a given list + """Returns the most severe exit code of a given list. Args: retcodes (list): A list of return codes @@ -29,7 +29,7 @@ def show_config(): - """show package configuration""" + """Show package configuration.""" from importlib.machinery import SourceFileLoader # imports the package from the package path @@ -50,7 +50,7 @@ def show_config(): def test_codestyle(*, verbose: bool = True) -> int: - """run the codestyle tests + """Run the codestyle tests. Args: verbose (bool): Whether to do extra output @@ -76,7 +76,7 @@ def test_codestyle(*, verbose: bool = True) -> int: def test_types(*, report: bool = False, verbose: bool = True) -> int: - """run mypy to check the types of the python code + """Run mypy to check the types of the python code. Args: report (bool): Whether to write a report @@ -123,7 +123,7 @@ def run_unit_tests( pattern: str = None, pytest_args: list[str] = [], ) -> int: - """run the unit tests + """Run the unit tests. Args: runslow (bool): Whether to run the slow tests @@ -225,7 +225,7 @@ def run_unit_tests( def main() -> int: - """the main program controlling the tests + """The main program controlling the tests. Returns: int: The return code indicating success or failure diff --git a/scripts/show_environment.py b/scripts/show_environment.py index 0b847fe0..0154dc3c 100755 --- a/scripts/show_environment.py +++ b/scripts/show_environment.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -""" -This script shows important information about the current python environment and the -associated installed packages. This information can be helpful in understanding issues -that occur with the package +"""This script shows important information about the current python environment and the +associated installed packages.
+ +This information can be helpful in understanding issues that occur with the package """ import sys diff --git a/tests/conftest.py b/tests/conftest.py index ad00a8e1..a9f6f67c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,4 @@ -""" -This file is used to configure the test environment when running py.test +"""This file is used to configure the test environment when running py.test. .. codeauthor:: David Zwicker """ @@ -14,7 +13,7 @@ @pytest.fixture(scope="function", autouse=True) def setup_and_teardown(): - """helper function adjusting environment before and after tests""" + """Helper function adjusting environment before and after tests.""" # raise all underflow errors np.seterr(all="raise", under="ignore") @@ -27,7 +26,7 @@ def setup_and_teardown(): @pytest.fixture(scope="function", autouse=False, name="rng") def init_random_number_generators(): - """get a random number generator and set the seed of the random number generator + """Get a random number generator and set the seed of the random number generator. The function returns an instance of :func:`~numpy.random.default_rng()` and initializes the default generators of both :mod:`numpy` and :mod:`numba`. @@ -37,14 +36,14 @@ def init_random_number_generators(): def pytest_configure(config): - """add markers to the configuration""" + """Add markers to the configuration.""" config.addinivalue_line("markers", "interactive: test is interactive") config.addinivalue_line("markers", "multiprocessing: test requires multiprocessing") config.addinivalue_line("markers", "slow: test runs slowly") def pytest_addoption(parser): - """pytest hook to add command line options parsed by pytest""" + """Pytest hook to add command line options parsed by pytest.""" parser.addoption( "--runslow", action="store_true", @@ -66,7 +65,7 @@ def pytest_addoption(parser): def pytest_collection_modifyitems(config, items): - """pytest hook to filter a collection of tests""" + """Pytest hook to filter a collection of tests.""" # parse options provided to py.test running_cov = config.getvalue("--cov") runslow = config.getoption("--runslow", default=False) diff --git a/tests/fields/fixtures/fields.py b/tests/fields/fixtures/fields.py index 887e5109..5c3b64de 100644 --- a/tests/fields/fixtures/fields.py +++ b/tests/fields/fixtures/fields.py @@ -18,7 +18,7 @@ def iter_grids(): - """generator providing some test grids""" + """Generator providing some test grids.""" for periodic in [True, False]: yield UnitGrid([3], periodic=periodic) yield UnitGrid([3, 3, 3], periodic=periodic) @@ -29,7 +29,7 @@ def iter_grids(): def iter_fields(): - """generator providing some test fields""" + """Generator providing some test fields.""" yield ScalarField(UnitGrid([1, 2, 3]), 1) yield VectorField.from_expression(PolarSymGrid(2, 3), ["r**2", "r"]) yield Tensor2Field.random_normal( @@ -41,7 +41,7 @@ def iter_fields(): def get_cartesian_grid(dim=2, periodic=True): - """return a random Cartesian grid of given dimension""" + """Return a random Cartesian grid of given dimension.""" rng = np.random.default_rng(0) bounds = [[0, 1 + rng.random()] for _ in range(dim)] shape = rng.integers(32, 64, size=dim) diff --git a/tests/fields/test_field_collections.py b/tests/fields/test_field_collections.py index 6ae1e2a7..32c72d84 100644 --- a/tests/fields/test_field_collections.py +++ b/tests/fields/test_field_collections.py @@ -13,7 +13,7 @@ @pytest.mark.parametrize("grid", iter_grids()) def test_shapes_nfields(grid, rng): - """test single component field""" + """Test single component 
field.""" for num in [1, 3]: fields = [ScalarField.random_uniform(grid, rng=rng) for _ in range(num)] field = FieldCollection(fields) @@ -28,7 +28,7 @@ def test_shapes_nfields(grid, rng): def test_collections(rng): - """test field collections""" + """Test field collections.""" grid = UnitGrid([3, 4]) sf = ScalarField.random_uniform(grid, label="sf", rng=rng) vf = VectorField.random_uniform(grid, label="vf", rng=rng) @@ -98,7 +98,7 @@ def test_collections(rng): def test_collections_copy(): - """test copying data of collections""" + """Test copying data of collections.""" grid = UnitGrid([2, 2]) sf = ScalarField(grid, 0) vf = VectorField(grid, 1) @@ -124,7 +124,7 @@ def test_collections_copy(): def test_collections_append(): - """test append data to collections""" + """Test append data to collections.""" grid = UnitGrid([2, 2]) sf = ScalarField(grid, 0) vf = VectorField(grid, 1, label="vector") @@ -156,7 +156,7 @@ def test_collections_append(): def test_collections_operators(): - """test field collections""" + """Test field collections.""" grid = UnitGrid([3, 4]) sf = ScalarField(grid, 1) vf = VectorField(grid, 1) @@ -178,7 +178,7 @@ def test_collections_operators(): def test_smoothing_collection(rng): - """test smoothing of a FieldCollection""" + """Test smoothing of a FieldCollection.""" grid = UnitGrid([3, 4], periodic=[True, False]) sf = ScalarField.random_uniform(grid, rng=rng) vf = VectorField.random_uniform(grid, rng=rng) @@ -197,7 +197,7 @@ def test_smoothing_collection(rng): def test_scalar_random_uniform(): - """test creating collections using scalar_random_uniform""" + """Test creating collections using scalar_random_uniform.""" grid = UnitGrid([3, 4], periodic=[True, False]) fc = FieldCollection.scalar_random_uniform(2, grid, label="c", labels=["a", "b"]) assert fc.label == "c" @@ -209,7 +209,7 @@ def test_scalar_random_uniform(): def test_from_scalar_expressions(): - """test creating field collections from scalar expressions""" + """Test creating field collections from scalar expressions.""" grid = UnitGrid([3]) expressions = ["x**2", "1"] fc = FieldCollection.from_scalar_expressions( @@ -227,7 +227,7 @@ def test_from_scalar_expressions(): @pytest.mark.skipif(not module_available("napari"), reason="requires `napari` module") @pytest.mark.interactive def test_interactive_collection_plotting(rng): - """test the interactive plotting""" + """Test the interactive plotting.""" grid = UnitGrid([3, 3]) sf = ScalarField.random_uniform(grid, 0.1, 0.9, rng=rng) vf = VectorField.random_uniform(grid, 0.1, 0.9, rng=rng) @@ -236,7 +236,7 @@ def test_interactive_collection_plotting(rng): def test_field_labels(): - """test the FieldCollection.labels property""" + """Test the FieldCollection.labels property.""" grid = UnitGrid([5]) s1 = ScalarField(grid, label="s1") s2 = ScalarField(grid) @@ -281,7 +281,7 @@ def test_field_labels(): def test_collection_1_field(): - """test field collections with only one field""" + """Test field collections with only one field.""" grid = UnitGrid([3]) s1 = ScalarField(grid, label="a") fc = FieldCollection([s1]) @@ -291,7 +291,7 @@ def test_collection_1_field(): def test_collection_plotting(): - """test simple plotting of various fields on various grids""" + """Test simple plotting of various fields on various grids.""" grid = UnitGrid([5]) s1 = ScalarField(grid, label="s1") s2 = ScalarField(grid) @@ -308,7 +308,7 @@ def test_collection_plotting(): def test_from_data(rng): - """test the `from_data` method""" + """Test the `from_data` method.""" grid = UnitGrid([3, 
5]) s = ScalarField.random_uniform(grid, label="s1", rng=rng) v = VectorField.random_uniform(grid, label="v2", rng=rng) @@ -336,7 +336,7 @@ def test_from_data(rng): def test_collection_apply(rng): - """test the `apply` method""" + """Test the `apply` method.""" grid = UnitGrid([3, 5]) s = ScalarField(grid, 2, label="s1") v = VectorField.random_uniform(grid, label="v2", rng=rng) @@ -347,7 +347,7 @@ def test_collection_apply(rng): @pytest.mark.parametrize("num", [1, 2, 3]) def test_rgb_image_plotting(num): - """test plotting of collections as rgb fields""" + """Test plotting of collections as rgb fields.""" grid = UnitGrid([16, 8]) fc = FieldCollection([ScalarField.random_uniform(grid) for _ in range(num)]) @@ -357,7 +357,7 @@ def test_rgb_image_plotting(num): @pytest.mark.parametrize("num", [1, 2, 3, 4]) def test_merged_image_plotting(num): - """test plotting of collections as merged images""" + """Test plotting of collections as merged images.""" grid = UnitGrid([16, 8]) fc = FieldCollection([ScalarField.random_uniform(grid) for _ in range(num)]) diff --git a/tests/fields/test_generic_fields.py b/tests/fields/test_generic_fields.py index 6165a6d0..f3858992 100644 --- a/tests/fields/test_generic_fields.py +++ b/tests/fields/test_generic_fields.py @@ -23,7 +23,7 @@ @pytest.mark.parametrize("field_class", [ScalarField, VectorField, Tensor2Field]) def test_set_label(field_class): - """test setting the field labels""" + """Test setting the field labels.""" grid = UnitGrid([2]) assert field_class(grid).label is None f = field_class(grid, label="a") @@ -42,7 +42,7 @@ def test_set_label(field_class): @pytest.mark.parametrize("grid", iter_grids()) @pytest.mark.parametrize("field_class", [ScalarField, Tensor2Field]) def test_interpolation_natural(grid, field_class, rng): - """test some interpolation for natural boundary conditions""" + """Test some interpolation for natural boundary conditions.""" msg = f"grid={grid}, field={field_class}" f = field_class.random_uniform(grid, rng=rng) @@ -64,7 +64,7 @@ def get_point(): @pytest.mark.parametrize("num", [1, 3]) @pytest.mark.parametrize("grid", iter_grids()) def test_shapes_nfields(num, grid, rng): - """test single component field""" + """Test single component field.""" fields = [ScalarField.random_uniform(grid, rng=rng) for _ in range(num)] field = FieldCollection(fields) data_shape = (num,) + grid.shape @@ -79,7 +79,7 @@ def test_shapes_nfields(num, grid, rng): @pytest.mark.parametrize("field_class", [ScalarField, VectorField, Tensor2Field]) def test_arithmetics(field_class): - """test simple arithmetics for fields""" + """Test simple arithmetics for fields.""" grid = UnitGrid([2, 2]) f1 = field_class(grid, data=1) f2 = field_class(grid, data=2) @@ -134,7 +134,7 @@ def test_arithmetics(field_class): def test_scalar_arithmetics(rng): - """test simple arithmetics involving scalar fields""" + """Test simple arithmetics involving scalar fields.""" grid = UnitGrid([3, 4]) s = ScalarField(grid, data=2) v = VectorField.random_uniform(grid, rng=rng) @@ -168,7 +168,7 @@ def test_scalar_arithmetics(rng): @pytest.mark.parametrize("field_class", [ScalarField, VectorField, Tensor2Field]) def test_data_managment(field_class): - """test how data is set""" + """Test how data is set.""" grid = UnitGrid([2, 2]) s1 = field_class(grid, data=1) np.testing.assert_allclose(s1.data, 1) @@ -203,7 +203,7 @@ def test_data_managment(field_class): @pytest.mark.parametrize("field_class", [ScalarField, VectorField, Tensor2Field]) def test_complex_fields(field_class, rng): - """test 
operations on complex fields""" + """Test operations on complex fields.""" grid = UnitGrid([3]) field = field_class.random_uniform(grid, 0, 1 + 1j, rng=rng) @@ -217,7 +217,7 @@ def test_complex_fields(field_class, rng): @pytest.mark.skipif(not module_available("h5py"), reason="requires `h5py` module") def test_hdf_input_output(tmp_path, rng): - """test writing and reading files""" + """Test writing and reading files.""" grid = UnitGrid([4, 4]) s = ScalarField.random_uniform(grid, label="scalar", rng=rng) v = VectorField.random_uniform(grid, label="vector", rng=rng) @@ -235,7 +235,7 @@ def test_hdf_input_output(tmp_path, rng): def test_writing_images(tmp_path, rng): - """test writing and reading files""" + """Test writing and reading files.""" from matplotlib.pyplot import imread grid = UnitGrid([4, 4]) @@ -253,7 +253,7 @@ def test_writing_images(tmp_path, rng): @pytest.mark.parametrize("ndim", [1, 2]) def test_interpolation_to_grid_fields(ndim): - """test whether data is interpolated correctly for different fields""" + """Test whether data is interpolated correctly for different fields.""" grid = CartesianGrid([[0, 2 * np.pi]] * ndim, 6) grid2 = CartesianGrid([[0, 2 * np.pi]] * ndim, 8) fc = FieldCollection.from_scalar_expressions(grid, ["cos(x)", "sin(x)"]) @@ -271,7 +271,7 @@ def test_interpolation_to_grid_fields(ndim): @pytest.mark.parametrize("field_cls", [ScalarField, VectorField, Tensor2Field]) def test_interpolation_values(field_cls, rng): - """test whether data is interpolated correctly for different fields""" + """Test whether data is interpolated correctly for different fields.""" grid = UnitGrid([3, 4]) f = field_cls.random_uniform(grid, rng=rng) f.set_ghost_cells("auto_periodic_neumann") @@ -292,7 +292,7 @@ def test_interpolation_values(field_cls, rng): def test_interpolation_ghost_cells(): - """test whether data is interpolated correctly with or without ghost cells""" + """Test whether data is interpolated correctly with or without ghost cells.""" grid = UnitGrid([3]) f = ScalarField(grid, [1, 2, 3]) f.set_ghost_cells({"value": 0}) @@ -328,7 +328,7 @@ def test_interpolation_ghost_cells(): ], ) def test_interpolation_to_cartesian(grid): - """test whether data is interpolated correctly to Cartesian grid""" + """Test whether data is interpolated correctly to Cartesian grid.""" dim = grid.dim sf = ScalarField(grid, 2) fc = FieldCollection([sf, sf]) @@ -352,7 +352,7 @@ def test_interpolation_to_cartesian(grid): [PolarSymGrid(6, 4), SphericalSymGrid(7, 4), CylindricalSymGrid(6, (0, 8), (7, 8))], ) def test_get_cartesian_grid(grid): - """test whether Cartesian grids can be created""" + """Test whether Cartesian grids can be created.""" cart = grid.get_cartesian_grid(mode="valid") assert cart.volume < grid.volume cart = grid.get_cartesian_grid(mode="full") @@ -361,7 +361,7 @@ def test_get_cartesian_grid(grid): @pytest.mark.parametrize("grid", iter_grids()) def test_simple_plotting(grid, rng): - """test simple plotting of various fields on various grids""" + """Test simple plotting of various fields on various grids.""" vf = VectorField.random_uniform(grid, rng=rng) tf = Tensor2Field.random_uniform(grid, rng=rng) sf = tf[0, 0] # test extraction of fields @@ -378,7 +378,7 @@ def test_simple_plotting(grid, rng): @pytest.mark.parametrize("field_cls", [ScalarField, VectorField, Tensor2Field]) def test_random_uniform(field_cls, rng): - """test whether random uniform fields behave correctly""" + """Test whether random uniform fields behave correctly.""" grid = UnitGrid([256, 256]) a = 
rng.random() b = 2 + rng.random() @@ -391,7 +391,7 @@ def test_random_uniform(field_cls, rng): def test_random_uniform_types(rng): - """test whether random uniform fields behave correctly for different types""" + """Test whether random uniform fields behave correctly for different types.""" grid = UnitGrid([8]) for dtype in [bool, int, float, complex]: field = VectorField.random_uniform(grid, dtype=dtype, rng=rng) @@ -412,7 +412,7 @@ def test_random_uniform_types(rng): @pytest.mark.parametrize("field_cls", [ScalarField, VectorField, Tensor2Field]) def test_random_normal(field_cls, rng): - """test whether random normal fields behave correctly""" + """Test whether random normal fields behave correctly.""" grid = UnitGrid([256, 256]) m = rng.random() s = 1 + rng.random() @@ -423,7 +423,7 @@ def test_random_normal(field_cls, rng): def test_random_normal_types(rng): - """test whether random normal fields behave correctly for different types""" + """Test whether random normal fields behave correctly for different types.""" grid = UnitGrid([8]) for dtype in [bool, int, float, complex]: field = VectorField.random_normal(grid, dtype=dtype, rng=rng) @@ -452,7 +452,7 @@ def test_random_normal_types(rng): @pytest.mark.parametrize("field_cls", [ScalarField, VectorField, Tensor2Field]) def test_random_colored(field_cls, rng): - """test whether random colored fields behave correctly""" + """Test whether random colored fields behave correctly.""" grid = UnitGrid([128, 128]) exponent = rng.uniform(-4, 4) scale = 1 + rng.random() @@ -462,7 +462,7 @@ def test_random_colored(field_cls, rng): def test_random_rng(): - """test whether the random number generator arguments are accepted""" + """Test whether the random number generator arguments are accepted.""" grid = UnitGrid([2, 2]) for create_random_field in [ ScalarField.random_colored, @@ -478,7 +478,7 @@ def test_random_rng(): @pytest.mark.parametrize("dim", [1, 2]) @pytest.mark.parametrize("size", [256, 512]) def test_fluctuations(dim, size, rng): - """test the scaling of fluctuations""" + """Test the scaling of fluctuations.""" if dim == 1: size **= 2 grid = CartesianGrid([[0, 1]] * dim, [size] * dim) @@ -492,7 +492,7 @@ def test_fluctuations(dim, size, rng): def test_smoothing(rng): - """test smoothing on different grids""" + """Test smoothing on different grids.""" for grid in [ CartesianGrid([[-2, 3]], 4), UnitGrid(7, periodic=False), @@ -526,7 +526,7 @@ def test_smoothing(rng): def test_vector_from_scalars(): - """test how to compile vector fields from scalar fields""" + """Test how to compile vector fields from scalar fields.""" g = UnitGrid([1, 2]) s1 = ScalarField(g, [[0, 1]]) s2 = ScalarField(g, [[2, 3]]) @@ -542,7 +542,7 @@ def test_vector_from_scalars(): "grid", [UnitGrid([3, 2]), UnitGrid([3]), CylindricalSymGrid(1, (0, 2), 3)] ) def test_dot_product(grid, rng): - """test dot products between vectors and tensors""" + """Test dot products between vectors and tensors.""" vf = VectorField.random_normal(grid, rng=rng) tf = Tensor2Field.random_normal(grid, rng=rng) dot = vf.make_dot_operator() @@ -582,7 +582,7 @@ def test_dot_product(grid, rng): @pytest.mark.parametrize("grid", iter_grids()) def test_complex_operator(grid, rng): - """test using a complex operator on grid""" + """Test using a complex operator on grid.""" r = ScalarField.random_normal(grid, rng=rng) i = ScalarField.random_normal(grid, rng=rng) c = r + 1j * i @@ -595,7 +595,7 @@ def test_complex_operator(grid, rng): def test_get_field_class_by_rank(): - """test _get_field_class_by_rank 
function""" + """Test _get_field_class_by_rank function.""" assert DataFieldBase.get_class_by_rank(0) is ScalarField assert DataFieldBase.get_class_by_rank(1) is VectorField assert DataFieldBase.get_class_by_rank(2) is Tensor2Field @@ -608,7 +608,7 @@ def test_get_field_class_by_rank(): ) @pytest.mark.parametrize("field", iter_fields()) def test_field_modelrunner_storage(field, tmp_path): - """test storing fields in modelrunner storages""" + """Test storing fields in modelrunner storages.""" from modelrunner import open_storage path = tmp_path / "field.json" diff --git a/tests/fields/test_scalar_fields.py b/tests/fields/test_scalar_fields.py index 0b653454..f104816f 100644 --- a/tests/fields/test_scalar_fields.py +++ b/tests/fields/test_scalar_fields.py @@ -17,7 +17,7 @@ def test_interpolation_singular(): - """test interpolation on singular dimensions""" + """Test interpolation on singular dimensions.""" grid = UnitGrid([1]) field = ScalarField(grid, data=3) @@ -36,7 +36,7 @@ def test_interpolation_singular(): def test_interpolation_edge(): - """test interpolation close to the boundary""" + """Test interpolation close to the boundary.""" grid = UnitGrid([2]) field = ScalarField(grid, data=[1, 2]) @@ -48,7 +48,7 @@ def test_interpolation_edge(): @pytest.mark.parametrize("grid", iter_grids()) def test_simple_shapes(grid, rng): - """test simple scalar fields""" + """Test simple scalar fields.""" pf = ScalarField.random_uniform(grid, rng=rng) np.testing.assert_equal(pf.data.shape, grid.shape) pf_lap = pf.laplace("auto_periodic_neumann") @@ -68,7 +68,7 @@ def test_simple_shapes(grid, rng): def test_scalars(rng): - """test some scalar fields""" + """Test some scalar fields.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4]) s1 = ScalarField(grid, np.full(grid.shape, 1)) s2 = ScalarField(grid, np.full(grid.shape, 2)) @@ -104,7 +104,7 @@ def test_scalars(rng): def test_laplacian(rng): - """test the gradient operator""" + """Test the gradient operator.""" grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True) s = ScalarField.random_harmonic(grid, axis_combination=np.add, modes=1, rng=rng) @@ -118,7 +118,7 @@ def test_laplacian(rng): def test_gradient(): - """test the gradient operator""" + """Test the gradient operator.""" grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True) x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1] data = np.cos(x) + np.sin(y) @@ -137,14 +137,14 @@ def test_gradient(): @pytest.mark.parametrize("grid", iter_grids()) def test_interpolation_to_grid(grid, rng): - """test whether data is interpolated correctly for different grids""" + """Test whether data is interpolated correctly for different grids.""" sf = ScalarField.random_uniform(grid, rng=rng) sf2 = sf.interpolate_to_grid(grid) np.testing.assert_allclose(sf.data, sf2.data, rtol=1e-6) def test_interpolation_bcs(): - """test interpolation of data involving boundary conditions""" + """Test interpolation of data involving boundary conditions.""" grid = UnitGrid([3]) f = ScalarField(grid, [1, 2, 3]) res = f.interpolate(np.c_[-1:5], bc="extrapolate", fill=42) @@ -154,7 +154,7 @@ def test_interpolation_bcs(): @pytest.mark.parametrize("grid", iter_grids()) @pytest.mark.parametrize("compiled", [True, False]) def test_insert_scalar(grid, compiled, rng): - """test the `insert` method""" + """Test the `insert` method.""" f = ScalarField(grid) a = rng.random() @@ -172,7 +172,7 @@ def test_insert_scalar(grid, compiled, rng): def test_insert_1d(rng): - """test the `insert` 
method for 1d systems""" + """Test the `insert` method for 1d systems.""" grid = UnitGrid([2], periodic=True) f = ScalarField(grid) g = f.copy() @@ -186,7 +186,7 @@ def test_insert_1d(rng): def test_insert_polar(rng): - """test the `insert` method for polar systems""" + """Test the `insert` method for polar systems.""" grid = PolarSymGrid(3, 5) f = ScalarField(grid) g = f.copy() @@ -200,7 +200,7 @@ def test_insert_polar(rng): def test_random_harmonic(rng): - """test whether random harmonic fields behave correctly""" + """Test whether random harmonic fields behave correctly.""" grid = get_cartesian_grid(2) # get random Cartesian grid x = ScalarField.random_harmonic(grid, modes=1, rng=rng) scaling = sum((2 * np.pi / L) ** 2 for L in grid.cuboid.size) @@ -209,7 +209,7 @@ def test_random_harmonic(rng): def test_get_line_data(rng): - """test different extraction methods for line data""" + """Test different extraction methods for line data.""" grid = UnitGrid([16, 32]) c = ScalarField.random_harmonic(grid, rng=rng) @@ -226,7 +226,7 @@ def test_get_line_data(rng): def test_from_expression(): - """test creating scalar field from expression""" + """Test creating scalar field from expression.""" grid = UnitGrid([1, 2]) sf = ScalarField.from_expression(grid, "x * y", label="abc") assert sf.label == "abc" @@ -254,7 +254,7 @@ def test_from_image(tmp_path, rng): def test_to_scalar(rng): - """test conversion to scalar field""" + """Test conversion to scalar field.""" sf = ScalarField.random_uniform(UnitGrid([3, 3]), rng=rng) np.testing.assert_allclose(sf.to_scalar().data, sf.data) np.testing.assert_allclose(sf.to_scalar("norm_squared").data, sf.data**2) @@ -273,7 +273,7 @@ def test_to_scalar(rng): @pytest.mark.parametrize("grid", (grid for grid in iter_grids() if grid.num_axes > 1)) @pytest.mark.parametrize("method", ["integral", "average"]) def test_projection(grid, method, rng): - """test scalar projection""" + """Test scalar projection.""" sf = ScalarField.random_uniform(grid, rng=rng) for ax in grid.axes: sp = sf.project(ax, method=method) @@ -290,7 +290,7 @@ def test_projection(grid, method, rng): @pytest.mark.parametrize("grid", (grid for grid in iter_grids() if grid.num_axes > 1)) def test_slice(grid, rng): - """test scalar slicing""" + """Test scalar slicing.""" sf = ScalarField(grid, 0.5) p = grid.get_random_point(coords="grid", rng=rng) for i in range(grid.num_axes): @@ -306,7 +306,7 @@ def test_slice(grid, rng): def test_slice_positions(): - """test scalar slicing at standard positions""" + """Test scalar slicing at standard positions.""" grid = UnitGrid([3, 1]) sf = ScalarField(grid, np.arange(3).reshape(3, 1)) assert sf.slice({"x": "min"}).data == 0 @@ -320,7 +320,7 @@ def test_slice_positions(): def test_interpolation_mutable(): - """test interpolation on mutable fields""" + """Test interpolation on mutable fields.""" grid = UnitGrid([2], periodic=True) field = ScalarField(grid) @@ -341,7 +341,7 @@ def test_interpolation_mutable(): def test_boundary_interpolation_1d(): - """test boundary interpolation for 1d fields""" + """Test boundary interpolation for 1d fields.""" grid = UnitGrid([5]) field = ScalarField(grid, np.arange(grid.shape[0])) @@ -359,7 +359,7 @@ def test_boundary_interpolation_1d(): def test_boundary_interpolation_2d(rng): - """test boundary interpolation for 2d fields""" + """Test boundary interpolation for 2d fields.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 3]) field = ScalarField.random_normal(grid, rng=rng) @@ -377,7 +377,7 @@ def 
test_boundary_interpolation_2d(rng): def test_numpy_ufuncs(rng): - """test numpy ufuncs""" + """Test numpy ufuncs.""" grid = UnitGrid([2, 2]) f1 = ScalarField.random_uniform(grid, 0.1, 0.9, rng=rng) @@ -394,7 +394,7 @@ def test_numpy_ufuncs(rng): def test_plotting_1d(rng): - """test plotting of 1d scalar fields""" + """Test plotting of 1d scalar fields.""" grid = UnitGrid([3]) field = ScalarField.random_uniform(grid, 0.1, 0.9, rng=rng) @@ -403,7 +403,7 @@ def test_plotting_1d(rng): def test_plotting_2d(rng): - """test plotting of 2d scalar fields""" + """Test plotting of 2d scalar fields.""" grid = UnitGrid([3, 3]) field = ScalarField.random_uniform(grid, 0.1, 0.9, rng=rng) @@ -414,7 +414,7 @@ def test_plotting_2d(rng): @pytest.mark.skipif(not module_available("napari"), reason="requires `napari` module") @pytest.mark.interactive def test_interactive_plotting(rng): - """test the interactive plotting""" + """Test the interactive plotting.""" grid = UnitGrid([3, 3]) field = ScalarField.random_uniform(grid, 0.1, 0.9, rng=rng) field.plot_interactive(viewer_args={"show": False, "close": True}) @@ -425,7 +425,7 @@ def test_interactive_plotting(rng): def test_complex_dtype(): - """test the support of a complex data type""" + """Test the support of a complex data type.""" grid = UnitGrid([2]) f = ScalarField(grid, 1j) assert f.is_complex @@ -446,14 +446,14 @@ def test_complex_dtype(): def test_complex_plotting(): - """test plotting of complex fields""" + """Test plotting of complex fields.""" for dim in (1, 2): f = ScalarField(UnitGrid([3] * dim), 1j) f.plot() def test_complex_methods(): - """test special methods for complex data type""" + """Test special methods for complex data type.""" grid = UnitGrid([2, 2]) f = ScalarField(grid, 1j) val = f.interpolate([1, 1]) @@ -465,13 +465,13 @@ def test_complex_methods(): def test_complex_operators(): - """test differential operators for complex data type""" + """Test differential operators for complex data type.""" f = ScalarField(UnitGrid([2, 2]), 1j) assert f.laplace("auto_periodic_neumann").magnitude == pytest.approx(0) def test_interpolation_after_free(rng): - """test whether interpolation is possible when the original field is removed""" + """Test whether interpolation is possible when the original field is removed.""" f = ScalarField.from_expression(UnitGrid([5]), "x") intp = f.make_interpolator() @@ -486,7 +486,7 @@ def test_interpolation_after_free(rng): def test_corner_interpolation(): - """test whether the field can also be interpolated up to the corner of the grid""" + """Test whether the field can also be interpolated up to the corner of the grid.""" grid = UnitGrid([1, 1], periodic=False) field = ScalarField(grid) field.set_ghost_cells({"value": 1}) @@ -499,7 +499,7 @@ def test_corner_interpolation(): @pytest.mark.parametrize("grid", iter_grids()) def test_generic_derivatives(grid, rng): - """test generic derivatives operators""" + """Test generic derivatives operators.""" sf = ScalarField.random_uniform(grid, rng=rng) sf_grad = sf.gradient("auto_periodic_neumann") sf_lap = ScalarField(grid) @@ -524,7 +524,7 @@ def test_generic_derivatives(grid, rng): def test_piecewise_expressions(): - """test special expressions for creating fields""" + """Test special expressions for creating fields.""" grid = CartesianGrid([[0, 4]], 32) field = ScalarField.from_expression(grid, "Piecewise((x**2, x>2), (1+x, x<=2))") x = grid.axes_coords[0] @@ -533,7 +533,7 @@ def test_piecewise_expressions(): def test_boundary_expressions_with_t(): - """test special case 
of imposing time-dependent boundary conditions""" + """Test special case of imposing time-dependent boundary conditions.""" field = ScalarField(UnitGrid([3]), 0) res = field.laplace({"value_expression": "t"}, args={"t": 0}) np.testing.assert_allclose(res.data, [0, 0, 0]) @@ -544,7 +544,7 @@ def test_boundary_expressions_with_t(): @pytest.mark.multiprocessing @pytest.mark.parametrize("decomp", ["auto", (-1, 1), (1, -1)]) def test_field_split(decomp, rng): - """test the field splitting function in an MPI context""" + """Test the field splitting function in an MPI context.""" grid = UnitGrid([4, 4]) field = ScalarField.random_uniform(grid, rng=rng) @@ -561,7 +561,7 @@ def test_field_split(decomp, rng): def test_field_corner_interpolation_2d(): - """test corner interpolation for a 2d field""" + """Test corner interpolation for a 2d field.""" f = ScalarField(UnitGrid([1, 1]), 0) bc_x = [{"value": -1}, {"value": 2}] bc_y = [{"value": -2}, {"value": 1}] @@ -571,7 +571,7 @@ def test_field_corner_interpolation_2d(): def test_field_corner_interpolation_3d(): - """test corner interpolation for a 3d field""" + """Test corner interpolation for a 3d field.""" f = ScalarField(UnitGrid([1, 1, 1]), 0) f.set_ghost_cells(bc=[[{"value": -3}, {"value": 3}]] * 3, set_corners=True) expect = np.array( diff --git a/tests/fields/test_tensorial_fields.py b/tests/fields/test_tensorial_fields.py index bdb9957a..1bf6e2c2 100644 --- a/tests/fields/test_tensorial_fields.py +++ b/tests/fields/test_tensorial_fields.py @@ -11,7 +11,7 @@ def test_tensors_basic(rng): - """test some tensor calculations""" + """Test some tensor calculations.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4]) t1 = Tensor2Field(grid, np.full((2, 2) + grid.shape, 1)) @@ -70,7 +70,7 @@ def test_tensors_basic(rng): @pytest.mark.parametrize("grid", [UnitGrid([1, 1]), PolarSymGrid(2, 1)]) def test_tensors_transpose(grid): - """test transposing tensors""" + """Test transposing tensors.""" def broadcast(arr): return np.asarray(arr)[(...,) + (np.newaxis,) * grid.num_axes] @@ -82,7 +82,7 @@ def broadcast(arr): def test_tensor_symmetrize(): - """test advanced tensor calculations""" + """Test advanced tensor calculations.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [2, 2]) t1 = Tensor2Field(grid) t1.data[0, 0, :] = 1 @@ -117,7 +117,7 @@ def test_tensor_symmetrize(): @pytest.mark.parametrize("grid", iter_grids()) @pytest.mark.parametrize("compiled", [True, False]) def test_insert_tensor(grid, compiled, rng): - """test the `insert` method""" + """Test the `insert` method.""" f = Tensor2Field(grid) a = rng.random(f.data_shape) @@ -136,7 +136,7 @@ def test_insert_tensor(grid, compiled, rng): def test_tensor_invariants(rng): - """test the invariants""" + """Test the invariants.""" # dim == 1 f = Tensor2Field.random_uniform(UnitGrid([3]), rng=rng) np.testing.assert_allclose( @@ -182,7 +182,7 @@ def test_tensor_invariants(rng): @pytest.mark.parametrize("backend", ["numba", "numpy"]) def test_complex_tensors(backend, rng): - """test some complex tensor fields""" + """Test some complex tensor fields.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4]) shape = (2, 2, 2) + grid.shape numbers = rng.random(shape) + rng.random(shape) * 1j @@ -206,7 +206,7 @@ def test_complex_tensors(backend, rng): def test_from_expressions(): - """test initializing tensor fields with expressions""" + """Test initializing tensor fields with expressions.""" grid = UnitGrid([4, 4]) tf = Tensor2Field.from_expression(grid, [[1, 1], ["x**2", "x * y"]]) xs = grid.cell_coords[..., 0] 
diff --git a/tests/fields/test_vectorial_fields.py b/tests/fields/test_vectorial_fields.py index b44b7a67..39016e36 100644 --- a/tests/fields/test_vectorial_fields.py +++ b/tests/fields/test_vectorial_fields.py @@ -12,7 +12,7 @@ def test_vectors_basic(): - """test some vector fields""" + """Test some vector fields.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4]) v1 = VectorField(grid, np.full((2,) + grid.shape, 1)) v2 = VectorField(grid, np.full((2,) + grid.shape, 2)) @@ -75,7 +75,7 @@ def test_vectors_basic(): def test_divergence(): - """test the divergence operator""" + """Test the divergence operator.""" grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True) x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1] data = [np.cos(x) + y, np.sin(y) - x] @@ -92,7 +92,7 @@ def test_divergence(): def test_vector_gradient_field(): - """test the vector gradient operator""" + """Test the vector gradient operator.""" grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True) x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1] data = [np.cos(x) + y, np.sin(y) - x] @@ -117,7 +117,7 @@ def test_vector_gradient_field(): def test_vector_laplace(): - """test the laplace operator""" + """Test the laplace operator.""" grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True) x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1] data = [np.cos(x) + np.sin(y), np.sin(y) - np.cos(x)] @@ -133,7 +133,7 @@ def test_vector_laplace(): def test_vector_boundary_conditions(): - """test some boundary conditions of operators of vector fields""" + """Test some boundary conditions of operators of vector fields.""" grid = CartesianGrid([[0, 2 * np.pi], [0, 1]], 32, periodic=[False, True]) vf = VectorField.from_expression(grid, ["sin(x)", "0"]) @@ -148,7 +148,7 @@ def test_vector_boundary_conditions(): def test_outer_product(): - """test outer product of vector fields""" + """Test outer product of vector fields.""" vf = VectorField(UnitGrid([1, 1]), [[[1]], [[2]]]) for backend in ["numpy", "numba"]: @@ -168,7 +168,7 @@ def test_outer_product(): def test_from_expressions(): - """test initializing vector fields with expressions""" + """Test initializing vector fields with expressions.""" grid = UnitGrid([4, 4]) vf = VectorField.from_expression(grid, ["x**2", "x * y"]) xs = grid.cell_coords[..., 0] @@ -204,7 +204,7 @@ def f(x, y): def test_vector_plot_quiver_reduction(rng): - """test whether quiver plots reduce the resolution""" + """Test whether quiver plots reduce the resolution.""" grid = UnitGrid([6, 6]) field = VectorField.random_normal(grid, rng=rng) ref = field.plot(method="quiver", max_points=4) @@ -212,7 +212,7 @@ def test_vector_plot_quiver_reduction(rng): def test_boundary_interpolation_vector(rng): - """test boundary interpolation""" + """Test boundary interpolation.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 3]) field = VectorField.random_normal(grid, rng=rng) @@ -225,7 +225,7 @@ def test_boundary_interpolation_vector(rng): @pytest.mark.parametrize("transpose", [True, False]) def test_vector_plotting_2d(transpose, rng): - """test plotting of 2d vector fields""" + """Test plotting of 2d vector fields.""" grid = UnitGrid([3, 4]) field = VectorField.random_uniform(grid, 0.1, 0.9, rng=rng) @@ -242,14 +242,14 @@ def test_vector_plotting_2d(transpose, rng): @pytest.mark.skipif(not module_available("napari"), reason="requires `napari` module") @pytest.mark.interactive def test_interactive_vector_plotting(rng): - """test the 
interactive plotting""" + """Test the interactive plotting.""" grid = UnitGrid([3, 3]) field = VectorField.random_uniform(grid, 0.1, 0.9, rng=rng) field.plot_interactive(viewer_args={"show": False, "close": True}) def test_complex_vectors(rng): - """test some complex vector fields""" + """Test some complex vector fields.""" grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4]) shape = (2, 2) + grid.shape numbers = rng.random(shape) + rng.random(shape) * 1j @@ -279,7 +279,7 @@ def test_complex_vectors(rng): def test_vector_bcs(): - """test boundary conditions on vector fields""" + """Test boundary conditions on vector fields.""" grid = UnitGrid([3, 3], periodic=False) v = VectorField.from_expression(grid, ["x", "cos(y)"]) @@ -295,7 +295,7 @@ def test_vector_bcs(): def test_interpolation_vector_fields_cylindrical(): - """test interpolation of a vector field on cylindrical coordinates""" + """Test interpolation of a vector field on cylindrical coordinates.""" grid = CylindricalSymGrid(5, [-2, 3], 10) vf = VectorField.from_expression(grid, ["r", "1", "z"]) grid_cart = grid.get_cartesian_grid(mode="valid") @@ -312,7 +312,7 @@ def test_interpolation_vector_fields_cylindrical(): def test_interpolation_vector_fields_polar(): - """test interpolation of a vector field on polar coordinates""" + """Test interpolation of a vector field on polar coordinates.""" grid = PolarSymGrid(5, 10) vf = VectorField.from_expression(grid, ["r", "1"]) grid_cart = grid.get_cartesian_grid(mode="valid") @@ -328,7 +328,7 @@ def test_interpolation_vector_fields_polar(): def test_interpolation_vector_fields_spherical(): - """test interpolation of a vector field on polar coordinates""" + """Test interpolation of a vector field on polar coordinates.""" grid = SphericalSymGrid(5, 10) vf = VectorField.from_expression(grid, ["r", "1", "r"]) grid_cart = grid.get_cartesian_grid(mode="valid") diff --git a/tests/grids/boundaries/test_axes_boundaries.py b/tests/grids/boundaries/test_axes_boundaries.py index 2fe5c143..19ec8693 100644 --- a/tests/grids/boundaries/test_axes_boundaries.py +++ b/tests/grids/boundaries/test_axes_boundaries.py @@ -15,7 +15,7 @@ def test_boundaries(): - """test setting boundaries for multiple systems""" + """Test setting boundaries for multiple systems.""" b = ["periodic", "value", {"type": "derivative", "value": 1}] for bx, by in itertools.product(b, b): periodic = [b == "periodic" for b in (bx, by)] @@ -47,7 +47,7 @@ def test_boundaries(): def test_boundaries_edge_cases(): - """test treatment of invalid data""" + """Test treatment of invalid data.""" grid = UnitGrid([3, 3]) bcs = grid.get_boundary_conditions("auto_periodic_neumann") with pytest.raises(BCDataError): @@ -69,7 +69,7 @@ def test_boundaries_edge_cases(): def test_boundary_specifications(): - """test different ways of specifying boundary conditions""" + """Test different ways of specifying boundary conditions.""" g = UnitGrid([2]) bc1 = Boundaries.from_data( g, [{"type": "derivative", "value": 0}, {"type": "value", "value": 0}] @@ -80,7 +80,7 @@ def test_boundary_specifications(): def test_mixed_boundary_condition(rng): - """test limiting cases of the mixed boundary condition""" + """Test limiting cases of the mixed boundary condition.""" g = UnitGrid([2]) d = rng.random(2) g1 = g.make_operator("gradient", bc=[{"mixed": 0}, {"mixed": np.inf}]) @@ -96,7 +96,7 @@ def test_mixed_boundary_condition(rng): ], ) def test_natural_boundary_conditions(cond, is_value): - """test special automatic boundary conditions""" + """Test special automatic boundary 
conditions.""" g = UnitGrid([2, 2], periodic=[True, False]) for bc in [ Boundaries.from_data(g, cond), @@ -110,7 +110,7 @@ def test_natural_boundary_conditions(cond, is_value): def test_special_cases(): - """test some special boundary conditions""" + """Test some special boundary conditions.""" g = UnitGrid([5]) s = ScalarField(g, np.arange(5)) for bc in ["extrapolate", {"curvature": 0}]: @@ -118,7 +118,7 @@ def test_special_cases(): def test_bc_values(): - """test setting the values of boundary conditions""" + """Test setting the values of boundary conditions.""" g = UnitGrid([5]) bc = g.get_boundary_conditions([{"value": 2}, {"derivative": 3}]) assert bc[0].low.value == 2 and bc[0].high.value == 3 @@ -127,7 +127,7 @@ def test_bc_values(): @pytest.mark.parametrize("dim", [1, 2, 3]) @pytest.mark.parametrize("periodic", [True, False]) def test_set_ghost_cells(dim, periodic, rng): - """test setting values for ghost cells""" + """Test setting values for ghost cells.""" grid = UnitGrid([1] * dim, periodic=periodic) field = ScalarField.random_uniform(grid, rng=rng) bcs = grid.get_boundary_conditions("auto_periodic_neumann") @@ -147,7 +147,7 @@ def test_set_ghost_cells(dim, periodic, rng): def test_setting_specific_bcs(): - """test the interface of setting specific conditions""" + """Test the interface of setting specific conditions.""" grid = UnitGrid([4, 4], periodic=[False, True]) bcs = grid.get_boundary_conditions("auto_periodic_neumann") @@ -184,7 +184,7 @@ def test_setting_specific_bcs(): def test_boundaries_property(): - """test boundaries property""" + """Test boundaries property.""" g = UnitGrid([2, 2]) bc = Boundaries.from_data(g, ["neumann", "dirichlet"]) assert len(list(bc.boundaries)) == 4 diff --git a/tests/grids/boundaries/test_axis_boundaries.py b/tests/grids/boundaries/test_axis_boundaries.py index 9e8bb6f9..4ead63da 100644 --- a/tests/grids/boundaries/test_axis_boundaries.py +++ b/tests/grids/boundaries/test_axis_boundaries.py @@ -12,7 +12,7 @@ def test_boundary_pair(): - """test setting boundary conditions for whole axis""" + """Test setting boundary conditions for whole axis.""" g = UnitGrid([2, 3]) b = ["value", {"type": "derivative", "value": 1}] for bl, bh in itertools.product(b, b): @@ -53,7 +53,7 @@ def test_boundary_pair(): def test_get_axis_boundaries(): - """test setting boundary conditions including periodic ones""" + """Test setting boundary conditions including periodic ones.""" for data in ["value", "derivative", "periodic", "anti-periodic"]: g = UnitGrid([2], periodic=("periodic" in data)) b = get_boundary_axis(g, 0, data) diff --git a/tests/grids/boundaries/test_local_boundaries.py b/tests/grids/boundaries/test_local_boundaries.py index fd2e58a8..315d838b 100644 --- a/tests/grids/boundaries/test_local_boundaries.py +++ b/tests/grids/boundaries/test_local_boundaries.py @@ -19,7 +19,7 @@ def test_get_arr_1d(): - """test the _get_arr_1d function""" + """Test the _get_arr_1d function.""" # 1d a = np.arange(3) arr_1d, i, bc_idx = _get_arr_1d(a, [1], 0) @@ -58,7 +58,7 @@ def test_get_arr_1d(): def test_individual_boundaries(): - """test setting individual boundaries""" + """Test setting individual boundaries.""" g = UnitGrid([2]) for data in [ "value", @@ -87,7 +87,7 @@ def test_individual_boundaries(): def test_individual_boundaries_multidimensional(): - """test setting individual boundaries in 2d""" + """Test setting individual boundaries in 2d.""" g2 = UnitGrid([2, 3]) bc = BCBase.from_data(g2, 0, True, {"type": "value", "value": [1, 2]}, rank=1) @@ -106,7 +106,7 
@@ def test_individual_boundaries_multidimensional(): def test_virtual_points(): - """test the calculation of virtual points""" + """Test the calculation of virtual points.""" g = UnitGrid([2]) data = np.array([1, 2]) @@ -146,7 +146,7 @@ def test_virtual_points(): @pytest.mark.parametrize("upper", [False, True]) def test_virtual_points_linked_data(upper): - """test the calculation of virtual points with linked_data""" + """Test the calculation of virtual points with linked_data.""" g = UnitGrid([2, 2]) point = (1, 1) if upper else (0, 0) data = np.zeros(g.shape) @@ -187,7 +187,7 @@ def test_virtual_points_linked_data(upper): def test_mixed_condition(): - """test the calculation of virtual points""" + """Test the calculation of virtual points.""" g = UnitGrid([2]) data = np.array([1, 2]) @@ -211,7 +211,7 @@ def test_mixed_condition(): def test_inhomogeneous_bcs_1d(): - """test inhomogeneous boundary conditions in 1d grids""" + """Test inhomogeneous boundary conditions in 1d grids.""" g = UnitGrid([2]) data = np.ones((2,)) # field is 1 everywhere @@ -234,7 +234,7 @@ def test_inhomogeneous_bcs_1d(): def test_inhomogeneous_bcs_2d(): - """test inhomogeneous boundary conditions in 2d grids""" + """Test inhomogeneous boundary conditions in 2d grids.""" g = UnitGrid([2, 2]) data = np.ones((2, 2)) @@ -276,7 +276,7 @@ def test_inhomogeneous_bcs_2d(): @pytest.mark.parametrize("expr", ["1", "x + y**2"]) def test_expression_bc_setting_value(expr, rng): - """test boundary conditions that use an expression""" + """Test boundary conditions that use an expression.""" grid = CartesianGrid([[0, 1], [0, 1]], 4) if expr == "1": @@ -311,7 +311,7 @@ def func(adjacent_value, dx, x, y, t): @pytest.mark.parametrize("expr", ["1", "x + y**2"]) def test_expression_bc_setting_derivative(expr, rng): - """test boundary conditions that use an expression""" + """Test boundary conditions that use an expression.""" grid = CartesianGrid([[0, 1], [0, 1]], 4) if expr == "1": @@ -345,7 +345,7 @@ def func(adjacent_value, dx, x, y, t): @pytest.mark.parametrize("value_expr, const_expr", [["1", "1"], ["x", "y**2"]]) def test_expression_bc_setting_mixed(value_expr, const_expr, rng): - """test boundary conditions that use an expression""" + """Test boundary conditions that use an expression.""" grid = CartesianGrid([[0, 1], [0, 1]], 4) if value_expr == "1": @@ -395,7 +395,7 @@ def const_func(adjacent_value, dx, x, y, t): @pytest.mark.parametrize("dim", [1, 2, 3]) def test_expression_bc_operator(dim): - """test boundary conditions that use an expression in an operator""" + """Test boundary conditions that use an expression in an operator.""" grid = CartesianGrid([[0, 1]] * dim, 4) bc1 = grid.get_boundary_conditions({"value": 1}) bc2 = grid.get_boundary_conditions({"virtual_point": f"2 - value"}) @@ -411,7 +411,7 @@ def test_expression_bc_operator(dim): @pytest.mark.parametrize("dim", [1, 2, 3]) def test_expression_bc_value(dim): - """test boundary conditions that use an expression to calculate the value""" + """Test boundary conditions that use an expression to calculate the value.""" def unity(*args): return 1 @@ -434,7 +434,7 @@ def unity(*args): @pytest.mark.parametrize("dim", [1, 2, 3]) def test_expression_bc_derivative(dim): - """test boundary conditions that use an expression to calculate the derivative""" + """Test boundary conditions that use an expression to calculate the derivative.""" def zeros(*args): return 0 @@ -457,7 +457,7 @@ def zeros(*args): @pytest.mark.parametrize("dim", [1, 2, 3]) def test_expression_bc_mixed(dim): 
- """test boundary conditions that use an expression to calculate the derivative""" + """Test boundary conditions that use an expression to calculate the derivative.""" def zeros(*args): return 0 @@ -479,14 +479,14 @@ def zeros(*args): def test_expression_invalid_args(): - """test boundary conditions use an expression with invalid data""" + """Test boundary conditions use an expression with invalid data.""" grid = CartesianGrid([[0, 1]], 4) with pytest.raises(BCDataError): grid.get_boundary_conditions({"derivative_expression": "unknown(x)"}) def test_expression_bc_polar_grid(): - """test whether expression BCs work on polar grids""" + """Test whether expression BCs work on polar grids.""" grid = PolarSymGrid(radius=1, shape=8) bcs = grid.get_boundary_conditions([{"value": 1}, {"value_expression": "1"}]) @@ -505,7 +505,7 @@ def test_expression_bc_polar_grid(): @pytest.mark.parametrize("dim", [1, 2]) @pytest.mark.parametrize("compiled", [True, False]) def test_expression_bc_specific_value(dim, compiled): - """test boundary conditions that use a value at a different position""" + """Test boundary conditions that use a value at a different position.""" n = 2 grid = CartesianGrid([[0, 1]] * dim, n) @@ -549,7 +549,7 @@ def set_bcs(): def test_expression_bc_user_func(): - """test user functions in boundary expressions""" + """Test user functions in boundary expressions.""" grid = UnitGrid([2]) bc1 = grid.get_boundary_conditions({"virtual_point": "sin(value)"}) bc2 = grid.get_boundary_conditions( @@ -568,7 +568,7 @@ def test_expression_bc_user_func(): @pytest.mark.parametrize("dim", [1, 2]) def test_expression_bc_user_func_nojit(dim): - """test user functions in boundary expressions that cannot be compiled""" + """Test user functions in boundary expressions that cannot be compiled.""" grid = UnitGrid([3] * dim) class C: @@ -606,7 +606,7 @@ def func(value, dx, x, y, t): @pytest.mark.parametrize("dim", [1, 2, 3]) def test_expression_bc_user_expr_nojit(dim): - """test user expressions in boundary expressions that cannot be compiled""" + """Test user expressions in boundary expressions that cannot be compiled.""" grid = UnitGrid([3] * dim) class C: @@ -646,7 +646,7 @@ def func(value): def test_getting_registered_bcs(): - """test the functions that return the registered BCs""" + """Test the functions that return the registered BCs.""" assert isinstance(registered_boundary_condition_classes(), dict) assert isinstance(registered_boundary_condition_names(), dict) @@ -654,7 +654,7 @@ def test_getting_registered_bcs(): @pytest.mark.parametrize("dim", [1, 2]) @pytest.mark.parametrize("target", ["value", "derivative"]) def test_user_bcs_numpy(dim, target): - """test setting user BCs""" + """Test setting user BCs.""" value = np.arange(3) if dim == 2 else 1 grid = UnitGrid([3] * dim) bcs = grid.get_boundary_conditions({"type": "user"}) @@ -683,7 +683,7 @@ def test_user_bcs_numpy(dim, target): @pytest.mark.parametrize("dim", [1, 2, 3]) @pytest.mark.parametrize("target", ["value", "derivative"]) def test_user_bcs_numba(dim, target): - """test setting user BCs""" + """Test setting user BCs.""" if dim == 1: value = 1 elif dim == 2: @@ -715,7 +715,7 @@ def test_user_bcs_numba(dim, target): def test_mpi_bc(): - """test some basic methods of _MPIBC""" + """Test some basic methods of _MPIBC.""" grid = UnitGrid([4], periodic=True) mesh = GridMesh.from_grid(grid, decomposition=[2]) assert len(mesh) == 2 diff --git a/tests/grids/operators/test_cartesian_operators.py b/tests/grids/operators/test_cartesian_operators.py 
index 77be4907..304629dc 100644
--- a/tests/grids/operators/test_cartesian_operators.py
+++ b/tests/grids/operators/test_cartesian_operators.py
@@ -24,7 +24,7 @@
def _get_random_grid_bcs(ndim: int, dx="random", periodic="random", rank=0):
- """create a random Cartesian grid with natural bcs"""
+ """Create a random Cartesian grid with natural bcs."""
rng = np.random.default_rng(0)
shape = tuple(rng.integers(2, 5, ndim))
@@ -45,7 +45,7 @@ def _get_random_grid_bcs(ndim: int, dx="random", periodic="random", rank=0):
@pytest.mark.parametrize("periodic", [True, False])
def test_singular_dimensions_2d(periodic, rng):
- """test grids with singular dimensions"""
+ """Test grids with singular dimensions."""
dim = rng.integers(3, 5)
g1 = UnitGrid([dim], periodic=periodic)
g2a = UnitGrid([dim, 1], periodic=periodic)
@@ -61,7 +61,7 @@ def test_singular_dimensions_2d(periodic, rng):
@pytest.mark.parametrize("periodic", [True, False])
def test_singular_dimensions_3d(periodic, rng):
- """test grids with singular dimensions"""
+ """Test grids with singular dimensions."""
dim = rng.integers(3, 5)
g1 = UnitGrid([dim], periodic=periodic)
g3a = UnitGrid([dim, 1, 1], periodic=periodic)
@@ -77,7 +77,7 @@ def test_singular_dimensions_3d(periodic, rng):
@pytest.mark.parametrize("periodic", [True, False])
def test_laplace_1d(periodic, rng):
- """test the implementation of the laplace operator"""
+ """Test the implementation of the laplace operator."""
bcs = _get_random_grid_bcs(1, periodic=periodic)
field = ScalarField.random_colored(bcs.grid, -6, rng=rng)
l1 = field.laplace(bcs, backend="scipy")
@@ -89,7 +89,7 @@
@pytest.mark.parametrize("ndim", [1, 2])
@pytest.mark.parametrize("dtype", [float, complex])
def test_laplace_spectral(ndim, dtype, rng):
- """test the implementation of the spectral laplace operator"""
+ """Test the implementation of the spectral laplace operator."""
shape = np.c_[rng.uniform(-20, -10, ndim), rng.uniform(10, 20, ndim)]
grid = CartesianGrid(shape, 30, periodic=True)
field = ScalarField.random_colored(grid, -8, dtype=dtype, rng=rng)
@@ -103,7 +103,7 @@
@pytest.mark.parametrize("periodic", [True, False])
def test_laplace_2d(periodic, rng):
- """test the implementation of the laplace operator"""
+ """Test the implementation of the laplace operator."""
bcs = _get_random_grid_bcs(2, dx="uniform", periodic=periodic)
a = rng.random(bcs.grid.shape) # test data
@@ -121,7 +121,7 @@
@pytest.mark.parametrize("periodic", [True, False])
def test_laplace_2d_nonuniform(periodic, rng):
- """test the implementation of the laplace operator for non-uniform coordinates"""
+ """Test the implementation of the laplace operator for non-uniform coordinates."""
bcs = _get_random_grid_bcs(ndim=2, dx="random", periodic=periodic)
dx = bcs.grid.discretization
@@ -140,7 +140,7 @@
@pytest.mark.parametrize("periodic", [True, False])
def test_laplace_3d(periodic, rng):
- """test the implementation of the laplace operator"""
+ """Test the implementation of the laplace operator."""
bcs = _get_random_grid_bcs(ndim=3, dx="uniform", periodic=periodic)
field = ScalarField.random_uniform(bcs.grid, rng=rng)
l1 = field.laplace(bcs, backend="scipy")
@@ -149,7 +149,7 @@
def test_gradient_1d():
- """test specific boundary conditions for the 1d gradient"""
+ """Test specific boundary conditions for the 1d gradient."""
grid =
UnitGrid(5) b_l = {"type": "derivative", "value": -1} @@ -171,7 +171,7 @@ def test_gradient_1d(): @pytest.mark.parametrize("method", ["central", "forward", "backward"]) @pytest.mark.parametrize("periodic", [True, False]) def test_gradient_cart(ndim, method, periodic, rng): - """test different gradient operators""" + """Test different gradient operators.""" bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic=periodic) field = ScalarField.random_uniform(bcs.grid, rng=rng) res1 = field.gradient(bcs, backend="scipy", method=method).data @@ -184,7 +184,7 @@ def test_gradient_cart(ndim, method, periodic, rng): @pytest.mark.parametrize("method", ["central", "forward", "backward"]) @pytest.mark.parametrize("periodic", [True, False]) def test_divergence_cart(ndim, method, periodic, rng): - """test different divergence operators""" + """Test different divergence operators.""" bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic=periodic, rank=1) field = VectorField.random_uniform(bcs.grid, rng=rng) res1 = field.divergence(bcs, backend="scipy", method=method).data @@ -194,7 +194,7 @@ def test_divergence_cart(ndim, method, periodic, rng): @pytest.mark.parametrize("ndim", [1, 2, 3]) def test_vector_gradient(ndim, rng): - """test different vector gradient operators""" + """Test different vector gradient operators.""" bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic="random", rank=1) field = VectorField.random_uniform(bcs.grid, rng=rng) res1 = field.gradient(bcs, backend="scipy").data @@ -205,7 +205,7 @@ def test_vector_gradient(ndim, rng): @pytest.mark.parametrize("ndim", [1, 2, 3]) def test_vector_laplace_cart(ndim, rng): - """test different vector laplace operators""" + """Test different vector laplace operators.""" bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic="random", rank=1) field = VectorField.random_uniform(bcs.grid, rng=rng) res1 = field.laplace(bcs, backend="scipy").data @@ -216,7 +216,7 @@ def test_vector_laplace_cart(ndim, rng): @pytest.mark.parametrize("ndim", [1, 2, 3]) def test_tensor_divergence_cart(ndim, rng): - """test different tensor divergence operators""" + """Test different tensor divergence operators.""" bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic="random", rank=2) field = Tensor2Field.random_uniform(bcs.grid, rng=rng) res1 = field.divergence(bcs, backend="scipy").data @@ -226,7 +226,7 @@ def test_tensor_divergence_cart(ndim, rng): def test_div_grad_const(): - """compare div grad to laplace operator""" + """Compare div grad to laplace operator.""" grid = CartesianGrid([[-1, 1]], 32) # test constant @@ -240,7 +240,7 @@ def test_div_grad_const(): def test_div_grad_linear(rng): - """compare div grad to laplace operator""" + """Compare div grad to laplace operator.""" grid = CartesianGrid([[-1, 1]], 32) x = grid.axes_coords[0] @@ -259,7 +259,7 @@ def test_div_grad_linear(rng): def test_div_grad_quadratic(): - """compare div grad to laplace operator""" + """Compare div grad to laplace operator.""" grid = CartesianGrid([[-1, 1]], 32) x = grid.axes_coords[0] @@ -276,7 +276,7 @@ def test_div_grad_quadratic(): @pytest.mark.parametrize("dim", [1, 2, 3]) def test_gradient_squared_cart(dim, rng): - """compare gradient squared operator""" + """Compare gradient squared operator.""" grid = CartesianGrid( [[0, 2 * np.pi]] * dim, shape=rng.integers(30, 35, dim), @@ -292,7 +292,7 @@ def test_gradient_squared_cart(dim, rng): def test_rect_div_grad(): - """compare div grad to laplacian""" + """Compare div grad to laplacian.""" grid = CartesianGrid([[0, 2 * np.pi], 
[0, 2 * np.pi]], [16, 16], periodic=True) x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1] field = ScalarField(grid, data=np.cos(x) + np.sin(y)) @@ -306,7 +306,7 @@ def test_rect_div_grad(): def test_degenerated_grid(rng): - """test operators on grids with singular dimensions""" + """Test operators on grids with singular dimensions.""" g1 = CartesianGrid([[0, 1]], 4) g2 = CartesianGrid([[0, 1], [0, 0.1]], [4, 1], periodic=[False, True]) f1 = ScalarField.random_uniform(g1, rng=rng) @@ -318,7 +318,7 @@ def test_degenerated_grid(rng): def test_2nd_order_bc(rng): - """test whether 2nd order boundary conditions can be used""" + """Test whether 2nd order boundary conditions can be used.""" grid = UnitGrid([8, 8]) field = ScalarField.random_uniform(grid, rng=rng) field.laplace([{"value": "sin(y)"}, {"value": "x"}]) @@ -326,7 +326,7 @@ def test_2nd_order_bc(rng): @pytest.mark.parametrize("ndim", [1, 2, 3]) def test_laplace_matrix(ndim, rng): - """test laplace operator implemented using matrix multiplication""" + """Test laplace operator implemented using matrix multiplication.""" if ndim == 1: periodic, bc = [False], [{"value": "sin(x)"}] elif ndim == 2: @@ -350,7 +350,7 @@ def test_laplace_matrix(ndim, rng): ) @pytest.mark.parametrize("bc_val", ["auto_periodic_neumann", {"value": "sin(x)"}]) def test_poisson_solver_cartesian(grid, bc_val, rng): - """test the poisson solver on cartesian grids""" + """Test the poisson solver on cartesian grids.""" bcs = grid.get_boundary_conditions(bc_val) d = ScalarField.random_uniform(grid, rng=rng) d -= d.average # balance the right hand side diff --git a/tests/grids/operators/test_common_operators.py b/tests/grids/operators/test_common_operators.py index ec7caaf1..f81b273a 100644 --- a/tests/grids/operators/test_common_operators.py +++ b/tests/grids/operators/test_common_operators.py @@ -13,7 +13,7 @@ @pytest.mark.parametrize("ndim,axis", [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]) def test_make_derivative(ndim, axis, rng): - """test the _make_derivative function""" + """Test the _make_derivative function.""" periodic = random.choice([True, False]) grid = CartesianGrid([[0, 6 * np.pi]] * ndim, 16, periodic=periodic) field = ScalarField.random_harmonic(grid, modes=1, axis_combination=np.add, rng=rng) @@ -34,7 +34,7 @@ def test_make_derivative(ndim, axis, rng): @pytest.mark.parametrize("ndim,axis", [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]) def test_make_derivative2(ndim, axis, rng): - """test the _make_derivative2 function""" + """Test the _make_derivative2 function.""" periodic = random.choice([True, False]) grid = CartesianGrid([[0, 6 * np.pi]] * ndim, 16, periodic=periodic) field = ScalarField.random_harmonic(grid, modes=1, axis_combination=np.add, rng=rng) diff --git a/tests/grids/operators/test_cylindrical_operators.py b/tests/grids/operators/test_cylindrical_operators.py index 1b30f84d..28364671 100644 --- a/tests/grids/operators/test_cylindrical_operators.py +++ b/tests/grids/operators/test_cylindrical_operators.py @@ -18,7 +18,7 @@ def test_laplacian_field_cyl(): - """test the gradient operator""" + """Test the gradient operator.""" grid = CylindricalSymGrid(2 * np.pi, [0, 2 * np.pi], [8, 16], periodic_z=True) r, z = grid.cell_coords[..., 0], grid.cell_coords[..., 1] s = ScalarField(grid, data=np.cos(r) + np.sin(z)) @@ -29,7 +29,7 @@ def test_laplacian_field_cyl(): def test_gradient_field_cyl(): - """test the gradient operator""" + """Test the gradient operator.""" grid = CylindricalSymGrid(2 * np.pi, [0, 2 * np.pi], [8, 16], 
periodic_z=True) r, z = grid.cell_coords[..., 0], grid.cell_coords[..., 1] s = ScalarField(grid, data=np.cos(r) + np.sin(z)) @@ -41,7 +41,7 @@ def test_gradient_field_cyl(): def test_divergence_field_cyl(): - """test the divergence operator""" + """Test the divergence operator.""" grid = CylindricalSymGrid(2 * np.pi, [0, 2 * np.pi], [16, 32], periodic_z=True) v = VectorField.from_expression(grid, ["cos(r) + sin(z)**2", "z * cos(r)**2", 0]) s = v.divergence(bc="auto_periodic_neumann") @@ -70,7 +70,7 @@ def test_divergence_field_cyl(): def test_vector_gradient_divergence_field_cyl(): - """test the divergence operator""" + """Test the divergence operator.""" grid = CylindricalSymGrid(2 * np.pi, [0, 2 * np.pi], [8, 16], periodic_z=True) r, z = grid.cell_coords[..., 0], grid.cell_coords[..., 1] data = [np.cos(r) + np.sin(z) ** 2, np.cos(r) ** 2 + np.sin(z), np.zeros_like(r)] @@ -82,8 +82,10 @@ def test_vector_gradient_divergence_field_cyl(): def test_findiff_cyl(): - """test operator for a simple cylindrical grid. Note that we only - really test the polar symmetry""" + """Test operator for a simple cylindrical grid. + + Note that we only really test the polar symmetry + """ grid = CylindricalSymGrid(1.5, [0, 1], (3, 2), periodic_z=True) _, r1, r2 = grid.axes_coords[0] np.testing.assert_array_equal(grid.discretization, np.full(2, 0.5)) @@ -100,7 +102,7 @@ def test_findiff_cyl(): def test_grid_laplace(): - """test the cylindrical implementation of the laplace operator""" + """Test the cylindrical implementation of the laplace operator.""" grid_cyl = CylindricalSymGrid(7, (0, 4), (4, 4)) grid_cart = CartesianGrid([[-5, 5], [-5, 5], [0, 4]], [10, 10, 4]) @@ -115,7 +117,7 @@ def test_grid_laplace(): def test_gradient_squared_cyl(rng): - """compare gradient squared operator""" + """Compare gradient squared operator.""" grid = CylindricalSymGrid(2 * np.pi, [0, 2 * np.pi], 64) field = ScalarField.random_harmonic(grid, modes=1, rng=rng) s1 = field.gradient("auto_periodic_neumann").to_scalar("squared_sum") @@ -127,7 +129,7 @@ def test_gradient_squared_cyl(rng): def test_grid_div_grad_cyl(): - """compare div grad to laplacian""" + """Compare div grad to laplacian.""" grid = CylindricalSymGrid(2 * np.pi, (0, 2 * np.pi), (16, 16), periodic_z=True) field = ScalarField.from_expression(grid, "cos(r) + sin(z)") @@ -143,7 +145,7 @@ def test_grid_div_grad_cyl(): def test_examples_scalar_cyl(): - """compare derivatives of scalar fields for cylindrical grids""" + """Compare derivatives of scalar fields for cylindrical grids.""" grid = CylindricalSymGrid(1, [0, 2 * np.pi], 32) expr = "r**3 * sin(z)" sf = ScalarField.from_expression(grid, expr) @@ -171,7 +173,7 @@ def test_examples_scalar_cyl(): def test_examples_vector_cyl(): - """compare derivatives of vector fields for cylindrical grids""" + """Compare derivatives of vector fields for cylindrical grids.""" grid = CylindricalSymGrid(1, [0, 2 * np.pi], 32) e_r = "r**3 * sin(z)" e_φ = "r**2 * sin(z)" @@ -213,7 +215,7 @@ def test_examples_vector_cyl(): def test_examples_tensor_cyl(): - """compare derivatives of tensorial fields for cylindrical grids""" + """Compare derivatives of tensorial fields for cylindrical grids.""" grid = CylindricalSymGrid(1, [0, 2 * np.pi], 32, periodic_z=True) tf = Tensor2Field.from_expression(grid, [["r**3 * sin(z)"] * 3] * 3) @@ -235,7 +237,7 @@ def test_examples_tensor_cyl(): @pytest.mark.parametrize("r_inner", (0, 1)) def test_laplace_matrix(r_inner, rng): - """test laplace operator implemented using matrix multiplication""" + """Test 
laplace operator implemented using matrix multiplication.""" grid = CylindricalSymGrid((r_inner, 2), (2.5, 4.3), 16) if r_inner == 0: bcs = ["neumann", {"derivative": "cos(r) + z"}] @@ -253,7 +255,7 @@ def test_laplace_matrix(r_inner, rng): @pytest.mark.parametrize("r_inner", (0, 1)) def test_poisson_solver_cylindrical(r_inner, rng): - """test the poisson solver on Cylindrical grids""" + """Test the poisson solver on Cylindrical grids.""" grid = CylindricalSymGrid((r_inner, 2), (2.5, 4.3), 16) if r_inner == 0: bcs = ["neumann", {"value": "cos(r) + z"}] diff --git a/tests/grids/operators/test_polar_operators.py b/tests/grids/operators/test_polar_operators.py index a32b5bc1..48ecf1a6 100644 --- a/tests/grids/operators/test_polar_operators.py +++ b/tests/grids/operators/test_polar_operators.py @@ -18,7 +18,7 @@ def test_findiff_polar(): - """test operator for a simple polar grid""" + """Test operator for a simple polar grid.""" grid = PolarSymGrid(1.5, 3) _, _, r2 = grid.axes_coords[0] assert grid.discretization == (0.5,) @@ -43,7 +43,7 @@ def test_findiff_polar(): def test_conservative_laplace_polar(rng): - """test and compare the two implementation of the laplace operator""" + """Test and compare the two implementation of the laplace operator.""" grid = PolarSymGrid(1.5, 8) f = ScalarField.random_uniform(grid, rng=rng) @@ -61,7 +61,7 @@ def test_conservative_laplace_polar(rng): ], ) def test_small_annulus_polar(op_name, field, rng): - """test whether a small annulus gives the same result as a sphere""" + """Test whether a small annulus gives the same result as a sphere.""" grids = [ PolarSymGrid((0, 1), 8), PolarSymGrid((1e-8, 1), 8), @@ -80,7 +80,7 @@ def test_small_annulus_polar(op_name, field, rng): def test_grid_laplace_polar(): - """test the polar implementation of the laplace operator""" + """Test the polar implementation of the laplace operator.""" grid_sph = PolarSymGrid(7, 8) grid_cart = CartesianGrid([[-5, 5], [-5, 5]], [12, 11]) @@ -97,7 +97,7 @@ def test_grid_laplace_polar(): @pytest.mark.parametrize("r_inner", (0, 2 * np.pi)) def test_gradient_squared_polar(r_inner): - """compare gradient squared operator""" + """Compare gradient squared operator.""" grid = PolarSymGrid((r_inner, 4 * np.pi), 32) field = ScalarField.from_expression(grid, "cos(r)") s1 = field.gradient("auto_periodic_neumann").to_scalar("squared_sum") @@ -109,7 +109,7 @@ def test_gradient_squared_polar(r_inner): def test_grid_div_grad_polar(): - """compare div grad to laplacian for polar grids""" + """Compare div grad to laplacian for polar grids.""" grid = PolarSymGrid(2 * np.pi, 16) field = ScalarField.from_expression(grid, "cos(r)") @@ -125,7 +125,7 @@ def test_grid_div_grad_polar(): @pytest.mark.parametrize("grid", [PolarSymGrid(4, 8), PolarSymGrid([2, 4], 8)]) @pytest.mark.parametrize("bc_val", ["auto_periodic_neumann", {"value": 1}]) def test_poisson_solver_polar(grid, bc_val, rng): - """test the poisson solver on Polar grids""" + """Test the poisson solver on Polar grids.""" bcs = grid.get_boundary_conditions(bc_val) d = ScalarField.random_uniform(grid, rng=rng) d -= d.average # balance the right hand side @@ -136,7 +136,7 @@ def test_poisson_solver_polar(grid, bc_val, rng): def test_examples_scalar_polar(): - """compare derivatives of scalar fields for polar grids""" + """Compare derivatives of scalar fields for polar grids.""" grid = PolarSymGrid(1, 32) sf = ScalarField.from_expression(grid, "r**3") @@ -158,7 +158,7 @@ def test_examples_scalar_polar(): def test_examples_vector_polar(): - """compare 
derivatives of vector fields for polar grids""" + """Compare derivatives of vector fields for polar grids.""" grid = PolarSymGrid(1, 32) vf = VectorField.from_expression(grid, ["r**3", "r**2"]) @@ -180,7 +180,7 @@ def test_examples_vector_polar(): def test_examples_tensor_polar(): - """compare derivatives of tensorial fields for polar grids""" + """Compare derivatives of tensorial fields for polar grids.""" grid = PolarSymGrid(1, 32) tf = Tensor2Field.from_expression(grid, [["r**3"] * 2] * 2) @@ -196,7 +196,7 @@ def test_examples_tensor_polar(): @pytest.mark.parametrize("r_inner", (0, 1)) def test_laplace_matrix(r_inner, rng): - """test laplace operator implemented using matrix multiplication""" + """Test laplace operator implemented using matrix multiplication.""" grid = PolarSymGrid((r_inner, 2), 16) if r_inner == 0: bcs = grid.get_boundary_conditions({"neumann"}) diff --git a/tests/grids/operators/test_spherical_operators.py b/tests/grids/operators/test_spherical_operators.py index 41a8cd15..512d3029 100644 --- a/tests/grids/operators/test_spherical_operators.py +++ b/tests/grids/operators/test_spherical_operators.py @@ -18,7 +18,7 @@ def test_findiff_sph(): - """test operator for a simple spherical grid""" + """Test operator for a simple spherical grid.""" grid = SphericalSymGrid(1.5, 3) _, r1, r2 = grid.axes_coords[0] assert grid.discretization == (0.5,) @@ -45,7 +45,7 @@ def test_findiff_sph(): def test_conservative_sph(): - """test whether the integral over a divergence vanishes""" + """Test whether the integral over a divergence vanishes.""" grid = SphericalSymGrid((0, 2), 50) expr = "1 / cosh((r - 1) * 10)" @@ -77,7 +77,7 @@ def test_conservative_sph(): ], ) def test_small_annulus_sph(op_name, field, rng): - """test whether a small annulus gives the same result as a sphere""" + """Test whether a small annulus gives the same result as a sphere.""" grids = [ SphericalSymGrid((0, 1), 8), SphericalSymGrid((1e-8, 1), 8), @@ -98,7 +98,7 @@ def test_small_annulus_sph(op_name, field, rng): def test_grid_laplace(): - """test the polar implementation of the laplace operator""" + """Test the polar implementation of the laplace operator.""" grid_sph = SphericalSymGrid(9, 11) grid_cart = CartesianGrid([[-5, 5], [-5, 5], [-5, 5]], [12, 10, 11]) @@ -117,7 +117,7 @@ def test_grid_laplace(): @pytest.mark.parametrize("r_inner", (0, 1)) def test_gradient_squared(r_inner, rng): - """compare gradient squared operator""" + """Compare gradient squared operator.""" grid = SphericalSymGrid((r_inner, 5), 64) field = ScalarField.random_harmonic(grid, modes=1, rng=rng) s1 = field.gradient("auto_periodic_neumann").to_scalar("squared_sum") @@ -129,7 +129,7 @@ def test_gradient_squared(r_inner, rng): def test_grid_div_grad_sph(): - """compare div grad to laplacian""" + """Compare div grad to laplacian.""" grid = SphericalSymGrid(2 * np.pi, 16) field = ScalarField.from_expression(grid, "cos(r)") @@ -145,7 +145,7 @@ def test_grid_div_grad_sph(): @pytest.mark.parametrize("grid", [SphericalSymGrid(4, 8), SphericalSymGrid([2, 4], 8)]) @pytest.mark.parametrize("bc_val", ["auto_periodic_neumann", {"value": 1}]) def test_poisson_solver_spherical(grid, bc_val, rng): - """test the poisson solver on Spherical grids""" + """Test the poisson solver on Spherical grids.""" bcs = grid.get_boundary_conditions(bc_val) d = ScalarField.random_uniform(grid, rng=rng) d -= d.average # balance the right hand side @@ -157,7 +157,7 @@ def test_poisson_solver_spherical(grid, bc_val, rng): def test_examples_scalar_sph(): - """compare 
derivatives of scalar fields for spherical grids""" + """Compare derivatives of scalar fields for spherical grids.""" grid = SphericalSymGrid(1, 32) sf = ScalarField.from_expression(grid, "r**3") @@ -179,7 +179,7 @@ def test_examples_scalar_sph(): def test_examples_vector_sph_div(): - """compare derivatives of vector fields for spherical grids""" + """Compare derivatives of vector fields for spherical grids.""" grid = SphericalSymGrid(1, 32) vf = VectorField.from_expression(grid, ["r**3", 0, "r**2"]) res = vf.divergence([{"derivative": 0}, {"value": 1}]) @@ -189,7 +189,7 @@ def test_examples_vector_sph_div(): @pytest.mark.parametrize("method", ["central", "forward", "backward"]) def test_examples_vector_sph_grad(method): - """compare derivatives of vector fields for spherical grids""" + """Compare derivatives of vector fields for spherical grids.""" grid = SphericalSymGrid(1, 32) vf = VectorField.from_expression(grid, ["r**3", 0, 0]) res = vf.gradient([{"derivative": 0}, {"value": [1, 1, 1]}], method=method) @@ -200,7 +200,7 @@ def test_examples_vector_sph_grad(method): @pytest.mark.parametrize("conservative", [True, False]) def test_examples_tensor_sph(conservative): - """compare derivatives of tensorial fields for spherical grids""" + """Compare derivatives of tensorial fields for spherical grids.""" # test explicit expression for which we know the results grid = SphericalSymGrid(1, 32) expressions = [["r**4", 0, 0], [0, "r**3", 0], [0, 0, "r**3"]] @@ -236,7 +236,7 @@ def test_examples_tensor_sph(conservative): def test_tensor_sph_symmetry(): - """test treatment of symmetric tensor field""" + """Test treatment of symmetric tensor field.""" grid = SphericalSymGrid(1, 16) vf = VectorField.from_expression(grid, ["r**2", 0, 0]) vf_grad = vf.gradient(["derivative", {"derivative": 2}]) @@ -254,7 +254,7 @@ def test_tensor_sph_symmetry(): def test_tensor_div_div_analytical(): - """test double divergence of a tensor field against analytical expression""" + """Test double divergence of a tensor field against analytical expression.""" grid = SphericalSymGrid([0.5, 1], 12) tf = Tensor2Field.from_expression( grid, [["r**4", 0, 0], [0, "r**3", 0], [0, 0, "r**3"]] @@ -266,7 +266,7 @@ def test_tensor_div_div_analytical(): @pytest.mark.parametrize("conservative", [True, False]) def test_tensor_div_div(conservative): - """test double divergence of a tensor field by comparison with two divergences""" + """Test double divergence of a tensor field by comparison with two divergences.""" grid = SphericalSymGrid([0, 1], 64) expr = "r * tanh((0.5 - r) * 10)" bc = "auto_periodic_neumann" @@ -290,7 +290,7 @@ def test_tensor_div_div(conservative): @pytest.mark.parametrize("r_inner", (0, 1)) def test_laplace_matrix(r_inner, rng): - """test laplace operator implemented using matrix multiplication""" + """Test laplace operator implemented using matrix multiplication.""" grid = SphericalSymGrid((r_inner, 2), 16) if r_inner == 0: bcs = grid.get_boundary_conditions({"neumann"}) diff --git a/tests/grids/test_cartesian_grids.py b/tests/grids/test_cartesian_grids.py index 164c95ac..74e5e0c0 100644 --- a/tests/grids/test_cartesian_grids.py +++ b/tests/grids/test_cartesian_grids.py @@ -14,7 +14,7 @@ def _get_cartesian_grid(dim=2, periodic=True): - """return a random Cartesian grid of given dimension""" + """Return a random Cartesian grid of given dimension.""" rng = np.random.default_rng(0) bounds = [[0, 1 + rng.random()] for _ in range(dim)] shape = rng.integers(32, 64, size=dim) @@ -22,7 +22,7 @@ def 
_get_cartesian_grid(dim=2, periodic=True):
 
 
 def test_degenerated_grid():
-    """test degenerated grids"""
+    """Test degenerated grids."""
     with pytest.raises(ValueError):
         UnitGrid([])
     with pytest.raises(ValueError):
@@ -31,7 +31,7 @@
 
 @pytest.mark.parametrize("dim", [1, 2, 3])
 def test_generic_cartesian_grid(dim, rng):
-    """test generic cartesian grid functions"""
+    """Test generic cartesian grid functions."""
     periodic = random.choices([True, False], k=dim)
     shape = rng.integers(2, 8, size=dim)
     a = rng.random(dim)
@@ -59,7 +59,7 @@
 
 @pytest.mark.parametrize("periodic", [True, False])
 def test_unit_grid_1d(periodic, rng):
-    """test 1D grids"""
+    """Test 1D grids."""
     grid = UnitGrid(4, periodic=periodic)
     assert grid.dim == 1
     assert grid.numba_type == "f8[:]"
@@ -95,7 +95,7 @@ def norm_numba_wrap(x):
 
 
 def test_unit_grid_2d(rng):
-    """test 2D grids"""
+    """Test 2D grids."""
     # test special case
     grid = UnitGrid([4, 4], periodic=True)
     assert grid.dim == 2
@@ -133,7 +133,7 @@
 
 
 def test_unit_grid_3d(rng):
-    """test 3D grids"""
+    """Test 3D grids."""
     grid = UnitGrid([4, 4, 4])
     assert grid.dim == 3
     assert grid.numba_type == "f8[:, :, :]"
@@ -156,7 +156,7 @@
 
 
 def test_rect_grid_1d(rng):
-    """test 1D grids"""
+    """Test 1D grids."""
     grid = CartesianGrid([32], 16, periodic=False)
     assert grid.dim == 1
     assert grid.volume == 32
@@ -177,7 +177,7 @@
 
 
 def test_rect_grid_2d(rng):
-    """test 2D grids"""
+    """Test 2D grids."""
     grid = CartesianGrid([[2], [2]], 4, periodic=True)
     assert grid.get_image_data(np.zeros(grid.shape))["extent"] == [0, 2, 0, 2]
 
@@ -195,7 +195,7 @@
 
 
 def test_rect_grid_3d(rng):
-    """test 3D grids"""
+    """Test 3D grids."""
     grid = CartesianGrid([4, 4, 4], 4)
     assert grid.dim == 3
     assert grid.volume == 64
@@ -212,7 +212,7 @@
 
 @pytest.mark.parametrize("periodic", [True, False])
 def test_unit_rect_grid(periodic, rng):
-    """test whether the rectangular grid behaves like a unit grid in special cases"""
+    """Test whether the rectangular grid behaves like a unit grid in special cases."""
     dim = random.randrange(1, 4)
     shape = rng.integers(2, 10, size=dim)
     g1 = UnitGrid(shape, periodic=periodic)
@@ -233,7 +233,7 @@
 
 
 def test_conversion_unit_rect_grid(rng):
-    """test the conversion from unit to rectangular grid"""
+    """Test the conversion from unit to rectangular grid."""
     dim = random.randrange(1, 4)
     shape = rng.integers(2, 10, size=dim)
     periodic = random.choices([True, False], k=dim)
@@ -246,7 +246,7 @@
 
 
 def test_setting_boundary_conditions():
-    """test setting some boundary conditions"""
+    """Test setting some boundary conditions."""
     grid = UnitGrid([3, 3], periodic=[True, False])
     for bc in [
         grid.get_boundary_conditions("auto_periodic_neumann"),
@@ -268,7 +268,7 @@
 
 
 def test_setting_domain_rect():
-    """test various versions of settings bcs for Cartesian grids"""
+    """Test various versions of setting bcs for Cartesian grids."""
     grid = UnitGrid([2, 2])
     grid.get_boundary_conditions(["derivative", "derivative"])
 
@@ -293,7 +293,7 @@
 
 @pytest.mark.parametrize("reflect", [True, False])
 def test_normalize_point(reflect):
-    """test normalize_point method for Cartesian Grids"""
+    """Test normalize_point method for Cartesian grids."""
     grid = CartesianGrid([[1, 3]], [1], 
periodic=False) norm_numba = grid.make_normalize_point_compiled(reflect=reflect) @@ -315,7 +315,7 @@ def norm_numba_wrap(x): @pytest.mark.parametrize("method", ["central", "forward", "backward"]) def test_generic_operators(method, rng): - """test the `d_dx` version of the operator""" + """Test the `d_dx` version of the operator.""" grid = CartesianGrid([[1, 3]], 5, periodic=True) bcs = grid.get_boundary_conditions("periodic") data = rng.uniform(0, 1, size=5) @@ -335,7 +335,7 @@ def test_generic_operators(method, rng): def test_boundary_coordinates(): - """test _boundary_coordinates method""" + """Test _boundary_coordinates method.""" grid = UnitGrid([2, 2]) c = grid._boundary_coordinates(axis=0, upper=False) diff --git a/tests/grids/test_coordinates.py b/tests/grids/test_coordinates.py index d349e417..7216559a 100644 --- a/tests/grids/test_coordinates.py +++ b/tests/grids/test_coordinates.py @@ -11,7 +11,7 @@ def iter_coordinates(): - """generator providing some test coordinate systems""" + """Generator providing some test coordinate systems.""" yield coordinates.CartesianCoordinates(1) yield coordinates.CartesianCoordinates(2) yield coordinates.CartesianCoordinates(3) @@ -26,7 +26,7 @@ def iter_coordinates(): @pytest.mark.parametrize("c", iter_coordinates()) def test_basic_coordinates(c, rng): - """test basic coordinate properties""" + """Test basic coordinate properties.""" assert len(c.coordinate_limits) == c.dim assert len(c.axes) == c.dim x = rng.uniform(size=c.dim) @@ -38,7 +38,7 @@ def test_basic_coordinates(c, rng): @pytest.mark.parametrize("c", iter_coordinates()) def test_coordinate_volume_factors(c, rng): - """test basic coordinate properties""" + """Test basic coordinate properties.""" p1 = c.pos_from_cart(rng.uniform(-1, 1, size=c.dim)) p2 = c.pos_from_cart(rng.uniform(-1, 1, size=c.dim)) p_l = np.minimum(p1, p2) @@ -72,7 +72,7 @@ def test_coordinate_metric(c, rng): @pytest.mark.parametrize("c", iter_coordinates()) def test_coordinate_vector_fields(c, rng): - """test basic coordinate properties""" + """Test basic coordinate properties.""" # anchor point x1 = rng.uniform(-1, 1, size=c.dim) p = c.pos_from_cart(x1) @@ -94,7 +94,7 @@ def test_coordinate_vector_fields(c, rng): def test_invalid_coordinates(): - """test some invalid initializations""" + """Test some invalid initializations.""" with pytest.raises(ValueError): coordinates.CartesianCoordinates(0) with pytest.raises(ValueError): diff --git a/tests/grids/test_cylindrical_grids.py b/tests/grids/test_cylindrical_grids.py index bf27ab16..9b0ae77c 100644 --- a/tests/grids/test_cylindrical_grids.py +++ b/tests/grids/test_cylindrical_grids.py @@ -12,7 +12,7 @@ @pytest.mark.parametrize("periodic", [True, False]) @pytest.mark.parametrize("r_inner", [0, 2]) def test_cylindrical_grid(periodic, r_inner, rng): - """test simple cylindrical grid""" + """Test simple cylindrical grid.""" grid = CylindricalSymGrid((r_inner, 4), (-1, 2), (8, 9), periodic_z=periodic) if r_inner == 0: assert grid == CylindricalSymGrid(4, (-1, 2), (8, 9), periodic_z=periodic) @@ -46,7 +46,7 @@ def test_cylindrical_grid(periodic, r_inner, rng): def test_cylindrical_to_cartesian(): - """test conversion of cylindrical grid to Cartesian""" + """Test conversion of cylindrical grid to Cartesian.""" expr_cyl = "cos(z / 2) / (1 + r**2)" expr_cart = expr_cyl.replace("r**2", "(x**2 + y**2)") @@ -61,7 +61,7 @@ def test_cylindrical_to_cartesian(): def test_setting_boundary_conditions(): - """test various versions of settings bcs for cylindrical grids""" + """Test various 
versions of setting bcs for cylindrical grids."""
     grid = CylindricalSymGrid(1, [0, 1], [2, 2], periodic_z=False)
     grid.get_boundary_conditions("auto_periodic_neumann")
     grid.get_boundary_conditions(["derivative", "derivative"])
diff --git a/tests/grids/test_generic_grids.py b/tests/grids/test_generic_grids.py
index 23e88172..518f5409 100644
--- a/tests/grids/test_generic_grids.py
+++ b/tests/grids/test_generic_grids.py
@@ -19,7 +19,7 @@
 
 
 def iter_grids():
-    """generator providing some test grids"""
+    """Generator providing some test grids."""
     for periodic in [True, False]:
         yield grids.UnitGrid([3], periodic=periodic)
         yield grids.UnitGrid([3, 3, 3], periodic=periodic)
@@ -31,7 +31,7 @@
 
 @pytest.mark.parametrize("grid", iter_grids())
 def test_basic_grid_properties(grid):
-    """test basic grid properties"""
+    """Test basic grid properties."""
     with pytest.raises(AttributeError):
         grid.periodic = True
     with pytest.raises(AttributeError):
@@ -39,7 +39,7 @@
 
 
 def test_discretize(rng):
-    """test the discretize function"""
+    """Test the discretize function."""
     x_min = rng.uniform(0, 1)
     x_max = rng.uniform(2, 3)
     num = rng.integers(5, 8)
@@ -51,7 +51,7 @@
 
 @pytest.mark.parametrize("grid", iter_grids())
 def test_serialization(grid):
-    """test whether grid can be serialized and copied"""
+    """Test whether grid can be serialized and copied."""
     g = GridBase.from_state(grid.state_serialized)
     assert grid == g
     assert grid._cache_hash() == g._cache_hash()
@@ -62,7 +62,7 @@
 
 
 def test_iter_mirror_points():
-    """test iterating mirror points in grids"""
+    """Test iterating mirror points in grids."""
     grid_cart = grids.UnitGrid([2, 2], periodic=[True, False])
     grid_cyl = grids.CylindricalSymGrid(2, (0, 2), (2, 2), periodic_z=False)
     grid_sph = grids.SphericalSymGrid(2, 2)
@@ -86,7 +86,7 @@ def test_iter_mirror_points():
 
 @pytest.mark.parametrize("grid", iter_grids())
 def test_coordinate_conversion(grid, rng):
-    """test the conversion between cells and points"""
+    """Test the conversion between cells and points."""
     p_empty = np.zeros((0, grid.dim))
     c_empty = np.zeros((0, grid.num_axes))
 
@@ -107,7 +107,7 @@
 
 @pytest.mark.parametrize("grid", iter_grids())
 def test_coordinate_conversion_full(grid, rng):
-    """test the conversion between cells and points"""
+    """Test the conversion between cells and points."""
     p_empty = np.zeros((0, grid.dim))
     g_empty = np.zeros((0, grid.dim))
 
@@ -127,7 +127,7 @@
 
 @pytest.mark.parametrize("grid", iter_grids())
 def test_integration_serial(grid, rng):
-    """test integration of fields"""
+    """Test integration of fields."""
     arr = rng.normal(size=grid.shape)
     res = grid.make_integrator()(arr)
     assert np.isscalar(res)
@@ -139,7 +139,7 @@
 
 
 def test_grid_plotting():
-    """test plotting of grids"""
+    """Test plotting of grids."""
     grids.UnitGrid([4]).plot()
     grids.UnitGrid([4, 4]).plot()
 
@@ -152,7 +152,7 @@
 
 @pytest.mark.parametrize("grid", iter_grids())
 def test_operators(grid):
-    """test operator mechanism"""
+    """Test operator mechanism."""
 
     def make_op(state):
         return lambda state: state
@@ -170,14 +170,14 @@ def make_op(state):
 
 
 def test_cartesian_operator_infos():
-    """test special case of cartesian operators"""
+    """Test special case of cartesian operators."""
     assert "d_dx" not in grids.UnitGrid.operators
     assert "d_dx" in 
grids.UnitGrid([2]).operators assert "d_dy" not in grids.UnitGrid([2]).operators def test_registered_operators(): - """test the registered_operators function""" + """Test the registered_operators function.""" for grid_name, ops in registered_operators().items(): grid_class_ops = getattr(grids, grid_name).operators assert all(op in grid_class_ops for op in ops) @@ -185,7 +185,7 @@ def test_registered_operators(): @pytest.mark.parametrize("grid", iter_grids()) def test_cell_volumes(grid): - """test calculation of cell volumes""" + """Test calculation of cell volumes.""" d2 = grid.discretization / 2 x_low = grid._coords_full(grid.cell_coords - d2, value="min") x_high = grid._coords_full(grid.cell_coords + d2, value="max") @@ -198,7 +198,7 @@ def test_cell_volumes(grid): ) @pytest.mark.parametrize("grid", iter_grids()) def test_grid_modelrunner_storage(grid, tmp_path): - """test storing grids in modelrunner storages""" + """Test storing grids in modelrunner storages.""" from modelrunner import open_storage path = tmp_path / "grid.json" diff --git a/tests/grids/test_grid_mesh.py b/tests/grids/test_grid_mesh.py index e7734ba1..a3c028c7 100644 --- a/tests/grids/test_grid_mesh.py +++ b/tests/grids/test_grid_mesh.py @@ -20,7 +20,7 @@ @pytest.mark.multiprocessing def test_basic_mpi_methods(): - """test very basic methods""" + """Test very basic methods.""" mesh = GridMesh.from_grid(UnitGrid([4])) value = mesh.broadcast(mpi.rank) @@ -38,7 +38,7 @@ def test_basic_mpi_methods(): @pytest.mark.parametrize("grid, decomposition", GRIDS) def test_generic_meshes(grid, decomposition): - """test generic functions of the grid mesh""" + """Test generic functions of the grid mesh.""" mesh = GridMesh.from_grid(grid, decomposition) assert len(mesh) == mpi.size mesh.plot(action="close") @@ -46,7 +46,7 @@ def test_generic_meshes(grid, decomposition): @pytest.mark.parametrize("decomp", ["auto", (1, 1), (2, 1), (1, 2), (2, 2)]) def test_split_fields(decomp, rng): - """test splitting and recombining fields""" + """Test splitting and recombining fields.""" grid = UnitGrid([8, 8]) mesh = GridMesh.from_grid(grid, decomp) @@ -62,7 +62,7 @@ def test_split_fields(decomp, rng): @pytest.mark.parametrize("decomp", ["auto", (-1,), (-1, 1), (1, -1)]) @pytest.mark.parametrize("dtype", [int, float, complex]) def test_split_fields_mpi(decomp, dtype, rng): - """test splitting and recombining fields using multiprocessing""" + """Test splitting and recombining fields using multiprocessing.""" dim = len(decomp) grid = UnitGrid([8] * dim) mesh = GridMesh.from_grid(grid, decomp) @@ -109,7 +109,7 @@ def test_split_fields_mpi(decomp, dtype, rng): @pytest.mark.multiprocessing @pytest.mark.parametrize("decomp", ["auto", (-1,), (-1, 1), (1, -1)]) def test_split_fieldcollections_mpi(decomp, rng): - """test splitting and recombining field collections using multiprocessing""" + """Test splitting and recombining field collections using multiprocessing.""" dim = len(decomp) grid = UnitGrid([8] * dim) mesh = GridMesh.from_grid(grid, decomp) @@ -156,7 +156,7 @@ def test_split_fieldcollections_mpi(decomp, rng): @pytest.mark.multiprocessing @pytest.mark.parametrize("bc", ["periodic", "value", "derivative", "curvature"]) def test_boundary_conditions_numpy(bc, rng): - """test setting boundary conditions using numpy""" + """Test setting boundary conditions using numpy.""" grid = UnitGrid([8, 8], periodic=(bc == "periodic")) mesh = GridMesh.from_grid(grid) @@ -177,7 +177,7 @@ def test_boundary_conditions_numpy(bc, rng): @pytest.mark.multiprocessing 
@pytest.mark.parametrize("bc", ["periodic", "value", "derivative", "curvature"]) def test_boundary_conditions_numba(bc, rng): - """test setting boundary conditions using numba""" + """Test setting boundary conditions using numba.""" grid = UnitGrid([8, 8], periodic=(bc == "periodic")) mesh = GridMesh.from_grid(grid) @@ -199,7 +199,7 @@ def test_boundary_conditions_numba(bc, rng): @pytest.mark.multiprocessing def test_vector_boundary_conditions(rng): - """test setting vectorial boundary conditions""" + """Test setting vectorial boundary conditions.""" grid = UnitGrid([8, 8]) mesh = GridMesh.from_grid(grid) @@ -220,7 +220,7 @@ def test_vector_boundary_conditions(rng): @pytest.mark.multiprocessing @pytest.mark.parametrize("grid, decomposition", GRIDS) def test_noncartesian_grids(grid, decomposition, rng): - """test whether we can deal with non-cartesian grids""" + """Test whether we can deal with non-cartesian grids.""" field = ScalarField.random_uniform(grid, rng=rng) eq = DiffusionPDE() @@ -243,7 +243,7 @@ def test_noncartesian_grids(grid, decomposition, rng): @pytest.mark.parametrize("grid, decomposition", GRIDS) @pytest.mark.parametrize("rank", [0, 2]) def test_integration_parallel(grid, decomposition, rank): - """test integration of fields over grids""" + """Test integration of fields over grids.""" mesh = GridMesh.from_grid(grid, decomposition=decomposition) if rank == 0: field = ScalarField(grid, 1) @@ -264,7 +264,7 @@ def test_integration_parallel(grid, decomposition, rank): def test_get_optimal_decomposition(): - """test _get_optimal_decomposition function""" + """Test _get_optimal_decomposition function.""" assert _get_optimal_decomposition([1], 1) == [1] assert _get_optimal_decomposition([1], 2) == [1] diff --git a/tests/grids/test_spherical_grids.py b/tests/grids/test_spherical_grids.py index 1cac3645..115a5ed6 100644 --- a/tests/grids/test_spherical_grids.py +++ b/tests/grids/test_spherical_grids.py @@ -10,7 +10,7 @@ def test_polar_grid(rng): - """test simple polar grid""" + """Test simple polar grid.""" grid = PolarSymGrid(4, 8) assert grid.dim == 2 assert grid.num_cells == 8 @@ -36,7 +36,7 @@ def test_polar_grid(rng): def test_polar_annulus(rng): - """test simple polar grid with a hole""" + """Test simple polar grid with a hole.""" grid = PolarSymGrid((2, 4), 8) assert grid.dim == 2 assert grid.num_cells == 8 @@ -66,7 +66,7 @@ def test_polar_annulus(rng): def test_polar_to_cartesian(): - """test conversion of polar grid to Cartesian""" + """Test conversion of polar grid to Cartesian.""" expr_pol = "(1 + r**2) ** -2" expr_cart = expr_pol.replace("r**2", "(x**2 + y**2)") @@ -80,7 +80,7 @@ def test_polar_to_cartesian(): def test_spherical_grid(rng): - """test simple spherical grid""" + """Test simple spherical grid.""" grid = SphericalSymGrid(4, 8) assert grid.dim == 3 assert grid.num_cells == 8 @@ -106,7 +106,7 @@ def test_spherical_grid(rng): def test_spherical_annulus(rng): - """test simple spherical grid with a hole""" + """Test simple spherical grid with a hole.""" grid = SphericalSymGrid((2, 4), 8) assert grid.dim == 3 assert grid.num_cells == 8 @@ -136,7 +136,7 @@ def test_spherical_annulus(rng): def test_spherical_to_cartesian(): - """test conversion of spherical grid to cartesian""" + """Test conversion of spherical grid to cartesian.""" expr_sph = "1. 
/ (1 + r**2)" expr_cart = expr_sph.replace("r**2", "(x**2 + y**2 + z**2)") @@ -151,7 +151,7 @@ def test_spherical_to_cartesian(): @pytest.mark.parametrize("grid_class", [PolarSymGrid, SphericalSymGrid]) def test_setting_boundary_conditions(grid_class): - """test setting some boundary conditions""" + """Test setting some boundary conditions.""" grid = grid_class([0, 1], 3) b_inner = NeumannBC(grid, 0, upper=False) diff --git a/tests/pdes/test_diffusion_pdes.py b/tests/pdes/test_diffusion_pdes.py index 639365fd..97ef6769 100644 --- a/tests/pdes/test_diffusion_pdes.py +++ b/tests/pdes/test_diffusion_pdes.py @@ -10,7 +10,7 @@ def test_diffusion_single(rng): - """test some methods of the simple diffusion model""" + """Test some methods of the simple diffusion model.""" eq = DiffusionPDE() assert isinstance(str(eq), str) assert isinstance(repr(eq), str) @@ -25,7 +25,7 @@ def test_diffusion_single(rng): def test_simple_diffusion_value(rng): - """test a simple diffusion equation with constant boundaries""" + """Test a simple diffusion equation with constant boundaries.""" grid = CartesianGrid([[0, 1]], [16]) c = ScalarField.random_uniform(grid, 0, 1, rng=rng) b_l = {"type": "value", "value": 0} @@ -37,7 +37,7 @@ def test_simple_diffusion_value(rng): def test_simple_diffusion_flux_right(rng): - """test a simple diffusion equation with flux boundary on the right""" + """Test a simple diffusion equation with flux boundary on the right.""" grid = CartesianGrid([[0, 1]], [16]) c = ScalarField.random_uniform(grid, 0, 1, rng=rng) b_l = {"type": "value", "value": 0} @@ -48,7 +48,7 @@ def test_simple_diffusion_flux_right(rng): def test_simple_diffusion_flux_left(rng): - """test a simple diffusion equation with flux boundary on the left""" + """Test a simple diffusion equation with flux boundary on the left.""" grid = CartesianGrid([[0, 1]], [16]) c = ScalarField.random_uniform(grid, 0, 1, rng=rng) b_l = {"type": "derivative", "value": 2} @@ -59,7 +59,7 @@ def test_simple_diffusion_flux_left(rng): def test_diffusion_cached(rng): - """test some caching of rhs of the simple diffusion model""" + """Test some caching of rhs of the simple diffusion model.""" grid = UnitGrid([8]) c0 = ScalarField.random_uniform(grid, rng=rng) @@ -89,7 +89,7 @@ def test_diffusion_cached(rng): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_diffusion_time_dependent_bcs(backend): - """test PDE with time-dependent BCs""" + """Test PDE with time-dependent BCs.""" field = ScalarField(UnitGrid([3])) eq = DiffusionPDE(bc={"value_expression": "Heaviside(t - 1.5)"}) @@ -110,7 +110,7 @@ def test_diffusion_time_dependent_bcs(backend): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_diffusion_sde(backend, rng): - """test scaling of noise using a stochastic diffusion equation""" + """Test scaling of noise using a stochastic diffusion equation.""" # we disable diffusivity to have a simple analytical solution var_local, t_range = 0.35, 0.1 eq = DiffusionPDE(diffusivity=0, noise=var_local, rng=rng) diff --git a/tests/pdes/test_generic_pdes.py b/tests/pdes/test_generic_pdes.py index 64f41563..4f67ab9c 100644 --- a/tests/pdes/test_generic_pdes.py +++ b/tests/pdes/test_generic_pdes.py @@ -21,7 +21,7 @@ ], ) def test_pde_consistency(pde_class, dim, rng): - """test some methods of generic PDE models""" + """Test some methods of generic PDE models.""" eq = pde_class() assert isinstance(str(eq), str) assert isinstance(repr(eq), str) @@ -42,7 +42,7 @@ def test_pde_consistency(pde_class, dim, rng): def 
test_pde_consistency_test(rng): - """test whether the consistency of a pde implementation is checked""" + """Test whether the consistency of a pde implementation is checked.""" class TestPDE(pdes.PDEBase): def evolution_rate(self, field, t=0): diff --git a/tests/pdes/test_laplace_pdes.py b/tests/pdes/test_laplace_pdes.py index ad92d78c..25023f80 100644 --- a/tests/pdes/test_laplace_pdes.py +++ b/tests/pdes/test_laplace_pdes.py @@ -10,7 +10,7 @@ def test_pde_poisson_solver_1d(): - """test the poisson solver on 1d grids""" + """Test the poisson solver on 1d grids.""" # solve Laplace's equation grid = UnitGrid([4]) res = solve_laplace_equation(grid, bc=[{"value": -1}, {"value": 3}]) @@ -37,7 +37,7 @@ def test_pde_poisson_solver_1d(): def test_pde_poisson_solver_2d(): - """test the poisson solver on 2d grids""" + """Test the poisson solver on 2d grids.""" grid = CartesianGrid([[0, 2 * np.pi]] * 2, 16) bcs = [{"value": "sin(y)"}, {"value": "sin(x)"}] diff --git a/tests/pdes/test_pde_class.py b/tests/pdes/test_pde_class.py index 464acbfc..cf5b050b 100644 --- a/tests/pdes/test_pde_class.py +++ b/tests/pdes/test_pde_class.py @@ -15,7 +15,7 @@ def iter_grids(): - """generate some test grids""" + """Generate some test grids.""" yield grids.UnitGrid([2, 2], periodic=[True, False]) yield grids.CartesianGrid([[0, 1]], [2], periodic=[False]) yield grids.CylindricalSymGrid(2, (0, 2), (2, 2), periodic_z=True) @@ -24,7 +24,7 @@ def iter_grids(): def test_pde_critical_input(rng): - """test some wrong input and edge cases""" + """Test some wrong input and edge cases.""" # test whether reserved symbols can be used as variables grid = grids.UnitGrid([4]) eq = PDE({"E": 1}) @@ -55,7 +55,7 @@ def test_pde_critical_input(rng): def test_pde_scalar(rng): - """test PDE with a single scalar field""" + """Test PDE with a single scalar field.""" eq = PDE({"u": "laplace(u) + exp(-t) + sin(t)"}) assert eq.explicit_time_dependence assert not eq.complex_valued @@ -70,7 +70,7 @@ def test_pde_scalar(rng): def test_pde_vector(rng): - """test PDE with a single vector field""" + """Test PDE with a single vector field.""" eq = PDE({"u": "vector_laplace(u) + exp(-t)"}) assert eq.explicit_time_dependence assert not eq.complex_valued @@ -85,7 +85,7 @@ def test_pde_vector(rng): def test_pde_2scalar(): - """test PDE with two scalar fields""" + """Test PDE with two scalar fields.""" eq = PDE({"u": "laplace(u) - u", "v": "- u * v"}) assert not eq.explicit_time_dependence assert not eq.complex_valued @@ -101,7 +101,7 @@ def test_pde_2scalar(): @pytest.mark.slow def test_pde_vector_scalar(rng): - """test PDE with a vector and a scalar field""" + """Test PDE with a vector and a scalar field.""" eq = PDE({"u": "vector_laplace(u) - u + gradient(v)", "v": "- divergence(u)"}) assert not eq.explicit_time_dependence assert not eq.complex_valued @@ -122,7 +122,7 @@ def test_pde_vector_scalar(rng): @pytest.mark.parametrize("grid", iter_grids()) def test_compare_swift_hohenberg(grid, rng): - """compare custom class to swift-Hohenberg""" + """Compare custom class to swift-Hohenberg.""" rate, kc2, delta = rng.uniform(0.5, 2, size=3) eq1 = SwiftHohenbergPDE(rate=rate, kc2=kc2, delta=delta) eq2 = PDE( @@ -143,7 +143,7 @@ def test_compare_swift_hohenberg(grid, rng): def test_custom_operators(rng): - """test using a custom operator""" + """Test using a custom operator.""" grid = grids.UnitGrid([32]) field = ScalarField.random_normal(grid, rng=rng) eq = PDE({"u": "undefined(u)"}) @@ -168,7 +168,7 @@ def op(arr, out): @pytest.mark.parametrize("backend", 
["numpy", "numba"]) def test_pde_noise(backend, rng): - """test noise operator on PDE class""" + """Test noise operator on PDE class.""" grid = grids.UnitGrid([128, 128]) state = FieldCollection([ScalarField(grid), ScalarField(grid)]) @@ -193,7 +193,7 @@ def test_pde_noise(backend, rng): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_pde_spatial_args(backend): - """test PDE with spatial dependence""" + """Test PDE with spatial dependence.""" field = ScalarField(grids.UnitGrid([4])) eq = PDE({"a": "x"}) @@ -213,7 +213,7 @@ def test_pde_spatial_args(backend): def test_pde_user_funcs(rng): - """test user supplied functions""" + """Test user supplied functions.""" # test a simple case eq = PDE({"u": "get_x(gradient(u))"}, user_funcs={"get_x": lambda arr: arr[0]}) field = ScalarField.random_colored(grids.UnitGrid([32, 32]), rng=rng) @@ -228,7 +228,7 @@ def test_pde_user_funcs(rng): def test_pde_complex_serial(rng): - """test complex valued PDE""" + """Test complex valued PDE.""" eq = PDE({"p": "I * laplace(p)"}) assert not eq.explicit_time_dependence assert eq.complex_valued @@ -244,7 +244,7 @@ def test_pde_complex_serial(rng): def test_pde_product_operators(): - """test inner and outer products""" + """Test inner and outer products.""" eq = PDE( {"p": "gradient(dot(p, p) + inner(p, p)) + tensor_divergence(outer(p, p))"} ) @@ -256,7 +256,7 @@ def test_pde_product_operators(): def test_pde_setting_noise(): - """test setting the noise strength""" + """Test setting the noise strength.""" for noise in [[0, 1], {"b": 1}, {"b": 1, "a": 0}, {"b": 1, "c": 1}]: eq = PDE({"a": "0", "b": "0"}, noise=noise) assert eq.is_sde @@ -271,7 +271,7 @@ def test_pde_setting_noise(): def test_pde_consts(): - """test using the consts argument in PDE""" + """Test using the consts argument in PDE.""" field = ScalarField(grids.UnitGrid([3]), 1) eq = PDE({"a": "b"}, consts={"b": 2}) @@ -294,7 +294,7 @@ def test_pde_consts(): @pytest.mark.parametrize("bc", ["auto_periodic_neumann", {"value": 1}]) def test_pde_bcs(bc, rng): - """test PDE with boundary conditions""" + """Test PDE with boundary conditions.""" eq = PDE({"u": "laplace(u)"}, bc=bc) assert not eq.explicit_time_dependence assert not eq.complex_valued @@ -309,7 +309,7 @@ def test_pde_bcs(bc, rng): def test_pde_bcs_warning(caplog): - """test whether a warning is thrown correctly""" + """Test whether a warning is thrown correctly.""" with caplog.at_level(logging.WARNING): eq = PDE({"u": "laplace(u)"}, bc_ops={"u:gradient": "value"}) eq.evolution_rate(ScalarField(grids.UnitGrid([6]))) @@ -323,7 +323,7 @@ def test_pde_bcs_warning(caplog): @pytest.mark.parametrize("bc", ["asdf", [{"value": 1}] * 3]) def test_pde_bcs_error(bc, rng): - """test PDE with wrong boundary conditions""" + """Test PDE with wrong boundary conditions.""" eq = PDE({"u": "laplace(u)"}, bc=bc) grid = grids.UnitGrid([8, 8]) field = ScalarField.random_normal(grid, rng=rng) @@ -335,7 +335,7 @@ def test_pde_bcs_error(bc, rng): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_pde_time_dependent_bcs(backend): - """test PDE with time-dependent BCs""" + """Test PDE with time-dependent BCs.""" field = ScalarField(grids.UnitGrid([3])) eq = PDE({"c": "laplace(c)"}, bc={"value_expression": "Heaviside(t - 1.5)"}) @@ -349,7 +349,7 @@ def test_pde_time_dependent_bcs(backend): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_pde_integral(backend, rng): - """test PDE with integral""" + """Test PDE with integral.""" grid = grids.UnitGrid([16]) field = 
ScalarField.random_uniform(grid, rng=rng) eq = PDE({"c": "-integral(c)"}) @@ -366,7 +366,7 @@ def test_pde_integral(backend, rng): def test_anti_periodic_bcs(): - """test a simulation with anti-periodic BCs""" + """Test a simulation with anti-periodic BCs.""" grid = grids.CartesianGrid([[-10, 10]], 32, periodic=True) field = ScalarField.from_expression(grid, "0.01 * x**2") field -= field.average @@ -386,7 +386,7 @@ def test_anti_periodic_bcs(): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_pde_heaviside(backend): - """test PDE with a heaviside right hand side""" + """Test PDE with a heaviside right hand side.""" field = ScalarField(grids.CartesianGrid([[-1, 1]], 2), [-1, 1]) eq = PDE({"c": "Heaviside(x)"}) res = eq.solve(field, 0.999, dt=0.1, backend=backend, tracker=None) @@ -394,7 +394,7 @@ def test_pde_heaviside(backend): def test_jacobian_spectral(): - """test jacobian_spectral method""" + """Test jacobian_spectral method.""" eq = PDE({"c": "laplace(c**3 - c - laplace(c))"}) expected = "q**2*(-3*c**2 - q**2 + 1)" expr = eq._jacobian_spectral()[0] @@ -418,7 +418,7 @@ def test_jacobian_spectral(): def test_jacobian_spectral_bad_input(): - """test jacobian_spectral method with bad input""" + """Test jacobian_spectral method with bad input.""" with pytest.raises(ValueError): PDE({"a": "a"})._jacobian_spectral(wave_vector="t") with pytest.raises(ValueError): @@ -438,7 +438,7 @@ def test_jacobian_spectral_bad_input(): def test_dispersion_relationn(): - """test dispersion_relation method""" + """Test dispersion_relation method.""" eq = PDE({"c": "laplace(c**3 - c - laplace(c))"}) qs, evs = eq._dispersion_relation(state_hom=0, qs=[0, 0.5, 1]) np.testing.assert_allclose(qs, np.array([0, 0.5, 1])) diff --git a/tests/pdes/test_pdes_mpi.py b/tests/pdes/test_pdes_mpi.py index efe1a770..c65a4fd9 100644 --- a/tests/pdes/test_pdes_mpi.py +++ b/tests/pdes/test_pdes_mpi.py @@ -14,7 +14,7 @@ @pytest.mark.parametrize("dim", [1, 2, 3]) @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_pde_complex_bcs_mpi(dim, backend, rng): - """test PDE with complex BCs using multiprocessing""" + """Test PDE with complex BCs using multiprocessing.""" eq = DiffusionPDE() grid = grids.UnitGrid([8] * dim) field = ScalarField.random_normal(grid, rng=rng).smooth(1) @@ -35,7 +35,7 @@ def test_pde_complex_bcs_mpi(dim, backend, rng): @pytest.mark.multiprocessing def test_pde_vector_mpi(rng): - """test PDE with a single vector field using multiprocessing""" + """Test PDE with a single vector field using multiprocessing.""" eq = PDE({"u": "vector_laplace(u) + exp(-t)"}) assert eq.explicit_time_dependence assert not eq.complex_valued @@ -58,7 +58,7 @@ def test_pde_vector_mpi(rng): @pytest.mark.multiprocessing def test_pde_complex_mpi(rng): - """test complex valued PDE""" + """Test complex valued PDE.""" eq = PDE({"p": "I * laplace(p)"}) assert not eq.explicit_time_dependence assert eq.complex_valued @@ -92,7 +92,7 @@ def test_pde_complex_mpi(rng): @pytest.mark.multiprocessing @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_pde_const_mpi(backend): - """test PDE with a field constant using multiprocessing""" + """Test PDE with a field constant using multiprocessing.""" grid = grids.UnitGrid([8]) eq = PDE({"u": "k"}, consts={"k": ScalarField.from_expression(grid, "x")}) diff --git a/tests/pdes/test_wave_pdes.py b/tests/pdes/test_wave_pdes.py index d7dcb8ca..ecf9625e 100644 --- a/tests/pdes/test_wave_pdes.py +++ b/tests/pdes/test_wave_pdes.py @@ -10,7 +10,7 @@ 
@pytest.mark.parametrize("dim", [1, 2]) def test_wave_consistency(dim, rng): - """test some methods of the wave model""" + """Test some methods of the wave model.""" eq = WavePDE() assert isinstance(str(eq), str) assert isinstance(repr(eq), str) diff --git a/tests/requirements.txt b/tests/requirements.txt index 67fd2612..e4695b5c 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,5 +1,6 @@ -r ../requirements.txt black>=24 +docformatter>=1.7 importlib-metadata>=5 isort>=5.1 jupyter_contrib_nbextensions>=0.5 diff --git a/tests/resources/run_pde.py b/tests/resources/run_pde.py index 9a86b035..b624844b 100755 --- a/tests/resources/run_pde.py +++ b/tests/resources/run_pde.py @@ -2,7 +2,7 @@ def run_pde(t_range, storage): - """run a pde and store trajectory""" + """Run a pde and store trajectory.""" field = pde.ScalarField.random_uniform(pde.UnitGrid([8, 8])) storage["initial_state"] = field diff --git a/tests/solvers/test_adams_bashforth_solver.py b/tests/solvers/test_adams_bashforth_solver.py index e1d7c8c6..861ee85c 100644 --- a/tests/solvers/test_adams_bashforth_solver.py +++ b/tests/solvers/test_adams_bashforth_solver.py @@ -8,7 +8,7 @@ def test_adams_bashforth(): - """test the adams_bashforth method""" + """Test the adams_bashforth method.""" eq = pde.PDE({"y": "y"}) state = pde.ScalarField(pde.UnitGrid([1]), 1) storage = pde.MemoryStorage() diff --git a/tests/solvers/test_controller.py b/tests/solvers/test_controller.py index 2fbb8008..60c2073c 100644 --- a/tests/solvers/test_controller.py +++ b/tests/solvers/test_controller.py @@ -8,7 +8,7 @@ def test_controller_abort(): - """test how controller deals with errors""" + """Test how controller deals with errors.""" class ErrorPDEException(RuntimeError): ... diff --git a/tests/solvers/test_explicit_mpi_solvers.py b/tests/solvers/test_explicit_mpi_solvers.py index 7abfdf1a..67918603 100644 --- a/tests/solvers/test_explicit_mpi_solvers.py +++ b/tests/solvers/test_explicit_mpi_solvers.py @@ -16,7 +16,7 @@ [("euler", "auto"), ("euler", [1, -1]), ("runge-kutta", [-1, 1])], ) def test_simple_pde_mpi(scheme, decomposition, rng): - """test setting boundary conditions using numba""" + """Test setting boundary conditions using numba.""" grid = UnitGrid([8, 8], periodic=[True, False]) field = ScalarField.random_uniform(grid, rng=rng) @@ -57,7 +57,7 @@ def test_simple_pde_mpi(scheme, decomposition, rng): @pytest.mark.multiprocessing @pytest.mark.parametrize("backend", ["numba", "numpy"]) def test_stochastic_mpi_solvers(backend, rng): - """test simple version of the stochastic solver""" + """Test simple version of the stochastic solver.""" field = ScalarField.random_uniform(UnitGrid([16]), -1, 1, rng=rng) eq = DiffusionPDE() seq = DiffusionPDE(noise=1e-10) diff --git a/tests/solvers/test_explicit_solvers.py b/tests/solvers/test_explicit_solvers.py index 64f1b5fe..4b9da38f 100644 --- a/tests/solvers/test_explicit_solvers.py +++ b/tests/solvers/test_explicit_solvers.py @@ -15,7 +15,7 @@ @pytest.mark.parametrize("scheme", ["euler", "runge-kutta"]) @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_solvers_simple_fixed(scheme, backend): - """test explicit solvers""" + """Test explicit solvers.""" grid = UnitGrid([4]) xs = grid.axes_coords[0] field = ScalarField.from_expression(grid, "x") @@ -40,7 +40,7 @@ def test_solvers_simple_fixed(scheme, backend): @pytest.mark.parametrize("scheme", ["euler", "runge-kutta"]) @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_solvers_simple_adaptive(scheme, backend): - 
"""test explicit solvers""" + """Test explicit solvers.""" grid = UnitGrid([4]) y0 = np.array([1e-3, 1e-3, 1e3, 1e3]) field = ScalarField(grid, y0) @@ -71,7 +71,7 @@ def test_solvers_simple_adaptive(scheme, backend): @pytest.mark.parametrize("scheme", ["euler", "runge-kutta"]) @pytest.mark.parametrize("adaptive", [True, False]) def test_solvers_time_dependent(scheme, adaptive): - """test explicit solvers with a simple ODE""" + """Test explicit solvers with a simple ODE.""" grid = UnitGrid([1]) field = ScalarField(grid, 1) eq = PDE({"y": "2*sin(t) - y"}) @@ -96,7 +96,7 @@ def test_solvers_time_dependent(scheme, adaptive): @pytest.mark.parametrize("backend", ["numba", "numpy"]) def test_stochastic_solvers(backend, rng): - """test simple version of the stochastic solver""" + """Test simple version of the stochastic solver.""" field = ScalarField.random_uniform(UnitGrid([16]), -1, 1, rng=rng) eq = DiffusionPDE() seq = DiffusionPDE(noise=1e-10) @@ -118,7 +118,7 @@ def test_stochastic_solvers(backend, rng): def test_stochastic_adaptive_solver(caplog, rng): - """test using an adaptive, stochastic solver""" + """Test using an adaptive, stochastic solver.""" field = ScalarField.random_uniform(UnitGrid([16]), -1, 1, rng=rng) eq = DiffusionPDE(noise=1e-6) @@ -129,7 +129,7 @@ def test_stochastic_adaptive_solver(caplog, rng): def test_unsupported_stochastic_solvers(rng): - """test some solvers that do not support stochasticity""" + """Test some solvers that do not support stochasticity.""" field = ScalarField.random_uniform(UnitGrid([16]), -1, 1, rng=rng) eq = DiffusionPDE(noise=1) @@ -141,13 +141,13 @@ def test_unsupported_stochastic_solvers(rng): @pytest.mark.parametrize("scheme", ["euler", "runge-kutta"]) def test_adaptive_solver_nan(scheme): - """test whether the adaptive solver can treat nans""" + """Test whether the adaptive solver can treat nans.""" # Note for programmer: A similar test for the `numba` backend is difficult to # implement, since we only want to fail very rarely. We tried doing it with random # failure, but this often resulted in hitting the minimal time step. 
class MockPDE(PDEBase): - """simple PDE which returns NaN every 5 evaluations""" + """Simple PDE which returns NaN every 5 evaluations.""" evaluations = 0 diff --git a/tests/solvers/test_generic_solvers.py b/tests/solvers/test_generic_solvers.py index d5d1303d..cbf7f706 100644 --- a/tests/solvers/test_generic_solvers.py +++ b/tests/solvers/test_generic_solvers.py @@ -27,7 +27,7 @@ def test_solver_registration(): - """test solver registration""" + """Test solver registration.""" solvers = registered_solvers() assert "explicit" in solvers assert "implicit" in solvers @@ -36,7 +36,7 @@ def test_solver_registration(): def test_solver_in_pde_class(rng): - """test whether solver instances can be used in pde instances""" + """Test whether solver instances can be used in pde instances.""" field = ScalarField.random_uniform(UnitGrid([16, 16]), -1, 1, rng=rng) eq = DiffusionPDE() eq.solve(field, t_range=1, solver=ScipySolver, tracker=None) @@ -44,7 +44,7 @@ def test_solver_in_pde_class(rng): @pytest.mark.parametrize("solver_class", SOLVER_CLASSES) def test_compare_solvers(solver_class, rng): - """compare several solvers""" + """Compare several solvers.""" field = ScalarField.random_uniform(UnitGrid([8, 8]), -1, 1, rng=rng) eq = DiffusionPDE() @@ -62,7 +62,7 @@ def test_compare_solvers(solver_class, rng): @pytest.mark.parametrize("solver_class", SOLVER_CLASSES) @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_solvers_complex(solver_class, backend): - """test solvers with a complex PDE""" + """Test solvers with a complex PDE.""" r = FieldCollection.scalar_random_uniform(2, UnitGrid([3]), labels=["a", "b"]) c = r["a"] + 1j * r["b"] assert c.is_complex @@ -80,7 +80,7 @@ def test_solvers_complex(solver_class, backend): def test_basic_adaptive_solver(): - """test basic adaptive solvers""" + """Test basic adaptive solvers.""" grid = UnitGrid([4]) y0 = np.array([1e-3, 1e-3, 1e3, 1e3]) field = ScalarField(grid, y0) diff --git a/tests/solvers/test_implicit_solvers.py b/tests/solvers/test_implicit_solvers.py index c6ee4dbb..eb085ae2 100644 --- a/tests/solvers/test_implicit_solvers.py +++ b/tests/solvers/test_implicit_solvers.py @@ -12,7 +12,7 @@ @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_implicit_solvers_simple_fixed(backend): - """test implicit solvers""" + """Test implicit solvers.""" grid = UnitGrid([4]) xs = grid.axes_coords[0] field = ScalarField.from_expression(grid, "x") @@ -31,7 +31,7 @@ def test_implicit_solvers_simple_fixed(backend): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_implicit_stochastic_solvers(backend, rng): - """test simple version of the stochastic implicit solver""" + """Test simple version of the stochastic implicit solver.""" field = ScalarField.random_uniform(UnitGrid([16]), -1, 1, rng=rng) eq = DiffusionPDE() seq = DiffusionPDE(noise=1e-10) diff --git a/tests/solvers/test_scipy_solvers.py b/tests/solvers/test_scipy_solvers.py index 7816ad58..5c0a92ec 100644 --- a/tests/solvers/test_scipy_solvers.py +++ b/tests/solvers/test_scipy_solvers.py @@ -9,7 +9,7 @@ def test_scipy_no_dt(rng): - """test scipy solver without timestep""" + """Test scipy solver without timestep.""" grid = UnitGrid([16]) field = ScalarField.random_uniform(grid, -1, 1, rng=rng) eq = DiffusionPDE() @@ -24,7 +24,7 @@ def test_scipy_no_dt(rng): def test_scipy_field_collection(): - """test scipy solver with field collection""" + """Test scipy solver with field collection.""" grid = UnitGrid([2]) field = FieldCollection.from_scalar_expressions(grid, ["x", "0"]) 
eq = PDE({"a": "1", "b": "a"}) diff --git a/tests/storage/test_file_storages.py b/tests/storage/test_file_storages.py index 43856471..2f6cd1f9 100644 --- a/tests/storage/test_file_storages.py +++ b/tests/storage/test_file_storages.py @@ -13,7 +13,7 @@ @pytest.mark.skipif(not module_available("h5py"), reason="requires `h5py` module") @pytest.mark.parametrize("collection", [True, False]) def test_storage_persistence(collection, tmp_path): - """test writing to persistent trackers""" + """Test writing to persistent trackers.""" dim = 5 grid = UnitGrid([dim]) scalar = ScalarField(grid) @@ -24,7 +24,7 @@ def test_storage_persistence(collection, tmp_path): state = scalar def assert_storage_content(storage, expect): - """helper function testing storage content""" + """Helper function testing storage content.""" if collection: for i in range(2): field_data = storage.extract_field(i).data @@ -79,7 +79,7 @@ def assert_storage_content(storage, expect): @pytest.mark.skipif(not module_available("h5py"), reason="requires `h5py` module") @pytest.mark.parametrize("compression", [True, False]) def test_simulation_persistence(compression, tmp_path, rng): - """test whether a tracker can accurately store information about simulation""" + """Test whether a tracker can accurately store information about simulation.""" path = tmp_path / "test_simulation_persistence.hdf5" storage = FileStorage(path, compression=compression) @@ -104,7 +104,7 @@ def test_simulation_persistence(compression, tmp_path, rng): @pytest.mark.skipif(not module_available("h5py"), reason="requires `h5py` module") @pytest.mark.parametrize("compression", [True, False]) def test_storage_fixed_size(compression, tmp_path): - """test setting fixed size of FileStorage objects""" + """Test setting fixed size of FileStorage objects.""" c = ScalarField(UnitGrid([2]), data=1) for fixed in [True, False]: @@ -132,7 +132,7 @@ def test_storage_fixed_size(compression, tmp_path): @pytest.mark.skipif(not module_available("h5py"), reason="requires `h5py` module") def test_appending(tmp_path): - """test the appending data""" + """Test the appending data.""" path = tmp_path / "test_appending.hdf5" c = ScalarField(UnitGrid([2]), data=1) @@ -154,7 +154,7 @@ def test_appending(tmp_path): @pytest.mark.skipif(not module_available("h5py"), reason="requires `h5py` module") def test_keep_opened(tmp_path): - """test the keep opened option""" + """Test the keep opened option.""" path = tmp_path / "test_keep_opened.hdf5" c = ScalarField(UnitGrid([2]), data=1) @@ -183,7 +183,7 @@ def test_keep_opened(tmp_path): @pytest.mark.skipif(not module_available("h5py"), reason="requires `h5py` module") @pytest.mark.parametrize("dtype", [bool, float, complex]) def test_write_types(dtype, tmp_path, rng): - """test whether complex data can be written""" + """Test whether complex data can be written.""" path = tmp_path / "test_type_writing.hdf5" grid = UnitGrid([32]) diff --git a/tests/storage/test_generic_storages.py b/tests/storage/test_generic_storages.py index 6f688e5a..368da7f9 100644 --- a/tests/storage/test_generic_storages.py +++ b/tests/storage/test_generic_storages.py @@ -43,7 +43,7 @@ @pytest.fixture def storage_factory(tmp_path, storage_class): - """helper fixture that provides a storage factory that initializes files""" + """Helper fixture that provides a storage factory that initializes files.""" if storage_class is None: return None @@ -76,7 +76,7 @@ def storage_factory(tmp_path, storage_class): @pytest.mark.parametrize("atol,can_clear,storage_class", STORAGE_CLASSES_ALL) 
def test_storage_write(atol, can_clear, storage_factory): - """test simple memory storage""" + """Test simple memory storage.""" dim = 5 grid = UnitGrid([dim]) field = ScalarField(grid) @@ -112,7 +112,7 @@ def test_storage_write(atol, can_clear, storage_factory): def test_storage_truncation(tmp_path, rng): - """test whether simple trackers can be used""" + """Test whether simple trackers can be used.""" file = tmp_path / "test_storage_truncation.hdf5" for truncate in [True, False]: storages = [MemoryStorage()] @@ -147,7 +147,7 @@ def test_storage_truncation(tmp_path, rng): @pytest.mark.parametrize("atol,can_clear,storage_class", STORAGE_CLASSES_ALL) def test_storing_extract_range(atol, can_clear, storage_factory): - """test methods specific to FieldCollections in memory storage""" + """Test methods specific to FieldCollections in memory storage.""" sf = ScalarField(UnitGrid([1])) # store some data @@ -183,7 +183,7 @@ def test_storing_extract_range(atol, can_clear, storage_factory): @pytest.mark.parametrize("storage_class", STORAGE_CLASSES) def test_storing_collection(storage_factory, rng): - """test methods specific to FieldCollections in memory storage""" + """Test methods specific to FieldCollections in memory storage.""" grid = UnitGrid([2, 2]) f1 = ScalarField.random_uniform(grid, 0.1, 0.4, label="a", rng=rng) f2 = VectorField.random_uniform(grid, 0.1, 0.4, label="b", rng=rng) @@ -224,7 +224,7 @@ def test_storing_collection(storage_factory, rng): "atol,can_clear,storage_class", STORAGE_CLASSES_ALL + [(0, False, None)] ) def test_storage_apply(atol, can_clear, storage_factory): - """test the apply function of StorageBase""" + """Test the apply function of StorageBase.""" grid = UnitGrid([2]) field = ScalarField(grid) @@ -254,7 +254,7 @@ def test_storage_apply(atol, can_clear, storage_factory): "atol,can_clear,storage_class", STORAGE_CLASSES_ALL + [(0, False, None)] ) def test_storage_copy(atol, can_clear, storage_factory): - """test the copy function of StorageBase""" + """Test the copy function of StorageBase.""" grid = UnitGrid([2]) field = ScalarField(grid) @@ -283,7 +283,7 @@ def test_storage_copy(atol, can_clear, storage_factory): @pytest.mark.parametrize("storage_class", STORAGE_CLASSES) @pytest.mark.parametrize("dtype", [bool, complex]) def test_storage_types(storage_factory, dtype, rng): - """test storing different types""" + """Test storing different types.""" grid = UnitGrid([32]) field = ScalarField.random_uniform(grid, rng=rng).copy(dtype=dtype) if dtype == complex: @@ -304,7 +304,7 @@ def test_storage_types(storage_factory, dtype, rng): @pytest.mark.multiprocessing @pytest.mark.parametrize("atol,can_clear,storage_class", STORAGE_CLASSES_ALL) def test_storage_mpi(atol, can_clear, storage_factory, rng): - """test writing data using MPI""" + """Test writing data using MPI.""" eq = DiffusionPDE() grid = UnitGrid([8]) field = ScalarField.random_normal(grid, rng=rng).smooth(1) @@ -321,7 +321,7 @@ def test_storage_mpi(atol, can_clear, storage_factory, rng): @pytest.mark.parametrize("atol,can_clear,storage_class", STORAGE_CLASSES_ALL) def test_storing_transformation_collection(atol, can_clear, storage_factory, rng): - """test transformation yielding field collections in storage classes""" + """Test transformation yielding field collections in storage classes.""" grid = UnitGrid([8]) field = ScalarField.random_normal(grid, rng=rng).smooth(1) @@ -347,7 +347,7 @@ def trans1(field, t): @pytest.mark.parametrize("atol,can_clear,storage_class", STORAGE_CLASSES_ALL) def 
test_storing_transformation_scalar(atol, can_clear, storage_factory, rng): - """test transformations yielding scalar fields in storage classes""" + """Test transformations yielding scalar fields in storage classes.""" grid = UnitGrid([8]) field = ScalarField.random_uniform(grid, rng=rng).smooth(1) @@ -366,7 +366,7 @@ def test_storing_transformation_scalar(atol, can_clear, storage_factory, rng): @pytest.mark.parametrize("atol,can_clear,storage_class", STORAGE_CLASSES_ALL) def test_storage_view(atol, can_clear, storage_factory, rng): - """test StorageView""" + """Test StorageView.""" grid = UnitGrid([2, 2]) f1 = ScalarField.random_uniform(grid, 0.1, 0.4, label="a", rng=rng) f2 = VectorField.random_uniform(grid, 0.1, 0.4, label="b", rng=rng) diff --git a/tests/storage/test_memory_storages.py b/tests/storage/test_memory_storages.py index 4c3982ca..07d1a492 100644 --- a/tests/storage/test_memory_storages.py +++ b/tests/storage/test_memory_storages.py @@ -10,7 +10,7 @@ def test_memory_storage(): - """test methods specific to memory storage""" + """Test methods specific to memory storage.""" sf = ScalarField(UnitGrid([1])) s1 = MemoryStorage() s1.start_writing(sf) @@ -38,7 +38,7 @@ def test_memory_storage(): def test_field_type_guessing(rng): - """test the ability to guess the field type""" + """Test the ability to guess the field type.""" for cls in [ScalarField, VectorField, Tensor2Field]: grid = UnitGrid([3]) field = cls.random_normal(grid, rng=rng) diff --git a/tests/storage/test_modelrunner_storages.py b/tests/storage/test_modelrunner_storages.py index 33a71b91..6dedcc20 100644 --- a/tests/storage/test_modelrunner_storages.py +++ b/tests/storage/test_modelrunner_storages.py @@ -13,7 +13,7 @@ not module_available("modelrunner"), reason="requires `py-modelrunner` package" ) def test_storage_write_trajectory(tmp_path): - """test simple storage writing""" + """Test simple storage writing.""" import modelrunner as mr path = tmp_path / "storage.json" diff --git a/tests/storage/test_movie_storages.py b/tests/storage/test_movie_storages.py index a36a17d7..755271c9 100644 --- a/tests/storage/test_movie_storages.py +++ b/tests/storage/test_movie_storages.py @@ -19,7 +19,7 @@ @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") @pytest.mark.parametrize("dim", [1, 2]) def test_movie_storage_scalar(dim, tmp_path, rng): - """test storing scalar field as movie""" + """Test storing scalar field as movie.""" path = tmp_path / f"test_movie_storage_scalar.avi" grid = pde.UnitGrid([16] * dim) @@ -47,7 +47,7 @@ def test_movie_storage_scalar(dim, tmp_path, rng): @pytest.mark.parametrize("dim", [1, 2]) @pytest.mark.parametrize("num_fields", [1, 2, 3]) def test_movie_storage_collection(dim, num_fields, tmp_path): - """test storing field collection as movie""" + """Test storing field collection as movie.""" path = tmp_path / f"test_movie_storage_collection_{dim}_{num_fields}.avi" grid = pde.UnitGrid([8] * dim) @@ -68,7 +68,7 @@ def test_movie_storage_collection(dim, num_fields, tmp_path): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") @pytest.mark.parametrize("dim", [1, 2]) def test_movie_storage_vector(dim, tmp_path, rng): - """test storing scalar field as movie""" + """Test storing scalar field as movie.""" path = tmp_path / f"test_movie_storage_vector.avi" grid = pde.UnitGrid([16] * dim) @@ -98,7 +98,7 @@ def test_movie_storage_vector(dim, tmp_path, rng): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") 
@pytest.mark.parametrize("ext", [".mov", ".avi", ".mkv"]) def test_movie_storage_containers(ext, tmp_path, rng): - """test storing scalar field as movie with different extensions""" + """Test storing scalar field as movie with different extensions.""" path = tmp_path / f"test_movie_storage_scalar{ext}" grid = pde.UnitGrid([16]) @@ -125,7 +125,7 @@ def test_movie_storage_containers(ext, tmp_path, rng): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") @pytest.mark.parametrize("name,video_format", formats.items()) def test_video_format(name, video_format, tmp_path, rng): - """test all video_formats""" + """Test all video_formats.""" if np.issubdtype(video_format.dtype, np.integer): assert video_format.max_value == np.iinfo(video_format.dtype).max assert np.dtype(video_format.dtype).itemsize == video_format.bytes_per_channel @@ -151,7 +151,7 @@ def test_video_format(name, video_format, tmp_path, rng): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") def test_too_many_channels(tmp_path, rng): - """test that data with too many channels throws an error""" + """Test that data with too many channels throws an error.""" path = tmp_path / f"test_movie_complex.avi" field = pde.FieldCollection.scalar_random_uniform(5, pde.UnitGrid([16]), rng=rng) @@ -163,7 +163,7 @@ def test_too_many_channels(tmp_path, rng): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") def test_complex_data(tmp_path, rng): - """test that complex data throws an error""" + """Test that complex data throws an error.""" path = tmp_path / f"test_movie_complex.avi" field = pde.ScalarField.random_uniform(pde.UnitGrid([16]), dtype=complex, rng=rng) @@ -176,7 +176,7 @@ def test_complex_data(tmp_path, rng): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") def test_wrong_format(): - """test how wrong files are dealt with""" + """Test how wrong files are dealt with.""" reader = MovieStorage(RESOURCES_PATH / "does_not_exist.avi") with pytest.raises(OSError): reader.times @@ -194,7 +194,7 @@ def test_wrong_format(): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") @pytest.mark.parametrize("path", RESOURCES_PATH.glob("*.hdf5")) def test_stored_files(path): - """test stored files""" + """Test stored files.""" file_reader = FileStorage(path) movie_reader = MovieStorage(path.with_suffix(".avi")) @@ -217,7 +217,7 @@ def test_stored_files(path): ], ) def test_stored_times(interrupt, expected, tmp_path): - """test how times are stored""" + """Test how times are stored.""" path = tmp_path / f"test_movie_times.avi" field = pde.ScalarField(pde.UnitGrid([3])) @@ -235,7 +235,7 @@ def test_stored_times(interrupt, expected, tmp_path): @pytest.mark.skipif(not module_available("ffmpeg"), reason="requires `ffmpeg-python`") def test_unequal_spaced_times(tmp_path, caplog): - """test whether a warning is generated for unequally spaced times""" + """Test whether a warning is generated for unequally spaced times.""" path = tmp_path / f"test_movie_unequal_times.avi" field = pde.ScalarField(pde.UnitGrid([3])) diff --git a/tests/test_examples.py b/tests/test_examples.py index 4ddcf79a..630fe9f0 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -34,7 +34,7 @@ @pytest.mark.skipif(sys.platform == "win32", reason="Assumes unix setup") @pytest.mark.parametrize("path", EXAMPLES) def test_example_scripts(path): - """runs an example script given by path""" + """Runs an 
example script given by path.""" # check whether this test needs to be run if path.name.startswith("_"): pytest.skip("skip examples starting with an underscore") @@ -76,7 +76,7 @@ def test_example_scripts(path): @pytest.mark.skipif(not module_available("nbconvert"), reason="requires `nbconvert`") @pytest.mark.parametrize("path", NOTEBOOKS) def test_jupyter_notebooks(path, tmp_path): - """run the jupyter notebooks""" + """Run the jupyter notebooks.""" import notebook as jupyter_notebook if path.name.startswith("_"): diff --git a/tests/test_integration.py b/tests/test_integration.py index 12c94db3..a4b5ef1b 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -1,5 +1,4 @@ -""" -Integration tests that use multiple modules together +"""Integration tests that use multiple modules together. .. codeauthor:: David Zwicker """ @@ -17,7 +16,7 @@ @pytest.mark.skipif(not misc.module_available("h5py"), reason="requires `h5py` module") def test_writing_to_storage(tmp_path, rng): - """test whether data is written to storage""" + """Test whether data is written to storage.""" state = ScalarField.random_uniform(UnitGrid([3]), rng=rng) pde = DiffusionPDE() path = tmp_path / "test_writing_to_storage.hdf5" @@ -29,7 +28,7 @@ def test_writing_to_storage(tmp_path, rng): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_inhomogeneous_bcs_1(backend): - """test simulation with inhomogeneous boundary conditions""" + """Test simulation with inhomogeneous boundary conditions.""" grid = CartesianGrid([[0, 2 * np.pi], [0, 1]], [32, 2], periodic=[True, False]) state = ScalarField(grid) eq = DiffusionPDE(bc=["auto_periodic_neumann", {"value": "sin(x)"}]) @@ -41,7 +40,7 @@ def test_inhomogeneous_bcs_1(backend): def test_inhomogeneous_bcs_2(): - """test simulation with inhomogeneous boundary conditions""" + """Test simulation with inhomogeneous boundary conditions.""" grid = CartesianGrid([[0, 1], [0, 1]], [8, 8], periodic=False) state = ScalarField(grid) eq = DiffusionPDE(bc={"value": "x + y"}) @@ -52,7 +51,7 @@ def test_inhomogeneous_bcs_2(): @pytest.mark.parametrize("backend", ["numpy", "numba"]) def test_inhomogeneous_bcs_func(backend): - """test simulation with inhomogeneous boundary conditions""" + """Test simulation with inhomogeneous boundary conditions.""" grid = CartesianGrid([[-5, 5], [-5, 5]], 32) field = ScalarField(grid) @@ -70,7 +69,7 @@ def bc_value(adjacent_value, dx, x, y, t): @pytest.mark.multiprocessing def test_custom_pde_mpi(caplog, rng): - """test a custom PDE using the parallelized solver""" + """Test a custom PDE using the parallelized solver.""" class TestPDE(PDEBase): def make_modify_after_step(self, state): @@ -134,7 +133,7 @@ def pde_rhs(state_data, t): not module_available("modelrunner"), reason="requires `py-modelrunner`" ) def test_modelrunner_storage_one(tmp_path, capsys): - """test how modelrunner storage can be used""" + """Test how modelrunner storage can be used.""" import modelrunner as mr SCRIPT_PATH = Path(__file__).parent / "resources" @@ -180,7 +179,7 @@ def test_modelrunner_storage_one(tmp_path, capsys): not module_available("modelrunner"), reason="requires `py-modelrunner`" ) def test_modelrunner_storage_many(tmp_path): - """test how modelrunner storage can be used""" + """Test how modelrunner storage can be used.""" import modelrunner as mr SCRIPT_PATH = Path(__file__).parent / "resources" diff --git a/tests/tools/test_cache.py b/tests/tools/test_cache.py index 2f78b6e9..b94605fe 100644 --- a/tests/tools/test_cache.py +++ 
b/tests/tools/test_cache.py
@@ -14,15 +14,20 @@ def deep_getsizeof(obj, ids=None):
- """Find the memory footprint of a Python object
+ """Find the memory footprint of a Python object.
- This is a recursive function that drills down a Python object graph
- like a dictionary holding nested dictionaries with lists of lists
- and tuples and sets.
+ This is a recursive function that drills down a Python object graph like a
+ dictionary holding nested dictionaries with lists of lists and tuples and sets.
- The sys.getsizeof function does a shallow size of only. It counts each
- object inside a container as pointer only regardless of how big it
- really is.
+ The sys.getsizeof function computes only a shallow size: it counts each object
+ inside a container merely as a pointer, regardless of how big it really is.
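+
+ A minimal sketch of the difference (exact byte counts are platform-dependent):
+
+     import sys
+     data = {"a": [1, 2, 3]}
+     sys.getsizeof(data)   # shallow: the dict container only
+     deep_getsizeof(data)  # the container plus the key, the list, and its items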
Function modified from https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609
@@ -59,7 +57,7 @@ def deep_getsizeof(obj, ids=None):
def test_objects_equal():
- """test the objects_equal function"""
+ """Test the objects_equal function.""" # basic python objects eq = cache.objects_equal assert eq(1, 1)
@@ -87,7 +85,7 @@ def test_objects_equal():
def get_serialization_methods(with_none=True):
- """returns possible methods for serialization that are supported"""
+ """Returns possible methods for serialization that are supported.""" methods = ["json", "pickle"] if with_none:
@@ -105,7 +103,7 @@ def get_serialization_methods(with_none=True):
def test_hashes():
- """test whether the hash key makes sense"""
+ """Test whether the hash key makes sense.""" class Dummy: def __init__(self, value):
@@ -138,7 +136,7 @@ def __hash__(self):
def test_serializer_nonsense():
- """test whether errors are thrown for wrong input"""
+ """Test whether errors are thrown for wrong input.""" with pytest.raises(ValueError): cache.make_serializer("non-sense") with pytest.raises(ValueError):
@@ -147,7 +145,7 @@ def test_serializer_nonsense():
@pytest.mark.parametrize("method", get_serialization_methods())
def test_serializer(method):
- """tests whether the make_serializer returns a canonical hash"""
+ """Tests whether the make_serializer returns a canonical hash.""" encode = cache.make_serializer(method) assert encode(1) == encode(1)
@@ -159,7 +157,7 @@ def test_serializer(method):
def test_serializer_hash_mutable():
- """tests whether the make_serializer returns a canonical hash"""
+ """Tests whether the make_serializer returns a canonical hash.""" # test special serializer encode = cache.make_serializer("hash_mutable") assert encode({"a": 1, "b": 2}) == encode({"b": 2, "a": 1})
@@ -170,7 +168,7 @@ def test_serializer_hash_mutable():
assert cache.hash_mutable(dict(c1)) == cache.hash_mutable(dict(c2)) class Test:
- """test class that neither implements __eq__ nor __hash__"""
+ """Test class that neither implements __eq__ nor __hash__.""" def __init__(self, a): self.a = a
@@ -178,7 +176,7 @@ def __init__(self, a):
assert cache.hash_mutable(Test(1)) != cache.hash_mutable(Test(1)) class TestEq:
- """test class that only implements __eq__ and not __hash__"""
+ """Test class that only implements __eq__ and not __hash__.""" def __init__(self, a): self.a = a
@@ -191,8 +189,8 @@ def __eq__(self, other):
def test_unserializer():
- """tests whether the make_serializer and make_unserializer return the
- original objects"""
+ """Tests whether the make_serializer and make_unserializer return the original
+ objects.""" data_list = [None, 1, [1, 2], {"b": 1, "a": 2}] for method in get_serialization_methods(): encode = cache.make_serializer(method)
@@ -204,7 +202,7 @@ def test_unserializer():
def _test_SerializedDict( storage, reinitialize=None, key_serialization="pickle", value_serialization="pickle" ):
- """tests the SerializedDict class with a particular parameter set"""
+ """Tests the SerializedDict class with a particular parameter set.""" data = cache.SerializedDict( key_serialization, value_serialization, storage_dict=storage )
@@ -251,11 +249,11 @@ def _test_SerializedDict(
@pytest.mark.parametrize("cache_storage", [None, "get_finite_dict"])
def test_property_cache(cache_storage):
- """test cached_property decorator"""
+ """Test cached_property decorator.""" # create test class class CacheTest:
- """class for testing caching"""
+ """Class for testing caching.""" def __init__(self): self.counter = 0
@@ -298,11 +296,11 @@ def cached(self):
@pytest.mark.parametrize("serializer", get_serialization_methods(with_none=False))
@pytest.mark.parametrize("cache_factory", [None, "get_finite_dict"])
def test_method_cache(serializer, cache_factory):
- """test one particular parameter set of the cached_method decorator"""
+ """Test one particular parameter set of the cached_method decorator.""" # create test class class CacheTest:
- """class for testing caching"""
+ """Class for testing caching.""" def __init__(self): self.counter = 0
@@ -391,11 +389,11 @@ def cached_kwarg(self, a=0, b=0):
@pytest.mark.parametrize("serializer", get_serialization_methods(with_none=False))
@pytest.mark.parametrize("cache_factory", [None, "get_finite_dict"])
def test_method_cache_extra_args(serializer, cache_factory):
- """test extra arguments in the cached_method decorator"""
+ """Test extra arguments in the cached_method decorator.""" # create test class class CacheTest:
- """class for testing caching"""
+ """Class for testing caching.""" def __init__(self, value=0): self.counter = 0
@@ -439,11 +437,11 @@ def cached(self, arg):
@pytest.mark.parametrize("cache_factory", [None, "get_finite_dict"])
@pytest.mark.parametrize("ignore_args", ["display", ["display"]])
def test_method_cache_ignore(serializer, cache_factory, ignore_args):
- """test ignored parameters of the cached_method decorator"""
+ """Test ignored parameters of the cached_method decorator.""" # create test class class CacheTest:
- """class for testing caching"""
+ """Class for testing caching.""" def __init__(self): self.counter = 0
@@ -476,10 +474,10 @@ def cached(self, arg, display=True):
def test_cache_clearing():
- """make sure that memory is freed when cache is cleared"""
+ """Make sure that memory is freed when cache is cleared.""" class Test:
- """simple test object with a cache"""
+ """Simple test object with a cache.""" @cache.cached_method() def calc(self, n):
@@ -517,7 +515,7 @@ def clear_specific(self):
def test_serialized_dict():
- """test SerializedDict"""
+ """Test SerializedDict.""" d = cache.SerializedDict() assert len(d) == 0 d["a"] = 1
@@ -530,7 +528,7 @@ def test_serialized_dict():
def test_finite_dict():
- """test DictFiniteCapacity"""
+ """Test DictFiniteCapacity.""" d = cache.DictFiniteCapacity(capacity=1) d["a"] = 1 assert d["a"] == 1
diff --git a/tests/tools/test_config.py b/tests/tools/test_config.py index d87da512..ac9c01fe 100644
--- a/tests/tools/test_config.py
+++ b/tests/tools/test_config.py
@@ -8,12 +8,12 @@
def test_environment():
- """test the environment function"""
+ """Test the environment function.""" assert isinstance(environment(), dict)
def test_config():
- """test configuration system"""
+
"""Test configuration system.""" c = Config() assert c["numba.multithreading_threshold"] > 0 @@ -26,7 +26,7 @@ def test_config(): def test_config_modes(): - """test configuration system running in different modes""" + """Test configuration system running in different modes.""" c = Config(mode="insert") assert c["numba.multithreading_threshold"] > 0 c["numba.multithreading_threshold"] = 0 @@ -73,7 +73,7 @@ def test_config_modes(): def test_config_contexts(): - """test context manager temporarily changing configuration""" + """Test context manager temporarily changing configuration.""" c = Config() assert c["numba.multithreading_threshold"] > 0 @@ -87,7 +87,7 @@ def test_config_contexts(): def test_packages_from_requirements(): - """test the packages_from_requirements function""" + """Test the packages_from_requirements function.""" results = packages_from_requirements("file_not_existing") assert len(results) == 1 assert "Could not open" in results[0] and "file_not_existing" in results[0] diff --git a/tests/tools/test_cuboid.py b/tests/tools/test_cuboid.py index eeaddebf..b7b4f004 100644 --- a/tests/tools/test_cuboid.py +++ b/tests/tools/test_cuboid.py @@ -9,7 +9,7 @@ def test_cuboid_2d(): - """test Cuboid class in 2d""" + """Test Cuboid class in 2d.""" c = Cuboid([-1, -1], [2, 2]) assert c.dim == 2 assert c.volume == 4 @@ -72,7 +72,7 @@ def test_cuboid_2d(): def test_cuboid_add(): - """test adding two cuboids""" + """Test adding two cuboids.""" assert Cuboid([1], [2]) + Cuboid([1], [2]) == Cuboid([1], [2]) assert Cuboid([1], [2]) + Cuboid([0], [1]) == Cuboid([0], [3]) assert Cuboid([1], [2]) + Cuboid([2], [2]) == Cuboid([1], [3]) @@ -81,7 +81,7 @@ def test_cuboid_add(): def test_cuboid_nd(rng): - """test Cuboid class in n dimensions""" + """Test Cuboid class in n dimensions.""" dim = rng.integers(5, 10) size = rng.normal(size=dim) c = Cuboid(rng.normal(size=dim), size) @@ -102,7 +102,7 @@ def test_cuboid_nd(rng): def test_asanyarray_flags(rng): - """test the asanyarray_flags function""" + """Test the asanyarray_flags function.""" assert np.arange(3) is not asanyarray_flags(range(3)) a = rng.random(3).astype(np.double) diff --git a/tests/tools/test_expressions.py b/tests/tools/test_expressions.py index 0723ceb5..ba583eb3 100644 --- a/tests/tools/test_expressions.py +++ b/tests/tools/test_expressions.py @@ -20,7 +20,7 @@ def test_parse_number(): - """test parse_number function""" + """Test parse_number function.""" assert parse_number(0) == pytest.approx(0) assert parse_number(1.235) == pytest.approx(1.235) assert parse_number("0") == pytest.approx(0) @@ -36,7 +36,7 @@ def test_parse_number(): def test_parse_expr_guarded(): - """test parse_expr_guarded function""" + """Test parse_expr_guarded function.""" peg = parse_expr_guarded assert peg("1") == 1 assert peg("1 + 1") == 2 @@ -51,7 +51,7 @@ def test_parse_expr_guarded(): @pytest.mark.parametrize("expr", [None, 1, "1", "a - a"]) def test_const(expr): - """test simple expressions with constants""" + """Test simple expressions with constants.""" e = ScalarExpression() if expr is None else ScalarExpression(expr) val = 0 if expr is None or expr == "a - a" else float(expr) assert e.constant @@ -81,7 +81,7 @@ def test_const(expr): def test_wrong_const(caplog): - """test simple expressions with wrong_constants""" + """Test simple expressions with wrong_constants.""" # test whether wrong constants are check for field = ScalarField(UnitGrid([3])) e = ScalarExpression("scalar_field", consts={"scalar_field": field}) @@ -94,7 +94,7 @@ def 
test_wrong_const(caplog): def test_single_arg(rng): - """test simple expressions""" + """Test simple expressions.""" e = ScalarExpression("2 * a") assert not e.constant assert e.depends_on("a") @@ -126,7 +126,7 @@ def test_single_arg(rng): def test_two_args(rng): - """test simple expressions""" + """Test simple expressions.""" e = ScalarExpression("2 * a ** b") assert e.depends_on("b") assert not e.constant @@ -158,7 +158,7 @@ def test_two_args(rng): def test_derivatives(): - """test vector expressions""" + """Test vector expressions.""" e = ScalarExpression("a * b**2") assert e.depends_on("a") and e.depends_on("b") assert not e.constant @@ -184,7 +184,7 @@ def test_derivatives(): def test_indexed(): - """test simple expressions""" + """Test simple expressions.""" e = ScalarExpression("2 * a[0] ** a[1]", allow_indexed=True) assert not e.constant assert e.depends_on("a") @@ -203,14 +203,14 @@ def test_indexed(): def test_synonyms(caplog): - """test using synonyms in expression""" + """Test using synonyms in expression.""" e = ScalarExpression("2 * arbitrary", [["a", "arbitrary"]]) assert e.depends_on("a") assert not e.depends_on("arbitrary") def test_tensor_expression(): - """test TensorExpression""" + """Test TensorExpression.""" e = TensorExpression("[[0, 1], [2, 3]]") assert isinstance(str(e), str) assert e.shape == (2, 2) @@ -243,7 +243,7 @@ def test_tensor_expression(): def test_expression_from_expression(): - """test creating expressions from expressions""" + """Test creating expressions from expressions.""" expr = ScalarExpression("sin(a)") assert expr == ScalarExpression(expr) assert expr != ScalarExpression(expr, ["a", "b"]) @@ -258,7 +258,7 @@ def test_expression_from_expression(): def test_expression_user_funcs(): - """test the usage of user_funcs""" + """Test the usage of user_funcs.""" expr = ScalarExpression("func()", user_funcs={"func": lambda: 1}) assert expr() == 1 assert expr.get_compiled()() == 1 @@ -278,7 +278,7 @@ def test_expression_user_funcs(): def test_complex_expression(): - """test expressions with complex numbers""" + """Test expressions with complex numbers.""" for s in ["sqrt(-1)", "I"]: expr = ScalarExpression(s) assert expr.complex @@ -305,7 +305,7 @@ def test_complex_expression(): [("Heaviside(x)", 0.5), ("Heaviside(x, 0.75)", 0.75), ("heaviside(x, 0.75)", 0.75)], ) def test_expression_heaviside(expression, value): - """test special cases of expressions""" + """Test special cases of expressions.""" expr = ScalarExpression(expression) assert not expr.constant assert expr(-1) == 0 @@ -321,7 +321,7 @@ def test_expression_heaviside(expression, value): def test_expression_consts(): - """test the usage of consts""" + """Test the usage of consts.""" expr = ScalarExpression("a", consts={"a": 1}) assert expr.constant assert not expr.depends_on("a") @@ -342,7 +342,7 @@ def test_expression_consts(): def test_evaluate_func_scalar(): - """test the evaluate function with scalar fields""" + """Test the evaluate function with scalar fields.""" grid = UnitGrid([2, 4]) field = ScalarField.from_expression(grid, "x") @@ -369,7 +369,7 @@ def test_evaluate_func_scalar(): def test_evaluate_func_vector(): - """test the evaluate function with vector fields""" + """Test the evaluate function with vector fields.""" grid = UnitGrid([3]) field = ScalarField.from_expression(grid, "x") vec = VectorField.from_expression(grid, ["x"]) @@ -386,7 +386,7 @@ def test_evaluate_func_vector(): def test_evaluate_func_invalid(): - """test the evaluate function with invalid data""" + """Test the 
evaluate function with invalid data.""" field = ScalarField.from_expression(UnitGrid([3]), "x") with pytest.raises(ValueError): @@ -407,7 +407,7 @@ def test_evaluate_func_invalid(): def test_evaluate_func_bcs_warning(caplog): - """test whether a warning is thrown correctly""" + """Test whether a warning is thrown correctly.""" field = ScalarField.from_expression(UnitGrid([3]), "x") with caplog.at_level(logging.WARNING): @@ -420,7 +420,7 @@ def test_evaluate_func_bcs_warning(caplog): def test_evaluate_func_collection(): - """test the evaluate function with a field collection""" + """Test the evaluate function with a field collection.""" grid = UnitGrid([3]) field = ScalarField.from_expression(grid, "x") vec = VectorField.from_expression(grid, ["x"]) @@ -439,7 +439,7 @@ def test_evaluate_func_collection(): def test_expression_repl(rng): - """test expressions replacement""" + """Test expressions replacement.""" e = ScalarExpression("2 * a", repl={"a": "b"}) assert not e.constant assert e.depends_on("b") diff --git a/tests/tools/test_ffmpeg.py b/tests/tools/test_ffmpeg.py index 4077ce37..a08c57ac 100644 --- a/tests/tools/test_ffmpeg.py +++ b/tests/tools/test_ffmpeg.py @@ -12,5 +12,5 @@ [(1, 8, "gray"), (2, 7, "rgb24"), (3, 9, "gbrp16le"), (5, 8, None), (1, 17, None)], ) def test_find_format(channels, bits_per_channel, result): - """test_find_format function""" + """test_find_format function.""" assert find_format(channels, bits_per_channel) == result diff --git a/tests/tools/test_math.py b/tests/tools/test_math.py index afd3a573..86365395 100644 --- a/tests/tools/test_math.py +++ b/tests/tools/test_math.py @@ -9,7 +9,7 @@ def test_SmoothData1D(rng): - """test smoothing""" + """Test smoothing.""" x = rng.uniform(0, 1, 500) xs = np.linspace(0, 1, 16)[1:-1] @@ -38,7 +38,7 @@ def test_SmoothData1D(rng): def test_online_statistics(): - """test OnlineStatistics class""" + """Test OnlineStatistics class.""" stat = OnlineStatistics() stat.add(1) diff --git a/tests/tools/test_misc.py b/tests/tools/test_misc.py index eee505b8..63859518 100644 --- a/tests/tools/test_misc.py +++ b/tests/tools/test_misc.py @@ -12,7 +12,7 @@ def test_ensure_directory_exists(tmp_path): - """tests the ensure_directory_exists function""" + """Tests the ensure_directory_exists function.""" # create temporary name path = tmp_path / "test_ensure_directory_exists" assert not path.exists() @@ -28,7 +28,7 @@ def test_ensure_directory_exists(tmp_path): def test_preserve_scalars(): - """test the preserve_scalars decorator""" + """Test the preserve_scalars decorator.""" class Test: @misc.preserve_scalars @@ -42,7 +42,7 @@ def meth(self, arr): def test_hybridmethod(): - """test the hybridmethod decorator""" + """Test the hybridmethod decorator.""" class Test: @misc.hybridmethod @@ -58,7 +58,7 @@ def method(self): def test_estimate_computation_speed(): - """test estimate_computation_speed method""" + """Test estimate_computation_speed method.""" def f(x): return 2 * x @@ -70,7 +70,7 @@ def g(x): def test_classproperty(): - """test classproperty decorator""" + """Test classproperty decorator.""" class Test: _value = 2 @@ -84,7 +84,7 @@ def value(cls): # @NoSelf @pytest.mark.skipif(not misc.module_available("h5py"), reason="requires `h5py` module") def test_hdf_write_attributes(tmp_path): - """test hdf_write_attributes function""" + """Test hdf_write_attributes function.""" import h5py path = tmp_path / "test_hdf_write_attributes.hdf5" @@ -114,5 +114,5 @@ def test_hdf_write_attributes(tmp_path): 
@misc.skipUnlessModule("undefined_module_name") def test_skipUnlessModule(): - """test skipUnlessModule decorator""" + """Test skipUnlessModule decorator.""" raise RuntimeError # test should never run diff --git a/tests/tools/test_mpi.py b/tests/tools/test_mpi.py index eeaa316f..402bc6aa 100644 --- a/tests/tools/test_mpi.py +++ b/tests/tools/test_mpi.py @@ -10,7 +10,7 @@ @pytest.mark.multiprocessing def test_send_recv(): - """test basic send and receive""" + """Test basic send and receive.""" if size == 1: pytest.skip("Run without multiprocessing") @@ -25,7 +25,7 @@ def test_send_recv(): @pytest.mark.multiprocessing def test_allreduce(): - """test basic send and receive""" + """Test basic send and receive.""" from numba_mpi import Operator data = np.arange(size) diff --git a/tests/tools/test_numba.py b/tests/tools/test_numba.py index dcd6e0df..d9ee45b0 100644 --- a/tests/tools/test_numba.py +++ b/tests/tools/test_numba.py @@ -15,12 +15,12 @@ def test_environment(): - """test function signature checks""" + """Test function signature checks.""" assert isinstance(numba_environment(), dict) def test_flat_idx(): - """test flat_idx function""" + """Test flat_idx function.""" # testing the numpy version assert flat_idx(2, 1) == 2 assert flat_idx(np.arange(2), 1) == 1 @@ -37,7 +37,7 @@ def get_sparse_matrix_data(data): def test_counter(): - """test Counter implementation""" + """Test Counter implementation.""" c1 = Counter() assert int(c1) is 0 assert c1 == 0 @@ -58,7 +58,7 @@ def test_counter(): "arr", [np.arange(5), np.linspace(0, 1, 3), np.arange(12).reshape(3, 4)[1:, 2:]] ) def test_make_array_constructor(arr): - """test implementation to create array""" + """Test implementation to create array.""" constructor = jit(make_array_constructor(arr)) arr2 = constructor() np.testing.assert_equal(arr, arr2) diff --git a/tests/tools/test_output.py b/tests/tools/test_output.py index 6fc935d4..9fde8d8d 100644 --- a/tests/tools/test_output.py +++ b/tests/tools/test_output.py @@ -6,7 +6,7 @@ def test_progress_bars(): - """test progress bars""" + """Test progress bars.""" pb_cls = output.get_progress_bar_class() tot = 0 for i in pb_cls(range(4)): @@ -15,12 +15,12 @@ def test_progress_bars(): def test_in_jupyter_notebook(): - """test the function in_jupyter_notebook""" + """Test the function in_jupyter_notebook.""" assert isinstance(output.in_jupyter_notebook(), bool) def test_display_progress(capsys): - """test whether this works""" + """Test whether this works.""" for _ in output.display_progress(range(2)): pass out, err = capsys.readouterr() diff --git a/tests/tools/test_parameters.py b/tests/tools/test_parameters.py index e494d24a..9647cf12 100644 --- a/tests/tools/test_parameters.py +++ b/tests/tools/test_parameters.py @@ -21,7 +21,7 @@ def test_parameters(): - """test mixing Parameterized""" + """Test mixing Parameterized.""" param = Parameter("a", 1, int, "help", extra={"b": 3}) assert isinstance(str(param), str) @@ -88,7 +88,7 @@ class Test3(Test2): def test_parameters_simple(): - """test adding parameters using a simple dictionary""" + """Test adding parameters using a simple dictionary.""" class TestSimple(Parameterized): parameters_default = {"a": 1} @@ -98,7 +98,7 @@ class TestSimple(Parameterized): def test_parameter_help(monkeypatch, capsys): - """test how parameters are shown""" + """Test how parameters are shown.""" class TestHelp1(Parameterized): parameters_default = [DeprecatedParameter("a", 1, int, "random string")] @@ -120,7 +120,7 @@ class TestHelp2(TestHelp1): def 
test_hidden_parameter():
- """test how hidden parameters are handled"""
+ """Test how hidden parameters are handled.""" class TestHidden1(Parameterized): parameters_default = [Parameter("a", 1), Parameter("b", 2)]
@@ -152,7 +152,7 @@ class TestHidden3(TestHidden1):
def test_convert_default_values(caplog):
- """test how default values are handled"""
+ """Test how default values are handled.""" caplog.set_level(logging.WARNING) class TestConvert1(Parameterized):
diff --git a/tests/tools/test_parse_duration.py b/tests/tools/test_parse_duration.py index 2fd7a146..abf3de8c 100644
--- a/tests/tools/test_parse_duration.py
+++ b/tests/tools/test_parse_duration.py
@@ -6,7 +6,7 @@
def test_parse_duration():
- """test function signature checks"""
+ """Test the parse_duration function.""" def p(value): return parse_duration(value).total_seconds()
diff --git a/tests/tools/test_plotting_tools.py b/tests/tools/test_plotting_tools.py index 85721a0a..fc1c30bf 100644
--- a/tests/tools/test_plotting_tools.py
+++ b/tests/tools/test_plotting_tools.py
@@ -10,7 +10,7 @@
def test_plot_on_axes(tmp_path):
- """test the plot_on_axes decorator"""
+ """Test the plot_on_axes decorator.""" @plot_on_axes def plot(ax):
@@ -22,7 +22,7 @@ def plot(ax):
def test_plot_on_figure(tmp_path):
- """test the plot_on_figure decorator"""
+ """Test the plot_on_figure decorator.""" @plot_on_figure def plot(fig):
@@ -37,7 +37,7 @@ def plot(fig):
@pytest.mark.interactive
def test_plot_colorbar(tmp_path, rng):
- """test the plot_on_axes decorator"""
+ """Test plotting a colorbar.""" data = rng.normal(size=(3, 3)) # do not specify axis
diff --git a/tests/tools/test_spectral.py b/tests/tools/test_spectral.py index 1a2f402a..a96f07dd 100644
--- a/tests/tools/test_spectral.py
+++ b/tests/tools/test_spectral.py
@@ -11,7 +11,11 @@
def spectral_density(data, dx=1.0):
- """calculate the power spectral density of a field
+ """Calculate the power spectral density of a field.
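+
+ Roughly, the density is the squared magnitude of the Fourier transform of the
+ field, |fft(data)|**2 (up to normalization), as a function of the wave vector
+ k = 2 * pi * f, with frequencies f set by the grid spacing dx.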
Args: data (:class:`~numpy.ndarray`):
@@ -38,7 +38,7 @@ def spectral_density(data, dx=1.0):
def test_colored_noise(rng):
- """test the implementation of the colored noise"""
+ """Test the implementation of the colored noise.""" grid = UnitGrid([64, 64], periodic=True) for exponent in [0, -1, 2]: scale = rng.uniform(1, 10)
@@ -50,8 +50,8 @@ def test_colored_noise(rng):
def test_noise_scaling(rng):
- """compare the noise strength (in terms of the spectral density of
- two different noise sources that should be equivalent)"""
+ """Compare the noise strength (in terms of the spectral density) of two different
+ noise sources that should be equivalent.""" # create a grid x, w = 2 + 10 * rng.random(2) size = rng.integers(128, 256)
diff --git a/tests/trackers/test_interrupts.py b/tests/trackers/test_interrupts.py index bf49858a..704cb1d4 100644
--- a/tests/trackers/test_interrupts.py
+++ b/tests/trackers/test_interrupts.py
@@ -15,7 +15,7 @@
def test_interrupt_constant():
- """test the ConstantInterrupts class"""
+ """Test the ConstantInterrupts class.""" ival1 = ConstantInterrupts(2) ival2 = ival1.copy() # test copying too
@@ -43,7 +43,7 @@ def test_interrupt_constant():
def test_interrupt_tstart():
- """test the t_start argument of interrupts"""
+ """Test the t_start argument of interrupts.""" ival = ConstantInterrupts(dt=2, t_start=7) assert ival.initialize(0) == pytest.approx(7) assert ival.next(3) == pytest.approx(9)
@@ -53,7 +53,7 @@ def test_interrupt_tstart():
def test_interrupt_logarithmic():
- """test the LogarithmicInterrupts class"""
+ """Test the LogarithmicInterrupts class.""" ival = LogarithmicInterrupts(2, factor=2) assert ival.initialize(0) == pytest.approx(0) assert ival.dt == 1
@@ -66,7 +66,7 @@ def test_interrupt_logarithmic():
def test_interrupt_realtime():
- """test the RealtimeInterrupts class"""
+ """Test the RealtimeInterrupts class.""" for ival in [RealtimeInterrupts("0:01"), parse_interrupt("0:01")]: assert ival.initialize(0) == pytest.approx(0) i1, i2, i3 = ival.next(1), ival.next(1), ival.next(1)
@@ -74,7 +74,7 @@ def test_interrupt_realtime():
def test_interrupt_fixed():
- """test the FixedInterrupts class"""
+ """Test the FixedInterrupts class.""" ival = FixedInterrupts([1, 3]) assert ival.initialize(0) == pytest.approx(1) assert ival.dt == 1
diff --git a/tests/trackers/test_trackers.py b/tests/trackers/test_trackers.py index d3d5759b..de4a7920 100644
--- a/tests/trackers/test_trackers.py
+++ b/tests/trackers/test_trackers.py
@@ -18,7 +18,7 @@
def test_plot_tracker(tmp_path, rng):
- """test whether the plot tracker creates files without errors"""
+ """Test whether the plot tracker creates files without errors.""" output_file = tmp_path / "img.png" def get_title(state, t):
@@ -42,7 +42,7 @@ def get_title(state, t):
@pytest.mark.skipif(not Movie.is_available(), reason="no ffmpeg")
def test_plot_movie_tracker(tmp_path, rng):
- """test whether the plot tracker creates files without errors"""
+ """Test whether the movie tracker creates files without errors.""" output_file = tmp_path / "movie.mov" grid = UnitGrid([4, 4])
@@ -58,7 +58,7 @@ def test_plot_movie_tracker(tmp_path, rng):
def test_simple_progress():
- """simple test for basic progress bar"""
+ """Simple test for basic progress bar.""" pbar = trackers.ProgressTracker(interrupts=1) field = ScalarField(UnitGrid([3])) pbar.initialize(field)
@@ -67,7 +67,7 @@ def test_simple_progress():
def test_trackers(rng):
- """test whether simple trackers can be used"""
+ """Test whether simple trackers can be used.""" times = []
def store_time(state, t): @@ -102,7 +102,7 @@ def get_sparse_matrix_data(state): def test_callback_tracker(rng): - """test trackers that support a callback""" + """Test trackers that support a callback.""" data = [] def store_mean_data(state): @@ -142,7 +142,7 @@ def get_time(state, t): def test_data_tracker(tmp_path): - """test the DataTracker""" + """Test the DataTracker.""" field = ScalarField(UnitGrid([4, 4])) eq = DiffusionPDE() @@ -161,7 +161,7 @@ def test_data_tracker(tmp_path): def test_steady_state_tracker(): - """test the SteadyStateTracker""" + """Test the SteadyStateTracker.""" storage = MemoryStorage() c0 = ScalarField.from_expression(UnitGrid([5]), "sin(x)") eq = DiffusionPDE() @@ -183,8 +183,8 @@ def test_steady_state_tracker(): def test_small_tracker_dt(rng): - """test the case where the dt of the tracker is smaller than the dt - of the simulation""" + """Test the case where the dt of the tracker is smaller than the dt of the + simulation.""" storage = MemoryStorage() eq = DiffusionPDE() c0 = ScalarField.random_uniform(UnitGrid([4, 4]), 0.1, 0.2, rng=rng) @@ -195,7 +195,7 @@ def test_small_tracker_dt(rng): def test_runtime_tracker(rng): - """test the RuntimeTracker""" + """Test the RuntimeTracker.""" s = ScalarField.random_uniform(UnitGrid([128]), rng=rng) tracker = trackers.RuntimeTracker("0:01") sol = ExplicitSolver(DiffusionPDE()) @@ -204,7 +204,7 @@ def test_runtime_tracker(rng): def test_consistency_tracker(rng): - """test the ConsistencyTracker""" + """Test the ConsistencyTracker.""" s = ScalarField.random_uniform(UnitGrid([128]), rng=rng) sol = ExplicitSolver(DiffusionPDE(1e3)) con = Controller(sol, t_range=1e5, tracker=["consistency"]) @@ -214,7 +214,7 @@ def test_consistency_tracker(rng): def test_material_conservation_tracker(rng): - """test the MaterialConservationTracker""" + """Test the MaterialConservationTracker.""" state = ScalarField.random_uniform(UnitGrid([8, 8]), 0, 1, rng=rng) solver = ExplicitSolver(CahnHilliardPDE()) @@ -229,7 +229,7 @@ def test_material_conservation_tracker(rng): def test_get_named_trackers(): - """test the get_named_trackers function""" + """Test the get_named_trackers function.""" for name, cls in get_named_trackers().items(): assert isinstance(name, str) tracker = TrackerBase.from_data(name) @@ -237,7 +237,7 @@ def test_get_named_trackers(): def test_double_tracker(rng): - """simple test for using a custom tracker twice""" + """Simple test for using a custom tracker twice.""" interrupts = ConstantInterrupts(1) times1, times2 = [], [] t1 = trackers.CallbackTracker(lambda s, t: times1.append(t), interrupts=interrupts) diff --git a/tests/visualization/test_movies.py b/tests/visualization/test_movies.py index 6df5d79e..8f76c29a 100644 --- a/tests/visualization/test_movies.py +++ b/tests/visualization/test_movies.py @@ -13,7 +13,7 @@ @pytest.mark.skipif(not movies.Movie.is_available(), reason="no ffmpeg") def test_movie_class(tmp_path): - """test Movie class""" + """Test Movie class.""" import matplotlib.pyplot as plt path = tmp_path / "test_movie.mov" @@ -36,7 +36,7 @@ def test_movie_class(tmp_path): @pytest.mark.skipif(not movies.Movie.is_available(), reason="no ffmpeg") @pytest.mark.parametrize("movie_func", [movies.movie_scalar, movies.movie]) def test_movie_scalar(movie_func, tmp_path, rng): - """test Movie class""" + """Test Movie class.""" # create some data state = ScalarField.random_uniform(UnitGrid([4, 4]), rng=rng) eq = DiffusionPDE() @@ -57,6 +57,6 @@ def test_movie_scalar(movie_func, tmp_path, rng): @pytest.mark.skipif(not 
movies.Movie.is_available(), reason="no ffmpeg")
def test_movie_wrong_path(tmp_path):
- """test whether there is a useful error message when path doesn't exist"""
+ """Test whether there is a useful error message when path doesn't exist.""" with pytest.raises(OSError): movies.Movie(tmp_path / "unavailable" / "test.mov")
diff --git a/tests/visualization/test_plotting.py b/tests/visualization/test_plotting.py index bf958faf..06826108 100644
--- a/tests/visualization/test_plotting.py
+++ b/tests/visualization/test_plotting.py
@@ -12,7 +12,7 @@
def test_scalar_field_plot(tmp_path, rng):
- """test ScalarFieldPlot class"""
+ """Test ScalarFieldPlot class.""" path = tmp_path / "test_scalar_field_plot.png" # create some data
@@ -28,7 +28,7 @@ def test_scalar_field_plot(tmp_path, rng):
def test_scalar_plot(tmp_path, rng):
- """test Simple simulation"""
+ """Test a simple scalar plot.""" path = tmp_path / "test_scalar_plot.png" # create some data
@@ -47,7 +47,7 @@ def test_scalar_plot(tmp_path, rng):
def test_collection_plot(tmp_path):
- """test Simple simulation"""
+ """Test a simple collection plot.""" # create some data field = FieldCollection( [ScalarField(UnitGrid([8, 8]), label="first"), ScalarField(UnitGrid([8, 8]))]
@@ -61,7 +61,7 @@ def test_collection_plot(tmp_path):
def test_kymograph_single(tmp_path):
- """test making kymographs for single fields"""
+ """Test making kymographs for single fields.""" # create some storage field = ScalarField(UnitGrid(8)) with get_memory_storage(field) as storage:
@@ -81,7 +81,7 @@ def test_kymograph_single(tmp_path):
def test_kymograph_collection(tmp_path):
- """test making kymographs for field collections"""
+ """Test making kymographs for field collections.""" # create some storage field = FieldCollection( [ScalarField(UnitGrid(8), label="a"), ScalarField(UnitGrid(8), label="b")]
@@ -107,7 +107,7 @@ def test_kymograph_collection(tmp_path):
@pytest.mark.skipif(not module_available("napari"), reason="requires `napari` module")
@pytest.mark.interactive
def test_interactive_plotting(rng):
- """test plot_interactive"""
+ """Test plot_interactive.""" # create some data field = ScalarField.random_uniform(UnitGrid([8]), rng=rng) with get_memory_storage(field) as storage: