From 5366beca1a0d0e04a4f0f29e9c82eba5043b5044 Mon Sep 17 00:00:00 2001
From: Marvin Friede <51965259+marvinfriede@users.noreply.github.com>
Date: Sat, 29 Jun 2024 22:56:48 +0200
Subject: [PATCH] Actions Workflow (#154)
---
.github/workflows/{python.yml => ubuntu.yaml} | 39 +++++-----
README.md | 6 +-
docs/requirements.txt | 11 +++
.../{batch.py => batch-vs-seq-aconfl.py} | 0
.../batch-vs-seq-nicotine.py} | 0
pyproject.toml | 3 +-
src/dxtb/__version__.py | 2 +-
src/dxtb/_src/calculators/types/analytical.py | 7 +-
src/dxtb/_src/calculators/types/base.py | 23 +++---
src/dxtb/_src/calculators/types/energy.py | 7 +-
src/dxtb/_src/constants/labels/integrals.py | 28 +++++++-
src/dxtb/_src/exlibs/libcint/__init__.py | 2 +-
src/dxtb/_src/integral/container.py | 11 ++-
src/dxtb/_src/integral/levels.py | 45 ------------
src/dxtb/_src/io/handler.py | 4 ++
src/dxtb/integrals/__init__.py | 3 +-
src/dxtb/integrals/levels.py | 28 --------
test/test_a_memory_leak/test_higher_deriv.py | 39 +++++++---
test/test_a_memory_leak/test_repulsion.py | 39 +++++++---
test/test_a_memory_leak/test_scf.py | 30 +++++---
test/test_a_memory_leak/util.py | 37 ++++++----
test/test_basis/test_export.py | 13 ++++
test/test_cli/test_entrypoint.py | 5 +-
test/test_dispersion/test_grad_param.py | 65 +++++++++++++++--
test/test_dispersion/test_grad_pos.py | 72 ++++++++++++++++---
test/test_external/test_field.py | 4 +-
test/test_integrals/test_libcint.py | 5 +-
test/test_libcint/test_overlap.py | 52 ++++++++++++--
test/test_multipole/test_dipole_integral.py | 17 ++++-
test/test_overlap/test_grad_pos.py | 65 +++++++++++++++--
test/test_properties/test_dipole.py | 15 +++-
test/test_properties/test_dipole_deriv.py | 15 +++-
test/test_properties/test_forces.py | 4 +-
test/test_properties/test_hessian.py | 14 +++-
test/test_properties/test_hyperpol.py | 19 ++++-
test/test_properties/test_ir.py | 16 ++++-
test/test_properties/test_pol.py | 13 +++-
test/test_properties/test_pol_deriv.py | 13 +++-
test/test_properties/test_raman.py | 16 ++++-
test/test_properties/test_vibration.py | 11 ++-
test/test_properties/test_vibration_ref.py | 16 ++++-
test/test_scf/test_charged.py | 4 +-
test/test_scf/test_full_tracking.py | 27 +++++--
test/test_scf/test_grad.py | 49 ++++++++++++-
test/test_scf/test_scf.py | 44 ++++++++++--
test/test_scf/test_scp.py | 8 ++-
test/test_singlepoint/test_energy.py | 58 ++++++++++++++-
test/test_singlepoint/test_grad.py | 16 +++--
test/test_singlepoint/test_grad_field.py | 3 +-
test/test_singlepoint/test_grad_fieldgrad.py | 1 +
.../test_grad_pos_withfield.py | 1 +
51 files changed, 781 insertions(+), 244 deletions(-)
rename .github/workflows/{python.yml => ubuntu.yaml} (74%)
rename examples/profiling/{batch.py => batch-vs-seq-aconfl.py} (100%)
rename examples/{batch-vs-seq.py => profiling/batch-vs-seq-nicotine.py} (100%)
delete mode 100644 src/dxtb/_src/integral/levels.py
delete mode 100644 src/dxtb/integrals/levels.py
diff --git a/.github/workflows/python.yml b/.github/workflows/ubuntu.yaml
similarity index 74%
rename from .github/workflows/python.yml
rename to .github/workflows/ubuntu.yaml
index 01f8294f3..1d20c9b8c 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/ubuntu.yaml
@@ -14,7 +14,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-name: Tests
+name: Tests (Ubuntu)
on:
push:
@@ -41,33 +41,34 @@ jobs:
strategy:
fail-fast: false
matrix:
- os: [ubuntu-latest, macos-latest, windows-latest]
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+ os: [ubuntu-latest]
+ # python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+ # torch-version: ["1.11.0", "1.12.1", "1.13.1", "2.0.1", "2.1.2", "2.2.2", "2.3.1"]
+ python-version: ["3.8", "3.9", "3.10", "3.11"]
torch-version: ["1.11.0", "1.12.1", "1.13.1", "2.0.1", "2.1.2", "2.2.2"]
exclude:
+ # Check latest versions here: https://download.pytorch.org/whl/torch/
+ #
# PyTorch now fully supports Python<=3.11
# see: https://github.com/pytorch/pytorch/issues/86566
#
- # PyTorch does not support Python 3.12 (all platforms)
+ # PyTorch now supports Python 3.12 (Linux) for 2.2.0 and newer
# see: https://github.com/pytorch/pytorch/issues/110436
- - os: ubuntu-latest
- python-version: "3.12"
- - os: macos-latest
- python-version: "3.12"
- - os: windows-latest
- python-version: "3.12"
+ - python-version: "3.12"
+ torch-version: "1.11.0"
+ - python-version: "3.12"
+ torch-version: "1.12.1"
+ - python-version: "3.12"
+ torch-version: "1.13.1"
+ - python-version: "3.12"
+ torch-version: "2.0.1"
+ - python-version: "3.12"
+ torch-version: "2.1.2"
# PyTorch<1.13.0 only supports Python<=3.10
- python-version: "3.11"
torch-version: "1.11.0"
- python-version: "3.11"
torch-version: "1.12.1"
- # On macOS and Windows, 1.13.x is also not supported for Python>=3.10
- - os: macos-latest
- python-version: "3.11"
- torch-version: "1.13.1"
- - os: windows-latest
- python-version: "3.11"
- torch-version: "1.13.1"
runs-on: ${{ matrix.os }}
@@ -80,7 +81,7 @@ jobs:
uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
@@ -106,3 +107,5 @@ jobs:
matrix.os == 'ubuntu-latest'
with:
files: ./coverage.xml # optional
+ token: ${{ secrets.CODECOV_TOKEN }} # required
+ verbose: true # optional (default = false)
diff --git a/README.md b/README.md
index 3bd75bd71..b742676b3 100644
--- a/README.md
+++ b/README.md
@@ -31,9 +31,9 @@
-
+
+
+
diff --git a/docs/requirements.txt b/docs/requirements.txt
index e8048d2a1..45a7764ec 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,14 @@
+numpy<2
+pydantic>=2.0.0
+scipy
+tad-dftd3>=0.3.0
+tad-dftd4>=0.2.0
+tad-libcint>=0.1.0
+tad-mctc>=0.2.0
+tad-multicharge
+tomli
+tomli-w
+torch>=1.11.0,<=2.2.2
sphinx
sphinx-book-theme
sphinx-copybutton
diff --git a/examples/profiling/batch.py b/examples/profiling/batch-vs-seq-aconfl.py
similarity index 100%
rename from examples/profiling/batch.py
rename to examples/profiling/batch-vs-seq-aconfl.py
diff --git a/examples/batch-vs-seq.py b/examples/profiling/batch-vs-seq-nicotine.py
similarity index 100%
rename from examples/batch-vs-seq.py
rename to examples/profiling/batch-vs-seq-nicotine.py
diff --git a/pyproject.toml b/pyproject.toml
index 8f38d6542..acfa4080c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,12 +44,13 @@ plugins = ["covdefaults"]
source = ["./src"]
omit = [
"./src/dxtb/_src/exlibs/xitorch/*",
+ "./src/dxtb/_src/exlibs/scipy/*",
"./src/dxtb/_src/typing.py",
"./src/dxtb/components/*",
]
[tool.coverage.report]
-fail_under = 80
+fail_under = 75
[tool.isort]
diff --git a/src/dxtb/__version__.py b/src/dxtb/__version__.py
index 46f2e018d..54fb8e144 100644
--- a/src/dxtb/__version__.py
+++ b/src/dxtb/__version__.py
@@ -22,5 +22,5 @@
__all__ = ["__version__", "__tversion__"]
-__version__ = "0.0.1"
+__version__ = "0.0.0"
"""Version of ``dxtb`` in semantic versioning."""
diff --git a/src/dxtb/_src/calculators/types/analytical.py b/src/dxtb/_src/calculators/types/analytical.py
index 150568956..93844d962 100644
--- a/src/dxtb/_src/calculators/types/analytical.py
+++ b/src/dxtb/_src/calculators/types/analytical.py
@@ -28,6 +28,7 @@
from dxtb import OutputHandler
from dxtb import integrals as ints
+from dxtb import labels
from dxtb._src import ncoord, scf
from dxtb._src.components.interactions.container import Charges, Potential
from dxtb._src.components.interactions.field import efield as efield
@@ -326,7 +327,7 @@ def _forces_analytical(
self.integrals.overlap.to_pt(write_overlap)
# dipole integral
- if self.opts.ints.level >= ints.levels.INTLEVEL_DIPOLE:
+ if self.opts.ints.level >= labels.INTLEVEL_DIPOLE:
OutputHandler.write_stdout_nf(" - Dipole ... ", v=3)
timer.start("Dipole Integral", parent_uid="Integrals")
intmats.dipole = self.integrals.build_dipole(positions)
@@ -339,7 +340,7 @@ def _forces_analytical(
self.integrals.dipole.to_pt(write_dipole)
# quadrupole integral
- if self.opts.ints.level >= ints.levels.INTLEVEL_QUADRUPOLE:
+ if self.opts.ints.level >= labels.INTLEVEL_QUADRUPOLE:
OutputHandler.write_stdout_nf(" - Quadrupole ... ", v=3)
timer.start("Quadrupole Integral", parent_uid="Integrals")
intmats.quadrupole = self.integrals.build_quadrupole(positions)
@@ -358,7 +359,7 @@ def _forces_analytical(
# To avoid unnecessary data transfer, the core Hamiltonian should
# be last. Internally, the overlap integral is only transferred back
# to GPU when all multipole integrals are calculated.
- if self.opts.ints.level >= ints.levels.INTLEVEL_HCORE:
+ if self.opts.ints.level >= labels.INTLEVEL_HCORE:
OutputHandler.write_stdout_nf(" - Core Hamiltonian ... ", v=3)
timer.start("Core Hamiltonian", parent_uid="Integrals")
intmats.hcore = self.integrals.build_hcore(positions)
diff --git a/src/dxtb/_src/calculators/types/base.py b/src/dxtb/_src/calculators/types/base.py
index d4b0d829f..984e87822 100644
--- a/src/dxtb/_src/calculators/types/base.py
+++ b/src/dxtb/_src/calculators/types/base.py
@@ -32,6 +32,7 @@ class and implement the :meth:`calculate` method and the corresponding methods
from dxtb import IndexHelper, OutputHandler
from dxtb import integrals as ints
+from dxtb import labels
from dxtb._src.calculators.properties.vibration import IRResult, RamanResult, VibResult
from dxtb._src.components.classicals import (
Classical,
@@ -565,25 +566,21 @@ def __init__(
# figure out integral level from interactions
if efield.LABEL_EFIELD in self.interactions.labels:
- if self.opts.ints.level < ints.levels.INTLEVEL_DIPOLE:
+ if self.opts.ints.level < labels.INTLEVEL_DIPOLE:
OutputHandler.warn(
"Setting integral level to DIPOLE "
- f"({ints.levels.INTLEVEL_DIPOLE}) due to electric field "
+ f"({labels.INTLEVEL_DIPOLE}) due to electric field "
"interaction."
)
- self.opts.ints.level = max(
- ints.levels.INTLEVEL_DIPOLE, self.opts.ints.level
- )
+ self.opts.ints.level = max(labels.INTLEVEL_DIPOLE, self.opts.ints.level)
if efield_grad.LABEL_EFIELD_GRAD in self.interactions.labels:
- if self.opts.ints.level < ints.levels.INTLEVEL_DIPOLE:
+ if self.opts.ints.level < labels.INTLEVEL_DIPOLE:
OutputHandler.warn(
"Setting integral level to QUADRUPOLE "
- f"{ints.levels.INTLEVEL_DIPOLE} due to electric field "
+                f"({labels.INTLEVEL_QUADRUPOLE}) due to electric field "
"gradient interaction."
)
- self.opts.ints.level = max(
- ints.levels.INTLEVEL_QUADRUPOLE, self.opts.ints.level
- )
+ self.opts.ints.level = max(labels.INTLEVEL_QUADRUPOLE, self.opts.ints.level)
# setup integral
driver = self.opts.ints.driver
@@ -591,14 +588,14 @@ def __init__(
numbers, par, self.ihelp, driver=driver, intlevel=self.opts.ints.level, **dd
)
- if self.opts.ints.level >= ints.levels.INTLEVEL_OVERLAP:
+ if self.opts.ints.level >= labels.INTLEVEL_OVERLAP:
self.integrals.hcore = ints.types.HCore(numbers, par, self.ihelp, **dd)
self.integrals.overlap = ints.types.Overlap(driver=driver, **dd)
- if self.opts.ints.level >= ints.levels.INTLEVEL_DIPOLE:
+ if self.opts.ints.level >= labels.INTLEVEL_DIPOLE:
self.integrals.dipole = ints.types.Dipole(driver=driver, **dd)
- if self.opts.ints.level >= ints.levels.INTLEVEL_QUADRUPOLE:
+ if self.opts.ints.level >= labels.INTLEVEL_QUADRUPOLE:
self.integrals.quadrupole = ints.types.Quadrupole(driver=driver, **dd)
OutputHandler.write_stdout("done\n", v=4)
diff --git a/src/dxtb/_src/calculators/types/energy.py b/src/dxtb/_src/calculators/types/energy.py
index 8e476aeb7..c1f0f84e7 100644
--- a/src/dxtb/_src/calculators/types/energy.py
+++ b/src/dxtb/_src/calculators/types/energy.py
@@ -31,6 +31,7 @@
from dxtb import OutputHandler
from dxtb import integrals as ints
+from dxtb import labels
from dxtb._src import scf
from dxtb._src.constants import defaults
from dxtb._src.integral.container import IntegralMatrices
@@ -171,7 +172,7 @@ def singlepoint(
self.integrals.overlap.to_pt(write_overlap)
# dipole integral
- if self.opts.ints.level >= ints.levels.INTLEVEL_DIPOLE:
+ if self.opts.ints.level >= labels.INTLEVEL_DIPOLE:
OutputHandler.write_stdout_nf(" - Dipole ... ", v=3)
timer.start("Dipole Integral", parent_uid="Integrals")
intmats.dipole = self.integrals.build_dipole(positions)
@@ -184,7 +185,7 @@ def singlepoint(
self.integrals.dipole.to_pt(write_dipole)
# quadrupole integral
- if self.opts.ints.level >= ints.levels.INTLEVEL_QUADRUPOLE:
+ if self.opts.ints.level >= labels.INTLEVEL_QUADRUPOLE:
OutputHandler.write_stdout_nf(" - Quadrupole ... ", v=3)
timer.start("Quadrupole Integral", parent_uid="Integrals")
intmats.quadrupole = self.integrals.build_quadrupole(positions)
@@ -203,7 +204,7 @@ def singlepoint(
# To avoid unnecessary data transfer, the core Hamiltonian should
# be last. Internally, the overlap integral is only transferred back
# to GPU when all multipole integrals are calculated.
- if self.opts.ints.level >= ints.levels.INTLEVEL_HCORE:
+ if self.opts.ints.level >= labels.INTLEVEL_HCORE:
OutputHandler.write_stdout_nf(" - Core Hamiltonian ... ", v=3)
timer.start("Core Hamiltonian", parent_uid="Integrals")
intmats.hcore = self.integrals.build_hcore(positions)
diff --git a/src/dxtb/_src/constants/labels/integrals.py b/src/dxtb/_src/constants/labels/integrals.py
index 301e6c6af..ae5c3f8d8 100644
--- a/src/dxtb/_src/constants/labels/integrals.py
+++ b/src/dxtb/_src/constants/labels/integrals.py
@@ -21,7 +21,26 @@
All labels related to integrals and their computation.
"""
-# integral driver
+__all__ = [
+ "INTDRIVER_LIBCINT",
+ "INTDRIVER_LIBCINT_STRS",
+ "INTDRIVER_AUTOGRAD",
+ "INTDRIVER_AUTOGRAD_STRS",
+ "INTDRIVER_ANALYTICAL",
+ "INTDRIVER_ANALYTICAL_STRS",
+ "INTDRIVER_LEGACY",
+ "INTDRIVER_LEGACY_STRS",
+ "INTDRIVER_MAP",
+ #
+ "INTLEVEL_NONE",
+ "INTLEVEL_OVERLAP",
+ "INTLEVEL_HCORE",
+ "INTLEVEL_DIPOLE",
+ "INTLEVEL_QUADRUPOLE",
+]
+
+# integral drivers
+
INTDRIVER_LIBCINT = 0
"""Integer code for LIBCINT driver."""
@@ -57,8 +76,11 @@
INTLEVEL_OVERLAP = 1
"""Overlap integrals."""
-INTLEVEL_DIPOLE = 2
+INTLEVEL_HCORE = 2
+"""Core Hamiltonian integrals."""
+
+INTLEVEL_DIPOLE = 3
"""Dipole integrals."""
-INTLEVEL_QUADRUPOLE = 3
+INTLEVEL_QUADRUPOLE = 4
"""Quadrupole integrals."""
diff --git a/src/dxtb/_src/exlibs/libcint/__init__.py b/src/dxtb/_src/exlibs/libcint/__init__.py
index 59f5cd5c5..8593387d5 100644
--- a/src/dxtb/_src/exlibs/libcint/__init__.py
+++ b/src/dxtb/_src/exlibs/libcint/__init__.py
@@ -23,7 +23,7 @@
try:
from tad_libcint.basis import AtomCGTOBasis, CGTOBasis
- from tad_libcint.interface.intor import int1e, overlap
+ from tad_libcint.interface.integrals import int1e, overlap
from tad_libcint.interface.wrapper import LibcintWrapper
except ImportError as e:
raise ImportError(
diff --git a/src/dxtb/_src/integral/container.py b/src/dxtb/_src/integral/container.py
index 22c701c96..fde186218 100644
--- a/src/dxtb/_src/integral/container.py
+++ b/src/dxtb/_src/integral/container.py
@@ -27,12 +27,11 @@
import torch
-from dxtb import IndexHelper
+from dxtb import IndexHelper, labels
-from dxtb._src.constants import defaults, labels
+from dxtb._src.constants import defaults
from dxtb._src.param import Param
from dxtb._src.typing import Any, Tensor
-from . import levels
from .base import IntDriver, IntegralContainer
from .types import Dipole, HCore, Overlap, Quadrupole
@@ -216,7 +215,7 @@ def build_overlap(self, positions: Tensor, **kwargs: Any) -> Tensor:
# move integral to the correct device...
if self.force_cpu_for_libcint is True:
# ... but only if no other multipole integrals are required
- if self._intlevel <= levels.INTLEVEL_HCORE:
+ if self._intlevel <= labels.INTLEVEL_HCORE:
self.overlap.integral = self.overlap.integral.to(device=self.device)
# FIXME: The matrix has to be moved explicitly, because when
@@ -301,7 +300,7 @@ def build_dipole(self, positions: Tensor, shift: bool = True, **kwargs: Any):
# move integral to the correct device, but only if no other multipole
# integrals are required
- if self.force_cpu_for_libcint and self._intlevel <= levels.INTLEVEL_DIPOLE:
+ if self.force_cpu_for_libcint and self._intlevel <= labels.INTLEVEL_DIPOLE:
self.dipole.integral = self.dipole.integral.to(device=self.device)
self.dipole.integral.matrix = self.dipole.integral.matrix.to(
device=self.device
@@ -389,7 +388,7 @@ def build_quadrupole(
# move integral to the correct device, but only if no other multipole
# integrals are required
- if self.force_cpu_for_libcint and self._intlevel <= levels.INTLEVEL_QUADRUPOLE:
+ if self.force_cpu_for_libcint and self._intlevel <= labels.INTLEVEL_QUADRUPOLE:
self.overlap.integral = self.overlap.integral.to(self.device)
self.overlap.integral.matrix = self.overlap.integral.matrix.to(self.device)
@@ -448,7 +447,7 @@ def checks(self) -> None:
if name != "hcore":
family_integral = cls.integral.family # type: ignore
- family_driver = self.driver.family
+ family_driver = self.driver.family # type: ignore
if family_integral != family_driver:
raise RuntimeError(
f"The '{cls.integral.label}' integral implementation "
diff --git a/src/dxtb/_src/integral/levels.py b/src/dxtb/_src/integral/levels.py
deleted file mode 100644
index ee8986a10..000000000
--- a/src/dxtb/_src/integral/levels.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is part of dxtb.
-#
-# SPDX-Identifier: Apache-2.0
-# Copyright (C) 2024 Grimme Group
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Integrals: Levels
-=================
-
-Specifies the levels of integrals that should be computed.
-"""
-
-__all__ = [
- "INTLEVEL_NONE",
- "INTLEVEL_OVERLAP",
- "INTLEVEL_HCORE",
- "INTLEVEL_DIPOLE",
- "INTLEVEL_QUADRUPOLE",
-]
-
-INTLEVEL_NONE = 0
-"""No integrals."""
-
-INTLEVEL_OVERLAP = 1
-"""Overlap integrals."""
-
-INTLEVEL_HCORE = 2
-"""Core Hamiltonian integrals."""
-
-INTLEVEL_DIPOLE = 3
-"""Dipole integrals."""
-
-INTLEVEL_QUADRUPOLE = 4
-"""Quadrupole integrals."""
diff --git a/src/dxtb/_src/io/handler.py b/src/dxtb/_src/io/handler.py
index 3e6b72877..fe0afe731 100644
--- a/src/dxtb/_src/io/handler.py
+++ b/src/dxtb/_src/io/handler.py
@@ -298,6 +298,10 @@ def dump_warnings(self) -> None:
for msg, warning_type in self.warnings:
self.console_logger.warning(f"[{warning_type.__name__}] {msg}")
+ def clear_warnings(self) -> None:
+ """Clear all warnings."""
+ self.warnings = []
+
def format_for_console(
self,
title: str,
diff --git a/src/dxtb/integrals/__init__.py b/src/dxtb/integrals/__init__.py
index 1d581c805..2bb712878 100644
--- a/src/dxtb/integrals/__init__.py
+++ b/src/dxtb/integrals/__init__.py
@@ -52,7 +52,6 @@
from typing import TYPE_CHECKING
if TYPE_CHECKING:
- from dxtb.integrals import levels as levels
from dxtb.integrals import types as types
from dxtb.integrals import wrappers as wrappers
else:
@@ -60,7 +59,7 @@
__getattr__, __dir__, __all__ = _lazy.attach_module(
__name__,
- ["levels", "types", "wrappers"],
+ ["types", "wrappers"],
)
del _lazy
diff --git a/src/dxtb/integrals/levels.py b/src/dxtb/integrals/levels.py
deleted file mode 100644
index 2b02cea7e..000000000
--- a/src/dxtb/integrals/levels.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# This file is part of dxtb.
-#
-# SPDX-Identifier: Apache-2.0
-# Copyright (C) 2024 Grimme Group
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Integrals: Levels
-=================
-
-Specifies the levels of integrals that should be computed.
-"""
-
-from dxtb._src.integral.levels import INTLEVEL_DIPOLE as INTLEVEL_DIPOLE
-from dxtb._src.integral.levels import INTLEVEL_HCORE as INTLEVEL_HCORE
-from dxtb._src.integral.levels import INTLEVEL_NONE as INTLEVEL_NONE
-from dxtb._src.integral.levels import INTLEVEL_OVERLAP as INTLEVEL_OVERLAP
-from dxtb._src.integral.levels import INTLEVEL_QUADRUPOLE as INTLEVEL_QUADRUPOLE
diff --git a/test/test_a_memory_leak/test_higher_deriv.py b/test/test_a_memory_leak/test_higher_deriv.py
index 464d705f8..732ddeecf 100644
--- a/test/test_a_memory_leak/test_higher_deriv.py
+++ b/test/test_a_memory_leak/test_higher_deriv.py
@@ -35,16 +35,13 @@
from ..conftest import DEVICE
from ..utils import nth_derivative
-from .util import has_memleak_tensor
+from .util import garbage_collect, has_memleak_tensor
-sample_list = ["H2O", "SiH4", "MB16_43_01"]
+slist = ["H2O", "SiH4"]
+slist_large = ["MB16_43_01"]
-@pytest.mark.filterwarnings("ignore")
-@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", sample_list)
-@pytest.mark.parametrize("n", [1, 2, 3, 4])
-def test_single(dtype: torch.dtype, name: str, n: int) -> None:
+def execute(name: str, dtype: torch.dtype, n: int) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
def fcn():
@@ -67,9 +64,33 @@ def fcn():
_ = nth_derivative(energy, positions, n)
+ del numbers
+ del positions
+ del ihelp
+ del rep
+ del cache
+ del energy
+
# run garbage collector to avoid leaks across other tests
- gc.collect()
+ garbage_collect()
leak = has_memleak_tensor(fcn)
- gc.collect()
+ garbage_collect()
assert not leak, "Memory leak detected"
+
+
+@pytest.mark.filterwarnings("ignore::UserWarning")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist)
+@pytest.mark.parametrize("n", [1, 2, 3, 4])
+def test_single(dtype: torch.dtype, name: str, n: int) -> None:
+ execute(name, dtype, n)
+
+
+@pytest.mark.large
+@pytest.mark.filterwarnings("ignore::UserWarning")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist_large)
+@pytest.mark.parametrize("n", [1, 2, 3, 4])
+def test_large(dtype: torch.dtype, name: str, n: int) -> None:
+ execute(name, dtype, n)
diff --git a/test/test_a_memory_leak/test_repulsion.py b/test/test_a_memory_leak/test_repulsion.py
index 148ec9f6c..69858e40b 100644
--- a/test/test_a_memory_leak/test_repulsion.py
+++ b/test/test_a_memory_leak/test_repulsion.py
@@ -35,15 +35,13 @@
from dxtb._src.typing import DD
from ..conftest import DEVICE
-from .util import has_memleak_tensor
+from .util import garbage_collect, has_memleak_tensor
-sample_list = ["H2O", "SiH4", "MB16_43_01"]
+slist = ["H2O", "SiH4"]
+slist_large = ["MB16_43_01"]
-@pytest.mark.filterwarnings("ignore")
-@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", sample_list)
-def test_single(dtype: torch.dtype, name: str) -> None:
+def execute(name: str, dtype: torch.dtype) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
def fcn():
@@ -90,9 +88,34 @@ def fcn():
# known reference cycle for create_graph=True
energy.backward()
+ del numbers
+ del positions
+ del ihelp
+ del rep
+ del cache
+ del energy
+ del arep
+ del zeff
+ del kexp
+
# run garbage collector to avoid leaks across other tests
- gc.collect()
+ garbage_collect()
leak = has_memleak_tensor(fcn)
- gc.collect()
+ garbage_collect()
assert not leak, "Memory leak detected"
+
+
+@pytest.mark.filterwarnings("ignore::UserWarning")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist)
+def test_single(dtype: torch.dtype, name: str) -> None:
+ execute(name, dtype)
+
+
+@pytest.mark.large
+@pytest.mark.filterwarnings("ignore::UserWarning")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist_large)
+def test_large(dtype: torch.dtype, name: str) -> None:
+ execute(name, dtype)
diff --git a/test/test_a_memory_leak/test_scf.py b/test/test_a_memory_leak/test_scf.py
index caa59abd9..5eef810d6 100644
--- a/test/test_a_memory_leak/test_scf.py
+++ b/test/test_a_memory_leak/test_scf.py
@@ -33,7 +33,7 @@
from dxtb._src.typing import DD
from ..conftest import DEVICE
-from .util import has_memleak_tensor
+from .util import garbage_collect, has_memleak_tensor
opts = {"verbosity": 0, "maxiter": 50, "exclude": ["rep", "disp", "hal"]}
repeats = 5
@@ -49,7 +49,7 @@ def test_xitorch(dtype: torch.dtype, run_gc: bool, create_graph: bool) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
def fcn():
- sample = samples["SiH4"]
+ sample = samples["LiH"]
numbers = sample["numbers"].to(DEVICE)
positions = sample["positions"].clone().to(**dd)
charges = torch.tensor(0.0, **dd)
@@ -69,10 +69,17 @@ def fcn():
if create_graph is True:
energy.backward()
+ del numbers
+ del positions
+ del charges
+ del calc
+ del result
+ del energy
+
# run garbage collector to avoid leaks across other tests
- gc.collect()
+ garbage_collect()
leak = has_memleak_tensor(fcn, gccollect=run_gc)
- gc.collect()
+ garbage_collect()
assert not leak, "Memory leak detected"
@@ -105,9 +112,9 @@ def fcn():
energy.backward()
# run garbage collector to avoid leaks across other tests
- gc.collect()
+ garbage_collect()
leak = has_memleak_tensor(fcn, gccollect=run_gc)
- gc.collect()
+ garbage_collect()
assert not leak, "Memory leak detected"
@@ -144,9 +151,16 @@ def fcn():
if create_graph is True:
energy.backward()
+ del numbers
+ del positions
+ del charges
+ del calc
+ del result
+ del energy
+
# run garbage collector to avoid leaks across other tests
- gc.collect()
+ garbage_collect()
leak = has_memleak_tensor(fcn, gccollect=run_gc)
- gc.collect()
+ garbage_collect()
assert not leak, "Memory leak detected"
diff --git a/test/test_a_memory_leak/util.py b/test/test_a_memory_leak/util.py
index e086b9052..a74afc12d 100644
--- a/test/test_a_memory_leak/util.py
+++ b/test/test_a_memory_leak/util.py
@@ -24,12 +24,29 @@
import gc
+import torch
+
from dxtb.__version__ import __tversion__
-from dxtb._src.typing import Callable, Literal, Tensor, overload
+from dxtb._src.typing import Callable, Generator, Literal, Tensor, overload
-def _tensors_from_gc() -> list:
- return [obj for obj in gc.get_objects() if isinstance(obj, Tensor)]
+def garbage_collect() -> None:
+ """
+ Run the garbage collector.
+ """
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+
+
+def _tensors_from_gc() -> Generator[Tensor, None, None]:
+ # return [obj for obj in gc.get_objects() if isinstance(obj, Tensor)]
+ for obj in gc.get_objects():
+ try:
+ if isinstance(obj, Tensor):
+ yield obj
+ except Exception: # nosec B112 pylint: disable=broad-exception-caught
+ continue
@overload
@@ -57,12 +74,12 @@ def _get_tensor_memory(
"""
# obtaining all the tensor objects from the garbage collector
- tensor_objs = _tensors_from_gc()
# iterate over each tensor object once and calculate the total storage
visited_data = set()
total_mem = 0.0
- for tensor in tensor_objs:
+ count = 0
+ for tensor in _tensors_from_gc():
if tensor.is_sparse:
continue
@@ -72,12 +89,7 @@ def _get_tensor_memory(
storage = tensor.untyped_storage()
# check if it has been visited
- if __tversion__ < (2, 0, 0):
- storage = tensor.storage()
- else:
- storage = tensor.untyped_storage()
-
- data_ptr = storage.data_ptr() # type: ignore
+ data_ptr = storage.data_ptr()
if data_ptr in visited_data:
continue
visited_data.add(data_ptr)
@@ -88,9 +100,10 @@ def _get_tensor_memory(
mem = numel * elmt_size / (1024 * 1024) # in MiB
total_mem += mem
+ count += 1
if return_number_tensors is True:
- return total_mem, len(tensor_objs)
+ return total_mem, count
return total_mem
diff --git a/test/test_basis/test_export.py b/test/test_basis/test_export.py
index 68144a90d..93f04d888 100644
--- a/test/test_basis/test_export.py
+++ b/test/test_basis/test_export.py
@@ -63,4 +63,17 @@ def test_export(
with open(p, encoding="utf-8") as f:
content = f.read()
+ def round_numbers(data):
+ rounded_data = []
+ for item in data:
+ try:
+ rounded_item = round(float(item), 10)
+ rounded_data.append(rounded_item)
+ except ValueError:
+ rounded_data.append(item)
+ return rounded_data
+
+ content = round_numbers(content.split())
+ txt = round_numbers(txt.split())
+
assert content == txt
diff --git a/test/test_cli/test_entrypoint.py b/test/test_cli/test_entrypoint.py
index 6111051f6..b21da554b 100644
--- a/test/test_cli/test_entrypoint.py
+++ b/test/test_cli/test_entrypoint.py
@@ -20,7 +20,7 @@
import pytest
-from dxtb import __version__
+from dxtb import OutputHandler, __version__
from dxtb._src.cli import console_entry_point
from ..utils import coordfile
@@ -57,6 +57,9 @@ def test_no_file(
def test_entrypoint(
caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture
) -> None:
+ # avoid pollution from previous tests
+ OutputHandler.clear_warnings()
+
ret = console_entry_point([str(coordfile), "--verbosity", "0"])
assert ret == 0
diff --git a/test/test_dispersion/test_grad_param.py b/test/test_dispersion/test_grad_param.py
index 224c0dc68..a9130306a 100644
--- a/test/test_dispersion/test_grad_param.py
+++ b/test/test_dispersion/test_grad_param.py
@@ -31,7 +31,8 @@
from ..conftest import DEVICE
from .samples import samples
-sample_list = ["LiH", "SiH4", "MB16_43_01", "PbH4-BiH3"]
+slist = ["LiH", "SiH4"]
+slist_large = ["MB16_43_01", "PbH4-BiH3"]
tol = 1e-8
@@ -66,7 +67,7 @@ def func(*inputs: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist)
def test_gradcheck(dtype: torch.dtype, name: str) -> None:
"""
Check a single analytical gradient of parameters against numerical
@@ -77,8 +78,21 @@ def test_gradcheck(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
+@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist_large)
+def test_gradcheck_large(dtype: torch.dtype, name: str) -> None:
+ """
+ Check a single analytical gradient of parameters against numerical
+ gradient from `torch.autograd.gradcheck`.
+ """
+ func, diffvars = gradchecker(dtype, name)
+ assert dgradcheck(func, diffvars, atol=tol)
+
+
+@pytest.mark.grad
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist)
def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
"""
Check a single analytical gradient of parameters against numerical
@@ -88,6 +102,19 @@ def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
assert dgradgradcheck(func, diffvars, atol=tol)
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_large)
+def test_gradgradcheck_large(dtype: torch.dtype, name: str) -> None:
+ """
+ Check a single analytical gradient of parameters against numerical
+ gradient from `torch.autograd.gradgradcheck`.
+ """
+ func, diffvars = gradchecker(dtype, name)
+ assert dgradgradcheck(func, diffvars, atol=tol)
+
+
def gradchecker_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[
Callable[[Tensor, Tensor, Tensor, Tensor], Tensor], # autograd function
tuple[Tensor, Tensor, Tensor, Tensor], # differentiable variables
@@ -129,7 +156,7 @@ def func(*inputs: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name2", slist)
def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
Check a single analytical gradient of parameters against numerical
@@ -139,10 +166,24 @@ def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
assert dgradcheck(func, diffvars, atol=tol)
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist_large)
+def test_gradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
+ """
+ Check a single analytical gradient of parameters against numerical
+ gradient from `torch.autograd.gradcheck`.
+ """
+ func, diffvars = gradchecker_batch(dtype, name1, name2)
+ assert dgradcheck(func, diffvars, atol=tol)
+
+
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name2", slist)
def test_gradgradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
Check a single analytical gradient of parameters against numerical
@@ -150,3 +191,17 @@ def test_gradgradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None
"""
func, diffvars = gradchecker_batch(dtype, name1, name2)
assert dgradgradcheck(func, diffvars, atol=tol)
+
+
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist_large)
+def test_gradgradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
+ """
+ Check a single analytical gradient of parameters against numerical
+ gradient from `torch.autograd.gradgradcheck`.
+ """
+ func, diffvars = gradchecker_batch(dtype, name1, name2)
+ assert dgradgradcheck(func, diffvars, atol=tol)
diff --git a/test/test_dispersion/test_grad_pos.py b/test/test_dispersion/test_grad_pos.py
index 185238a48..8d353dda1 100644
--- a/test/test_dispersion/test_grad_pos.py
+++ b/test/test_dispersion/test_grad_pos.py
@@ -32,7 +32,8 @@
from ..conftest import DEVICE
from .samples import samples
-sample_list = ["LiH", "SiH4", "MB16_43_01", "PbH4-BiH3"]
+slist = ["LiH", "SiH4"]
+slist_large = ["MB16_43_01", "PbH4-BiH3"]
tol = 1e-8
@@ -63,7 +64,7 @@ def func(positions: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist)
def test_gradcheck(dtype: torch.dtype, name: str) -> None:
"""
    Check a single analytical gradient of positions against numerical
@@ -74,8 +75,21 @@ def test_gradcheck(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
+@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist_large)
+def test_gradcheck_large(dtype: torch.dtype, name: str) -> None:
+    Check a single analytical gradient of positions against numerical
+ Check a single analytical gradient of parameters against numerical
+ gradient from `torch.autograd.gradcheck`.
+ """
+ func, diffvars = gradchecker(dtype, name)
+ assert dgradcheck(func, diffvars, atol=tol)
+
+
+@pytest.mark.grad
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist)
def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
"""
    Check a single analytical gradient of positions against numerical
@@ -85,6 +99,19 @@ def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
assert dgradgradcheck(func, diffvars, atol=tol)
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_large)
+def test_gradgradcheck_large(dtype: torch.dtype, name: str) -> None:
+ """
+    Check a single analytical gradient of positions against numerical
+ gradient from `torch.autograd.gradgradcheck`.
+ """
+ func, diffvars = gradchecker(dtype, name)
+ assert dgradgradcheck(func, diffvars, atol=tol)
+
+
def gradchecker_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[
Callable[[Tensor], Tensor], # autograd function
Tensor, # differentiable variables
@@ -122,7 +149,7 @@ def func(positions: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name2", slist)
def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
    Check a single analytical gradient of positions against numerical
@@ -132,10 +159,24 @@ def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
assert dgradcheck(func, diffvars, atol=tol)
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist_large)
+def test_gradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
+ """
+    Check a single analytical gradient of positions against numerical
+ gradient from `torch.autograd.gradcheck`.
+ """
+ func, diffvars = gradchecker_batch(dtype, name1, name2)
+ assert dgradcheck(func, diffvars, atol=tol)
+
+
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name2", slist)
def test_gradgradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
    Check a single analytical gradient of positions against numerical
@@ -147,7 +188,20 @@ def test_gradgradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None
@pytest.mark.grad
+@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist_large)
+def test_gradgradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
+ """
+    Check a single analytical gradient of positions against numerical
+ gradient from `torch.autograd.gradgradcheck`.
+ """
+ func, diffvars = gradchecker_batch(dtype, name1, name2)
+ assert dgradgradcheck(func, diffvars, atol=tol)
+
+
+@pytest.mark.grad
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist)
def test_autograd(dtype: torch.dtype, name: str) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
@@ -173,7 +227,7 @@ def test_autograd(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name2", slist)
def test_autograd_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
@@ -215,7 +269,7 @@ def test_autograd_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist)
def test_backward(dtype: torch.dtype, name: str) -> None:
"""Compare with reference values from tblite."""
dd: DD = {"dtype": dtype, "device": DEVICE}
@@ -251,7 +305,7 @@ def test_backward(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name2", slist)
def test_backward_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""Compare with reference values from tblite."""
dd: DD = {"dtype": dtype, "device": DEVICE}
diff --git a/test/test_external/test_field.py b/test/test_external/test_field.py
index 023059b1f..fc688348c 100644
--- a/test/test_external/test_field.py
+++ b/test/test_external/test_field.py
@@ -112,8 +112,8 @@ def test_batch(dtype: torch.dtype, name1: str, name2: str, scf_mode: str) -> Non
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name1", sample_list)
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", ["LiH"])
@pytest.mark.parametrize("name3", sample_list)
@pytest.mark.parametrize(
"scf_mode", [labels.SCF_MODE_IMPLICIT_NON_PURE, labels.SCF_MODE_FULL]
diff --git a/test/test_integrals/test_libcint.py b/test/test_integrals/test_libcint.py
index 6991d3538..6f0c24ad6 100644
--- a/test/test_integrals/test_libcint.py
+++ b/test/test_integrals/test_libcint.py
@@ -27,6 +27,7 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb import integrals as ints
+from dxtb import labels
from dxtb._src.exlibs import libcint
from dxtb._src.integral.driver.libcint import IntDriverLibcint
from dxtb._src.typing import DD
@@ -52,7 +53,7 @@ def test_single(dtype: torch.dtype, name: str, force_cpu_for_libcint: bool):
par,
ihelp,
force_cpu_for_libcint=force_cpu_for_libcint,
- intlevel=ints.levels.INTLEVEL_QUADRUPOLE,
+ intlevel=labels.INTLEVEL_QUADRUPOLE,
**dd,
)
@@ -118,7 +119,7 @@ def test_batch(
numbers,
par,
ihelp,
- intlevel=ints.levels.INTLEVEL_QUADRUPOLE,
+ intlevel=labels.INTLEVEL_QUADRUPOLE,
force_cpu_for_libcint=force_cpu_for_libcint,
**dd,
)
diff --git a/test/test_libcint/test_overlap.py b/test/test_libcint/test_overlap.py
index 5b63f88d4..630a7fa3c 100644
--- a/test/test_libcint/test_overlap.py
+++ b/test/test_libcint/test_overlap.py
@@ -48,7 +48,8 @@
from ..conftest import DEVICE
from .samples import samples
-sample_list = ["H2", "LiH", "Li2", "H2O", "S", "SiH4", "MB16_43_01", "C60"]
+slist = ["H2", "LiH", "Li2", "H2O", "S", "SiH4"]
+slist_large = ["MB16_43_01", "C60"]
def snorm(overlap: Tensor) -> Tensor:
@@ -82,8 +83,20 @@ def extract_blocks(x: Tensor, block_sizes: list[int] | Tensor) -> list[Tensor]:
@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
+ run_single(dtype, name)
+
+
+@pytest.mark.large
+@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist_large)
+def test_large(dtype: torch.dtype, name: str) -> None:
+ run_single(dtype, name)
+
+
+def run_single(dtype: torch.dtype, name: str) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
tol = sqrt(torch.finfo(dtype).eps) * 1e-2
@@ -120,13 +133,30 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name1", sample_list)
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist)
def test_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
Batched overlap using non-batched setup, i.e., one huge matrix is
calculated that is only populated on the diagonal.
"""
+ run_batch(dtype, name1, name2)
+
+
+@pytest.mark.large
+@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist_large)
+def test_large_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
+ """
+ Batched overlap using non-batched setup, i.e., one huge matrix is
+ calculated that is only populated on the diagonal.
+ """
+ run_batch(dtype, name1, name2)
+
+
+def run_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
tol = sqrt(torch.finfo(dtype).eps) * 1e-2
dd: DD = {"dtype": dtype, "device": DEVICE}
@@ -190,8 +220,20 @@ def test_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist)
def test_grad(dtype: torch.dtype, name: str) -> None:
+ run_grad(dtype, name)
+
+
+@pytest.mark.large
+@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist_large)
+def test_large_grad(dtype: torch.dtype, name: str) -> None:
+ run_grad(dtype, name)
+
+
+def run_grad(dtype: torch.dtype, name: str) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
tol = sqrt(torch.finfo(dtype).eps) * 1e-2
diff --git a/test/test_multipole/test_dipole_integral.py b/test/test_multipole/test_dipole_integral.py
index 1b9674900..5fac21674 100644
--- a/test/test_multipole/test_dipole_integral.py
+++ b/test/test_multipole/test_dipole_integral.py
@@ -45,7 +45,8 @@
from ..conftest import DEVICE
from .samples import samples
-sample_list = ["H2", "LiH", "Li2", "H2O", "S", "SiH4", "MB16_43_01", "C60"]
+slist = ["H2", "LiH", "Li2", "H2O", "S"]
+slist_large = ["SiH4", "MB16_43_01", "C60"]
def snorm(overlap: Tensor) -> Tensor:
@@ -54,8 +55,20 @@ def snorm(overlap: Tensor) -> Tensor:
@pytest.mark.skipif(M is False, reason="PySCF not installed")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
+ run_single(dtype, name)
+
+
+@pytest.mark.large
+@pytest.mark.skipif(M is False, reason="PySCF not installed")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist_large)
+def test_large(dtype: torch.dtype, name: str) -> None:
+ run_single(dtype, name)
+
+
+def run_single(dtype: torch.dtype, name: str) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
tol = sqrt(torch.finfo(dtype).eps) * 1e-2
diff --git a/test/test_overlap/test_grad_pos.py b/test/test_overlap/test_grad_pos.py
index 4b38f7def..83c97e852 100644
--- a/test/test_overlap/test_grad_pos.py
+++ b/test/test_overlap/test_grad_pos.py
@@ -34,7 +34,8 @@
from ..conftest import DEVICE
from .samples import samples
-sample_list = ["H2", "HHe", "LiH", "Li2", "S2", "H2O", "SiH4"]
+slist = ["LiH", "H2O"]
+slist_large = ["H2", "HHe", "Li2", "S2", "SiH4"]
tol = 1e-7
@@ -65,7 +66,7 @@ def func(pos: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist)
def test_grad(dtype: torch.dtype, name: str) -> None:
"""
Check a single analytical gradient of positions against numerical
@@ -77,7 +78,19 @@ def test_grad(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", sample_list)
+@pytest.mark.parametrize("name", slist_large)
+def test_grad_large(dtype: torch.dtype, name: str) -> None:
+ """
+ Check a single analytical gradient of positions against numerical
+ gradient from `torch.autograd.gradcheck`.
+ """
+ func, diffvars = gradchecker(dtype, name)
+ assert dgradcheck(func, diffvars, atol=tol)
+
+
+@pytest.mark.grad
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist)
def test_gradgrad(dtype: torch.dtype, name: str) -> None:
"""
Check a single analytical gradient of positions against numerical
@@ -87,6 +100,18 @@ def test_gradgrad(dtype: torch.dtype, name: str) -> None:
assert dgradgradcheck(func, diffvars, atol=tol)
+@pytest.mark.grad
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_large)
+def test_gradgrad_large(dtype: torch.dtype, name: str) -> None:
+ """
+ Check a single analytical gradient of positions against numerical
+ gradient from `torch.autograd.gradgradcheck`.
+ """
+ func, diffvars = gradchecker(dtype, name)
+ assert dgradgradcheck(func, diffvars, atol=tol)
+
+
def gradchecker_batch(
dtype: torch.dtype, name1: str, name2: str
) -> tuple[Callable[[Tensor], Tensor], Tensor]:
@@ -128,8 +153,8 @@ def func(pos: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.filterwarnings("ignore") # torch.meshgrid from batch.deflate
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name1", ["H2"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist)
def test_grad_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
Check a single analytical gradient of positions against numerical
@@ -143,7 +168,21 @@ def test_grad_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
@pytest.mark.filterwarnings("ignore") # torch.meshgrid from batch.deflate
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["H2"])
-@pytest.mark.parametrize("name2", sample_list)
+@pytest.mark.parametrize("name2", slist_large)
+def test_grad_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
+ """
+ Check a single analytical gradient of positions against numerical
+ gradient from `torch.autograd.gradcheck`.
+ """
+ func, diffvars = gradchecker_batch(dtype, name1, name2)
+ assert dgradcheck(func, diffvars, atol=tol)
+
+
+@pytest.mark.grad
+@pytest.mark.filterwarnings("ignore") # torch.meshgrid from batch.deflate
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name1", ["LiH"])
+@pytest.mark.parametrize("name2", slist)
def test_gradgrad_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
Check a single analytical gradient of positions against numerical
@@ -151,3 +190,17 @@ def test_gradgrad_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
"""
func, diffvars = gradchecker_batch(dtype, name1, name2)
assert dgradgradcheck(func, diffvars, atol=tol)
+
+
+@pytest.mark.grad
+@pytest.mark.filterwarnings("ignore") # torch.meshgrid from batch.deflate
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name1", ["H2"])
+@pytest.mark.parametrize("name2", slist_large)
+def test_gradgrad_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
+ """
+ Check a single analytical gradient of positions against numerical
+ gradient from `torch.autograd.gradgradcheck`.
+ """
+ func, diffvars = gradchecker_batch(dtype, name1, name2)
+ assert dgradgradcheck(func, diffvars, atol=tol)
diff --git a/test/test_properties/test_dipole.py b/test/test_properties/test_dipole.py
index da73eaea0..c1c79945f 100644
--- a/test/test_properties/test_dipole.py
+++ b/test/test_properties/test_dipole.py
@@ -35,8 +35,9 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "H2", "LiH", "HHe", "H2O", "CH4", "SiH4", "PbH4-BiH3"]
-slist_large = ["MB16_43_01", "LYS_xao", "C60"]
+slist = ["LiH"]
+slist_more = ["H", "HHe", "H2", "H2O", "CH4", "SiH4"]
+slist_large = ["PbH4-BiH3", "MB16_43_01", "LYS_xao", "C60"]
opts = {
"int_level": INTLEVEL_DIPOLE,
@@ -150,6 +151,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
single(name, "dipole", field_vector, dd=dd, atol=1e-3)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+ dd: DD = {"dtype": dtype, "device": DEVICE}
+
+ field_vector = torch.tensor([0.0, 0.0, 0.0], **dd) # * VAA2AU
+ single(name, "dipole", field_vector, dd=dd, atol=1e-3)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_dipole_deriv.py b/test/test_properties/test_dipole_deriv.py
index 8536df130..731b8a210 100644
--- a/test/test_properties/test_dipole_deriv.py
+++ b/test/test_properties/test_dipole_deriv.py
@@ -35,8 +35,9 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "HHe", "H2O", "CH4", "PbH4-BiH3"]
-slist_large = ["MB16_43_01"]
+slist = ["LiH"]
+slist_more = ["H", "HHe", "H2O", "CH4", "PbH4-BiH3", "MB16_43_01"]
+slist_large = ["PbH4-BiH3", "MB16_43_01"]
opts = {
"int_level": INTLEVEL_DIPOLE,
@@ -176,6 +177,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+ dd: DD = {"dtype": dtype, "device": DEVICE}
+
+ field_vector = torch.tensor([0.0, 0.0, 0.0], **dd)
+ single(name, field_vector, dd=dd)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_forces.py b/test/test_properties/test_forces.py
index f9807451a..67e249091 100644
--- a/test/test_properties/test_forces.py
+++ b/test/test_properties/test_forces.py
@@ -33,8 +33,8 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "HHe", "H2O", "CH4", "SiH4", "PbH4-BiH3"]
-slist_large = ["MB16_43_01"] # "LYS_xao"
+slist = ["H", "LiH", "H2O", "CH4", "SiH4"]
+slist_large = ["HHe", "PbH4-BiH3", "MB16_43_01"] # "LYS_xao"
opts = {
"maxiter": 100,
diff --git a/test/test_properties/test_hessian.py b/test/test_properties/test_hessian.py
index 6be72445d..819072826 100644
--- a/test/test_properties/test_hessian.py
+++ b/test/test_properties/test_hessian.py
@@ -33,8 +33,10 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "HHe", "H2O", "CH4", "SiH4"]
-slist_large = ["PbH4-BiH3"] # "MB16_43_01", "LYS_xao"
+slist = ["LiH"]
+slist_more = ["H", "HHe", "H2O", "CH4", "SiH4"]
+slist_large = ["PbH4-BiH3"]
+# "MB16_43_01", "LYS_xao"
opts = {
"maxiter": 100,
@@ -192,6 +194,14 @@ def test_single(dtype: torch.dtype, name: str) -> None:
single(name, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+ dd: DD = {"dtype": dtype, "device": DEVICE}
+ single(name, dd=dd)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_hyperpol.py b/test/test_properties/test_hyperpol.py
index fefb917a1..2e2b11195 100644
--- a/test/test_properties/test_hyperpol.py
+++ b/test/test_properties/test_hyperpol.py
@@ -35,8 +35,13 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "H2O", "CH4", "PbH4-BiH3"]
-slist_large = ["MB16_43_01"]
+slist = ["LiH"]
+slist_more = [
+ "H",
+ "H2O",
+ "CH4",
+]
+slist_large = ["PbH4-BiH3", "MB16_43_01"]
opts = {
"int_level": INTLEVEL_DIPOLE,
@@ -175,6 +180,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+ dd: DD = {"dtype": dtype, "device": DEVICE}
+
+ field_vector = torch.tensor([0.0, 0.0, 0.0], **dd)
+ single(name, field_vector, dd=dd)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_ir.py b/test/test_properties/test_ir.py
index 44dda425f..e7e706d68 100644
--- a/test/test_properties/test_ir.py
+++ b/test/test_properties/test_ir.py
@@ -35,8 +35,10 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "HHe", "H2O", "CH4", "SiH4", "PbH4-BiH3"]
-slist_large = ["MB16_43_01"] # LYS_xao too large for testing
+slist = ["LiH"]
+slist_more = ["H", "HHe", "H2O", "CH4", "SiH4"]
+slist_large = ["PbH4-BiH3", "MB16_43_01"]
+# LYS_xao too large for testing
opts = {
"int_level": INTLEVEL_DIPOLE,
@@ -152,6 +154,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+ dd: DD = {"dtype": dtype, "device": DEVICE}
+
+ field_vector = torch.tensor([0.0, 0.0, 0.0], **dd)
+ single(name, field_vector, dd=dd)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_pol.py b/test/test_properties/test_pol.py
index f6bbc3693..32531991f 100644
--- a/test/test_properties/test_pol.py
+++ b/test/test_properties/test_pol.py
@@ -35,7 +35,8 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "HHe", "H2O", "CH4", "SiH4"]
+slist = ["LiH"]
+slist_more = ["H", "HHe", "H2O", "CH4", "SiH4"]
slist_large = ["PbH4-BiH3", "MB16_43_01", "LYS_xao"]
opts = {
@@ -176,6 +177,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+    dd: DD = {"dtype": dtype, "device": DEVICE}
+
+    field_vector = torch.tensor([0.0, 0.0, 0.0], **dd)
+    single(name, field_vector, dd=dd)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_pol_deriv.py b/test/test_properties/test_pol_deriv.py
index 7832e03e0..3a250f483 100644
--- a/test/test_properties/test_pol_deriv.py
+++ b/test/test_properties/test_pol_deriv.py
@@ -35,7 +35,8 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "HHe", "H2O", "CH4", "SiH4"]
+slist = ["LiH"]
+slist_more = ["H", "HHe", "H2O", "CH4", "SiH4"]
slist_large = ["PbH4-BiH3"]
opts = {
@@ -170,6 +171,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
    single(name, field_vector, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+    dd: DD = {"dtype": dtype, "device": DEVICE}
+
+    field_vector = torch.tensor([0.0, 0.0, 0.0], **dd)
+    single(name, field_vector, dd=dd)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_raman.py b/test/test_properties/test_raman.py
index 5d237dc0c..0553afa36 100644
--- a/test/test_properties/test_raman.py
+++ b/test/test_properties/test_raman.py
@@ -35,8 +35,10 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "H2O", "SiH4"]
-slist_large = ["PbH4-BiH3"] # MB16_43_01 and LYS_xao too large for testing
+slist = ["LiH"]
+slist_more = ["H", "HHe", "H2O", "SiH4"]
+slist_large = ["PbH4-BiH3"]
+# MB16_43_01 and LYS_xao too large for testing
opts = {
"int_level": INTLEVEL_DIPOLE,
@@ -164,6 +166,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
    single(name, field_vector, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+    dd: DD = {"dtype": dtype, "device": DEVICE}
+
+    field_vector = torch.tensor([0.0, 0.0, 0.0], **dd)
+    single(name, field_vector, dd=dd)
+
+
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
diff --git a/test/test_properties/test_vibration.py b/test/test_properties/test_vibration.py
index 75ddd676a..419c904b2 100644
--- a/test/test_properties/test_vibration.py
+++ b/test/test_properties/test_vibration.py
@@ -34,7 +34,8 @@
from ..conftest import DEVICE
from .samples import samples
-slist = ["H", "LiH", "HHe", "H2O"]
+slist = ["LiH"]
+slist_more = ["H", "H2O"]
# FIXME: Larger systems fail for modes
# slist = ["H", "LiH", "HHe", "H2O", "CH4", "SiH4", "PbH4-BiH3"]
@@ -166,6 +167,14 @@ def test_single(dtype: torch.dtype, name: str) -> None:
    single(name, dd=dd)
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+    dd: DD = {"dtype": dtype, "device": DEVICE}
+    single(name, dd=dd)
+
+
# TODO: Batched derivatives are not supported yet
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
diff --git a/test/test_properties/test_vibration_ref.py b/test/test_properties/test_vibration_ref.py
index ae18e94d7..bd8a96c00 100644
--- a/test/test_properties/test_vibration_ref.py
+++ b/test/test_properties/test_vibration_ref.py
@@ -34,8 +34,9 @@
from .samples import samples
# FIXME: "HHe" is completely off
-slist = ["H", "H2", "LiH", "H2O", "CH4", "SiH4"]
-slist_large = ["LYS_xao"]
+slist = ["LiH"]
+slist_more = ["H", "H2", "H2O", "CH4", "SiH4"]
+slist_large = ["PbH4-BiH3", "MB16_43_01", "LYS_xao"]
opts = {
"maxiter": 100,
@@ -83,7 +84,16 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
-@pytest.mark.parametrize("name", ["PbH4-BiH3", "MB16_43_01", "LYS_xao"])
+@pytest.mark.parametrize("name", slist_more)
+def test_single_more(dtype: torch.dtype, name: str) -> None:
+    dd: DD = {"dtype": dtype, "device": DEVICE}
+    atol, rtol = 10, 1e-3
+    single(name, dd=dd, atol=atol, rtol=rtol)
+
+
+@pytest.mark.large
+@pytest.mark.parametrize("dtype", [torch.double])
+@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
    dd: DD = {"dtype": dtype, "device": DEVICE}
    atol, rtol = 10, 1e-3
diff --git a/test/test_scf/test_charged.py b/test/test_scf/test_charged.py
index 2b822bfcf..abac618e9 100644
--- a/test/test_scf/test_charged.py
+++ b/test/test_scf/test_charged.py
@@ -87,8 +87,8 @@ def test_grad(dtype: torch.dtype, name: str):
        **{
            "exclude": ["rep", "disp", "hal"],
            "maxiter": 50,
-            "f_atol": 1.0e-6,
-            "x_atol": 1.0e-6,
+            "f_atol": 1.0e-5,
+            "x_atol": 1.0e-5,
        },
    )
    calc = Calculator(numbers, par, opts=options, **dd)
diff --git a/test/test_scf/test_full_tracking.py b/test/test_scf/test_full_tracking.py
index 26447a161..a16e47533 100644
--- a/test/test_scf/test_full_tracking.py
+++ b/test/test_scf/test_full_tracking.py
@@ -30,11 +30,15 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.constants import labels
-from dxtb._src.typing import DD
+from dxtb._src.typing import DD, Tensor
from ..conftest import DEVICE
from .samples import samples
+slist = ["LiH", "SiH4"]
+slist_more = ["H2", "H2O", "CH4"]
+slist_large = ["PbH4-BiH3", "C6H5I-CH3SH", "MB16_43_01", "LYS_xao"]
+
opts = {
"verbosity": 0,
"maxiter": 300,
@@ -85,7 +89,7 @@ def single(
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["H2", "LiH", "H2O", "CH4", "SiH4"])
+@pytest.mark.parametrize("name", slist)
@pytest.mark.parametrize("mixer", ["anderson", "simple"])
@pytest.mark.parametrize("intdriver", drivers)
def test_single(dtype: torch.dtype, name: str, mixer: str, intdriver: int):
@@ -93,9 +97,21 @@ def test_single(dtype: torch.dtype, name: str, mixer: str, intdriver: int):
    single(dtype, name, mixer, tol, intdriver=intdriver)
+@pytest.mark.large
+@pytest.mark.filterwarnings("ignore")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", slist_more)
+@pytest.mark.parametrize("mixer", ["anderson", "simple"])
+@pytest.mark.parametrize("intdriver", drivers)
+def test_single_more(dtype: torch.dtype, name: str, mixer: str, intdriver: int):
+    tol = sqrt(torch.finfo(dtype).eps) * 10
+    single(dtype, name, mixer, tol, intdriver=intdriver)
+
+
+@pytest.mark.large
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["PbH4-BiH3", "C6H5I-CH3SH", "MB16_43_01", "LYS_xao"])
+@pytest.mark.parametrize("name", slist_large)
@pytest.mark.parametrize("mixer", ["anderson", "simple"])
def test_single_medium(dtype: torch.dtype, name: str, mixer: str):
"""Test a few larger system."""
@@ -103,6 +119,7 @@ def test_single_medium(dtype: torch.dtype, name: str, mixer: str):
    single(dtype, name, mixer, tol)
+@pytest.mark.large
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", ["S2", "LYS_xao_dist"])
@@ -175,7 +192,7 @@ def batched(
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name1", ["H2", "LiH"])
+@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", ["LiH", "SiH4"])
@pytest.mark.parametrize("mixer", ["anderson", "simple"])
@pytest.mark.parametrize("intdriver", drivers)
@@ -187,7 +204,7 @@ def test_batch(
def batched_unconverged(
-    ref,
+    ref: Tensor,
    dtype: torch.dtype,
    name1: str,
    name2: str,
diff --git a/test/test_scf/test_grad.py b/test/test_scf/test_grad.py
index ebb08045c..4171cd311 100644
--- a/test/test_scf/test_grad.py
+++ b/test/test_scf/test_grad.py
@@ -50,11 +50,30 @@
@pytest.mark.grad
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["LiH", "SiH4"])
+@pytest.mark.parametrize("name", ["LiH"])
@pytest.mark.parametrize("scp_mode", ["potential", "fock"])
@pytest.mark.parametrize("scf_mode", ["implicit", "nonpure", "full", "single-shot"])
def test_grad_backwards(
    name: str, dtype: torch.dtype, scf_mode: str, scp_mode: str
+) -> None:
+    run_grad_backwards(name, dtype, scf_mode, scp_mode)
+
+
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.filterwarnings("ignore")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", ["SiH4"])
+@pytest.mark.parametrize("scp_mode", ["potential", "fock"])
+@pytest.mark.parametrize("scf_mode", ["implicit", "nonpure", "full", "single-shot"])
+def test_grad_backwards_large(
+    name: str, dtype: torch.dtype, scf_mode: str, scp_mode: str
+) -> None:
+    run_grad_backwards(name, dtype, scf_mode, scp_mode)
+
+
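+# Shared implementation for the default and large test variants above.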
+def run_grad_backwards(
+    name: str, dtype: torch.dtype, scf_mode: str, scp_mode: str
) -> None:
    tol = sqrt(torch.finfo(dtype).eps) * 10
    dd: DD = {"device": DEVICE, "dtype": dtype}
@@ -95,8 +114,17 @@ def test_grad_backwards(
@pytest.mark.grad
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["H2", "LiH", "H2O", "CH4", "SiH4"])
+@pytest.mark.parametrize("name", ["LiH"])
def test_grad_autograd(name: str, dtype: torch.dtype):
+    run_grad_autograd(name, dtype)
+
+
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.filterwarnings("ignore")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", ["H2", "H2O", "CH4", "SiH4"])
+def test_grad_autograd_large(name: str, dtype: torch.dtype):
+    run_grad_autograd(name, dtype)
+
+
+def run_grad_autograd(name: str, dtype: torch.dtype):
    tol = sqrt(torch.finfo(dtype).eps) * 10
    dd: DD = {"device": DEVICE, "dtype": dtype}
@@ -167,12 +195,27 @@ def test_grad_large(name: str, dtype: torch.dtype):
@pytest.mark.grad
-@pytest.mark.parametrize("name", ["LiH", "H2O", "SiH4", "LYS_xao"])
+@pytest.mark.parametrize("name", ["LiH"])
def test_param_grad_energy(name: str, dtype: torch.dtype = torch.float):
"""
Test autograd of SCF without gradient tracking vs. SCF with full gradient
tracking. References obtained with full tracking and `torch.float`.
"""
+ run_param_grad_energy(name, dtype)
+
+
+@pytest.mark.grad
+@pytest.mark.large
+@pytest.mark.parametrize("name", ["H2O", "SiH4", "LYS_xao"])
+def test_param_grad_energy_large(name: str, dtype: torch.dtype = torch.float):
+ """
+ Test autograd of SCF without gradient tracking vs. SCF with full gradient
+ tracking. References obtained with full tracking and `torch.float`.
+ """
+ run_param_grad_energy(name, dtype)
+
+
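+# Shared implementation for both parameter-gradient test variants above.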
+def run_param_grad_energy(name: str, dtype: torch.dtype = torch.float):
    tol = sqrt(torch.finfo(dtype).eps) * 10
    dd: DD = {"device": DEVICE, "dtype": dtype}
diff --git a/test/test_scf/test_scf.py b/test/test_scf/test_scf.py
index 931b5dc2f..9a9372eb4 100644
--- a/test/test_scf/test_scf.py
+++ b/test/test_scf/test_scf.py
@@ -100,11 +100,11 @@ def test_single_medium(dtype: torch.dtype, name: str, mixer: str):
@pytest.mark.filterwarnings("ignore")
-@pytest.mark.parametrize("dtype", [torch.float])
-@pytest.mark.parametrize("name", ["S2", "LYS_xao_dist"])
-def test_single_difficult(dtype: torch.dtype, name: str):
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", ["S2"])
+def test_single_difficult_1(dtype: torch.dtype, name: str):
"""Test a few larger system (only float32 within tolerance)."""
- tol = sqrt(torch.finfo(dtype).eps) * 10
+ tol = 1e-2
dd: DD = {"device": DEVICE, "dtype": dtype}
sample = samples[name]
@@ -116,10 +116,40 @@ def test_single_difficult(dtype: torch.dtype, name: str):
    options = dict(
        opts,
        **{
-            "f_atol": 1e-6,
-            "x_atol": 1e-6,
+ "f_atol": 1e-5 if dtype == torch.float else 1e-7,
+ "x_atol": 1e-5 if dtype == torch.float else 1e-7,
"damp": 0.5, # simple mixing
- "maxiter": 300, # simple mixing
+ "maxiter": 400, # simple mixing
+ },
+ )
+ calc = Calculator(numbers, par, opts=options, **dd)
+
+ result = calc.singlepoint(positions, charges)
+ res = result.scf.sum(-1)
+ assert pytest.approx(ref.cpu(), abs=tol) == res.cpu()
+
+
+@pytest.mark.filterwarnings("ignore")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name", ["LYS_xao_dist"])
+def test_single_difficult_2(dtype: torch.dtype, name: str):
+ """Test a few larger system (only float32 within tolerance)."""
+ tol = 1e-3
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ sample = samples[name]
+ numbers = sample["numbers"].to(DEVICE)
+ positions = sample["positions"].to(**dd)
+ ref = sample["escf"].to(**dd)
+ charges = torch.tensor(0.0, **dd)
+
+ options = dict(
+ opts,
+ **{
+ "f_atol": 1e-5 if dtype == torch.float else 1e-7,
+ "x_atol": 1e-5 if dtype == torch.float else 1e-7,
+ "damp": 0.5, # simple mixing
+ "maxiter": 400, # simple mixing
},
)
calc = Calculator(numbers, par, opts=options, **dd)
diff --git a/test/test_scf/test_scp.py b/test/test_scf/test_scp.py
index b1c96bf63..0e8b6233b 100644
--- a/test/test_scf/test_scp.py
+++ b/test/test_scf/test_scp.py
@@ -80,7 +80,7 @@ def single(
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["H2", "LiH", "SiH4"])
+@pytest.mark.parametrize("name", ["LiH"])
@pytest.mark.parametrize("mixer", ["anderson", "simple"])
@pytest.mark.parametrize("scp_mode", ["charges", "potential", "fock"])
@pytest.mark.parametrize("scf_mode", ["full"])
@@ -91,9 +91,10 @@ def test_single(
    single(dtype, name, mixer, tol, scp_mode, scf_mode)
+@pytest.mark.large
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["MB16_43_01", "LYS_xao"])
+@pytest.mark.parametrize("name", ["H2", "SiH4", "MB16_43_01", "LYS_xao"])
@pytest.mark.parametrize("mixer", ["anderson", "simple"])
@pytest.mark.parametrize("scp_mode", ["charges", "potential", "fock"])
@pytest.mark.parametrize("scf_mode", ["full"])
@@ -105,6 +106,7 @@ def test_single_medium(
    single(dtype, name, mixer, tol, scp_mode, scf_mode)
+@pytest.mark.large
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", ["S2", "LYS_xao_dist"])
@@ -185,7 +187,7 @@ def batched(
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name1", ["H2", "LiH"])
+@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", ["LiH", "SiH4"])
@pytest.mark.parametrize("mixer", ["anderson", "broyden", "simple"])
@pytest.mark.parametrize("scp_mode", ["charges", "potential", "fock"])
diff --git a/test/test_singlepoint/test_energy.py b/test/test_singlepoint/test_energy.py
index fc47493e7..2c01e7e18 100644
--- a/test/test_singlepoint/test_energy.py
+++ b/test/test_singlepoint/test_energy.py
@@ -36,6 +36,9 @@
from ..conftest import DEVICE
from .samples import samples
+slist = ["H2", "H2O", "CH4", "SiH4"]
+slist_large = ["LYS_xao", "C60", "vancoh2", "AD7en+"]
+
opts = {
"verbosity": 0,
"scf_mode": labels.SCF_MODE_IMPLICIT_NON_PURE,
@@ -45,7 +48,7 @@
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["H2", "H2O", "CH4", "SiH4", "LYS_xao"])
+@pytest.mark.parametrize("name", slist)
@pytest.mark.parametrize("scf_mode", ["implicit", "nonpure", "full"])
def test_single(dtype: torch.dtype, name: str, scf_mode: str) -> None:
    tol = sqrt(torch.finfo(dtype).eps) * 10
@@ -79,7 +82,7 @@ def test_single(dtype: torch.dtype, name: str, scf_mode: str) -> None:
@pytest.mark.large
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("name", ["C60", "vancoh2", "AD7en+"])
+@pytest.mark.parametrize("name", slist_large)
@pytest.mark.parametrize("scf_mode", ["implicit", "nonpure", "full"])
def test_single_large(dtype: torch.dtype, name: str, scf_mode: str) -> None:
    tol = sqrt(torch.finfo(dtype).eps) * 10
@@ -114,7 +117,7 @@ def test_single_large(dtype: torch.dtype, name: str, scf_mode: str) -> None:
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", ["H2", "H2O"])
@pytest.mark.parametrize("name2", ["H2", "CH4"])
-@pytest.mark.parametrize("name3", ["H2", "SiH4", "LYS_xao"])
+@pytest.mark.parametrize("name3", ["H2", "SiH4"])
@pytest.mark.parametrize("scf_mode", ["implicit", "nonpure", "full"])
def test_batch(
dtype: torch.dtype, name1: str, name2: str, name3: str, scf_mode: str
@@ -158,6 +161,55 @@ def test_batch(
    assert pytest.approx(ref.cpu(), abs=tol, rel=tol) == res.cpu()
+@pytest.mark.large
+@pytest.mark.filterwarnings("ignore")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("name1", ["H2"])
+@pytest.mark.parametrize("name2", ["CH4"])
+@pytest.mark.parametrize("name3", ["LYS_xao"])
+@pytest.mark.parametrize("scf_mode", ["implicit", "nonpure", "full"])
+def test_batch_large(
+ dtype: torch.dtype, name1: str, name2: str, name3: str, scf_mode: str
+) -> None:
+ tol = sqrt(torch.finfo(dtype).eps) * 10
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers, positions, charge = [], [], []
+ for name in [name1, name2, name3]:
+ base = Path(Path(__file__).parent, "mols", name)
+
+ nums, pos = read_coord(Path(base, "coord"))
+ chrg = read_chrg(Path(base, ".CHRG"))
+
+ numbers.append(torch.tensor(nums, dtype=torch.long, device=DEVICE))
+ positions.append(torch.tensor(pos, **dd))
+ charge.append(torch.tensor(chrg, **dd))
+
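+    # Pad the three systems to a common tensor shape for batched evaluation.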
+ numbers = pack(numbers)
+ positions = pack(positions)
+ charge = pack(charge)
+ ref = pack(
+ [
+ samples[name1]["etot"].to(**dd),
+ samples[name2]["etot"].to(**dd),
+ samples[name3]["etot"].to(**dd),
+ ]
+ )
+
+ options = dict(
+ opts,
+ **{
+ "scf_mode": scf_mode,
+ "mixer": "anderson" if scf_mode == "full" else "broyden",
+ },
+ )
+ calc = Calculator(numbers, par, opts=options, **dd)
+
+ result = calc.singlepoint(positions, charge)
+ res = result.total.sum(-1)
+ assert pytest.approx(ref.cpu(), abs=tol, rel=tol) == res.cpu()
+
+
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", ["H", "NO2"])
diff --git a/test/test_singlepoint/test_grad.py b/test/test_singlepoint/test_grad.py
index e2fcf2b8d..7246a001e 100644
--- a/test/test_singlepoint/test_grad.py
+++ b/test/test_singlepoint/test_grad.py
@@ -40,8 +40,6 @@
"""['H2', 'H2O', 'CH4', 'SiH4', 'LYS_xao', 'AD7en+', 'C60', 'vancoh2']"""
opts = {
- "f_atol": 1.0e-10,
- "x_atol": 1.0e-10,
"maxiter": 50,
"scf_mode": labels.SCF_MODE_IMPLICIT_NON_PURE,
"scp_mode": labels.SCP_MODE_POTENTIAL,
@@ -101,7 +99,15 @@ def analytical(
    positions = torch.tensor(positions, **dd, requires_grad=True)
    charge = torch.tensor(charge, **dd)
-    options = dict(opts, **{"scf_mode": scf_mode})
+    options = dict(
+        opts,
+        **{
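+            # Anderson mixing for the full SCF mode, Broyden otherwise.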
+ "mixer": "anderson" if scf_mode == "full" else "broyden",
+ "f_atol": 1e-5 if dtype == torch.float else 1e-10,
+ "x_atol": 1e-5 if dtype == torch.float else 1e-10,
+ "scf_mode": scf_mode,
+ },
+ )
    calc = Calculator(numbers, par, opts=options, **dd)
    result = -calc.forces_analytical(positions, charge)
    gradient = result.detach()
@@ -133,8 +139,10 @@ def test_backward(dtype: torch.dtype, name: str, scf_mode: str) -> None:
    options = dict(
        opts,
        **{
-            "scf_mode": scf_mode,
            "mixer": "anderson" if scf_mode == "full" else "broyden",
+            "f_atol": 1e-5 if dtype == torch.float else 1e-10,
+            "x_atol": 1e-5 if dtype == torch.float else 1e-10,
+            "scf_mode": scf_mode,
        },
    )
    calc = Calculator(numbers, par, opts=options, **dd)
diff --git a/test/test_singlepoint/test_grad_field.py b/test/test_singlepoint/test_grad_field.py
index 683ae2f50..30fa34ef3 100644
--- a/test/test_singlepoint/test_grad_field.py
+++ b/test/test_singlepoint/test_grad_field.py
@@ -36,6 +36,7 @@
from .samples import samples
opts = {
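+    # Dipole integrals are required to couple the external electric field.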
+ "int_level": labels.INTLEVEL_DIPOLE,
"f_atol": 1.0e-8,
"x_atol": 1.0e-8,
"maxiter": 100,
@@ -43,7 +44,7 @@
"verbosity": 0,
}
-tol = 1e-2
+tol = 1e-1
# FIXME: There seem to be multiple issues with this gradient here.
# - SiH4 fails for 0.0 (0.01 check depends on eps)
diff --git a/test/test_singlepoint/test_grad_fieldgrad.py b/test/test_singlepoint/test_grad_fieldgrad.py
index 28182b017..978561e29 100644
--- a/test/test_singlepoint/test_grad_fieldgrad.py
+++ b/test/test_singlepoint/test_grad_fieldgrad.py
@@ -35,6 +35,7 @@
from .samples import samples
opts = {
+ "int_level": labels.INTLEVEL_DIPOLE,
"f_atol": 1.0e-8,
"x_atol": 1.0e-8,
"maxiter": 100,
diff --git a/test/test_singlepoint/test_grad_pos_withfield.py b/test/test_singlepoint/test_grad_pos_withfield.py
index 498e35990..3fea66087 100644
--- a/test/test_singlepoint/test_grad_pos_withfield.py
+++ b/test/test_singlepoint/test_grad_pos_withfield.py
@@ -36,6 +36,7 @@
from .samples import samples
opts = {
+ "int_level": labels.INTLEVEL_DIPOLE,
"f_atol": 1.0e-8,
"x_atol": 1.0e-8,
"maxiter": 100,