From d19679f5d34966fbd10dbffd7f73e1e871f0fdf4 Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Sun, 25 Aug 2024 19:08:13 -0500
Subject: [PATCH] More NONDET_TOL

---
 test/test_coulomb/test_es2_shell.py       |  6 +++---
 test/test_coulomb/test_es3.py             |  4 ++--
 test/test_coulomb/test_grad_atom.py       |  4 ----
 test/test_coulomb/test_grad_atom_param.py | 10 +++++-----
 test/test_coulomb/test_grad_atom_pos.py   | 10 +++++-----
 test/test_coulomb/test_grad_shell.py      |  4 ----
 6 files changed, 15 insertions(+), 23 deletions(-)

diff --git a/test/test_coulomb/test_es2_shell.py b/test/test_coulomb/test_es2_shell.py
index 56c28bee..6c7299ca 100644
--- a/test/test_coulomb/test_es2_shell.py
+++ b/test/test_coulomb/test_es2_shell.py
@@ -34,7 +34,7 @@
 from dxtb._src.param.utils import get_elem_param
 from dxtb._src.typing import DD, Tensor
 
-from ..conftest import DEVICE
+from ..conftest import DEVICE, NONDET_TOL
 from .samples import samples
 
 sample_list = ["MB16_43_07", "MB16_43_08", "SiH4"]
@@ -129,7 +129,7 @@ def func(p: Tensor):
         cache = es.get_cache(numbers, p, ihelp)
         return es.get_shell_energy(qsh, cache)
 
-    assert dgradcheck(func, pos)
+    assert dgradcheck(func, pos, nondet_tol=NONDET_TOL)
 
 
 @pytest.mark.grad
@@ -160,4 +160,4 @@ def func(gexp: Tensor, hubbard: Tensor):
         cache = es.get_cache(numbers, positions, ihelp)
         return es.get_shell_energy(qsh, cache)
 
-    assert dgradcheck(func, (gexp, hubbard))
+    assert dgradcheck(func, (gexp, hubbard), nondet_tol=NONDET_TOL)
diff --git a/test/test_coulomb/test_es3.py b/test/test_coulomb/test_es3.py
index 36d4a834..b04cc6ac 100644
--- a/test/test_coulomb/test_es3.py
+++ b/test/test_coulomb/test_es3.py
@@ -31,7 +31,7 @@
 from dxtb._src.param.utils import get_elem_param
 from dxtb._src.typing import DD, Tensor
 
-from ..conftest import DEVICE
+from ..conftest import DEVICE, NONDET_TOL
 from .samples import samples
 
 sample_list = ["MB16_43_01", "MB16_43_02", "SiH4_atom"]
@@ -120,4 +120,4 @@ def func(hubbard_derivs: Tensor):
         cache = es.get_cache(numbers=numbers, ihelp=ihelp)
         return es.get_atom_energy(qat, cache)
 
-    assert dgradcheck(func, hd)
+    assert dgradcheck(func, hd, nondet_tol=NONDET_TOL)
diff --git a/test/test_coulomb/test_grad_atom.py b/test/test_coulomb/test_grad_atom.py
index 30f54626..b7ee647c 100644
--- a/test/test_coulomb/test_grad_atom.py
+++ b/test/test_coulomb/test_grad_atom.py
@@ -84,8 +84,6 @@ def test_single(dtype: torch.dtype, name: str) -> None:
     assert pytest.approx(ref.cpu(), abs=tol) == egrad.cpu()
     assert pytest.approx(egrad.cpu(), abs=tol) == agrad.cpu()
 
-    pos.detach_()
-
 
 @pytest.mark.parametrize("dtype", [torch.float, torch.double])
 @pytest.mark.parametrize("name1", ["SiH4_atom"])
@@ -145,8 +143,6 @@ def test_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
     assert pytest.approx(ref.cpu(), abs=tol) == egrad.cpu()
     assert pytest.approx(egrad.cpu(), abs=tol) == agrad.cpu()
 
-    pos.detach_()
-
 
 def calc_numerical_gradient(
     numbers: Tensor, positions: Tensor, ihelp: IndexHelper, charges: Tensor
diff --git a/test/test_coulomb/test_grad_atom_param.py b/test/test_coulomb/test_grad_atom_param.py
index bad4925f..7ec85b9d 100644
--- a/test/test_coulomb/test_grad_atom_param.py
+++ b/test/test_coulomb/test_grad_atom_param.py
@@ -31,7 +31,7 @@
 from dxtb._src.param import get_elem_param
 from dxtb._src.typing import DD, Callable, Tensor
 
-from ..conftest import DEVICE
+from ..conftest import DEVICE, NONDET_TOL
 from .samples import samples
 
 sample_list = ["LiH", "SiH4", "MB16_43_01"]
@@ -81,7 +81,7 @@ def test_grad_param(dtype: torch.dtype, name: str) -> None:
     gradient from `torch.autograd.gradcheck`.
     """
     func, diffvars = gradcheck_param(dtype, name)
-    assert dgradcheck(func, diffvars, atol=tol)
+    assert dgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
 
 
 @pytest.mark.grad
@@ -93,7 +93,7 @@ def test_gradgrad_param(dtype: torch.dtype, name: str) -> None:
     gradient from `torch.autograd.gradgradcheck`.
    """
     func, diffvars = gradcheck_param(dtype, name)
-    assert dgradgradcheck(func, diffvars, atol=tol)
+    assert dgradgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
 
 
 def gradcheck_param_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[
@@ -157,7 +157,7 @@ def test_grad_param_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
     # same for both values.
     diffvars[0].requires_grad_(False)
 
-    assert dgradcheck(func, diffvars, atol=tol)
+    assert dgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
 
 
 @pytest.mark.grad
@@ -177,4 +177,4 @@ def test_gradgrad_param_batch(dtype: torch.dtype, name1: str, name2: str) -> Non
     # same for both values.
     diffvars[0].requires_grad_(False)
 
-    assert dgradgradcheck(func, diffvars, atol=tol)
+    assert dgradgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
diff --git a/test/test_coulomb/test_grad_atom_pos.py b/test/test_coulomb/test_grad_atom_pos.py
index 3c13eb8b..593257cd 100644
--- a/test/test_coulomb/test_grad_atom_pos.py
+++ b/test/test_coulomb/test_grad_atom_pos.py
@@ -31,7 +31,7 @@
 from dxtb._src.param import get_elem_param
 from dxtb._src.typing import DD, Callable, Tensor
 
-from ..conftest import DEVICE
+from ..conftest import DEVICE, NONDET_TOL
 from .samples import samples
 
 sample_list = ["LiH", "SiH4", "MB16_43_01"]
@@ -85,7 +85,7 @@ def test_grad_pos(dtype: torch.dtype, name: str) -> None:
     gradient from `torch.autograd.gradcheck`.
     """
     func, diffvars = gradcheck_pos(dtype, name)
-    assert dgradcheck(func, diffvars, atol=tol)
+    assert dgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
 
 
 @pytest.mark.grad
@@ -97,7 +97,7 @@ def test_gradgrad_pos(dtype: torch.dtype, name: str) -> None:
     gradient from `torch.autograd.gradgradcheck`.
     """
     func, diffvars = gradcheck_pos(dtype, name)
-    assert dgradgradcheck(func, diffvars, atol=tol)
+    assert dgradgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
 
 
 def gradcheck_pos_batch(
@@ -156,7 +156,7 @@ def test_grad_pos_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
     gradient from `torch.autograd.gradcheck`.
     """
     func, diffvars = gradcheck_pos_batch(dtype, name1, name2)
-    assert dgradcheck(func, diffvars, atol=tol)
+    assert dgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
 
 
 @pytest.mark.grad
@@ -169,4 +169,4 @@ def test_gradgrad_pos_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
     gradient from `torch.autograd.gradgradcheck`.
     """
     func, diffvars = gradcheck_pos_batch(dtype, name1, name2)
-    assert dgradgradcheck(func, diffvars, atol=tol)
+    assert dgradgradcheck(func, diffvars, atol=tol, nondet_tol=NONDET_TOL)
diff --git a/test/test_coulomb/test_grad_shell.py b/test/test_coulomb/test_grad_shell.py
index 5c38fdac..eefbf69b 100644
--- a/test/test_coulomb/test_grad_shell.py
+++ b/test/test_coulomb/test_grad_shell.py
@@ -82,8 +82,6 @@ def test_single(dtype: torch.dtype, name: str) -> None:
     assert pytest.approx(ref.cpu(), abs=tol) == egrad.cpu()
     assert pytest.approx(egrad.cpu(), abs=tol) == agrad.cpu()
 
-    pos.detach_()
-
 
 @pytest.mark.parametrize("dtype", [torch.float, torch.double])
 @pytest.mark.parametrize("name1", ["SiH4"])
@@ -144,8 +142,6 @@ def test_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
     assert pytest.approx(ref.cpu(), abs=tol) == egrad.cpu()
     assert pytest.approx(egrad.cpu(), abs=tol) == agrad.cpu()
 
-    pos.detach_()
-
 
 def calc_numerical_gradient(
     numbers: Tensor, positions: Tensor, ihelp: IndexHelper, charges: Tensor