Reduce number of tests
marvinfriede committed Jun 29, 2024
1 parent 6c5bf86 · commit 3f93593
Showing 25 changed files with 496 additions and 83 deletions.
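
Every test module touched by this commit follows the same pattern: the old sample_list is split into a small default set (slist) and a slist_large set holding the expensive molecules, the shared test body is factored into an execute() helper where needed, and the large variants are wrapped in test_*_large functions gated by the @pytest.mark.large marker. Below is a minimal sketch of how such a marker is typically registered; the conftest.py location and the marker description are assumptions, not necessarily how this repository actually registers it.

# conftest.py (illustrative sketch; the real registration may live elsewhere, e.g. pyproject.toml)
def pytest_configure(config):
    # Register the custom "large" marker so pytest does not warn about an unknown mark.
    config.addinivalue_line(
        "markers",
        "large: slow tests on large molecules; deselect with -m 'not large'",
    )

With the marker registered, a quick run can skip the expensive cases via pytest -m "not large", while a plain pytest invocation still runs everything.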
26 changes: 20 additions & 6 deletions test/test_a_memory_leak/test_higher_deriv.py
@@ -37,14 +37,11 @@
from ..utils import nth_derivative
from .util import garbage_collect, has_memleak_tensor

sample_list = ["H2O", "SiH4", "MB16_43_01"]
slist = ["H2O", "SiH4"]
slist_large = ["MB16_43_01"]


@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_single(dtype: torch.dtype, name: str, n: int) -> None:
def execute(name: str, dtype: torch.dtype, n: int) -> None:
    dd: DD = {"dtype": dtype, "device": DEVICE}

    def fcn():
@@ -80,3 +77,20 @@ def fcn():
    garbage_collect()

    assert not leak, "Memory leak detected"


@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_single(dtype: torch.dtype, name: str, n: int) -> None:
    execute(name, dtype, n)


@pytest.mark.large
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist_large)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_large(dtype: torch.dtype, name: str, n: int) -> None:
    execute(name, dtype, n)
23 changes: 18 additions & 5 deletions test/test_a_memory_leak/test_repulsion.py
@@ -37,13 +37,11 @@
from ..conftest import DEVICE
from .util import garbage_collect, has_memleak_tensor

sample_list = ["H2O", "SiH4", "MB16_43_01"]
slist = ["H2O", "SiH4"]
slist_large = ["MB16_43_01"]


@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_single(dtype: torch.dtype, name: str) -> None:
def execute(name: str, dtype: torch.dtype) -> None:
    dd: DD = {"dtype": dtype, "device": DEVICE}

    def fcn():
@@ -106,3 +104,18 @@ def fcn():
    garbage_collect()

    assert not leak, "Memory leak detected"


@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
    execute(name, dtype)


@pytest.mark.large
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_large(dtype: torch.dtype, name: str) -> None:
    execute(name, dtype)
2 changes: 1 addition & 1 deletion test/test_a_memory_leak/test_scf.py
@@ -49,7 +49,7 @@ def test_xitorch(dtype: torch.dtype, run_gc: bool, create_graph: bool) -> None:
    dd: DD = {"dtype": dtype, "device": DEVICE}

    def fcn():
        sample = samples["SiH4"]
        sample = samples["LiH"]
        numbers = sample["numbers"].to(DEVICE)
        positions = sample["positions"].clone().to(**dd)
        charges = torch.tensor(0.0, **dd)
14 changes: 4 additions & 10 deletions test/test_a_memory_leak/util.py
@@ -39,18 +39,12 @@ def garbage_collect() -> None:
    torch.cuda.empty_cache()


def _tensors_from_gc(gpu_only=False) -> Generator[Tensor, None, None]:
def _tensors_from_gc() -> Generator[Tensor, None, None]:
    # return [obj for obj in gc.get_objects() if isinstance(obj, Tensor)]
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj):
                tensor = obj
            elif hasattr(obj, "data") and torch.is_tensor(obj.data):
                tensor = obj.data
            else:
                continue

            if tensor.is_cuda or not gpu_only:
                yield tensor
            if isinstance(obj, Tensor):
                yield obj
        except Exception:  # nosec B112 pylint: disable=broad-exception-caught
            continue

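
The simplified generator above yields only objects that are literally Tensor instances; the old branch that unwrapped .data attributes and the gpu_only filter are gone. For reference, here is a minimal sketch of how such a generator can drive a leak check; the helper names count_tensors and leaks_tensors are illustrative and do not claim to reproduce the repository's actual has_memleak_tensor implementation.

import gc
from typing import Callable

from torch import Tensor


def count_tensors() -> int:
    # Number of Tensor objects currently tracked by the garbage collector.
    return sum(1 for obj in gc.get_objects() if isinstance(obj, Tensor))


def leaks_tensors(fcn: Callable[[], None]) -> bool:
    # Compare the tensor count before and after running `fcn`.
    gc.collect()
    before = count_tensors()
    fcn()
    gc.collect()
    return count_tensors() > before

A test would then assert not leaks_tensors(fcn), mirroring the assert not leak pattern used in the memory-leak modules above.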
65 changes: 60 additions & 5 deletions test/test_dispersion/test_grad_param.py
@@ -31,7 +31,8 @@
from ..conftest import DEVICE
from .samples import samples

sample_list = ["LiH", "SiH4", "MB16_43_01", "PbH4-BiH3"]
slist = ["LiH", "SiH4"]
slist_large = ["MB16_43_01", "PbH4-BiH3"]

tol = 1e-8

@@ -66,7 +67,7 @@ def func(*inputs: Tensor) -> Tensor:

@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("name", slist)
def test_gradcheck(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
@@ -77,8 +78,21 @@ def test_gradcheck(dtype: torch.dtype, name: str) -> None:


@pytest.mark.grad
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("name", slist_large)
def test_gradcheck_large(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradcheck`.
    """
    func, diffvars = gradchecker(dtype, name)
    assert dgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
@@ -88,6 +102,19 @@ def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
    assert dgradgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_gradgradcheck_large(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradgradcheck`.
    """
    func, diffvars = gradchecker(dtype, name)
    assert dgradgradcheck(func, diffvars, atol=tol)


def gradchecker_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[
    Callable[[Tensor, Tensor, Tensor, Tensor], Tensor],  # autograd function
    tuple[Tensor, Tensor, Tensor, Tensor],  # differentiable variables
@@ -129,7 +156,7 @@ def func(*inputs: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
@pytest.mark.parametrize("name2", slist)
def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
@@ -139,14 +166,42 @@ def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    assert dgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
def test_gradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradcheck`.
    """
    func, diffvars = gradchecker_batch(dtype, name1, name2)
    assert dgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
@pytest.mark.parametrize("name2", slist)
def test_gradgradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradgradcheck`.
    """
    func, diffvars = gradchecker_batch(dtype, name1, name2)
    assert dgradgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
def test_gradgradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradgradcheck`.
    """
    func, diffvars = gradchecker_batch(dtype, name1, name2)
    assert dgradgradcheck(func, diffvars, atol=tol)
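
The dgradcheck and dgradgradcheck helpers used throughout this module are, judging by the aliased imports, thin wrappers around torch.autograd.gradcheck and torch.autograd.gradgradcheck; their exact behavior is not visible in this diff. A self-contained sketch of the underlying check on a toy function, independent of the repository's helpers:

import torch


def func(x: torch.Tensor) -> torch.Tensor:
    # Toy differentiable function standing in for the dispersion energy.
    return (x**3).sum()


x = torch.randn(4, dtype=torch.double, requires_grad=True)

# Compare analytical first derivatives against finite differences.
assert torch.autograd.gradcheck(func, (x,), atol=1e-8)
# Same check for second derivatives (gradients of gradients).
assert torch.autograd.gradgradcheck(func, (x,), atol=1e-8)

Double precision is essential for the finite-difference comparison, which is why the parametrizations above use torch.double only.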
72 changes: 63 additions & 9 deletions test/test_dispersion/test_grad_pos.py
@@ -32,7 +32,8 @@
from ..conftest import DEVICE
from .samples import samples

sample_list = ["LiH", "SiH4", "MB16_43_01", "PbH4-BiH3"]
slist = ["LiH", "SiH4"]
slist_large = ["MB16_43_01", "PbH4-BiH3"]

tol = 1e-8

@@ -63,7 +64,7 @@ def func(positions: Tensor) -> Tensor:

@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("name", slist)
def test_gradcheck(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
@@ -74,8 +75,21 @@ def test_gradcheck(dtype: torch.dtype, name: str) -> None:


@pytest.mark.grad
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("name", slist_large)
def test_gradcheck_large(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradcheck`.
    """
    func, diffvars = gradchecker(dtype, name)
    assert dgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
@@ -85,6 +99,19 @@ def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
    assert dgradgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_gradgradcheck_large(dtype: torch.dtype, name: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradgradcheck`.
    """
    func, diffvars = gradchecker(dtype, name)
    assert dgradgradcheck(func, diffvars, atol=tol)


def gradchecker_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[
    Callable[[Tensor], Tensor],  # autograd function
    Tensor,  # differentiable variables
@@ -122,7 +149,7 @@ def func(positions: Tensor) -> Tensor:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
@pytest.mark.parametrize("name2", slist)
def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
@@ -132,10 +159,24 @@ def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    assert dgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.large
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
def test_gradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradcheck`.
    """
    func, diffvars = gradchecker_batch(dtype, name1, name2)
    assert dgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
@pytest.mark.parametrize("name2", slist)
def test_gradgradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
@@ -147,7 +188,20 @@ def test_gradgradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:

@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
def test_gradgradcheck_batch_large(dtype: torch.dtype, name1: str, name2: str) -> None:
    """
    Check a single analytical gradient of parameters against numerical
    gradient from `torch.autograd.gradgradcheck`.
    """
    func, diffvars = gradchecker_batch(dtype, name1, name2)
    assert dgradgradcheck(func, diffvars, atol=tol)


@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_autograd(dtype: torch.dtype, name: str) -> None:
    dd: DD = {"dtype": dtype, "device": DEVICE}

@@ -173,7 +227,7 @@ def test_autograd(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
@pytest.mark.parametrize("name2", slist)
def test_autograd_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    dd: DD = {"dtype": dtype, "device": DEVICE}

@@ -215,7 +269,7 @@ def test_autograd_batch(dtype: torch.dtype, name1: str, name2: str) -> None:

@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("name", slist)
def test_backward(dtype: torch.dtype, name: str) -> None:
    """Compare with reference values from tblite."""
    dd: DD = {"dtype": dtype, "device": DEVICE}
@@ -251,7 +305,7 @@ def test_backward(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
@pytest.mark.parametrize("name2", slist)
def test_backward_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
    """Compare with reference values from tblite."""
    dd: DD = {"dtype": dtype, "device": DEVICE}
4 changes: 2 additions & 2 deletions test/test_external/test_field.py
@@ -112,8 +112,8 @@ def test_batch(dtype: torch.dtype, name1: str, name2: str, scf_mode: str) -> None:


@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", sample_list)
@pytest.mark.parametrize("name2", sample_list)
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", ["LiH"])
@pytest.mark.parametrize("name3", sample_list)
@pytest.mark.parametrize(
    "scf_mode", [labels.SCF_MODE_IMPLICIT_NON_PURE, labels.SCF_MODE_FULL]
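
Fixing name1 and name2 to "LiH" collapses the parametrization grid of this batched field test. For illustration only, since the actual length of sample_list in this module is not shown in the diff: with four molecules in sample_list, two dtypes, and two SCF modes, the grid shrinks from 2 · 4 · 4 · 4 · 2 = 256 cases to 2 · 1 · 1 · 4 · 2 = 16.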
(The remaining 18 changed files of this commit are not shown in this view.)
