Refactor torch device types out of od and into _types (#829)
* Refactor torch device types out of od and into _types

* Update types for device param throughout detect

* Update saving to account for `torch.device` option

* Remove redundant logic in _types

* Add saving test for torch device logic

* Add pydantic validation for supported PyTorch devices
mauicv authored Jul 26, 2023
1 parent d19cf09 commit c2f0a5a
Showing 44 changed files with 321 additions and 188 deletions.
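The new `TorchDeviceType` alias itself is defined in `alibi_detect/utils/_types.py`, which is not among the diffs loaded below. Judging from the import path and the accepted values documented in the updated docstrings, a minimal sketch of the alias might look like the following (the exact definition is an assumption; in particular, the real module presumably guards the `torch` import, since PyTorch is an optional dependency):

    from typing import Literal, Optional, Union

    import torch  # assumption: the real _types module likely imports torch conditionally

    # None means "use the GPU if available, otherwise fall back to CPU"; strings and
    # explicit torch.device instances are passed through to the PyTorch/KeOps backends.
    TorchDeviceType = Optional[Union[Literal['cuda', 'gpu', 'cpu'], torch.device]]
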
8 changes: 5 additions & 3 deletions alibi_detect/cd/classifier.py
@@ -3,6 +3,7 @@
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, \
BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType


from sklearn.base import ClassifierMixin
@@ -43,7 +44,7 @@ def __init__(
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
device: TorchDeviceType = None,
dataset: Optional[Callable] = None,
dataloader: Optional[Callable] = None,
input_shape: Optional[tuple] = None,
@@ -122,8 +123,9 @@ def __init__(
Optional additional kwargs when fitting the classifier. Only relevant for 'tensorflow' and
'pytorch' backends.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
dataset
Dataset object used during training. Only relevant for 'tensorflow' and 'pytorch' backends.
dataloader
8 changes: 5 additions & 3 deletions alibi_detect/cd/context_aware.py
@@ -4,6 +4,7 @@
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType

if has_pytorch:
from alibi_detect.cd.pytorch.context_aware import ContextMMDDriftTorch
@@ -32,7 +33,7 @@ def __init__(
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
device: Optional[str] = None,
device: TorchDeviceType = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False
@@ -77,8 +78,9 @@ def __init__(
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
8 changes: 5 additions & 3 deletions alibi_detect/cd/keops/learned_kernel.py
@@ -11,6 +11,7 @@
from alibi_detect.utils.pytorch import get_device, predict_batch
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import TorchDeviceType


class LearnedKernelDriftKeops(BaseLearnedKernelDrift):
@@ -38,7 +39,7 @@ def __init__(
num_workers: int = 0,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
device: TorchDeviceType = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
input_shape: Optional[tuple] = None,
@@ -108,8 +109,9 @@ def __init__(
train_kwargs
Optional additional kwargs when training the kernel.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Relevant for 'pytorch' and 'keops' backends.
dataset
Dataset object used during training.
dataloader
8 changes: 5 additions & 3 deletions alibi_detect/cd/keops/mmd.py
@@ -7,6 +7,7 @@
from alibi_detect.utils.keops.kernels import GaussianRBF
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import TorchDeviceType

logger = logging.getLogger(__name__)

@@ -25,7 +26,7 @@ def __init__(
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
batch_size_permutations: int = 1000000,
device: Optional[str] = None,
device: TorchDeviceType = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
@@ -63,8 +64,9 @@ def __init__(
batch_size_permutations
KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``.
input_shape
Shape of input data.
data_type
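The PyTorch and KeOps backends resolve the device argument through `alibi_detect.utils.pytorch.get_device` (imported above). What follows is only a sketch of the normalisation such a helper has to perform once `torch.device` instances are accepted; the real implementation may differ in its warnings, error messages and edge cases:

    import logging
    from typing import Union

    import torch

    logger = logging.getLogger(__name__)


    def get_device_sketch(device: Union[str, torch.device, None] = None) -> torch.device:
        """Normalise a TorchDeviceType-style value into a concrete torch.device."""
        if isinstance(device, torch.device):
            return device  # new case introduced by this commit
        if device is None or device.lower() in ('cuda', 'gpu'):
            if torch.cuda.is_available():
                return torch.device('cuda')
            if device is not None:
                logger.warning('Requested GPU but CUDA is unavailable; falling back to CPU.')
            return torch.device('cpu')
        if device.lower() == 'cpu':
            return torch.device('cpu')
        raise ValueError("device must be None, 'cuda', 'gpu', 'cpu' or a torch.device instance.")
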
8 changes: 5 additions & 3 deletions alibi_detect/cd/learned_kernel.py
@@ -3,6 +3,7 @@
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType

if has_pytorch:
from torch.utils.data import DataLoader
@@ -44,7 +45,7 @@ def __init__(
num_workers: int = 0,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
device: TorchDeviceType = None,
dataset: Optional[Callable] = None,
dataloader: Optional[Callable] = None,
input_shape: Optional[tuple] = None,
@@ -117,8 +118,9 @@ def __init__(
train_kwargs
Optional additional kwargs when training the kernel.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Relevant for 'pytorch' and 'keops' backends.
dataset
Dataset object used during training.
dataloader
8 changes: 5 additions & 3 deletions alibi_detect/cd/lsdd.py
@@ -3,6 +3,7 @@
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType

if has_pytorch:
from alibi_detect.cd.pytorch.lsdd import LSDDDriftTorch
@@ -26,7 +27,7 @@ def __init__(
n_permutations: int = 100,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
device: Optional[str] = None,
device: TorchDeviceType = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
@@ -68,8 +69,9 @@ def __init__(
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
9 changes: 6 additions & 3 deletions alibi_detect/cd/lsdd_online.py
@@ -3,6 +3,8 @@
from typing import Any, Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType

if has_pytorch:
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch

@@ -23,7 +25,7 @@ def __init__(
n_bootstraps: int = 1000,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
device: Optional[str] = None,
device: TorchDeviceType = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
@@ -68,8 +70,9 @@ def __init__(
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
8 changes: 5 additions & 3 deletions alibi_detect/cd/mmd.py
@@ -4,6 +4,7 @@
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType

if has_pytorch:
from alibi_detect.cd.pytorch.mmd import MMDDriftTorch
@@ -33,7 +34,7 @@ def __init__(
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
batch_size_permutations: int = 1000000,
device: Optional[str] = None,
device: TorchDeviceType = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
@@ -74,8 +75,9 @@ def __init__(
KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
Only relevant for 'keops' backend.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
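To illustrate what the signature change above means in practice, a hedged usage sketch with random placeholder data (and assuming a CUDA device is available):

    import numpy as np
    import torch
    from alibi_detect.cd import MMDDrift

    x_ref = np.random.randn(100, 10).astype(np.float32)  # placeholder reference data
    x = np.random.randn(100, 10).astype(np.float32)      # placeholder test data

    # Previously only None or a string was accepted; with TorchDeviceType both forms work.
    cd = MMDDrift(x_ref, backend='pytorch', device='cuda')
    cd = MMDDrift(x_ref, backend='pytorch', device=torch.device('cuda:0'))

    print(cd.predict(x)['data']['is_drift'])

The same device argument applies to the other detectors touched by this commit.
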
8 changes: 5 additions & 3 deletions alibi_detect/cd/mmd_online.py
@@ -3,6 +3,7 @@
from typing import Any, Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType

if has_pytorch:
from alibi_detect.cd.pytorch.mmd_online import MMDDriftOnlineTorch
@@ -23,7 +24,7 @@ def __init__(
kernel: Optional[Callable] = None,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
device: Optional[str] = None,
device: TorchDeviceType = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
@@ -61,8 +62,9 @@ def __init__(
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
15 changes: 9 additions & 6 deletions alibi_detect/cd/model_uncertainty.py
@@ -8,6 +8,7 @@
from alibi_detect.cd.utils import encompass_batching, encompass_shuffling_and_batch_filling
from alibi_detect.utils.frameworks import BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
from alibi_detect.utils._types import TorchDeviceType

logger = logging.getLogger(__name__)

@@ -26,7 +27,7 @@ def __init__(
margin_width: float = 0.1,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
device: Optional[str] = None,
device: TorchDeviceType = None,
tokenizer: Optional[Callable] = None,
max_len: Optional[int] = None,
input_shape: Optional[tuple] = None,
@@ -69,8 +70,9 @@ def __init__(
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
tokenizer
Optional tokenizer for NLP models.
max_len
@@ -179,7 +181,7 @@ def __init__(
n_evals: int = 25,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
device: Optional[str] = None,
device: TorchDeviceType = None,
tokenizer: Optional[Callable] = None,
max_len: Optional[int] = None,
input_shape: Optional[tuple] = None,
@@ -222,8 +224,9 @@ def __init__(
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
tokenizer
Optional tokenizer for NLP models.
max_len
8 changes: 5 additions & 3 deletions alibi_detect/cd/pytorch/classifier.py
@@ -13,6 +13,7 @@
from alibi_detect.utils.pytorch.prediction import predict_batch
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import TorchDeviceType


class ClassifierDriftTorch(BaseClassifierDrift):
@@ -40,7 +41,7 @@ def __init__(
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
device: TorchDeviceType = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
input_shape: Optional[tuple] = None,
@@ -108,8 +109,9 @@ def __init__(
train_kwargs
Optional additional kwargs when fitting the classifier.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``.
dataset
Dataset object used during training.
dataloader
8 changes: 5 additions & 3 deletions alibi_detect/cd/pytorch/context_aware.py
@@ -8,6 +8,7 @@
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.cd._domain_clf import _SVCDomainClf
from alibi_detect.utils._types import TorchDeviceType
from tqdm import tqdm

logger = logging.getLogger(__name__)
@@ -32,7 +33,7 @@ def __init__(
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
device: Optional[str] = None,
device: TorchDeviceType = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False,
@@ -75,8 +76,9 @@ def __init__(
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
8 changes: 5 additions & 3 deletions alibi_detect/cd/pytorch/learned_kernel.py
@@ -12,6 +12,7 @@
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import TorchDeviceType


class LearnedKernelDriftTorch(BaseLearnedKernelDrift):
@@ -39,7 +40,7 @@ def __init__(
num_workers: int = 0,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
device: TorchDeviceType = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
input_shape: Optional[tuple] = None,
@@ -108,8 +109,9 @@ def __init__(
train_kwargs
Optional additional kwargs when training the kernel.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``. Only relevant for 'pytorch' backend.
dataset
Dataset object used during training.
dataloader
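The saving changes and the pydantic validation of supported devices mentioned in the commit message live in files that were not loaded on this page. A hedged sketch of the two pieces such a change typically needs — turning a `torch.device` back into a string when writing a detector config, and validating that string in a pydantic (v1-style) schema; the class, field and function names below are illustrative, not taken from the diff:

    from typing import Optional, Union

    import torch
    from pydantic import BaseModel, validator  # pydantic v1 API


    def device_to_str(device: Union[str, torch.device, None]) -> Optional[str]:
        """E.g. torch.device('cuda', 0) -> 'cuda:0', so the value can be written to a config file."""
        return str(device) if isinstance(device, torch.device) else device


    class DeviceConfigSketch(BaseModel):
        device: Optional[str] = None

        @validator('device')
        def validate_device(cls, value: Optional[str]) -> Optional[str]:
            # accept bare names ('cuda', 'gpu', 'cpu') as well as indexed forms such as 'cuda:1'
            if value is not None and value.split(':')[0] not in ('cuda', 'gpu', 'cpu'):
                raise ValueError(f"{value!r} is not a supported PyTorch device.")
            return value
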
(Diffs for the remaining changed files were not loaded on this page.)