From c3cf0fcee5ee381cfbf2ca70a7b3fc5fd048855d Mon Sep 17 00:00:00 2001
From: michaelshiyu
Date: Mon, 30 Dec 2019 15:15:14 -0800
Subject: [PATCH 1/6] Clean up unused statement; format code

---
 cleverhans/future/torch/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cleverhans/future/torch/utils.py b/cleverhans/future/torch/utils.py
index 9e33abea5..ebdce09c7 100644
--- a/cleverhans/future/torch/utils.py
+++ b/cleverhans/future/torch/utils.py
@@ -39,6 +39,7 @@ def clip_eta(eta, norm, eps):
     eta *= factor
   return eta
 
+
 def get_or_guess_labels(model, x, **kwargs):
   """
   Get the label to use in generating an adversarial example for x.
@@ -84,7 +85,6 @@ def optimize_linear(grad, eps, norm=np.inf):
     # Take sign of gradient
     optimal_perturbation = torch.sign(grad)
   elif norm == 1:
-    abs_grad = torch.abs(grad)
     sign = torch.sign(grad)
     red_ind = list(range(1, len(grad.size())))
     abs_grad = torch.abs(grad)

From 134402f9001cfe9a0bbd269e100caa66f3e11294 Mon Sep 17 00:00:00 2001
From: michaelshiyu
Date: Mon, 30 Dec 2019 15:27:03 -0800
Subject: [PATCH 2/6] Remove TODO (reason for removal: torch.clamp respects
 dtype of its input)

---
 cleverhans/future/torch/attacks/projected_gradient_descent.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/cleverhans/future/torch/attacks/projected_gradient_descent.py b/cleverhans/future/torch/attacks/projected_gradient_descent.py
index 9c39c1aed..9790a831c 100644
--- a/cleverhans/future/torch/attacks/projected_gradient_descent.py
+++ b/cleverhans/future/torch/attacks/projected_gradient_descent.py
@@ -113,7 +113,6 @@ def projected_gradient_descent(model_fn, x, eps, eps_iter, nb_iter, norm,
     asserts.append(eps_iter <= eps)
 
   if norm == np.inf and clip_min is not None:
-    # TODO necessary to cast clip_min and clip_max to x.dtype?
     asserts.append(eps + clip_min <= clip_max)
 
   if sanity_checks:
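Note on PATCH 2/6: the rationale above can be checked directly. A minimal
sketch, assuming only a standard PyTorch install (the tensor names here are
illustrative, not part of the library code):

    import torch

    # torch.clamp keeps the dtype of its input tensor, so there is no need
    # to cast clip_min/clip_max to x.dtype before clipping.
    x = torch.rand(4, dtype=torch.float64)
    clipped = torch.clamp(x, min=0.1, max=0.9)   # Python float bounds
    assert clipped.dtype == x.dtype              # dtype is preserved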
From 8db91ed0fca58cee0fe61127b825c5b5e6f4639e Mon Sep 17 00:00:00 2001
From: michaelshiyu
Date: Mon, 30 Dec 2019 15:33:24 -0800
Subject: [PATCH 3/6] Bug fix; format code

---
 cleverhans/future/torch/attacks/noise.py | 67 ++++++++++++------------
 1 file changed, 34 insertions(+), 33 deletions(-)

diff --git a/cleverhans/future/torch/attacks/noise.py b/cleverhans/future/torch/attacks/noise.py
index af805e65b..b2308093b 100644
--- a/cleverhans/future/torch/attacks/noise.py
+++ b/cleverhans/future/torch/attacks/noise.py
@@ -6,36 +6,37 @@
 import torch
 
 
-def noise(x, eps=0.3, order=np.inf, clip_min=None, clip_max=None):
-  """
-  A weak attack that just picks a random point in the attacker's action
-  space. When combined with an attack bundling function, this can be used to
-  implement random search.
-
-  References:
-  https://arxiv.org/abs/1802.00420 recommends random search to help identify
-    gradient masking
-
-  https://openreview.net/forum?id=H1g0piA9tQ recommends using noise as part
-    of an attack building recipe combining many different optimizers to
-    yield a strong optimizer.
-
-  Args:
-  :param x: the input tensor
-  :param eps: (optional float) maximum distortion of adversarial example
-            compared to original input.
-  :param norm: (optional) Order of the norm.
-  :param clip_min: (optional float) Minimum input component value
-  :param clip_max: (optional float) Maximum input component value
-  """
-
-  if order != np.inf: raise NotImplementedError(norm)
-
-  eta = torch.FloatTensor(*x.shape).to(x.device).uniform_(-eps, eps)
-  adv_x = x + eta
-
-  if clip_min is not None or clip_max is not None:
-    assert clip_min is not None and clip_max is not None
-    adv_x = torch.clamp(adv_x, min=clip_min, max=clip_max)
-
-  return adv_x
+def noise(x, eps=0.3, norm=np.inf, clip_min=None, clip_max=None):
+  """
+  A weak attack that just picks a random point in the attacker's action
+  space. When combined with an attack bundling function, this can be used to
+  implement random search.
+
+  References:
+  https://arxiv.org/abs/1802.00420 recommends random search to help identify
+    gradient masking
+
+  https://openreview.net/forum?id=H1g0piA9tQ recommends using noise as part
+    of an attack building recipe combining many different optimizers to
+    yield a strong optimizer.
+
+  Args:
+  :param x: the input tensor
+  :param eps: (optional float) maximum distortion of adversarial example
+            compared to original input.
+  :param norm: (optional) Order of the norm.
+  :param clip_min: (optional float) Minimum input component value
+  :param clip_max: (optional float) Maximum input component value
+  """
+
+  if norm != np.inf:
+    raise ValueError("Norm order must be np.inf, got {} instead.".format(norm))
+
+  eta = torch.FloatTensor(*x.shape).to(x.device).uniform_(-eps, eps)
+  adv_x = x + eta
+
+  if clip_min is not None or clip_max is not None:
+    assert clip_min is not None and clip_max is not None
+    adv_x = torch.clamp(adv_x, min=clip_min, max=clip_max)
+
+  return adv_x

From 460588ec5840dd05adccbb4e5d290d93ffd5947c Mon Sep 17 00:00:00 2001
From: michaelshiyu
Date: Mon, 30 Dec 2019 15:45:28 -0800
Subject: [PATCH 4/6] Remove TODOs (reason for removal: already implemented
 the same tests in test_utils.py)

---
 cleverhans/future/torch/utils.py | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/cleverhans/future/torch/utils.py b/cleverhans/future/torch/utils.py
index ebdce09c7..a25119744 100644
--- a/cleverhans/future/torch/utils.py
+++ b/cleverhans/future/torch/utils.py
@@ -97,23 +97,12 @@ def optimize_linear(grad, eps, norm=np.inf):
     for red_scalar in red_ind:
       num_ties = torch.sum(num_ties, red_scalar, keepdim=True)
     optimal_perturbation = sign * max_mask / num_ties
-    # TODO integrate below to a test file
-    # check that the optimal perturbations have been correctly computed
-    opt_pert_norm = optimal_perturbation.abs().sum(dim=red_ind)
-    assert torch.all(opt_pert_norm == torch.ones_like(opt_pert_norm))
   elif norm == 2:
     square = torch.max(
         avoid_zero_div,
         torch.sum(grad ** 2, red_ind, keepdim=True)
     )
     optimal_perturbation = grad / torch.sqrt(square)
-    # TODO integrate below to a test file
-    # check that the optimal perturbations have been correctly computed
-    opt_pert_norm = optimal_perturbation.pow(2).sum(dim=red_ind, keepdim=True).sqrt()
-    one_mask = (
-        (square <= avoid_zero_div).to(torch.float) * opt_pert_norm +
-        (square > avoid_zero_div).to(torch.float))
-    assert torch.allclose(opt_pert_norm, one_mask, rtol=1e-05, atol=1e-08)
   else:
     raise NotImplementedError("Only L-inf, L1 and L2 norms are "
                               "currently implemented.")
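Note on PATCH 4/6: the deleted assertions checked that optimize_linear
returns perturbations of unit L1/L2 norm, and the commit message says
equivalent tests already live in test_utils.py. The snippet below is only a
sketch of what such a test could look like; the test name, tolerance, and
exact import path are assumptions, not the actual contents of test_utils.py:

    import torch
    from cleverhans.future.torch.utils import optimize_linear

    def test_optimize_linear_l2_gives_unit_norm_perturbation():
      grad = torch.randn(4, 3, 8, 8)
      pert = optimize_linear(grad, eps=1., norm=2)
      # with eps=1, each per-example perturbation should have L2 norm 1
      norms = pert.pow(2).sum(dim=(1, 2, 3)).sqrt()
      assert torch.allclose(norms, torch.ones_like(norms), atol=1e-5)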
From ed6e61c02700a14c3d8ede0f3af23774a70d3937 Mon Sep 17 00:00:00 2001
From: michaelshiyu
Date: Tue, 7 Jan 2020 13:56:17 -0500
Subject: [PATCH 5/6] Bug fix

---
 tutorials/future/torch/cifar10_tutorial.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tutorials/future/torch/cifar10_tutorial.py b/tutorials/future/torch/cifar10_tutorial.py
index 9d7e974bd..fec1cd080 100644
--- a/tutorials/future/torch/cifar10_tutorial.py
+++ b/tutorials/future/torch/cifar10_tutorial.py
@@ -62,6 +62,8 @@ def main(_):
     if FLAGS.adv_train:
       # Replace clean example with adversarial example for adversarial training
       x = projected_gradient_descent(net, x, FLAGS.eps, 0.01, 40, np.inf)
+      # Stop backward from entering the graph that created the adv example
+      x = x.clone().detach()
     optimizer.zero_grad()
     loss = loss_fn(net(x), y)
     loss.backward()

From 446e6d09dac09601f2e4bdf821651d0512f75004 Mon Sep 17 00:00:00 2001
From: michaelshiyu
Date: Tue, 7 Jan 2020 13:57:59 -0500
Subject: [PATCH 6/6] Substitute an inplace op w/ the non-inplace equivalent

---
 cleverhans/future/torch/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cleverhans/future/torch/utils.py b/cleverhans/future/torch/utils.py
index a25119744..e533c7f0b 100644
--- a/cleverhans/future/torch/utils.py
+++ b/cleverhans/future/torch/utils.py
@@ -36,7 +36,7 @@ def clip_eta(eta, norm, eps):
         torch.tensor(1., dtype=eta.dtype, device=eta.device),
         eps / norm
     )
-    eta *= factor
+    eta = eta * factor
   return eta
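Note on PATCH 5/6 and PATCH 6/6: both changes keep autograd from tracking
operations it should not. A minimal sketch of the adversarial-training step
from PATCH 5/6, assuming generic model/loss_fn/optimizer objects (these names
are placeholders, not the tutorial's actual variables):

    import torch

    def adv_train_step(model, loss_fn, optimizer, x_adv, y):
      # Cut the graph built by the attack so that loss.backward() only
      # differentiates the forward pass on the (now fixed) adversarial input.
      x_adv = x_adv.clone().detach()
      optimizer.zero_grad()
      loss = loss_fn(model(x_adv), y)
      loss.backward()
      optimizer.step()
      return loss.item()

PATCH 6/6 follows the same logic: "eta *= factor" mutates a tensor that may
still be needed for a gradient computation, which PyTorch can reject at
runtime, while "eta = eta * factor" allocates a fresh tensor and leaves the
existing graph intact.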