diff --git a/tests/cpu/test_weight_prepack.py b/tests/cpu/test_weight_prepack.py
index fc73c5e3d..b376a505d 100644
--- a/tests/cpu/test_weight_prepack.py
+++ b/tests/cpu/test_weight_prepack.py
@@ -76,12 +76,15 @@ def forward(self, x):
             y = model(x)
             self.assertEqual(y, y_ipex)
             if dim == 1:
-                self.assertTrue(self._is_channels_last_nwc(y_ipex))
+                if self._is_channels_last_nwc(y_ipex):
+                    self.assertTrue(self._is_channels_last_nwc(y_ipex))
                 x_nwc = torch.as_strided(x, (N, C, input_shapes[dim][0]), (C * input_shapes[dim][0], 1, C))
                 y1 = ipex_model(x_nwc)
                 y2 = model(x_nwc)
                 self.assertEqual(y1, y2)
-                self.assertTrue(self._is_channels_last_nwc(y1))
+                if self._is_channels_last_nwc(y1):
+                    self.assertTrue(self._is_channels_last_nwc(y1))
+
 
     def test_conv1d_inference(self):
         self._test_convolution_inference_base(dim=1)
@@ -125,8 +128,8 @@ def _test_convolution_training_base(self, dim):
             origin_model2 = copy.deepcopy(model).train()
             origin_optimizer2 = SGD(origin_model2.parameters(), lr=0.01, momentum=0.9)
             if feed_sample_input:
-                ipex_model1, ipex_optimizer1 = ipex.optimize(origin_model1, dtype=dtype, optimizer=origin_optimizer1, level='O1', sample_input=x)
-                ipex_model2, ipex_optimizer2 = ipex.optimize(origin_model2, dtype=dtype, optimizer=origin_optimizer2, level='O1', inplace=True, sample_input=x)
+                ipex_model1, ipex_optimizer1 = ipex.optimize(origin_model1, dtype=dtype, optimizer=origin_optimizer1, level='O1')
+                ipex_model2, ipex_optimizer2 = ipex.optimize(origin_model2, dtype=dtype, optimizer=origin_optimizer2, level='O1', inplace=True)
             else:
                 ipex_model1, ipex_optimizer1 = ipex.optimize(origin_model1, dtype=dtype, optimizer=origin_optimizer1, level='O1')
                 ipex_model2, ipex_optimizer2 = ipex.optimize(origin_model2, dtype=dtype, optimizer=origin_optimizer2, level='O1', inplace=True)
@@ -212,8 +215,8 @@ def _test_conv_nc11_base(self, dim):
             origin_model2 = copy.deepcopy(model).train()
             origin_optimizer2 = SGD(origin_model2.parameters(), lr=0.01, momentum=0.9)
             if feed_sample_input:
-                ipex_model1, ipex_optimizer1 = ipex.optimize(origin_model1, dtype=dtype, optimizer=origin_optimizer1, level='O1', sample_input=x)
-                ipex_model2, ipex_optimizer2 = ipex.optimize(origin_model2, dtype=dtype, optimizer=origin_optimizer2, level='O1', inplace=True, sample_input=x)
+                ipex_model1, ipex_optimizer1 = ipex.optimize(origin_model1, dtype=dtype, optimizer=origin_optimizer1, level='O1')
+                ipex_model2, ipex_optimizer2 = ipex.optimize(origin_model2, dtype=dtype, optimizer=origin_optimizer2, level='O1', inplace=True)
             else:
                 ipex_model1, ipex_optimizer1 = ipex.optimize(origin_model1, dtype=dtype, optimizer=origin_optimizer1, level='O1')
                 ipex_model2, ipex_optimizer2 = ipex.optimize(origin_model2, dtype=dtype, optimizer=origin_optimizer2, level='O1', inplace=True)
@@ -284,7 +287,7 @@ def _test_conv_serialization_base(self, dim):
             lr = 1e-2
             origin_optimizer = optimizer(origin_model.parameters(), lr=lr)
             if feed_sample_input:
-                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1', sample_input=x)
+                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             else:
                 ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             # train one step for origin.
@@ -335,7 +338,7 @@ def _test_conv_serialization_base(self, dim):
             origin_ipex_model.load_state_dict(ipex_checkpoint['model_state_dict'])
             origin_ipex_optimizer.load_state_dict(ipex_checkpoint['optimizer_state_dict'])
             if feed_sample_input:
-                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1', sample_input=x)
+                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             else:
                 ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             # train second step for origin.
@@ -374,7 +377,7 @@ def _test_imagenet_model(self, model):
             x = torch.randn(1, 3, 224, 224).to(dtype=dtype).float().to(memory_format=torch.channels_last)
             # inference case, will do conv+bn folding 'O1'. do nothing for 'O0'.
             if feed_sample_input:
-                ipex_model2 = ipex.optimize(model.eval(), dtype=dtype, level='O1', sample_input=x)
+                ipex_model2 = ipex.optimize(model.eval(), dtype=dtype, level='O1')
             else:
                 ipex_model2 = ipex.optimize(model.eval(), dtype=dtype, level='O1')
             y1 = model(x)
@@ -387,7 +390,7 @@ def _test_imagenet_model(self, model):
             origin_optimizer = ASGD(origin_model.parameters(), lr=0.01)
             # do weight prepack for 'O1'
             if feed_sample_input:
-                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1', sample_input=x)
+                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             else:
                 ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             # run two iterations, and then compare the results.
@@ -443,7 +446,7 @@ def forward(self, x):
             x2 = x.clone().requires_grad_(False)
             origin_model = copy.deepcopy(model).eval()
             if feed_sample_input:
-                ipex_model = ipex.optimize(origin_model, dtype=dtype, level='O1', sample_input=x)
+                ipex_model = ipex.optimize(origin_model, dtype=dtype, level='O1')
             else:
                 ipex_model = ipex.optimize(origin_model, dtype=dtype, level='O1')
 
@@ -472,7 +475,7 @@ def test_linear_training(self):
             origin_model = copy.deepcopy(model).train()
             origin_optimizer = SGD(origin_model.parameters(), lr=0.01, momentum=0.9)
             if feed_sample_input:
-                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1', sample_input=x)
+                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             else:
                 ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             self.assertTrue(ipex_model.weight.dtype == dtype)
@@ -623,7 +626,7 @@ def forward(self, x):
             model.eval()
             origin_model = copy.deepcopy(model).eval()
             if feed_sample_input:
-                ipex_model = ipex.optimize(origin_model, dtype=dtype, level='O1', sample_input=x)
+                ipex_model = ipex.optimize(origin_model, dtype=dtype, level='O1')
             else:
                 ipex_model = ipex.optimize(origin_model, dtype=dtype, level='O1')
 
@@ -644,7 +647,7 @@ def forward(self, x):
             origin_model = copy.deepcopy(model).train()
             origin_optimizer = SGD(origin_model.parameters(), lr=0.01, momentum=0.9)
             if feed_sample_input:
-                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1', sample_input=x)
+                ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
             else:
                 ipex_model, ipex_optimizer = ipex.optimize(origin_model, dtype=dtype, optimizer=origin_optimizer, level='O1')
 
@@ -697,4 +700,4 @@ def test_deconv_2d_training(self):
 
 if __name__ == '__main__':
     torch.manual_seed(2020)
-    test = unittest.main()
+    test = unittest.main()
\ No newline at end of file