From d945b29a07cb26295e4fdfa8730e2e99ff339096 Mon Sep 17 00:00:00 2001
From: Davis Vann Bennett
Date: Mon, 24 Jan 2022 18:35:14 -0500
Subject: [PATCH] black and fix failure for singleton dimensions

---
 src/xarray_multiscale/multiscale.py |  10 +-
 tests/test_multiscale.py            | 147 +++++++++++++++++-----------
 tests/test_reducers.py              |  19 ++--
 3 files changed, 107 insertions(+), 69 deletions(-)

diff --git a/src/xarray_multiscale/multiscale.py b/src/xarray_multiscale/multiscale.py
index 79a3194..7cc0168 100644
--- a/src/xarray_multiscale/multiscale.py
+++ b/src/xarray_multiscale/multiscale.py
@@ -333,16 +333,14 @@ def get_downscale_depth(
     _scale_factors = np.array(scale_factors).astype("int")
     _shape = np.array(shape).astype("int")
 
-    if np.all(_scale_factors == 1):
-        result = 0
-    elif np.any(_scale_factors > _shape):
+    valid = (_scale_factors > 1)
+    if not valid.any():
         result = 0
     else:
         if pad:
-            depths = np.ceil(logn(shape, scale_factors)).astype("int")
+            depths = np.ceil(logn(_shape[valid], _scale_factors[valid])).astype("int")
         else:
-            lg = logn(shape, scale_factors)
-            depths = np.floor(logn(shape, scale_factors)).astype("int")
+            depths = np.floor(logn(_shape[valid], _scale_factors[valid])).astype("int")
         result = min(depths)
     return result
 
diff --git a/tests/test_multiscale.py b/tests/test_multiscale.py
index 941107d..0baa598 100644
--- a/tests/test_multiscale.py
+++ b/tests/test_multiscale.py
@@ -9,9 +9,13 @@
     multiscale,
     get_downscale_depth,
     normalize_chunks,
-    ensure_minimum_chunks
+    ensure_minimum_chunks,
+)
+from xarray_multiscale.reducers import (
+    reshape_with_windows,
+    windowed_mean,
+    windowed_mode,
 )
-from xarray_multiscale.reducers import reshape_with_windows, windowed_mean, windowed_mode
 import dask.array as da
 import numpy as np
 from xarray import DataArray
@@ -20,6 +24,8 @@
 
 def test_downscale_depth():
     assert get_downscale_depth((1,), (1,)) == 0
+    assert get_downscale_depth((2,), (3,)) == 0
+    assert get_downscale_depth((2, 1), (2, 1)) == 1
     assert get_downscale_depth((2, 2, 2), (2, 2, 2)) == 1
     assert get_downscale_depth((1, 2, 2), (2, 2, 2)) == 0
     assert get_downscale_depth((4, 4, 4), (2, 2, 2)) == 2
@@ -31,72 +37,88 @@
     assert get_downscale_depth((1500, 5495, 5200), (2, 2, 2)) == 10
 
 
-@pytest.mark.parametrize(("size", "scale"), ((10, 2), (11, 2), ((10,11), (2,3))))
+@pytest.mark.parametrize(("size", "scale"), ((10, 2), (11, 2), ((10, 11), (2, 3))))
 def test_adjust_shape(size, scale):
     arr = DataArray(np.zeros(size))
     padded = adjust_shape(arr, scale, mode="constant")
     scale_array = np.array(scale)
     old_shape_array = np.array(arr.shape)
     new_shape_array = np.array(padded.shape)
-    
+
     if np.all((old_shape_array % scale_array) == 0):
         assert np.array_equal(new_shape_array, old_shape_array)
     else:
-        assert np.array_equal(new_shape_array, old_shape_array + ((scale_array - (old_shape_array % scale_array))))
+        assert np.array_equal(
+            new_shape_array,
+            old_shape_array + ((scale_array - (old_shape_array % scale_array))),
+        )
 
     cropped = adjust_shape(arr, scale, mode="crop")
     new_shape_array = np.array(cropped.shape)
     if np.all((old_shape_array % scale_array) == 0):
         assert np.array_equal(new_shape_array, old_shape_array)
     else:
-        assert np.array_equal(new_shape_array, old_shape_array - (old_shape_array % scale_array))
+        assert np.array_equal(
+            new_shape_array, old_shape_array - (old_shape_array % scale_array)
+        )
+
 
 def test_downscale_2d():
     chunks = (2, 2)
     scale = (2, 1)
 
-    data = DataArray(da.from_array(np.array(
-        [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype="uint8"
-    ), chunks=chunks))
+    data = DataArray(
+        da.from_array(
+            np.array(
+                [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype="uint8"
+            ),
+            chunks=chunks,
+        )
+    )
     answer = DataArray(np.array([[0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5]]))
-    downscaled = downscale(data, windowed_mean, scale, pad_mode='crop').compute()
+    downscaled = downscale(data, windowed_mean, scale, pad_mode="crop").compute()
     assert np.array_equal(downscaled, answer)
 
 
 def test_downscale_coords():
-    data = DataArray(np.zeros((10, 10)), dims=('x','y'), coords={'x': np.arange(10)})
-    scale_factors = (2,1)
+    data = DataArray(np.zeros((10, 10)), dims=("x", "y"), coords={"x": np.arange(10)})
+    scale_factors = (2, 1)
     downscaled = downscale_coords(data, scale_factors)
-    answer = {'x': data['x'].coarsen({'x' : scale_factors[0]}).mean()}
-    
+    answer = {"x": data["x"].coarsen({"x": scale_factors[0]}).mean()}
+
     assert downscaled.keys() == answer.keys()
     for k in downscaled:
         assert_equal(answer[k], downscaled[k])
 
-    data = DataArray(np.zeros((10, 10)),
-                    dims=('x','y'),
-                    coords={'x': np.arange(10),
-                            'y': 5 + np.arange(10)})
-    scale_factors = (2,1)
+    data = DataArray(
+        np.zeros((10, 10)),
+        dims=("x", "y"),
+        coords={"x": np.arange(10), "y": 5 + np.arange(10)},
+    )
+    scale_factors = (2, 1)
     downscaled = downscale_coords(data, scale_factors)
-    answer = {'x': data['x'].coarsen({'x' : scale_factors[0]}).mean(),
-              'y' : data['y'].coarsen({'y' : scale_factors[1]}).mean()}
-    
+    answer = {
+        "x": data["x"].coarsen({"x": scale_factors[0]}).mean(),
+        "y": data["y"].coarsen({"y": scale_factors[1]}).mean(),
+    }
+
     assert downscaled.keys() == answer.keys()
     for k in downscaled:
         assert_equal(answer[k], downscaled[k])
 
-    data = DataArray(np.zeros((10, 10)),
-                    dims=('x','y'),
-                    coords={'x': np.arange(10),
-                            'y': 5 + np.arange(10),
-                            'foo' : 5})
-    scale_factors = (2,2)
+    data = DataArray(
+        np.zeros((10, 10)),
+        dims=("x", "y"),
+        coords={"x": np.arange(10), "y": 5 + np.arange(10), "foo": 5},
+    )
+    scale_factors = (2, 2)
     downscaled = downscale_coords(data, scale_factors)
-    answer = {'x': data['x'].coarsen({'x' : scale_factors[0]}).mean(),
-              'y' : data['y'].coarsen({'y' : scale_factors[1]}).mean(),
-              'foo': data['foo']}
-    
+    answer = {
+        "x": data["x"].coarsen({"x": scale_factors[0]}).mean(),
+        "y": data["y"].coarsen({"y": scale_factors[1]}).mean(),
+        "foo": data["foo"],
+    }
+
     assert downscaled.keys() == answer.keys()
     for k in downscaled:
         assert_equal(answer[k], downscaled[k])
@@ -106,7 +128,7 @@ def test_invalid_multiscale():
     with pytest.raises(ValueError):
         downscale_dask(np.arange(10), windowed_mean, (3,))
     with pytest.raises(ValueError):
-        downscale_dask(np.arange(16).reshape(4,4), windowed_mean, (3,3))
+        downscale_dask(np.arange(16).reshape(4, 4), windowed_mean, (3, 3))
 
 
 def test_multiscale():
@@ -163,14 +185,20 @@
         assert m.data.chunksize == chunks or m.data.chunksize == m.data.shape
 
     chunks = (3,) * ndim
-    multi = multiscale(base_array, reducer, 2, chunks=chunks, chunk_mode='minimum')
+    multi = multiscale(base_array, reducer, 2, chunks=chunks, chunk_mode="minimum")
     for m in multi:
-        assert np.greater_equal(m.data.chunksize, chunks).all() or m.data.chunksize == m.data.shape
+        assert (
+            np.greater_equal(m.data.chunksize, chunks).all()
+            or m.data.chunksize == m.data.shape
+        )
 
     chunks = 3
-    multi = multiscale(base_array, reducer, 2, chunks=chunks, chunk_mode='minimum')
+    multi = multiscale(base_array, reducer, 2, chunks=chunks, chunk_mode="minimum")
     for m in multi:
-        assert np.greater_equal(m.data.chunksize, (chunks,) * ndim).all() or m.data.chunksize == m.data.shape
+        assert (
+            np.greater_equal(m.data.chunksize, (chunks,) * ndim).all()
+            or m.data.chunksize == m.data.shape
+        )
 
 
 def test_depth():
@@ -182,16 +210,16 @@
     assert len(full) == 5
 
     partial = multiscale(base_array, reducer, 2, depth=-2)
-    assert len(partial) == len(full) - 1 
-    [assert_equal(a,b) for a,b in zip(full, partial)]
+    assert len(partial) == len(full) - 1
+    [assert_equal(a, b) for a, b in zip(full, partial)]
 
     partial = multiscale(base_array, reducer, 2, depth=2)
-    assert len(partial) == 3 
-    [assert_equal(a,b) for a,b in zip(full, partial)]
+    assert len(partial) == 3
+    [assert_equal(a, b) for a, b in zip(full, partial)]
 
     partial = multiscale(base_array, reducer, 2, depth=0)
-    assert len(partial) == 1 
-    [assert_equal(a,b) for a,b in zip(full, partial)]
+    assert len(partial) == 1
+    [assert_equal(a, b) for a, b in zip(full, partial)]
 
 
 def test_coords():
@@ -215,23 +243,23 @@
 
 
 def test_normalize_chunks():
-    data = DataArray(da.zeros((4,6), chunks=(1,1)))
-    assert normalize_chunks(data, {'dim_0' : 2, 'dim_1' : 1}) == (2,1)
+    data = DataArray(da.zeros((4, 6), chunks=(1, 1)))
+    assert normalize_chunks(data, {"dim_0": 2, "dim_1": 1}) == (2, 1)
 
 
 def test_ensure_minimum_chunks():
-    data = da.zeros((4,6), chunks=(1,1))
-    assert ensure_minimum_chunks(data, (2,2)) == (2,2)
+    data = da.zeros((4, 6), chunks=(1, 1))
+    assert ensure_minimum_chunks(data, (2, 2)) == (2, 2)
 
-    data = da.zeros((4,6), chunks=(4,1))
-    assert ensure_minimum_chunks(data, (2,2)) == (4,2)
+    data = da.zeros((4, 6), chunks=(4, 1))
+    assert ensure_minimum_chunks(data, (2, 2)) == (4, 2)
 
 
 def test_broadcast_to_rank():
     assert broadcast_to_rank(2, 1) == (2,)
-    assert broadcast_to_rank(2, 2) == (2,2)
-    assert broadcast_to_rank((2,3), 2) == (2,3)
-    assert broadcast_to_rank({0 : 2}, 3) == (2,1,1)
+    assert broadcast_to_rank(2, 2) == (2, 2)
+    assert broadcast_to_rank((2, 3), 2) == (2, 3)
+    assert broadcast_to_rank({0: 2}, 3) == (2, 1, 1)
 
 
 def test_align_chunks():
@@ -243,11 +271,18 @@
     rechunked = align_chunks(data, scale_factors=(2,))
     assert rechunked.chunks == ((2,) * 5,)
 
-    data = da.arange(10, chunks=(1,1,3,5))
+    data = da.arange(10, chunks=(1, 1, 3, 5))
     rechunked = align_chunks(data, scale_factors=(2,))
-    assert rechunked.chunks == ((2, 2, 2, 4,),)
+    assert rechunked.chunks == (
+        (
+            2,
+            2,
+            2,
+            4,
+        ),
+    )
 
 
 def test_reshape_with_windows():
-    data = np.arange(36).reshape(6,6)
-    assert reshape_with_windows(data, (2,2)).shape == (3,2,3,2)
\ No newline at end of file
+    data = np.arange(36).reshape(6, 6)
+    assert reshape_with_windows(data, (2, 2)).shape == (3, 2, 3, 2)
diff --git a/tests/test_reducers.py b/tests/test_reducers.py
index 3ca6332..21f64fc 100644
--- a/tests/test_reducers.py
+++ b/tests/test_reducers.py
@@ -8,13 +8,18 @@ def test_windowed_mode():
     results = windowed_mode(data, (4,))
     assert np.array_equal(results, answer)
 
-    data = np.arange(16).reshape(4,4) % 3
-    answer = np.array([[1,0],[0,2]])
-    results = windowed_mode(data, (2,2))
+    data = np.arange(16).reshape(4, 4) % 3
+    answer = np.array([[1, 0], [0, 2]])
+    results = windowed_mode(data, (2, 2))
     assert np.array_equal(results, answer)
 
+
 def test_windowed_mean():
-    data = np.arange(16).reshape(4,4) % 2
-    answer = np.array([[0.5, 0.5],[0.5, 0.5]])
-    results = windowed_mean(data, (2,2))
-    assert np.array_equal(results, answer)
\ No newline at end of file
+    data = np.arange(16).reshape(4, 4) % 2
+    answer = np.array([[0.5, 0.5], [0.5, 0.5]])
+    results = windowed_mean(data, (2, 2))
+    assert np.array_equal(results, answer)
+
+    data = np.arange(16).reshape(4, 4, 1) % 2
+    answer = np.array([[0.5, 0.5], [0.5, 0.5]]).reshape((2,2,1))
+    results = windowed_mean(data, (2, 2, 1))
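
A note for readers of this patch, placed after the diff so it does not affect application: the
new tests above pin down what "failure for singleton dimensions" means. The sketch below is
illustration only, not part of the commit, and assumes the package exposes the same import
paths used in the tests.

# Illustration only; mirrors the new test cases rather than adding behaviour.
import numpy as np
from xarray_multiscale.multiscale import get_downscale_depth
from xarray_multiscale.reducers import windowed_mean

# Before this change, an axis whose scale factor is 1 fed a base-1 logarithm into
# logn() (division by log(1) == 0), so shapes like (2, 1) with factors (2, 1) could
# not report a depth. The valid-axis mask now skips axes that are not downscaled.
assert get_downscale_depth((2, 1), (2, 1)) == 1

# The reducers take a window of size 1 along the singleton axis, so the trailing
# dimension survives the windowed mean.
data = np.arange(16).reshape(4, 4, 1) % 2
assert windowed_mean(data, (2, 2, 1)).shape == (2, 2, 1)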