Commit

Merge pull request #161 from StingraySoftware/bump_stingray_version
Bump stingray version
matteobachetti authored Mar 14, 2024
2 parents 9690bb1 + 1afc501 commit 5d110ac
Showing 19 changed files with 609 additions and 509 deletions.
34 changes: 21 additions & 13 deletions .github/workflows/ci_test.yml
@@ -97,31 +97,39 @@ jobs:
python: '3.10'
tox_env: 'codestyle'
- os: ubuntu-latest
python: '3.10'
tox_env: 'py310-test-cov'
python: '3.12'
tox_env: 'py312-test-cov'
- os: ubuntu-latest
python: '3.10'
tox_env: 'py310-test-alldeps-cov'
python: '3.9'
tox_env: 'py39-test-cov'
- os: ubuntu-latest
python: '3.11'
tox_env: 'py311-test-alldeps-cov'
use_remote_data: true
- os: macos-14
python: '3.10'
tox_env: 'py310-test-alldeps-cov'
python: '3.11'
tox_env: 'py311-test-alldeps-cov'
use_remote_data: true
- os: ubuntu-latest
python: '3.10'
tox_env: 'py310-test-devdeps'
python: '3.12'
tox_env: 'py312-test-devdeps'
use_remote_data: true
continue-on-error: true
- os: ubuntu-latest
python: '3.12'
tox_env: 'py312-test-devpint'
use_remote_data: true
continue-on-error: true
- os: ubuntu-latest
python: '3.8'
tox_env: 'py38-test-oldestdeps-cov'
use_remote_data: true
- os: macos-latest
python: '3.10'
tox_env: 'py310-test'
python: '3.11'
tox_env: 'py311-test'
- os: windows-latest
python: '3.10'
tox_env: 'py310-test'
python: '3.11'
tox_env: 'py311-test'

steps:
- name: Check out repository
@@ -132,7 +140,7 @@ jobs:
python-version: ${{ matrix.python }}
- name: Install base dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade pip setuptools wheel
python -m pip install tox
- name: Install graphviz dependency
if: "endsWith(matrix.tox_env, 'build_docs')"
566 changes: 398 additions & 168 deletions docs/scripts/cli.rst

Large diffs are not rendered by default.

111 changes: 37 additions & 74 deletions hendrics/base.py
@@ -243,10 +243,8 @@ def _look_for_array_in_array(array1, array2):
"""
Examples
--------
>>> _look_for_array_in_array([1, 2], [2, 3, 4])
2
>>> _look_for_array_in_array([1, 2], [3, 4, 5]) is None
True
>>> assert _look_for_array_in_array([1, 2], [2, 3, 4]) == 2
>>> assert _look_for_array_in_array([1, 2], [3, 4, 5]) is None
"""
for a1 in array1:
if a1 in array2:
@@ -265,17 +263,12 @@ def _order_list_of_arrays(data, order):
--------
>>> order = [1, 2, 0]
>>> new = _order_list_of_arrays({'a': [4, 5, 6], 'b':[7, 8, 9]}, order)
>>> np.all(new['a'] == [5, 6, 4])
True
>>> np.all(new['b'] == [8, 9, 7])
True
>>> assert np.all(new['a'] == [5, 6, 4])
>>> assert np.all(new['b'] == [8, 9, 7])
>>> new = _order_list_of_arrays([[4, 5, 6], [7, 8, 9]], order)
>>> np.all(new[0] == [5, 6, 4])
True
>>> np.all(new[1] == [8, 9, 7])
True
>>> _order_list_of_arrays(2, order) is None
True
>>> assert np.all(new[0] == [5, 6, 4])
>>> assert np.all(new[1] == [8, 9, 7])
>>> assert _order_list_of_arrays(2, order) is None
"""
if hasattr(data, "items"):
data = dict((i[0], np.asarray(i[1])[order]) for i in data.items())
@@ -388,8 +381,7 @@ def optimal_bin_time(fftlen, tbin):
Examples
--------
>>> optimal_bin_time(512, 1.1)
1.0
>>> assert optimal_bin_time(512, 1.1) == 1.0
"""
current_nbin = fftlen / tbin
new_nbin = 2 ** np.ceil(np.log2(current_nbin))
@@ -401,8 +393,7 @@ def gti_len(gti):
Examples
--------
>>> gti_len([[0, 1], [2, 4]])
3
>>> assert gti_len([[0, 1], [2, 4]]) == 3
"""
return np.sum(np.diff(gti, axis=1))

@@ -543,10 +534,8 @@ def check_negative_numbers_in_args(args):
--------
>>> args = ['events.nc', '-f', '103', '--fdot', '-2e-10']
>>> newargs = check_negative_numbers_in_args(args)
>>> args[:4] == newargs[:4]
True
>>> newargs[4] == ' -2e-10'
True
>>> assert args[:4] == newargs[:4]
>>> assert newargs[4] == ' -2e-10'
"""
if args is None:
args = sys.argv[1:]
@@ -569,10 +558,8 @@ def interpret_bintime(bintime):
Examples
--------
>>> interpret_bintime(2)
2
>>> interpret_bintime(-2) == 0.25
True
>>> assert interpret_bintime(2) == 2
>>> assert interpret_bintime(-2) == 0.25
>>> interpret_bintime(0)
Traceback (most recent call last):
...
@@ -604,8 +591,7 @@ def get_bin_edges(a, bins):
--------
>>> array = np.array([0., 10.])
>>> bins = 2
>>> np.allclose(get_bin_edges(array, bins), [0, 5, 10])
True
>>> assert np.allclose(get_bin_edges(array, bins), [0, 5, 10])
"""
a_min = np.min(a)
a_max = np.max(a)
@@ -619,12 +605,9 @@ def compute_bin(x, bin_edges):
Examples
--------
>>> bin_edges = np.array([0, 5, 10])
>>> compute_bin(1, bin_edges)
0
>>> compute_bin(5, bin_edges)
1
>>> compute_bin(10, bin_edges)
1
>>> assert compute_bin(1, bin_edges) == 0
>>> assert compute_bin(5, bin_edges) == 1
>>> assert compute_bin(10, bin_edges) == 1
"""

# assuming uniform bins for now
@@ -1023,8 +1006,7 @@ def touch(fname):
Examples
--------
>>> touch('bububu')
>>> os.path.exists('bububu')
True
>>> assert os.path.exists('bububu')
>>> os.unlink('bububu')
"""
Path(fname).touch()
@@ -1059,18 +1041,15 @@ def adjust_dt_for_power_of_two(dt, length, strict=False):
>>> new_dt = adjust_dt_for_power_of_two(dt, length)
INFO: ...
INFO: ...
>>> np.isclose(new_dt, 0.078125)
True
>>> length / new_dt == 128
True
>>> assert np.isclose(new_dt, 0.078125)
>>> assert length / new_dt == 128
>>> length, dt = 6.5, 0.1
>>> # There are 100 bins there. I want them to be 128.
>>> new_dt = adjust_dt_for_power_of_two(dt, length)
INFO: ...
INFO: Too many ...
INFO: ...
>>> length / new_dt == 72
True
>>> assert length / new_dt == 72
"""
log.info("Adjusting bin time to closest power of 2 of bins.")
nbin = length / dt
@@ -1091,10 +1070,8 @@ def adjust_dt_for_small_power(dt, length):
>>> # There are 99 bins there. I want them to be 100 (2**2 * 5**2).
>>> new_dt = adjust_dt_for_small_power(dt, length)
INFO:...
>>> np.isclose(new_dt, 0.099)
True
>>> np.isclose(length / new_dt, 100)
True
>>> assert np.isclose(new_dt, 0.099)
>>> assert np.isclose(length / new_dt, 100)
"""
nbin = length / dt
losp = get_list_of_small_powers(2 * nbin)
@@ -1113,11 +1090,9 @@ def memmapped_arange(i0, i1, istep, fname=None, nbin_threshold=10**7, dtype=floa
Examples
--------
>>> i0, i1, istep = 0, 10, 1e-2
>>> np.allclose(np.arange(i0, i1, istep), memmapped_arange(i0, i1, istep))
True
>>> assert np.allclose(np.arange(i0, i1, istep), memmapped_arange(i0, i1, istep))
>>> i0, i1, istep = 0, 10, 1e-7
>>> np.allclose(np.arange(i0, i1, istep), memmapped_arange(i0, i1, istep))
True
>>> assert np.allclose(np.arange(i0, i1, istep), memmapped_arange(i0, i1, istep))
"""
import tempfile
@@ -1143,14 +1118,10 @@ def nchars_in_int_value(value):
Examples
--------
>>> nchars_in_int_value(2)
1
>>> nchars_in_int_value(1356)
4
>>> nchars_in_int_value(9999)
4
>>> nchars_in_int_value(10000)
5
>>> assert nchars_in_int_value(2) == 1
>>> assert nchars_in_int_value(1356) == 4
>>> assert nchars_in_int_value(9999) == 4
>>> assert nchars_in_int_value(10000) == 5
"""
# "+1" because, e.g., 10000 would return 4
return int(np.ceil(np.log10(value + 1)))
@@ -1183,11 +1154,9 @@ def find_peaks_in_image(image, n=5, rough=False, **kwargs):
>>> image[7, 2] = 1
>>> image[8, 1] = 3
>>> idxs = find_peaks_in_image(image, n=2)
>>> np.allclose(idxs, [(8, 1), (5, 5)])
True
>>> assert np.allclose(idxs, [(8, 1), (5, 5)])
>>> idxs = find_peaks_in_image(image, n=2, rough=True)
>>> np.allclose(idxs, [(8, 1), (5, 5)])
True
>>> assert np.allclose(idxs, [(8, 1), (5, 5)])
"""
if not HAS_SKIMAGE or rough:
best_cands = [
@@ -1209,14 +1178,11 @@ def force_iterable(val):
Examples
--------
>>> val = 5.
>>> force_iterable(val)[0] == val
True
>>> assert force_iterable(val)[0] == val
>>> val = None
>>> force_iterable(val) is None
True
>>> assert force_iterable(val) is None
>>> val = np.array([5., 5])
>>> np.all(force_iterable(val) == val)
True
>>> assert np.all(force_iterable(val) == val)
"""
if val is None:
return val
@@ -1254,14 +1220,11 @@ def normalize_dyn_profile(dynprof, norm):
--------
>>> hist = [[1, 2], [2, 3], [3, 4]]
>>> hnorm = normalize_dyn_profile(hist, "meansub")
>>> np.allclose(hnorm[0], [-0.5, 0.5])
True
>>> assert np.allclose(hnorm[0], [-0.5, 0.5])
>>> hnorm = normalize_dyn_profile(hist, "meannorm")
>>> np.allclose(hnorm[0], [-1/3, 1/3])
True
>>> assert np.allclose(hnorm[0], [-1/3, 1/3])
>>> hnorm = normalize_dyn_profile(hist, "ratios")
>>> np.allclose(hnorm[1], [1, 1])
True
>>> assert np.allclose(hnorm[1], [1, 1])
"""
dynprof = np.array(dynprof, dtype=float)

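For context, the three normalizations exercised by the doctests in the hunk above can be reconstructed from the asserted values alone. The sketch below is an illustration consistent with those doctests, not the actual HENDRICS implementation (whose body is truncated in this diff); the helper name normalize_dyn_profile_sketch is ours.

    import numpy as np

    def normalize_dyn_profile_sketch(dynprof, norm):
        """Illustrative normalizations: "meansub" subtracts each row's mean,
        "meannorm" additionally divides by that mean, and "ratios" divides
        each row by the column-wise (time-averaged) mean profile."""
        dynprof = np.array(dynprof, dtype=float)
        row_means = dynprof.mean(axis=1, keepdims=True)
        if norm == "meansub":
            return dynprof - row_means
        if norm == "meannorm":
            return (dynprof - row_means) / row_means
        if norm == "ratios":
            return dynprof / dynprof.mean(axis=0)
        raise ValueError(f"Unknown normalization: {norm}")

    # Same input and checks as the doctests above.
    hist = [[1, 2], [2, 3], [3, 4]]
    assert np.allclose(normalize_dyn_profile_sketch(hist, "meansub")[0], [-0.5, 0.5])
    assert np.allclose(normalize_dyn_profile_sketch(hist, "meannorm")[0], [-1/3, 1/3])
    assert np.allclose(normalize_dyn_profile_sketch(hist, "ratios")[1], [1, 1])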
12 changes: 4 additions & 8 deletions hendrics/calibrate.py
@@ -105,19 +105,15 @@ def rough_calibration(pis, mission):
Examples
--------
>>> rough_calibration(0, 'nustar')
1.6
>>> rough_calibration(0.0, 'ixpe')
0.0
>>> assert rough_calibration(0, 'nustar') == 1.6
>>> assert rough_calibration(0.0, 'ixpe') == 0.0
>>> # It's case-insensitive
>>> rough_calibration(1200, 'XMm')
1.2
>>> assert rough_calibration(1200, 'XMm') == 1.2
>>> rough_calibration(10, 'asDf')
Traceback (most recent call last):
...
ValueError: Mission asdf not recognized
>>> rough_calibration(100, 'nicer')
1.0
>>> assert rough_calibration(100, 'nicer') == 1.0
"""
if mission.lower() == "nustar":
return pis * 0.04 + 1.6
24 changes: 8 additions & 16 deletions hendrics/efsearch.py
@@ -393,10 +393,8 @@ def z_n_fast(phase, norm, n=2):
--------
>>> phase = 2 * np.pi * np.arange(0, 1, 0.01)
>>> norm = np.sin(phase) + 1
>>> np.isclose(z_n_fast(phase, norm, n=4), 50)
True
>>> np.isclose(z_n_fast(phase, norm, n=2), 50)
True
>>> assert np.isclose(z_n_fast(phase, norm, n=4), 50)
>>> assert np.isclose(z_n_fast(phase, norm, n=2), 50)
"""

total_norm = np.sum(norm)
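For reference, the value 50 asserted in the doctest above follows from the standard weighted Z^2_n statistic (Buccheri et al. 1983): 2/sum(w) times the sum over harmonics k = 1..n of [(sum_j w_j cos(k phi_j))^2 + (sum_j w_j sin(k phi_j))^2]. Below is a minimal standalone sketch of that definition; the helper name z_n_direct is ours, and it only mirrors the doctest rather than the optimized z_n_fast implementation.

    import numpy as np

    def z_n_direct(phases, weights, n=2):
        """Weighted Z^2_n computed directly from its definition."""
        phases = np.asarray(phases, dtype=float)
        weights = np.asarray(weights, dtype=float)
        ks = np.arange(1, n + 1)[:, np.newaxis]          # harmonics 1..n
        cos_sums = np.sum(weights * np.cos(ks * phases), axis=1)
        sin_sums = np.sum(weights * np.sin(ks * phases), axis=1)
        return 2 / np.sum(weights) * np.sum(cos_sums**2 + sin_sums**2)

    # Same sinusoidal profile as the doctest: only the first harmonic contributes.
    phase = 2 * np.pi * np.arange(0, 1, 0.01)
    norm = np.sin(phase) + 1
    assert np.isclose(z_n_direct(phase, norm, n=2), 50)
    assert np.isclose(z_n_direct(phase, norm, n=4), 50)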
@@ -434,14 +432,10 @@ def _average_and_z_sub_search(profiles, n=2):
>>> profiles = np.ones((16, len(norm)))
>>> profiles[8] = norm
>>> n_ave, results = _average_and_z_sub_search(profiles, n=2)
>>> np.isclose(results[0, 8], 50)
True
>>> np.isclose(results[1, 8], 50/2)
True
>>> np.isclose(results[2, 8], 50/4)
True
>>> np.isclose(results[3, 8], 50/8)
True
>>> assert np.isclose(results[0, 8], 50)
>>> assert np.isclose(results[1, 8], 50/2)
>>> assert np.isclose(results[2, 8], 50/4)
>>> assert np.isclose(results[3, 8], 50/8)
"""
nprof = len(profiles)
# Only use powers of two
@@ -1192,8 +1186,7 @@ def get_xy_boundaries_from_level(x, y, image, level, x0, y0):
>>> X, Y = np.meshgrid(x, y)
>>> Z = Z = np.sinc(np.sqrt(X**2 + Y**2))**2 + np.sinc(np.sqrt((X - 5)**2 + Y**2))**2
>>> vals = get_xy_boundaries_from_level(X, Y, Z, 0.5, 0, 0)
>>> np.allclose(np.abs(vals), 0.44, atol=0.1)
True
>>> assert np.allclose(np.abs(vals), 0.44, atol=0.1)
"""
fig = plt.figure(np.random.random())
cs = fig.gca().contour(x, y, image, [level])
@@ -1226,8 +1219,7 @@ def get_boundaries_from_level(x, y, level, x0):
>>> x = np.linspace(-10, 10, 1000)
>>> y = np.sinc(x)**2 + np.sinc((x - 5))**2
>>> vals = get_boundaries_from_level(x, y, 0.5, 0)
>>> np.allclose(np.abs(vals), 0.44, atol=0.1)
True
>>> assert np.allclose(np.abs(vals), 0.44, atol=0.1)
"""
max_idx = np.argmin(np.abs(x - x0))
idx = max_idx