From 84df0eafedee714f67e2dd229c0f810a8502b919 Mon Sep 17 00:00:00 2001
From: dachengx
Date: Fri, 15 Nov 2024 14:13:38 -0600
Subject: [PATCH] Remove 1T-related code

---
.coveragerc | 1 -
.gitignore | 1 -
docs/source/build_datastructure_doc.py | 50 +--
pyproject.toml | 1 -
straxen/analyses/bokeh_waveform_plot.py | 10 +-
straxen/analyses/event_display.py | 12 +-
.../analyses/holoviews_waveform_display.py | 2 +-
straxen/analyses/waveform_plot.py | 2 -
straxen/common.py | 47 +--
straxen/contexts.py | 25 --
straxen/corrections_services.py | 24 +-
straxen/legacy/README.md | 6 -
straxen/legacy/__init__.py | 11 -
straxen/legacy/contexts_1t.py | 210 -----------
straxen/legacy/hitfinder_thresholds.py | 294 ---------------
straxen/legacy/plugins_1t/__init__.py | 11 -
straxen/legacy/plugins_1t/event_info.py | 22 --
straxen/legacy/plugins_1t/fake_daqreader.py | 8 -
straxen/legacy/plugins_1t/pax_interface.py | 189 ----------
straxen/legacy/plugins_1t/peak_positions.py | 117 ------
straxen/legacy/plugins_1t/x1t_cuts.py | 292 ---------------
straxen/legacy/xenon1t_url_configs.py | 354 ------------------
straxen/scripts/fake_daq.py | 249 ------------
straxen/test_utils.py | 3 -
tests/test_1T_plugins.py | 136 -------
tests/test_cmt.py | 15 -
tests/test_contexts.py | 42 ---
tests/test_mini_analyses.py | 14 +-
28 files changed, 38 insertions(+), 2110 deletions(-)
delete mode 100644 straxen/legacy/README.md
delete mode 100644 straxen/legacy/__init__.py
delete mode 100644 straxen/legacy/contexts_1t.py
delete mode 100644 straxen/legacy/hitfinder_thresholds.py
delete mode 100644 straxen/legacy/plugins_1t/__init__.py
delete mode 100644 straxen/legacy/plugins_1t/event_info.py
delete mode 100644 straxen/legacy/plugins_1t/fake_daqreader.py
delete mode 100644 straxen/legacy/plugins_1t/pax_interface.py
delete mode 100644 straxen/legacy/plugins_1t/peak_positions.py
delete mode 100644 straxen/legacy/plugins_1t/x1t_cuts.py
delete mode 100644 straxen/legacy/xenon1t_url_configs.py
delete mode 100755 straxen/scripts/fake_daq.py
delete mode 100644 tests/test_1T_plugins.py

diff --git a/.coveragerc b/.coveragerc
index 833b822aa..563a4b80d 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -2,7 +2,6 @@
[report]
omit =
straxen/storage/rucio_remote.py
- straxen/legacy/plugins_1t/pax_interface.py
straxen/entry_points.py
straxen/daq_core.py
straxen/scripts/*.py
diff --git a/.gitignore b/.gitignore
index c0fcfab59..9af983f30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,7 +17,6 @@
live_data
daq_test_data
strax_data
strax_test_data
-from_fake_daq
from_eb
from_eb_finished
resource_cache
diff --git a/docs/source/build_datastructure_doc.py b/docs/source/build_datastructure_doc.py
index 1aa0df404..5a20735f8 100644
--- a/docs/source/build_datastructure_doc.py
+++ b/docs/source/build_datastructure_doc.py
@@ -108,10 +108,10 @@
"""

titles = {
- "": "Straxen {xT} datastructure",
- "_he": "Straxen {xT} datastructure for high energy channels",
- "_nv": "Straxen {xT} datastructure for neutron veto",
- "_mv": "Straxen {xT} datastructure for muon veto",
+ "": "Straxen nT datastructure",
+ "_he": "Straxen nT datastructure for high energy channels",
+ "_nv": "Straxen nT datastructure for neutron veto",
+ "_mv": "Straxen nT datastructure for muon veto",
}

tree_suffices = list(titles.keys())

@@ -144,29 +144,23 @@ def get_plugins_deps(st):
return plugins_by_deps


-def get_context(is_nt):
+def get_context():
"""Need to init a context without initializing the runs_db as that requires the appropriate
passwords.
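For example, an illustrative sketch of how the doc builders below consume this context (run_id "0" is a dummy, mirroring the builder calls further down):

    st = get_context()
    # Fetch one plugin instance without touching the runs DB
    plugin = st._get_plugins(targets=("event_info",), run_id="0")["event_info"]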
:return: straxen context that mimics the xenonnt_online context without the rundb init """ - if is_nt: - st = straxen.contexts.xenonnt_online(_database_init=False) - st.context_config["forbid_creation_of"] = straxen.daqreader.DAQReader.provides - else: - st = straxen.contexts.xenon1t_dali() - st.register_all(straxen.legacy.plugins_1t.x1t_cuts) - st.context_config["forbid_creation_of"] = straxen.daqreader.DAQReader.provides + st = straxen.contexts.xenonnt_online(_database_init=False) + st.context_config["forbid_creation_of"] = straxen.daqreader.DAQReader.provides return st -def build_datastructure_doc(is_nt): +def build_datastructure_doc(): """Build a dependency tree for all plugins.""" pd.set_option("display.max_colwidth", int(1e9)) - st = get_context(is_nt) - one_tonne_or_n_tonne = "nT" if is_nt else "1T" + st = get_context() # Too lazy to write proper graph sorter # Make dictionary {total number of dependencies below -> list of plugins} @@ -174,14 +168,11 @@ def build_datastructure_doc(is_nt): # Make graph for each suffix ('' referring to TPC) for suffix in tree_suffices: - title = titles[suffix].format(xT=one_tonne_or_n_tonne) - out = page_header.format(title=title, context="xenonnt_online" if is_nt else "xenon1t_dali") - if not is_nt and suffix != "": - # No NV/MV/HE for 1T - continue - - print(f"------------ {one_tonne_or_n_tonne}{suffix} ------------") - os.makedirs(this_dir + f"/graphs{suffix}_{one_tonne_or_n_tonne}", exist_ok=True) + title = titles[suffix] + out = page_header.format(title=title, context="xenonnt_online") + + print(f"------------ nT{suffix} ------------") + os.makedirs(this_dir + f"/graphs{suffix}_nT", exist_ok=True) for n_deps in list(reversed(sorted(list(plugins_by_deps[suffix].keys())))): for this_data_type in plugins_by_deps[suffix][n_deps]: this_plugin = st._get_plugins(targets=(this_data_type,), run_id="0")[this_data_type] @@ -192,7 +183,7 @@ def build_datastructure_doc(is_nt): add_deps_to_graph_tree(graph_tree, this_plugin, this_data_type) # Where to save this node - fn = this_dir + f"/graphs{suffix}_{one_tonne_or_n_tonne}/" + this_data_type + fn = this_dir + f"/graphs{suffix}_nT/" + this_data_type graph_tree.render(fn) with open(f"{fn}.svg", mode="r") as f: svg = add_spaces(f.readlines()[5:]) @@ -225,12 +216,10 @@ def build_datastructure_doc(is_nt): config_options=add_spaces(config_df.to_html(index=False)), ) - with open( - this_dir + f"/reference/datastructure{suffix}_{one_tonne_or_n_tonne}.rst", mode="w" - ) as f: + with open(this_dir + f"/reference/datastructure{suffix}_nT.rst", mode="w") as f: f.write(out) - shutil.rmtree(this_dir + f"/graphs{suffix}_{one_tonne_or_n_tonne}") + shutil.rmtree(this_dir + f"/graphs{suffix}_nT") def tree_to_svg(graph_tree, save_as="data_kinds_nT"): @@ -246,7 +235,7 @@ def tree_to_svg(graph_tree, save_as="data_kinds_nT"): def write_data_kind_dep_tree(): """Work in progress to build a dependency tree of the datakinds.""" print("------------ data kinds ------------") - st = get_context(is_nt=True) + st = get_context() def get_plugin(pov): return st._get_plugins((pov,), "0")[pov] @@ -342,5 +331,4 @@ def get_plugin(pov): if __name__ == "__main__": write_data_kind_dep_tree() - build_datastructure_doc(True) - build_datastructure_doc(False) + build_datastructure_doc() diff --git a/pyproject.toml b/pyproject.toml index 97dd1123f..83e9309b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,6 @@ repository = "https://github.com/XENONnT/straxen" [tool.poetry.scripts] ajax = "straxen.scripts.ajax:main" bootstrax = 
"straxen.scripts.bootstrax:main" -fake_daq = "straxen.scripts.fake_daq:main" microstrax = "straxen.scripts.microstrax:main" refresh_raw_records = "straxen.scripts.refresh_raw_records:main" restrax = "straxen.scripts.restrax:main" diff --git a/straxen/analyses/bokeh_waveform_plot.py b/straxen/analyses/bokeh_waveform_plot.py index b021471e2..af62220be 100644 --- a/straxen/analyses/bokeh_waveform_plot.py +++ b/straxen/analyses/bokeh_waveform_plot.py @@ -33,7 +33,6 @@ def event_display_interactive( plot_all_pmts=False, plot_record_matrix=False, plot_records_threshold=10, - xenon1t=False, colors=("gray", "blue", "green"), yscale=("linear", "linear", "linear"), log=True, @@ -53,7 +52,6 @@ def event_display_interactive( :param plot_records_threshold: Threshold at which zoom level to display record matrix as polygons. Larger values may lead to longer render times since more polygons are shown. - :param xenon1t: Flag to use event display with 1T data. :param colors: Colors to be used for peaks. Order is as peak types, 0 = Unknown, 1 = S1, 2 = S2. Can be any colors accepted by bokeh. :param yscale: Defines scale for main/alt S1 == 0, main/alt S2 == 1, @@ -149,7 +147,6 @@ def event_display_interactive( to_pe, labels, plot_all_pmts, - xenon1t=xenon1t, log=log, ) @@ -306,7 +303,7 @@ def plot_detail_plot_s1_s2(signal, s1_keys, s2_keys, labels, colors, yscale=("li def plot_pmt_arrays_and_positions( - top_array_keys, bottom_array_keys, signal, to_pe, labels, plot_all_pmts, xenon1t=False, log=True + top_array_keys, bottom_array_keys, signal, to_pe, labels, plot_all_pmts, log=True ): """Function which plots the Top and Bottom PMT array. @@ -335,7 +332,6 @@ def plot_pmt_arrays_and_positions( to_pe, plot_all_pmts=plot_all_pmts, label=labels[k], - xenon1t=xenon1t, fig=fig, log=log, ) @@ -534,7 +530,6 @@ def plot_pmt_array( to_pe, plot_all_pmts=False, log=False, - xenon1t=False, fig=None, label="", ): @@ -545,7 +540,6 @@ def plot_pmt_array( :param to_pe: PMT gains. :param log: If true use a log-scale for the color scale. :param plot_all_pmts: If True colors all PMTs instead of showing swtiched off PMTs as gray dots. - :param xenon1t: If True plots 1T array. :param fig: Instance of bokeh.plotting.figure if None one will be created via straxen.bokeh.utils.default_figure(). 
:param label: Label of the peak which should be used for the plot legend @@ -574,7 +568,7 @@ def plot_pmt_array( fig = _plot_tpc(fig) # Plotting PMTs: - pmts = straxen.pmt_positions(xenon1t) + pmts = straxen.pmt_positions() if plot_all_pmts: mask_pmts = np.zeros(len(pmts), dtype=np.bool_) else: diff --git a/straxen/analyses/event_display.py b/straxen/analyses/event_display.py index 8f1db7f7d..4eb43c3f6 100644 --- a/straxen/analyses/event_display.py +++ b/straxen/analyses/event_display.py @@ -57,7 +57,6 @@ def event_display( s2_fuzz=50, s1_fuzz=0, max_peaks=500, - xenon1t=False, s1_hp_kwargs=None, s2_hp_kwargs=None, event_time_limit=None, @@ -92,7 +91,6 @@ def event_display( s2_fuzz=s2_fuzz, s1_fuzz=s1_fuzz, max_peaks=max_peaks, - xenon1t=xenon1t, display_peak_info=display_peak_info, display_event_info=display_event_info, s1_hp_kwargs=s1_hp_kwargs, @@ -112,7 +110,6 @@ def _event_display( s2_fuzz=50, s1_fuzz=0, max_peaks=500, - xenon1t=False, display_peak_info=PEAK_DISPLAY_DEFAULT_INFO, display_event_info=EVENT_DISPLAY_DEFAULT_INFO, s1_hp_kwargs=None, @@ -170,7 +167,6 @@ def _event_display( # Hit patterns options: for hp_opt, color_map in ((s1_hp_kwargs, "Blues"), (s2_hp_kwargs, "Greens")): _common_opt = dict( - xenon1t=xenon1t, pmt_label_color="lightgrey", log_scale=True, vmin=0.1, @@ -239,7 +235,7 @@ def _event_display( ) # Mark reconstructed position (corrected) plt.scatter(event["x"], event["y"], marker="X", s=100, c="k") - if not xenon1t and axi == 0 and plot_all_positions: + if axi == 0 and plot_all_positions: _scatter_rec(event) # Fill panels with peak/event info @@ -279,9 +275,8 @@ def _event_display( single_figure=False, ) ax_rec.tick_params(axis="x", rotation=0) - if not xenon1t: - # Top vs bottom division - ax_rec.axhline(straxen.n_top_pmts, c="k") + # Top vs bottom division + ax_rec.axhline(straxen.n_top_pmts, c="k") if ev_range is not None: plt.xlim(*ev_range) @@ -450,7 +445,6 @@ def _scatter_rec( :param s2_fuzz: extra time around main S2 [ns] :param s1_fuzz: extra time around main S1 [ns] :param max_peaks: max peaks for plotting in the wf plot -:param xenon1t: True: is 1T, False: is nT :param display_peak_info: tuple, items that will be extracted from event and displayed in the event info panel see above for format :param display_event_info: tuple, items that will be extracted from diff --git a/straxen/analyses/holoviews_waveform_display.py b/straxen/analyses/holoviews_waveform_display.py index b93b06b5e..ea48a5769 100644 --- a/straxen/analyses/holoviews_waveform_display.py +++ b/straxen/analyses/holoviews_waveform_display.py @@ -42,7 +42,7 @@ def hvdisp_plot_pmt_pattern(*, config, records, to_pe, array="bottom"): """ import holoviews as hv - pmts = straxen.pmt_positions(xenon1t=config["n_tpc_pmts"] < 300) + pmts = straxen.pmt_positions() areas = np.bincount( records["channel"], weights=records["area"] * to_pe[records["channel"]], minlength=len(pmts) ) diff --git a/straxen/analyses/waveform_plot.py b/straxen/analyses/waveform_plot.py index 1af86ab95..4f333d7fe 100644 --- a/straxen/analyses/waveform_plot.py +++ b/straxen/analyses/waveform_plot.py @@ -111,7 +111,6 @@ def plot_hit_pattern( log_scale=False, label=None, single_figure=False, - xenon1t=False, figsize=(10, 4), ): if single_figure: @@ -124,7 +123,6 @@ def plot_hit_pattern( vmin=vmin, log_scale=log_scale, label=label, - xenon1t=xenon1t, ) diff --git a/straxen/common.py b/straxen/common.py index 830d7ea8e..353df8c36 100644 --- a/straxen/common.py +++ b/straxen/common.py @@ -2,8 +2,6 @@ import os.path as osp import json 
from re import match -import ast -import configparser import gzip import inspect from typing import Union, Dict, Any @@ -100,35 +98,14 @@ def rotate_perp_wires(x_obs: np.ndarray, y_obs: np.ndarray, angle_extra: Union[f @export -def pmt_positions(xenon1t=False): +def pmt_positions(): """Return pandas dataframe with PMT positions columns: array (top/bottom), i (PMT number), x, y """ - if xenon1t: - # Get PMT positions from the XENON1T config without PAX - config = configparser.ConfigParser() - config.read_string( - resource_from_url( - "https://raw.githubusercontent.com/XENON1T/pax/master/pax/config/XENON1T.ini" - ) - ) - pmt_config = ast.literal_eval(config["DEFAULT"]["pmts"]) - return pd.DataFrame( - [ - dict( - x=q["position"]["x"], - y=q["position"]["y"], - i=q["pmt_position"], - array=q.get("array", "other"), - ) - for q in pmt_config[:248] - ] - ) - else: - return resource_from_url( - aux_repo + "874de2ffe41147719263183b89d26c9ee562c334/pmt_positions_xenonnt.csv", - fmt="csv", - ) + return resource_from_url( + aux_repo + "874de2ffe41147719263183b89d26c9ee562c334/pmt_positions_xenonnt.csv", + fmt="csv", + ) # In-memory resource cache @@ -585,17 +562,3 @@ def _swap_values_in_array(data_arr, buffer, items, replacements): buffer[i] = replacements[k] break return buffer - - -## -# Old XENON1T Stuff -## - - -first_sr1_run = 170118_1327 - - -@export -def pax_file(x): - """Return URL to file hosted in the pax repository master branch.""" - return "https://raw.githubusercontent.com/XENON1T/pax/master/pax/data/" + x diff --git a/straxen/contexts.py b/straxen/contexts.py index fe3727a92..af2f5896b 100644 --- a/straxen/contexts.py +++ b/straxen/contexts.py @@ -326,28 +326,3 @@ def xenonnt_led(**kwargs): ) st.set_config({"coincidence_level_recorder_nv": 1}) return st - - -## -# XENON1T, see straxen/legacy -## - - -def demo(): - """Return strax context used in the straxen demo notebook.""" - return straxen.legacy.contexts_1t.demo() - - -def fake_daq(): - """Context for processing fake DAQ data in the current directory.""" - return straxen.legacy.contexts_1t.fake_daq() - - -def xenon1t_dali(output_folder="./strax_data", build_lowlevel=False, **kwargs): - return straxen.legacy.contexts_1t.xenon1t_dali( - output_folder=output_folder, build_lowlevel=build_lowlevel, **kwargs - ) - - -def xenon1t_led(**kwargs): - return straxen.legacy.contexts_1t.xenon1t_led(**kwargs) diff --git a/straxen/corrections_services.py b/straxen/corrections_services.py index 6aec14e82..71cc26fc9 100644 --- a/straxen/corrections_services.py +++ b/straxen/corrections_services.py @@ -33,7 +33,6 @@ ] single_value_corrections = [ - "elife_xenon1t", "elife", "baseline_samples_nv", "electron_drift_velocity", @@ -76,7 +75,7 @@ class CorrectionsManagementServices: """ - def __init__(self, username=None, password=None, mongo_url=None, is_nt=True): + def __init__(self, username=None, password=None, mongo_url=None): """ :param username: corrections DB username read the .xenon_config for the users "pymongo_user" has @@ -84,7 +83,6 @@ def __init__(self, username=None, password=None, mongo_url=None, is_nt=True): the "CMT admin user" has r/w permission to corrections DB and read permission to RunDB :param password: DB password - :param is_nt: bool if True we are looking at nT if False we are looking at 1T """ mongo_kwargs = { @@ -101,17 +99,13 @@ def __init__(self, username=None, password=None, mongo_url=None, is_nt=True): # Setup the interface self.interface = strax.CorrectionsInterface(self.client, database_name="corrections") - self.is_nt = 
is_nt
- if self.is_nt:
- self.collection = self.client["xenonnt"]["runs"]
- else:
- self.collection = self.client["run"]["runs_new"]
+ self.collection = self.client["xenonnt"]["runs"]

 def __str__(self):
 return self.__repr__()

 def __repr__(self):
- return str(f'{"XENONnT " if self.is_nt else "XENON1T"}-Corrections_Management_Services')
+ return "XENONnT_Corrections_Management_Services"

 def get_corrections_config(self, run_id, config_model=None):
 """Get context configuration for a given correction. @@ -164,9 +158,7 @@ def _get_correction(self, run_id, correction, version): # because every pmt is its own dataframe...of course if correction in {"pmt", "n_veto", "mu_veto"}: # get lists of pmts - df_global = self.interface.read( - "global_xenonnt" if self.is_nt else "global_xenon1t" - ) + df_global = self.interface.read("global_xenonnt") gains = df_global["global_ONLINE"][0] # global is where all pmts are grouped pmts = list(gains.keys()) for it_correction in pmts: # loop over all PMTs @@ -320,13 +312,9 @@ def get_start_time(self, run_id): """ - if self.is_nt: - # xenonnt use int - run_id = int(run_id) + run_id = int(run_id) - rundoc = self.collection.find_one( - {"number" if self.is_nt else "name": run_id}, {"start": 1} - ) + rundoc = self.collection.find_one({"number": run_id}, {"start": 1}) if rundoc is None: raise ValueError(f"run_id = {run_id} not found") time = rundoc["start"] diff --git a/straxen/legacy/README.md b/straxen/legacy/README.md deleted file mode 100644 index 393c38737..000000000 --- a/straxen/legacy/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# 1T support files - -In this subdirectory, we store XENON1T related files / scripts that are only used for xenon1t. - -This folder is only marginally useful, you probably want to go of using a straxen version pre-v1.0.0 where we still were -actively checking and using the 1T chain. diff --git a/straxen/legacy/__init__.py b/straxen/legacy/__init__.py deleted file mode 100644 index 165593650..000000000 --- a/straxen/legacy/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from . import hitfinder_thresholds -from .hitfinder_thresholds import *
-from . import xenon1t_url_configs -from .xenon1t_url_configs import *
-from . import plugins_1t -from .plugins_1t import *
-from . 
import contexts_1t -from .contexts_1t import * diff --git a/straxen/legacy/contexts_1t.py b/straxen/legacy/contexts_1t.py deleted file mode 100644 index 9f98d42d6..000000000 --- a/straxen/legacy/contexts_1t.py +++ /dev/null @@ -1,210 +0,0 @@ -from immutabledict import immutabledict -import strax -import straxen -from straxen.common import pax_file -from .plugins_1t.fake_daqreader import Fake1TDAQReader - - -def get_x1t_context_config(): - """Wrapper to get straxen.contexts after imports.""" - from straxen.contexts import common_opts - - x1t_context_config = { - **common_opts, - **dict( - check_available=("raw_records", "records", "peaklets", "events", "event_info"), - free_options=("channel_map",), - use_per_run_defaults=True, - store_run_fields=tuple( - [x for x in common_opts["store_run_fields"] if x != "mode"] - + ["trigger.events_built", "reader.ini.name"] - ), - ), - } - x1t_context_config.update( - dict( - register=common_opts["register"] - + [ - straxen.PeakPositions1T, - straxen.RecordsFromPax, - straxen.EventInfo1T, - ] - ) - ) - return x1t_context_config - - -x1t_common_config = dict( - check_raw_record_overlaps=False, - allow_sloppy_chunking=True, - n_tpc_pmts=248, - n_top_pmts=127, - channel_map=immutabledict( - # (Minimum channel, maximum channel) - tpc=(0, 247), - diagnostic=(248, 253), - aqmon=(254, 999), - ), - # Records - hev_gain_model="cmt://to_pe_model?version=v1&detector=1t&run_id=plugin.run_id", - pmt_pulse_filter=( - 0.012, - -0.119, - 2.435, - -1.271, - 0.357, - -0.174, - -0.0, - -0.036, - -0.028, - -0.019, - -0.025, - -0.013, - -0.03, - -0.039, - -0.005, - -0.019, - -0.012, - -0.015, - -0.029, - 0.024, - -0.007, - 0.007, - -0.001, - 0.005, - -0.002, - 0.004, - -0.002, - ), - hit_min_amplitude="legacy-thresholds://XENON1T_SR1", - tail_veto_threshold=int(1e5), - save_outside_hits=(3, 3), - # Peaklets - peaklet_gap_threshold=350, - gain_model="cmt://to_pe_model?version=v1&detector=1t&run_id=plugin.run_id", - peak_split_gof_threshold=(None, ((0.5, 1), (3.5, 0.25)), ((2, 1), (4.5, 0.4))), # Reserved - peak_min_pmts=2, - # MergedS2s - s2_merge_gap_thresholds=((1.7, 5.0e3), (4.0, 500.0), (5.0, 0.0)), - # Peaks - # Smaller right extension since we applied the filter - peak_right_extension=30, - s1_max_rise_time_post100=150, - s1_min_coincidence=3, - event_s1_min_coincidence=3, - # Events* - left_event_extension=int(0.3e6), - right_event_extension=int(1e6), - elife=1e6, - electron_drift_velocity=1.3325e-4, - max_drift_length=96.9, - electron_drift_time_gate=1700, - se_gain=28.2, - avg_se_gain=28.2, - rel_extraction_eff=1.0, - rel_light_yield=1.0, - s1_xyz_map=f'itp_map://resource://{pax_file("XENON1T_s1_xyz_lce_true_kr83m_SR1_pax-680_fdc-3d_v0.json")}?fmt=json', # noqa - s2_xy_map=f'itp_map://resource://{pax_file("XENON1T_s2_xy_ly_SR1_v2.2.json")}?fmt=json', - g1=0.1426, - g2=11.55 / (1 - 0.63), -) - - -def demo(): - """Return strax context used in the straxen demo notebook.""" - straxen.download_test_data() - - st = strax.Context( - storage=[ - strax.DataDirectory("./strax_data"), - strax.DataDirectory( - "./strax_test_data", deep_scan=True, provide_run_metadata=True, readonly=True - ), - ], - forbid_creation_of=straxen.daqreader.DAQReader.provides, - config=dict(**x1t_common_config), - **get_x1t_context_config(), - ) - - # Use configs that are always available - st.set_config( - dict( - hev_gain_model="legacy-to-pe://1T_to_pe_placeholder", - gain_model="legacy-to-pe://1T_to_pe_placeholder", - elife=1e6, - electron_drift_velocity=1.3325e-4, - se_gain=28.2, - 
avg_se_gain=28.2, - rel_extraction_eff=1.0, - s1_xyz_map=( - "itp_map://resource://" - f"{pax_file('XENON1T_s1_xyz_lce_true_kr83m_SR1_pax-680_fdc-3d_v0.json')}" - "?fmt=json" - ), - s2_xy_map=f'itp_map://resource://{pax_file("XENON1T_s2_xy_ly_SR1_v2.2.json")}?fmt=json', - ) - ) - return st - - -def fake_daq(): - """Context for processing fake DAQ data in the current directory.""" - st = strax.Context( - storage=[ - strax.DataDirectory("./strax_data"), - # Fake DAQ puts run doc JSON in same folder: - strax.DataDirectory("./from_fake_daq", provide_run_metadata=True, readonly=True), - ], - config=dict( - daq_input_dir="./from_fake_daq", - daq_chunk_duration=int(2e9), - daq_compressor="lz4", - n_readout_threads=8, - daq_overlap_chunk_duration=int(2e8), - **x1t_common_config, - ), - **get_x1t_context_config(), - ) - st.register(Fake1TDAQReader) - return st - - -def xenon1t_dali(output_folder="./strax_data", build_lowlevel=False, **kwargs): - context_options = {**get_x1t_context_config(), **kwargs} - - st = strax.Context( - storage=[ - strax.DataDirectory( - "/dali/lgrandi/xenon1t/strax_converted/raw", - take_only="raw_records", - provide_run_metadata=True, - readonly=True, - ), - strax.DataDirectory("/dali/lgrandi/xenon1t/strax_converted/processed", readonly=True), - strax.DataDirectory(output_folder), - ], - config=dict(**x1t_common_config), - # When asking for runs that don't exist, throw an error rather than - # starting the pax converter - forbid_creation_of=( - straxen.daqreader.DAQReader.provides - if build_lowlevel - else straxen.daqreader.DAQReader.provides + ("records", "peaklets") - ), - **context_options, - ) - return st - - -def xenon1t_led(**kwargs): - st = xenon1t_dali(**kwargs) - st.set_context_config( - { - "check_available": ("raw_records", "led_calibration"), - "free_options": list(get_x1t_context_config().keys()), - } - ) - # Return a new context with only raw_records and led_calibration registered - st = st.new_context(replace=True, config=st.config, storage=st.storage, **st.context_config) - st.register([straxen.RecordsFromPax, straxen.LEDCalibration]) - return st diff --git a/straxen/legacy/hitfinder_thresholds.py b/straxen/legacy/hitfinder_thresholds.py deleted file mode 100644 index 808bc644a..000000000 --- a/straxen/legacy/hitfinder_thresholds.py +++ /dev/null @@ -1,294 +0,0 @@ -import numpy as np - -import strax -import straxen - -export, __all__ = strax.exporter() - - -@export -def hit_min_amplitude(model): - """Return hitfiner height threshold to use in processing. - - :param model: Model name (str), or int to use a uniform threshold, or array/tuple or thresholds - to use. 
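- For example (illustrative): hit_min_amplitude(15) returns a uniform 15 ADC
- threshold for every TPC PMT, while hit_min_amplitude("XENON1T_SR1") returns
- the hard-coded per-channel SR1 thresholds below.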
- - """ - - if isinstance(model, (int, float)): - return np.ones(straxen.n_tpc_pmts, dtype=np.int16) * model - - if isinstance(model, (tuple, np.ndarray)): - return model - - if model == "XENON1T_SR1": - return np.array( - [ - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 16, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 18, - 15, - 15, - 15, - 15, - 15, - 54, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 16, - 15, - 15, - 35, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 18, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 17, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 17, - 15, - 15, - 26, - 88, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 16, - 20, - 22, - 15, - 16, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 17, - 15, - 15, - 15, - 15, - 15, - 17, - 16, - 15, - 15, - 15, - 15, - 15, - 15, - 17, - 16, - 15, - 15, - 15, - 15, - 15, - 15, - 45, - 15, - 15, - 15, - 15, - 25, - 15, - 15, - 15, - 17, - 15, - 18, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 24, - 15, - 17, - 15, - 15, - 18, - 15, - 15, - 15, - 34, - 15, - 15, - 18, - 15, - 15, - 39, - 16, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 18, - 15, - 20, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 15, - 16, - 15, - 15, - 19, - 15, - 15, - 15, - 15, - 15, - 15, - 17, - 15, - 15, - 18, - 15, - 15, - 15, - 15, - 15, - 17, - 15, - 18, - 15, - 15, - 15, - 17, - 15, - 18, - 15, - 35, - 15, - 15, - ], - dtype=np.int16, - ) - - if model == "pmt_commissioning_initial": - # ADC thresholds used for the initial PMT commissioning data - # (at least since April 28 2020, run 007305) - result = 15 * np.ones(straxen.n_tpc_pmts, dtype=np.int16) - result[453] = 30 - return result - - if model == "pmt_commissioning_initial_he": - # ADC thresholds used for the initial PMT commissioning data - # (at least since April 28 2020, run 007305) - result = 15 * np.ones( - straxen.contexts.xnt_common_config["channel_map"]["he"][1], dtype=np.int16 - ) - return result - - raise ValueError(f"Unknown ADC threshold model {model}") diff --git a/straxen/legacy/plugins_1t/__init__.py b/straxen/legacy/plugins_1t/__init__.py deleted file mode 100644 index cf20d791b..000000000 --- a/straxen/legacy/plugins_1t/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from . import event_info -from .event_info import * - -from . import pax_interface -from .pax_interface import * - -from . import peak_positions -from .peak_positions import * - -from . import x1t_cuts -from .x1t_cuts import * diff --git a/straxen/legacy/plugins_1t/event_info.py b/straxen/legacy/plugins_1t/event_info.py deleted file mode 100644 index b0cbea85a..000000000 --- a/straxen/legacy/plugins_1t/event_info.py +++ /dev/null @@ -1,22 +0,0 @@ -import strax - -export, __all__ = strax.exporter() - - -@export -class EventInfo1T(strax.MergeOnlyPlugin): - """Plugin which merges the information of all event data_kinds into a single data_type. 
- - This only uses 1T data-types as several event-plugins are nT only - - """ - - depends_on = ( - "event_basics", - "event_positions", - "corrected_areas", - "energy_estimates", - ) - provides = "event_info" - save_when = strax.SaveWhen.ALWAYS - __version__ = "0.0.1" diff --git a/straxen/legacy/plugins_1t/fake_daqreader.py b/straxen/legacy/plugins_1t/fake_daqreader.py deleted file mode 100644 index b689e727f..000000000 --- a/straxen/legacy/plugins_1t/fake_daqreader.py +++ /dev/null @@ -1,8 +0,0 @@ -from straxen.plugins.raw_records.daqreader import DAQReader -from immutabledict import immutabledict - - -class Fake1TDAQReader(DAQReader): - provides = ("raw_records", "raw_records_diagnostic", "raw_records_aqmon") - - data_kind = immutabledict(zip(provides, provides)) diff --git a/straxen/legacy/plugins_1t/pax_interface.py b/straxen/legacy/plugins_1t/pax_interface.py deleted file mode 100644 index 2e4e4ddb4..000000000 --- a/straxen/legacy/plugins_1t/pax_interface.py +++ /dev/null @@ -1,189 +0,0 @@ -import os -import glob -from typing import Tuple - -import numpy as np -import strax - -export, __all__ = strax.exporter() - - -def records_needed(pulse_length, samples_per_record): - """Return records needed to store pulse_length samples.""" - return np.ceil(pulse_length / samples_per_record).astype(np.int64) - - -@export -def pax_to_records( - input_filename, samples_per_record=strax.DEFAULT_RECORD_LENGTH, events_per_chunk=10 -): - """Return pulse records array from pax zip input_filename Convert pax .zip files to flat records - format This only works if you have pax installed in your strax environment, which is somewhat - tricky.""" - - # Monkeypatch matplotlib so pax is importable - # See https://github.com/XENON1T/pax/pull/734 - import matplotlib - - matplotlib._cntr = None - - from pax import core # Pax is not a dependency - - mypax = core.Processor( - "XENON1T", - config_dict=dict( - pax=dict( - look_for_config_in_runs_db=False, - plugin_group_names=["input"], - encoder_plugin=None, - input_name=input_filename, - ), - # Fast startup: skip loading big maps - WaveformSimulator=dict( - s1_light_yield_map="placeholder_map.json", - s2_light_yield_map="placeholder_map.json", - s1_patterns_file=None, - s2_patterns_file=None, - ), - ), - ) - - print(f"Starting conversion, {events_per_chunk} evt/chunk") - - results = [] - - def finish_results(): - nonlocal results - records = np.concatenate(results) - # In strax data, records are always stored - # sorted, baselined and integrated - records = strax.sort_by_time(records) - print("Returning %d records" % len(records)) - results = [] - return records - - for event in mypax.get_events(): - event = mypax.process_event(event) - - if not len(event.pulses): - # Triggerless pax data contains many empty events - # at the end. With the fixed events per chunk setting - # this can lead to empty files, which confuses strax. 
- continue - - pulse_lengths = np.array([p.length for p in event.pulses]) - - n_records_tot = records_needed(pulse_lengths, samples_per_record).sum() - records = np.zeros(n_records_tot, dtype=strax.raw_record_dtype(samples_per_record)) - output_record_index = 0 # Record offset in data - - for p in event.pulses: - n_records = records_needed(p.length, samples_per_record) - - for rec_i in range(n_records): - r = records[output_record_index] - r["time"] = event.start_time + p.left * 10 + rec_i * samples_per_record * 10 - r["channel"] = p.channel - r["pulse_length"] = p.length - r["record_i"] = rec_i - r["dt"] = 10 - - # How much are we storing in this record? - if rec_i != n_records - 1: - # There's more chunks coming, so we store a full chunk - n_store = samples_per_record - assert p.length > samples_per_record * (rec_i + 1) - else: - # Just enough to store the rest of the data - # Note it's not p.length % samples_per_record!!! - # (that would be zero if we have to store a full record) - n_store = p.length - samples_per_record * rec_i - - assert 0 <= n_store <= samples_per_record - r["length"] = n_store - - offset = rec_i * samples_per_record - r["data"][:n_store] = p.raw_data[offset : offset + n_store] - output_record_index += 1 - - results.append(records) - if len(results) >= events_per_chunk: - yield finish_results() - - mypax.shutdown() - - if len(results): - y = finish_results() - if len(y): - yield y - - -@export -@strax.takes_config( - strax.Option( - "pax_raw_dir", - default="/data/xenon/raw", - track=False, - infer_type=False, - help="Directory with raw pax datasets", - ), - strax.Option( - "stop_after_zips", - default=0, - track=False, - infer_type=False, - help="Convert only this many zip files. 0 = all.", - ), - strax.Option( - "events_per_chunk", - default=50, - track=False, - infer_type=False, - help="Number of events to yield per chunk", - ), - strax.Option( - "samples_per_record", - default=strax.DEFAULT_RECORD_LENGTH, - track=False, - infer_type=False, - help="Number of samples per record", - ), -) -class RecordsFromPax(strax.Plugin): - __version__ = "0.0.0" - provides = "raw_records" - data_kind = "raw_records" - compressor = "zstd" - depends_on: Tuple = tuple() - parallel = False - rechunk_on_save = False - - def infer_dtype(self): - return strax.raw_record_dtype(self.config["samples_per_record"]) - - def iter(self, *args, **kwargs): - if not os.path.exists(self.config["pax_raw_dir"]): - raise FileNotFoundError(self.config["pax_raw_dir"]) - input_dir = os.path.join(self.config["pax_raw_dir"], self.run_id) - pax_files = sorted(glob.glob(input_dir + "/XENON*.zip")) - pax_sizes = np.array([os.path.getsize(x) for x in pax_files]) - print(f"Found {len(pax_files)} files, {pax_sizes.sum() / 1e9:.2f} GB") - last_endtime = 0 - - for file_i, in_fn in enumerate(pax_files): - if self.config["stop_after_zips"] and file_i >= self.config["stop_after_zips"]: - break - for records in pax_to_records( - in_fn, - samples_per_record=self.config["samples_per_record"], - events_per_chunk=self.config["events_per_chunk"], - ): - if not len(records): - continue - if last_endtime == 0: - last_endtime = records[0]["time"] - new_endtime = strax.endtime(records).max() - - yield self.chunk(start=last_endtime, end=new_endtime, data=records) - - last_endtime = new_endtime diff --git a/straxen/legacy/plugins_1t/peak_positions.py b/straxen/legacy/plugins_1t/peak_positions.py deleted file mode 100644 index fa1355e77..000000000 --- a/straxen/legacy/plugins_1t/peak_positions.py +++ /dev/null @@ -1,117 +0,0 @@ 
-import strax -import straxen -from straxen import pax_file, first_sr1_run -from straxen.common import get_resource -import tempfile -import numpy as np -import json -import os - -export, __all__ = strax.exporter() - - -@export -@strax.takes_config( - strax.Option( - "nn_architecture", - infer_type=False, - help="Path to JSON of neural net architecture", - default_by_run=[ - (0, pax_file("XENON1T_tensorflow_nn_pos_20171217_sr0.json")), - ( - first_sr1_run, - straxen.aux_repo + "3548132b55f81a43654dba5141366041e1daaf01/strax_files/" - "XENON1T_tensorflow_nn_pos_20171217_sr1_reformatted.json", - ), - ], - ), # noqa - strax.Option( - "nn_weights", - infer_type=False, - help="Path to HDF5 of neural net weights", - default_by_run=[ - (0, pax_file("XENON1T_tensorflow_nn_pos_weights_20171217_sr0.h5")), - (first_sr1_run, pax_file("XENON1T_tensorflow_nn_pos_weights_20171217_sr1.h5")), - ], - ), # noqa - strax.Option( - "min_reconstruction_area", - help="Skip reconstruction if area (PE) is less than this", - default=10, - infer_type=False, - ), - strax.Option( - "n_top_pmts", default=straxen.n_top_pmts, infer_type=False, help="Number of top PMTs" - ), -) -class PeakPositions1T(strax.Plugin): - """Compute the S2 (x,y)-position based on a neural net.""" - - dtype = [ - ("x", np.float32, "Reconstructed S2 X position (cm), uncorrected"), - ("y", np.float32, "Reconstructed S2 Y position (cm), uncorrected"), - ] + strax.time_fields - depends_on = "peaks" - provides = "peak_positions" - - # Parallelization doesn't seem to make it go faster - # Is there much pure-python stuff in tensorflow? - # Process-level paralellization might work, but you'd have to do setup - # in each process, which probably negates the benefits, - # except for huge chunks - parallel = False - - __version__ = "0.1.1" - - def setup(self): - import tensorflow as tf - - keras = tf.keras - nn_conf = get_resource(self.config["nn_architecture"], fmt="json") - # badPMTList was inserted by a very clever person into the keras json - # file. Let's delete it to prevent future keras versions from crashing. - # Do NOT try `del nn_conf['badPMTList']`! See get_resource docstring - # for the gruesome details. - bad_pmts = nn_conf["badPMTList"] - nn = keras.models.model_from_json( - json.dumps({k: v for k, v in nn_conf.items() if k != "badPMTList"}) - ) - self.pmt_mask = ~np.in1d(np.arange(self.config["n_top_pmts"]), bad_pmts) - - # Keras needs a file to load its weights. We can't put the load - # inside the context, then it would break on Windows, - # because there temporary files cannot be opened again. 
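- # (delete=False keeps the weights file on disk after the with-block closes;
- # it is removed by hand below once keras has loaded it.)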
- with tempfile.NamedTemporaryFile(delete=False) as f: - f.write(get_resource(self.config["nn_weights"], fmt="binary")) - fname = f.name - nn.load_weights(fname) - os.remove(fname) - self.nn = nn - - def compute(self, peaks): - result = np.ones(len(peaks), dtype=self.dtype) - result["time"], result["endtime"] = peaks["time"], strax.endtime(peaks) - result["x"] *= float("nan") - result["y"] *= float("nan") - - # Keep large peaks only - peak_mask = peaks["area"] > self.config["min_reconstruction_area"] - if not np.sum(peak_mask): - # Nothing to do, and .predict crashes on empty arrays - return result - - # Input: normalized hitpatterns in good top PMTs - _in = peaks["area_per_channel"][peak_mask, :] - _in = _in[:, : self.config["n_top_pmts"]][:, self.pmt_mask] - with np.errstate(divide="ignore", invalid="ignore"): - _in /= _in.sum(axis=1).reshape(-1, 1) - - # Output: positions in mm (unfortunately), so convert to cm - _out = self.nn.predict(_in) / 10 - - # Set output in valid rows. Do NOT try result[peak_mask]['x'] - # unless you want all NaN positions (boolean masks make a copy unless - # they are used as the last index) - result["x"][peak_mask] = _out[:, 0] - result["y"][peak_mask] = _out[:, 1] - return result diff --git a/straxen/legacy/plugins_1t/x1t_cuts.py b/straxen/legacy/plugins_1t/x1t_cuts.py deleted file mode 100644 index 7b3a8ab58..000000000 --- a/straxen/legacy/plugins_1t/x1t_cuts.py +++ /dev/null @@ -1,292 +0,0 @@ -"""XENON1T cuts. - -How to apply: - - First register the plugins: - st.register_all(straxen.plugins.x1t_cuts) - - Load events and the cuts you want to apply: - events = st.get_array(run_id, - targets=('event_info', 'cut_s2_width', 'cut_s2_threshold')) - - Apply the selection of events that pass the cut like: - selected_events = events[events['cut_s2_threshold'] == True] - -""" - -import numpy as np -from scipy.stats import chi2 -import strax - -from straxen import units - -export, __all__ = strax.exporter() - - -class S2Width(strax.CutPlugin): - """S2 Width cut based on diffusion model The S2 width cut compares the S2 width to what we could - expect based on its depth in the detector. - - The inputs to this are the drift velocity and the diffusion constant. - The allowed variation in S2 width is greater at low - energy (since it is fluctuating statistically) Ref: (arXiv:1102.2865) - It should be applicable to data regardless of if it ER or NR; - above cS2 = 1e5 pe ERs the acceptance will go down due to track length effects. 
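- (For reference, the width model below is plain diffusion broadening: the
- expected width is sigma(t_drift) = sqrt(2 * D * (t_drift - t_gate)) / v_drift,
- with D = 29.35 cm^2/s, v_drift = 1.335 um/ns and t_gate = 1.6 us; see
- s2_width_model.)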
- around S2 = 1e5 pe there are beta-gamma merged peaks from Pb214 that extends the S2 width - Tune the diffusion model parameters based on fax data according to note: - https://xe1t-wiki.lngs.infn.it/doku.php?id=xenon:xenon1t:sim:notes:tzhu:width_cut_tuning#toy_fax_simulation - Contact: Tianyu , Yuehuan , Jelle - ported from lax.sciencerun1.py - - """ - - depends_on = "event_info" - provides = "cut_s2_width" - cut_name = "cut_s2_width" - cut_description = "S2 Width cut" - - __version__ = 1.3 - - diffusion_constant = 29.35 * ((units.cm) ** 2) / units.s - v_drift = 1.335 * (units.um) / units.ns - scg = 21.3 # s2_secondary_sc_gain in pax config - scw = 229.58 # s2_secondary_sc_width median - SigmaToR50 = 1.349 - DriftTimeFromGate = 1.6 * units.us - - def s2_width_model(self, drift_time): - """Diffusion model.""" - return np.sqrt( - 2 * self.diffusion_constant * (drift_time - self.DriftTimeFromGate) / self.v_drift**2 - ) - - def nElectron(self, events): - return np.clip(events["s2_area"], 0, 5000) / self.scg - - def normWidth(self, events): - return ( - np.square(events["s2_range_50p_area"] / self.SigmaToR50) - np.square(self.scw) - ) / np.square(self.s2_width_model(events["drift_time"])) - - def logpdf(self, events): - return chi2.logpdf( - self.normWidth(events) * (self.nElectron(events) - 1), self.nElectron(events) - ) - - def cut_by(self, events): - return np.all([self.logpdf(events) > -14], axis=0) - - -class S1SingleScatter(strax.CutPlugin): - """Requires only one valid interaction between the largest S2, and any S1 recorded before it. - - The S1 cut checks that any possible secondary S1s recorded in a waveform, could not have also - produced a valid interaction with the primary S2. To check whether an interaction between the - second largest S1 and the largest S2 is valid, we use the S2Width cut. If the event would pass - the S2Width cut, a valid second interaction exists, and we may have mis-identified which S1 to - pair with the primary S2. Therefore we cut this event. If it fails the S2Width cut the event is - not removed. - Current version is developed on calibration data (pax v6.8.0). It is described in this note: - https://xecluster.lngs.infn.it/dokuwiki/doku.php?id=xenon:xenon1t:jacques:s1_single_scatter_cut_sr1 - It should be applicable to data regardless whether it is ER or NR. - Contact: Jacques Pienaar, - ported from lax.sciencerun1.py - - """ - - depends_on = "event_info" - provides = "cut_s1_single_scatter" - cut_name = "cut_s1_single_scatter" - cut_description = "S1 Single Scatter cut" - s2width = S2Width - - __version__ = 1.3 - - def cut_by(self, events): - mask = events["alt_s1_interaction_drift_time"] > self.s2width.DriftTimeFromGate - alt_n_electron = np.clip(events[mask]["s2_area"], 0, 5000) / self.s2width.scg - - # Alternate S1 relative width - alt_rel_width = np.square( - events[mask]["s2_range_50p_area"] / self.s2width.SigmaToR50 - ) - np.square(self.s2width.scw) - alt_rel_width /= np.square( - self.s2width.s2_width_model(self.s2width, events[mask]["alt_s1_interaction_drift_time"]) - ) - - alt_interaction_passes = ( - chi2.logpdf(alt_rel_width * (alt_n_electron - 1), alt_n_electron) > -20 - ) - - return np.all([True ^ alt_interaction_passes], axis=0) - - -class S2SingleScatter(strax.CutPlugin): - """Check that largest other S2 area is smaller than some bound. - - The single scatter is to cut an event if its largest_other_s2 is too large. 
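- (The acceptance region below, other_s2_bound, is two straight lines in
- s2_area, 0.00832 * s2 + 72.3 and 0.03 * s2 - 109, blended by logistic
- weights centred at s2 = 23300 PE.)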
- As the largest_other_s2 takes a greater value when they originated from some real scatters - in comparison, those from photo-ionization in single scatter cases would be smaller. - https://xecluster.lngs.infn.it/dokuwiki/doku.php?id=xenon:xenon1t:analysis:firstresults:cut:s2single - Contact: Tianyu Zhu - ported from lax.sciencerun1.py - - """ - - depends_on = "event_info" - provides = "cut_s2_single_scatter" - cut_name = "cut_s2_single_scatter" - cut_description = "S2 Single Scatter cut" - __version__ = 4.3 - - @classmethod - def other_s2_bound(cls, s2_area): - rescaled_s2_0 = s2_area * 0.00832 + 72.3 - rescaled_s2_1 = s2_area * 0.03 - 109 - - another_term_0 = 1 / (np.exp((s2_area - 23300) * 5.91e-4) + 1) - another_term_1 = 1 / (np.exp((23300 - s2_area) * 5.91e-4) + 1) - - return rescaled_s2_0 * another_term_0 + rescaled_s2_1 * another_term_1 - - def cut_by(self, events): - largest_other_s2_is_nan = np.isnan(events["alt_s2_area"]) - arr = np.all( - [ - largest_other_s2_is_nan - | (events["alt_s2_area"] < self.other_s2_bound(events["s2_area"])) - ], - axis=0, - ) - return arr - - -class S2Threshold(strax.CutPlugin): - """The S2 energy at which the trigger is perfectly efficient. - - See: https://xecluster.lngs.infn.it/dokuwiki/doku.php?id=xenon:xenon1t:analysis:firstresults:daqtriggerpaxefficiency # noqa - Contact: Jelle Aalbers - ported from lax.sciencerun1.py - - """ - - depends_on = "event_info" - provides = "cut_s2_threshold" - cut_name = "cut_s2_threshold" - cut_description = "s2 must be larger then 200 PE" - __version__ = 1.4 - - def cut_by(self, events): - return np.all([events["s2_area"] > 200], axis=0) - - -class S2AreaFractionTop(strax.CutPlugin): - """Cuts events with an unusual fraction of S2 on top array. - - Primarily cuts gas events with a particularly large S2 AFT, also targets some - strange / junk / other events with a low AFT. - This cut has been checked on S2 ranges between 0 and 50 000 pe. 
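- (Both limits below share the form a + b * s2 + c / log(s2); an event is
- kept while its s2_area_fraction_top lies strictly between
- lower_limit_s2_aft and upper_limit_s2_aft.)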
- Described in the note at: xenon:xenon1t:analysis:firstresults:s2_aft_cut_summary - Contact: Adam Brown , - ported from lax.sciencerun1.py - - """ - - depends_on = "event_info" - provides = "cut_s2_area_fraction_top" - cut_name = "cut_s2_area_fraction_top" - cut_description = "Cut on S2 AFT" - - __version__ = 1.3 - - def upper_limit_s2_aft(self, s2): - return 0.6177399420527526 + 3.713166211522462e-08 * s2 + 0.5460484265254656 / np.log(s2) - - def lower_limit_s2_aft(self, s2): - return 0.6648160611018054 - 2.590402853814859e-07 * s2 - 0.8531029789184852 / np.log(s2) - - def cut_by(self, events): - arr = np.all( - [ - events["s2_area_fraction_top"] < self.upper_limit_s2_aft(events["s2_area"]), - events["s2_area_fraction_top"] > self.lower_limit_s2_aft(events["s2_area"]), - ], - axis=0, - ) - return arr - - -class FiducialCylinder1T(strax.CutPlugin): - """Implementation of fiducial volume cylinder 1T, ported from lax.sciencerun0.py.""" - - depends_on = "event_positions" - provides = "cut_fiducial_cylinder_1t" - cut_name = "cut_fiducial_cylinder_1t" - cut_description = "One tonne fiducial cylinder" - - __version__ = 1.3 - - def cut_by(self, events): - return np.all( - [ - (-92.9 < events["z"]), - (-9 > events["z"]), - (36.94 > np.sqrt(events["x"] ** 2 + events["y"] ** 2)), - ], - axis=0, - ) - - -class S1MaxPMT(strax.LoopPlugin): - """Removes events where the largest hit in S1 is too large port from lax.sciencerun0.py.""" - - depends_on = ("events", "event_basics", "peak_basics") - dtype = [("cut_s1_max_pmt", np.bool_, "S1 max PMT cut")] + strax.time_fields - provides = "cut_s1_max_pmt" - __version__ = 1.3 - - def compute_loop(self, event, peaks): - ret = dict(cut_s1_max_pmt=True) - ret["time"] = event["time"] - ret["endtime"] = event["endtime"] - if not len(peaks) or np.isnan(event["s1_index"]): - return ret - - peak = peaks[event["s1_index"]] - max_channel = peak["max_pmt_area"] - ret["cut_s1_max_pmt"] = max_channel < 0.052 * event["s1_area"] + 4.15 - return ret - - -class S1LowEnergyRange(strax.CutPlugin): - """Pass only events with cs1<200.""" - - depends_on = ("events", "corrected_areas") - provides = "cut_s1_low_energy_range" - cut_name = "cut_s1_low_energy_range" - cut_description = "Event under 200pe" - - __version__ = 1.3 - - def cut_by(self, events): - return np.all([events["cs1"] < 200], axis=0) - - -class SR1Cuts(strax.MergeOnlyPlugin): - depends_on = ("cut_fiducial_cylinder_1t", "cut_s1_max_pmt", "cut_s1_low_energy_range") - save_when = strax.SaveWhen.ALWAYS - - -class FiducialEvents(strax.Plugin): - depends_on = ("event_info", "cut_fiducial_cylinder_1t") - data_kind = "fiducial_events" - __version__ = "0.0.1" - - def infer_dtype(self): - dtype = [self.deps[d].dtype_for(d) for d in self.depends_on] - dtype = strax.merged_dtype(dtype) - return dtype - - def compute(self, events): - fiducial_events = events[events["cut_fiducial_cylinder_1t"]] - result = np.zeros(len(fiducial_events), dtype=self.dtype) - # Cast the fiducual events dtype into the expected format - strax.copy_to_buffer(fiducial_events, result, "_fiducial_copy") - return result diff --git a/straxen/legacy/xenon1t_url_configs.py b/straxen/legacy/xenon1t_url_configs.py deleted file mode 100644 index e1b9e1335..000000000 --- a/straxen/legacy/xenon1t_url_configs.py +++ /dev/null @@ -1,354 +0,0 @@ -"""Support for 1T per_run_default options.""" - -import strax -import straxen -from straxen.common import get_resource, pax_file, first_sr1_run -from straxen.itp_map import InterpolatingMap -from straxen.config.url_config import 
URLConfig -import numpy as np - -export, __all__ = strax.exporter() - -FIXED_TO_PE = { - "to_pe_placeholder": np.repeat(0.0085, straxen.n_tpc_pmts), - "1T_to_pe_placeholder": np.array( - [ - 0.007, - 0.0, - 0.0, - 0.008, - 0.004, - 0.008, - 0.004, - 0.008, - 0.007, - 0.005, - 0.007, - 0.006, - 0.0, - 0.006, - 0.008, - 0.007, - 0.006, - 0.009, - 0.007, - 0.007, - 0.007, - 0.012, - 0.004, - 0.008, - 0.005, - 0.008, - 0.0, - 0.0, - 0.007, - 0.007, - 0.004, - 0.0, - 0.004, - 0.007, - 0.0, - 0.005, - 0.007, - 0.007, - 0.005, - 0.005, - 0.008, - 0.006, - 0.005, - 0.007, - 0.006, - 0.007, - 0.008, - 0.005, - 0.008, - 0.008, - 0.005, - 0.005, - 0.007, - 0.008, - 0.005, - 0.009, - 0.004, - 0.005, - 0.01, - 0.008, - 0.006, - 0.016, - 0.0, - 0.005, - 0.005, - 0.0, - 0.01, - 0.008, - 0.004, - 0.006, - 0.005, - 0.0, - 0.008, - 0.0, - 0.004, - 0.004, - 0.006, - 0.005, - 0.012, - 0.0, - 0.005, - 0.004, - 0.004, - 0.008, - 0.007, - 0.012, - 0.0, - 0.0, - 0.0, - 0.007, - 0.007, - 0.0, - 0.005, - 0.008, - 0.006, - 0.004, - 0.004, - 0.006, - 0.008, - 0.008, - 0.008, - 0.006, - 0.0, - 0.007, - 0.005, - 0.005, - 0.005, - 0.007, - 0.004, - 0.008, - 0.007, - 0.008, - 0.008, - 0.006, - 0.006, - 0.01, - 0.005, - 0.008, - 0.0, - 0.012, - 0.007, - 0.004, - 0.008, - 0.007, - 0.007, - 0.008, - 0.003, - 0.004, - 0.007, - 0.006, - 0.0, - 0.005, - 0.004, - 0.005, - 0.0, - 0.0, - 0.004, - 0.0, - 0.004, - 0.0, - 0.004, - 0.0, - 0.011, - 0.005, - 0.006, - 0.005, - 0.004, - 0.004, - 0.0, - 0.007, - 0.0, - 0.004, - 0.0, - 0.005, - 0.006, - 0.007, - 0.005, - 0.008, - 0.004, - 0.006, - 0.008, - 0.007, - 0.0, - 0.008, - 0.008, - 0.007, - 0.007, - 0.0, - 0.008, - 0.004, - 0.004, - 0.005, - 0.004, - 0.007, - 0.008, - 0.004, - 0.006, - 0.006, - 0.0, - 0.007, - 0.004, - 0.004, - 0.005, - 0.0, - 0.008, - 0.004, - 0.004, - 0.004, - 0.008, - 0.008, - 0.0, - 0.006, - 0.005, - 0.004, - 0.005, - 0.008, - 0.008, - 0.008, - 0.0, - 0.005, - 0.008, - 0.0, - 0.008, - 0.0, - 0.004, - 0.012, - 0.0, - 0.005, - 0.007, - 0.009, - 0.005, - 0.004, - 0.004, - 0.0, - 0.0, - 0.004, - 0.004, - 0.011, - 0.004, - 0.004, - 0.007, - 0.004, - 0.005, - 0.004, - 0.005, - 0.007, - 0.004, - 0.006, - 0.006, - 0.004, - 0.008, - 0.005, - 0.007, - 0.007, - 0.0, - 0.004, - 0.007, - 0.008, - 0.004, - 0.0, - 0.007, - 0.004, - 0.004, - 0.004, - 0.0, - 0.004, - 0.005, - 0.004, - ] - ), - # Gains which will preserve all areas in adc counts. - # Useful for debugging and tests. 
- "adc_tpc": np.ones(straxen.n_tpc_pmts), - "adc_mv": np.ones(straxen.n_mveto_pmts), - "adc_nv": np.ones(straxen.n_nveto_pmts), -} - -RUN_MAPPINGS = { - "xenon1t_sr0_sr1": [ - (0, pax_file("XENON1T_FDC_SR0_data_driven_3d_correction_tf_nn_v0.json.gz")), # noqa - ( - first_sr1_run, - pax_file( - "XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part1_v1.json.gz" - ), - ), - # noqa - ( - 170411_0611, - pax_file( - "XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part2_v1.json.gz" - ), - ), - # noqa - ( - 170704_0556, - pax_file( - "XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part3_v1.json.gz" - ), - ), - # noqa - ( - 170925_0622, - pax_file( - "XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part4_v1.json.gz" - ), - ), - # noqa - (1_000_000_000_000, None), # noqa - ] -} - - -@URLConfig.register("legacy-to-pe") -def get_fixed_pe(name: str): - """Return a fixed value for a given name.""" - return FIXED_TO_PE[name] - - -@URLConfig.register("legacy-thresholds") -def get_thresholds(model: str): - """Return a fixed value for a given model.""" - - return straxen.legacy.hit_min_amplitude(model) - - -@URLConfig.register("legacy-fdc") -def get_legacy_fdc(name, run_id=None): - if run_id is None: - raise ValueError("Must provide run_id to get legacy fdc") - - if isinstance(run_id, str): - run_id = int(run_id.replace("_", "")) - - if name not in RUN_MAPPINGS: - raise ValueError(f"Unknown legacy fdc name {name}") - - mapping = RUN_MAPPINGS[name] - - for (start_run, url), (end_run, _) in zip(mapping[:-1], mapping[1:]): - if run_id >= start_run and run_id < end_run: - break - else: - raise ValueError(f"No legacy fdc for run {run_id}") - - if url is None: - raise ValueError(f"No legacy fdc for run {run_id}") - - return InterpolatingMap(get_resource(url, fmt="binary")) - - -@URLConfig.register("legacy-z_bias") -def get_z_bias(offset: str): - """Return a lambda function return offset as placeholder.""" - - def fake_z_bias(rz, **kwargs): - return np.zeros(len(rz)) * int(offset) - - return fake_z_bias diff --git a/straxen/scripts/fake_daq.py b/straxen/scripts/fake_daq.py deleted file mode 100755 index fde14c164..000000000 --- a/straxen/scripts/fake_daq.py +++ /dev/null @@ -1,249 +0,0 @@ -import argparse -import os -from copy import copy -import shutil -import time - -import numpy as np -import strax -import straxen - -parser = argparse.ArgumentParser(description="Fake DAQ to test XENONnT eventbuilder prototype") - -parser.add_argument( - "--input_path", - default="./test_input_data", - help="Directory with input data (used to fake new run)", -) -parser.add_argument("--input_run", default="180215_1029", help="Run id of input data") -parser.add_argument("--detector", default="tpc", help="Specifies the detector type (tpc/nveto).") -parser.add_argument("--output", default="./from_fake_daq", help="Output directory") -parser.add_argument( - "--output_run", default=None, help="Output run id to use. If omitted, use same as input" -) -parser.add_argument("--compressor", default="lz4", help="Compressor to use for live records") -parser.add_argument( - "--rate", - default=0, - type=int, - help="Output rate in MBraw/sec. 
If omitted, emit data as fast as possible", -) -parser.add_argument( - "--realtime", action="store_true", help="Emit data at same pace as it was acquired" -) -parser.add_argument("--shm", action="store_true", help="Operate in /dev/shm") -parser.add_argument( - "--no_run_metadata", - action="store_true", - help=( - "Produce Fake DAQ data even if you have lost the run metadata. " - "Some useful sanity checks will be disabled." - ), -) -parser.add_argument("--chunk_duration", default=2.0, type=float, help="Chunk size in sec (not ns)") -parser.add_argument( - "--stop_after", - default=float("inf"), - type=float, - help="Stop after this much MB written/loaded in", -) -parser.add_argument( - "--sync_chunk_duration", - default=0.2, - type=float, - help="Synchronization chunk size in sec (not ns)", -) -args = parser.parse_args() - -if args.shm: - output_dir = "/dev/shm/from_fake_daq" -else: - output_dir = args.output -output_run = args.output_run if args.output_run else args.input_run - - -def main(): - global output_dir - - # Get context for reading - st = strax.Context( - storage=strax.DataDirectory(args.input_path, provide_run_metadata=True, readonly=True), - register=straxen.plugins.pax_interface.RecordsFromPax, - config=straxen.contexts.x1t_common_config, - **straxen.contexts.common_opts, - ) - - n_readout_threads = 8 - if args.detector == "tpc": - n_channels = st.config["n_tpc_pmts"] - elif args.detector == "nveto": - n_channels = st.config["n_nveto_pmts"] - else: - raise ValueError("Detector type not supported.") - channels_per_reader = np.ceil(n_channels / n_readout_threads) - - if os.path.exists(output_dir): - shutil.rmtree(output_dir) - os.makedirs(output_dir) - - # Copy over metadata - run_start = None - if not args.no_run_metadata: - run_md = st.run_metadata(args.input_run) - st2 = st.new_context(storage=strax.DataDirectory(output_dir), replace=True) - run_md.setdefault("strax_defaults", dict()) - run_md["strax_defaults"]["n_readout_threads"] = n_readout_threads - run_md["strax_defaults"]["compressor"] = args.compressor - st2.storage[0].write_run_metadata(output_run, run_md) - del st2 - run_start = int(int(1e9) * int(run_md["start"].timestamp())) - - if args.rate: - print("Preparing payload data: slurping into memory") - - chunk_sizes = [] - chunk_data_compressed = [] - - if args.detector == "tpc": - source = st.get_iter(args.input_run, "raw_records") - sampling = 10 # Hardcoded these numbers since records might be empty. - elif args.detector == "nveto": - source = st.get_iter(args.input_run, "nveto_pre_raw_records") - sampling = 2 - else: - raise ValueError("Detector type not supported.") - - buffer: strax.Chunk = next(source) - payload_t_start = payload_t_end = buffer.start - input_exhausted = False - - chunk_i = -1 - while len(buffer) or not input_exhausted: - chunk_i += 1 - desired_end = payload_t_end + int( # endtime of last chunk - int(1e9) * (args.sync_chunk_duration if chunk_i % 2 else args.chunk_duration) - ) - while buffer.end < desired_end: - try: - buffer = strax.Chunk.concatenate([buffer, next(source)]) - except StopIteration: - input_exhausted = True - break - t_0 = time.time() - - # NB: this is not a regular strax chunk split! 
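- # (A plain boolean mask on record start times is enough for the fake DAQ:
- # records straddling desired_end are not re-split at the boundary, as a
- # real strax chunk split would do.)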
- keep = buffer.data["time"] < desired_end - records = buffer.data[keep] - buffer.data = buffer.data[~keep] - buffer.start = 0 # We don't use buffer.start anymore, fortunately - payload_t_end = desired_end - - # Restore baseline, clear metadata, fix time - if run_start is None: - run_start = records["time"][0] - records["time"] = records["time"] - run_start - assert np.all(records["time"] % sampling == 0) - - chunk_sizes.append(records.nbytes) - result = [] - for reader_i in range(n_readout_threads): - first_channel = reader_i * channels_per_reader - r = records[ - (records["channel"] >= first_channel) - & (records["channel"] < first_channel + channels_per_reader) - ] - r = strax.io.COMPRESSORS[args.compressor]["compress"](r) - result.append(r) - - if args.rate: - # Slurp into memory - chunk_data_compressed.append(result) - else: - # Simulate realtime DAQ / emit data immediately - # Cannot slurp in advance, else time would be offset. - write_chunk(chunk_i, result) - if chunk_i % 2 == 0: - dt = args.chunk_duration - else: - dt = args.sync_chunk_duration - - t_sleep = dt - (time.time() - t_0) - wrote_mb = chunk_sizes[chunk_i] / 1e6 - - print( - f"{chunk_i}: wrote {wrote_mb:.1f} MB_raw" - + (f", sleep for {t_sleep:.2f} s" if args.realtime else "") - ) - if args.realtime: - if t_sleep < 0: - if chunk_i % 2 == 0: - print("Fake DAQ too slow :-(") - else: - time.sleep(t_sleep) - - if sum(chunk_sizes) / 1e6 > args.stop_after: - # TODO: background thread does not terminate! - break - - if args.rate: - total_raw = sum(chunk_sizes) / 1e6 - total_comp = sum([len(y) for x in chunk_data_compressed for y in x]) / 1e6 - total_dt = (payload_t_end - payload_t_start) / int(1e9) - print( - f"Prepared {len(chunk_sizes)} chunks " - f"spanning {total_dt:.1f} sec, " - f"{total_raw:.2f} MB raw " - f"({total_comp:.2f} MB compressed)" - ) - if args.rate: - takes = total_raw / args.rate - else: - takes = total_dt - input(f"Press enter to start DAQ for {takes:.1f} sec") - - # Emit at fixed rate - for chunk_i, reader_data in enumerate(chunk_data_compressed): - t_0 = time.time() - - write_chunk(chunk_i, reader_data) - - wrote_mb = chunk_sizes[chunk_i] / 1e6 - t_sleep = wrote_mb / args.rate - (time.time() - t_0) - - print(f"{chunk_i}: wrote {wrote_mb:.1f} MB_raw, sleep for {t_sleep:.2f} s") - if t_sleep < 0: - if chunk_i % 2 == 0: - print("Fake DAQ too slow :-(") - else: - time.sleep(t_sleep) - - end_dir = output_dir + "/THE_END" - os.makedirs(end_dir) - for i in range(n_readout_threads): - with open(end_dir + f"/{i:06d}", mode="w") as f: - f.write("That's all folks!") - - print("Fake DAQ done") - - -def write_to_dir(c, outdir): - tempdir = outdir + "_temp" - os.makedirs(tempdir) - for reader_i, x in enumerate(c): - with open(f"{tempdir}/reader_{reader_i}", "wb") as f: - f.write(copy(x)) # Copy needed for honest shm writing? 
- os.rename(tempdir, outdir) - - -def write_chunk(chunk_i, reader_data): - big_chunk_i = chunk_i // 2 - - if chunk_i % 2 != 0: - write_to_dir(reader_data, output_dir + "/%06d_post" % big_chunk_i) - write_to_dir(reader_data, output_dir + "/%06d_pre" % (big_chunk_i + 1)) - else: - write_to_dir(reader_data, output_dir + "/%06d" % big_chunk_i) - - -if __name__ == "__main__": - main() diff --git a/straxen/test_utils.py b/straxen/test_utils.py index e7f62effb..45f927b8b 100644 --- a/straxen/test_utils.py +++ b/straxen/test_utils.py @@ -214,9 +214,6 @@ def compute(self, chunk_i): rr = np.copy(r) # Add detector specific channel offset: for key, channel_key in self.channel_map_keys.items(): - if channel_key not in self.config["channel_map"]: - # Channel map for 1T is different. - continue if p.endswith(key): first_channel, last_channel = self.config["channel_map"][channel_key] rr["channel"] += first_channel diff --git a/tests/test_1T_plugins.py b/tests/test_1T_plugins.py deleted file mode 100644 index 7f44ff7de..000000000 --- a/tests/test_1T_plugins.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Test for 1T plugins, nT plugins are tested in the ./plugins directory.""" - -import tempfile -import strax -import straxen -from straxen.test_utils import DummyRawRecords -from immutabledict import immutabledict - -test_run_id_1T = "180423_1021" - -testing_config_1T = dict( - hev_gain_model="legacy-to-pe://1T_to_pe_placeholder", - gain_model="legacy-to-pe://1T_to_pe_placeholder", - elife=1e6, - electron_drift_velocity=1e-4, - electron_drift_time_gate=1700, -) - - -def _run_plugins(st, make_all=False, run_id=test_run_id_1T, **process_kwargs): - """Try all plugins (except the DAQReader) for a given context (st) to see if we can really push - some (empty) data from it and don't have any nasty problems like that we are referring to some - non existant dali folder.""" - - with tempfile.TemporaryDirectory() as temp_dir: - st.storage = [strax.DataDirectory(temp_dir)] - # As we use a temporary directory we should have a clean start - assert not st.is_stored(run_id, "raw_records"), "have RR???" - - if not make_all: - return - - end_targets = tuple(set(st._get_end_targets(st._plugin_class_registry))) - if st.context_config["allow_multiprocess"]: - st.make( - run_id, - end_targets, - allow_multiple=True, - processor="threaded_mailbox", - **process_kwargs, - ) - else: - for data_type in end_targets: - st.make(run_id, data_type) - # Now make sure we can get some data for all plugins - all_datatypes = set(st._plugin_class_registry.keys()) - for data_type in all_datatypes: - savewhen = st._plugin_class_registry[data_type].save_when - if isinstance(savewhen, (dict, immutabledict)): - savewhen = savewhen[data_type] - should_be_stored = savewhen == strax.SaveWhen.ALWAYS - if should_be_stored: - is_stored = st.is_stored(run_id, data_type) - assert is_stored, f"{data_type} did not save correctly!" 
- print("Wonderful all plugins work (= at least they don't fail), bye bye") - - -def _update_context(st, max_workers): - # Ignore strax-internal warnings - st.set_context_config({"free_options": tuple(st.config.keys()), "forbid_creation_of": ()}) - - st.register(DummyRawRecords) - st.set_config(testing_config_1T) - - if max_workers - 1: - st.set_context_config( - { - "allow_multiprocess": True, - "allow_lazy": False, - "timeout": 120, # we don't want to build travis for ever - "allow_shm": strax.processor.SHMExecutor is not None, - } - ) - - -def _test_child_options(st, run_id): - """Test which checks if child options are handled correctly.""" - # Register all used plugins - plugins = [] - already_seen = [] - for data_type in st._plugin_class_registry.keys(): - if data_type in already_seen or data_type in straxen.DAQReader.provides: - continue - - p = st.get_single_plugin(run_id, data_type) - plugins.append(p) - already_seen += p.provides - - # Loop over all plugins and check if child options were propagated to the parent: - for p in plugins: - for option_name, option in p.takes_config.items(): - # Check if option is a child option: - if option.child_option: - # Get corresponding parent option. Do not have to test if - # parent option name is defined this is already done in strax - parent_name = option.parent_option_name - - # Now check if parent config was replaced with child: - t = p.config[parent_name] == p.config[option_name] - assert t, ( - f'This is strange the child option "{option_name}" was set to ' - f"{p.config[option_name]}, but the corresponding parent config" - f' "{parent_name}" has the value {p.config[parent_name]}. ' - f"Please check the options of {p.__class__.__name__} and if " - "it is a child plugin (child_plugin=True)!" - ) - - # Test if parent names were removed from the lineage: - t = parent_name in p.lineage[p.provides[-1]][2] - assert not t, ( - f'Found "{parent_name}" in the lineage of {p.__class__.__name__}. ' - f'This should not have happend since "{parent_name}" is a child of ' - f'"{option_name}"!' 
- ) - - -def test_1T(ncores=1): - st = straxen.contexts.xenon1t_dali() - _update_context(st, ncores) - st.register_all(straxen.legacy.plugins_1t.x1t_cuts) - for _plugin, _plugin_class in st._plugin_class_registry.items(): - if "cut" in str(_plugin).lower(): - _plugin_class.save_when = strax.SaveWhen.ALWAYS - - # Run the test - _run_plugins(st, make_all=True, max_workers=ncores, run_id=test_run_id_1T) - # set all the configs to be non-CMT - st.set_config(testing_config_1T) - _test_child_options(st, test_run_id_1T) - - print(st.context_config) - - -def test_1T_mc(): - # Run multicore - test_1T(2) diff --git a/tests/test_cmt.py b/tests/test_cmt.py index b11c67047..a8961ead0 100644 --- a/tests/test_cmt.py +++ b/tests/test_cmt.py @@ -4,9 +4,7 @@ import straxen import utilix import numpy as np -from .test_basics import test_run_id_1T from straxen.test_utils import nt_test_run_id as test_run_id_nT -from straxen.common import aux_repo import unittest @@ -21,19 +19,6 @@ def test_connect_to_db(): assert not df.empty, mes -@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!") -def test_1T_elife(): - """Test elife from CMT DB against historical data(aux file)""" - elife_conf = ("elife_xenon1t", "ONLINE", False) - elife_cmt = straxen.get_correction_from_cmt(test_run_id_1T, elife_conf) - elife_file = aux_repo + "3548132b55f81a43654dba5141366041e1daaf01/strax_files/elife.npy" - x = straxen.get_resource(elife_file, fmt="npy") - run_index = np.where(x["run_id"] == int(test_run_id_1T))[0] - elife = x[run_index[0]]["e_life"] - mes = "Elife values do not match. Please check" - assert elife_cmt == elife, mes - - @unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!") def test_cmt_conf_option(option="mlp_model", version="ONLINE", is_nT=True): """Test CMT conf options If wrong conf is passed it would raise an error accordingly.""" diff --git a/tests/test_contexts.py b/tests/test_contexts.py index 08283b50c..88d7dceb8 100644 --- a/tests/test_contexts.py +++ b/tests/test_contexts.py @@ -1,11 +1,8 @@ """For all of the context, do a quick check to see that we are able to search a field (i.e. can build the dependencies in the context correctly) See issue #233 and PR #236.""" -import os import unittest -import tempfile import straxen -from straxen.contexts import xenon1t_dali, xenon1t_led, fake_daq, demo from straxen.contexts import xenonnt_led, xenonnt_online, xenonnt @@ -84,42 +81,3 @@ def test_cmt_versions(): test = unittest.TestCase() # We should always work for one offline and the online version test.assertTrue(len(success_for) >= 2) - - -## -# XENON1T -## - - -def test_xenon1t_dali(): - st = xenon1t_dali() - st.search_field("time") - - -def test_demo(): - """Test the demo context. - - Since we download the folder to the current working directory, make sure we are in a tempfolder - where we can write the data to - - """ - with tempfile.TemporaryDirectory() as temp_dir: - try: - print("Temporary directory is ", temp_dir) - os.chdir(temp_dir) - st = demo() - st.search_field("time") - # On windows, you cannot delete the current process' - # working directory, so we have to chdir out first. 
- finally: - os.chdir("..") - - -def test_fake_daq(): - st = fake_daq() - st.search_field("time") - - -def test_xenon1t_led(): - st = xenon1t_led() - st.search_field("time") diff --git a/tests/test_mini_analyses.py b/tests/test_mini_analyses.py index 7c9822e73..f7325d713 100644 --- a/tests/test_mini_analyses.py +++ b/tests/test_mini_analyses.py @@ -15,14 +15,9 @@ def is_py310(): return platform.python_version_tuple()[:2] == ("3", "10") -def test_pmt_pos_1t(): - """Test if we can get the 1T PMT positions.""" - pandas.DataFrame(straxen.pmt_positions(True)) - - def test_pmt_pos_nt(): """Test if we can get the nT PMT positions.""" - pandas.DataFrame(straxen.pmt_positions(False)) + pandas.DataFrame(straxen.pmt_positions()) @unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!") @@ -101,7 +96,7 @@ def test_plot_waveform_deep(self): self.test_plot_waveform(deep=True) def test_plot_hit_pattern(self): - self.st.plot_hit_pattern(nt_test_run_id, time_within=self.first_peak, xenon1t=False) + self.st.plot_hit_pattern(nt_test_run_id, time_within=self.first_peak) def test_plot_records_matrix(self): self._st_attr_for_one_peak("plot_records_matrix") @@ -121,7 +116,6 @@ def test_event_display_simple(self): self.st.event_display( nt_test_run_id, time_within=self.first_event, - xenon1t=False, plot_all_positions=plot_all_positions, simple_layout=True, ) @@ -133,7 +127,6 @@ def test_single_event_plot(self): nt_test_run_id, events=self.st.get_array(nt_test_run_id, "events"), event_number=self.first_event["event_number"], - xenon1t=False, plot_all_positions=plot_all_positions, ) @@ -141,7 +134,6 @@ def test_event_display_interactive(self): self.st.event_display_interactive( nt_test_run_id, time_within=self.first_event, - xenon1t=False, ) def test_plot_peaks_aft_histogram(self): @@ -300,7 +292,6 @@ def test_interactive_display(self): fig = self.st.event_display_interactive( nt_test_run_id, time_within=self.first_event, - xenon1t=False, plot_record_matrix=True, ) save_as = "test_display.html" @@ -312,7 +303,6 @@ def test_interactive_display(self): st.event_display_interactive( nt_test_run_id, time_within=self.first_event, - xenon1t=False, plot_record_matrix=False, only_main_peaks=True, )
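
Note for downstream users: with the 1T contexts, plugins, and URL configs gone,
analysis code has to move to the nT equivalents this patch keeps. A minimal
migration sketch in Python, assuming only the public straxen API visible in
this diff (xenonnt_online, pmt_positions, and the mini-analyses without their
former xenon1t= keyword); run ids and database access are site-specific:

    import straxen

    # The 1T entry points (xenon1t_dali, xenon1t_led, fake_daq, demo) were
    # removed from straxen.contexts; build an nT context instead.
    # _database_init=False skips the run-DB setup, as the docs build in this
    # patch does; drop it if utilix credentials are configured.
    st = straxen.contexts.xenonnt_online(_database_init=False)

    # pmt_positions is now called without the former 1T/nT flag
    # (see the updated tests/test_mini_analyses.py above).
    positions = straxen.pmt_positions()

    # Mini-analyses likewise drop the xenon1t= keyword, e.g.
    #   st.plot_hit_pattern(run_id, time_within=peak)
    #   st.event_display_interactive(run_id, time_within=event)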