diff --git a/README.md b/README.md
index 2136a4f..a838cd1 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# usv-playpen v0.5.0
+# usv-playpen v0.6.0
diff --git a/setup.py b/setup.py
index e87a580..b9c4b9b 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
setup(
name='usv-playpen',
- version='0.5.0',
+ version='0.6.0',
author='@bartulem',
author_email='mimica.bartul@gmail.com',
classifiers=[
@@ -43,7 +43,7 @@
'requests==2.32.3',
'scipy==1.10.0',
'sleap-anipose==0.1.7',
- 'scikit-learn==1.5.0',
+ 'scikit-learn==1.5.2',
'soundfile==0.12.1',
'toml==0.10.2']
)
diff --git a/src/anipose_operations.py b/src/anipose_operations.py
index ffd3db8..452c956 100644
--- a/src/anipose_operations.py
+++ b/src/anipose_operations.py
@@ -331,7 +331,7 @@ def conduct_anipose_triangulation(self):
excluded_views=tuple(self.input_parameter_dict['conduct_anipose_triangulation']['excluded_views']),
fname=f"{self.session_root_joint_date_dir}{os.sep}{self.session_root_name}_points3d.h5",
disp_progress=self.input_parameter_dict['conduct_anipose_triangulation']['display_progress_bool'],
- reproj_error_threshold=self.input_parameter_dict['conduct_anipose_triangulation']['reprojection_error_loss'][0])
+ reproj_error_threshold=self.input_parameter_dict['conduct_anipose_triangulation']['reprojection_error_threshold'])
def translate_rotate_metric(self):
"""
diff --git a/src/synchronize_files.py b/src/synchronize_files.py
index 1829fb3..452ef5c 100644
--- a/src/synchronize_files.py
+++ b/src/synchronize_files.py
@@ -14,12 +14,13 @@
import pathlib
import pims
import shutil
+import subprocess
import numpy as np
from collections import Counter
from datetime import datetime
from numba import njit
+from scipy.io import wavfile
from file_loader import DataLoader
-from file_writer import DataWriter
from sync_regression import LinRegression
@pims.pipeline
@@ -79,6 +80,13 @@ class Synchronizer:
'current': {'21241563': {'LED_top': [317, 1247], 'LED_middle': [360, 1254], 'LED_bottom': [403, 1262]},
'21372315': {'LED_top': [507, 1267], 'LED_middle': [554, 1267], 'LED_bottom': [601, 1266]}}}
+ if os.name == 'nt':
+ command_addition = 'cmd /c '
+ shell_usage_bool = False
+ else:
+ command_addition = ''
+ shell_usage_bool = True
+
def __init__(self, root_directory=None, input_parameter_dict=None,
message_output=None, exp_settings_dict=None):
if input_parameter_dict is None:
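The new class-level attributes decide how every SoX subprocess later in this file is launched. A standalone sketch of the same platform split (a hypothetical helper, assuming `sox` is on the PATH; it is not part of the change set):

```python
import os
import subprocess

def launch_sox(sox_command: str, cwd: str) -> subprocess.Popen:
    """Mirror the command_addition/shell_usage_bool split above: on Windows
    the command is prefixed with 'cmd /c' and run with shell=False, while on
    POSIX the bare command string is handed to the default shell."""
    if os.name == 'nt':
        return subprocess.Popen(args=f'cmd /c {sox_command}', cwd=cwd, shell=False)
    return subprocess.Popen(args=sox_command, cwd=cwd, shell=True)
```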
@@ -651,9 +659,14 @@ def crop_wav_files_to_video(self):
"""
Description
----------
-    This method takes a (multi-)channel audio recording to find sequences of recorded
-    video frames in the LSB of the ch1 recording, and then crops the audio file to
+    This method takes a WAV audio recording, finds the sequences of recorded
+    video frames in the LSB of the triggerbox input channel, and crops the audio file to
    match the length from the beginning of the first to the end of the last video frame.
+
+    NB: If there are two audio recording devices and they are not synchronized, both
+    sets of audio files are cut to the length of the shorter one. The longer device's
+    files are tempo-adjusted with SoX to match the shorter duration, while their LSB
+    (camera trigger) signal is resampled separately and written back into the final audio files.
----------
Parameters
@@ -670,7 +683,7 @@ def crop_wav_files_to_video(self):
----------
"""
- self.message_output(f"Cropping WAV files started at: {datetime.now().hour:02d}:{datetime.now().minute:02d}.{datetime.now().second:02d}")
+ self.message_output(f"Cropping WAV files to video started at: {datetime.now().hour:02d}:{datetime.now().minute:02d}.{datetime.now().second:02d}")
QTest.qWait(1000)
# load info from camera_frame_count_dict
@@ -679,55 +692,144 @@ def crop_wav_files_to_video(self):
total_frame_number = camera_frame_count_dict['total_frame_number_least']
total_video_time = camera_frame_count_dict['total_video_time_least']
- # audio
+ # load audio channels receiving camera triggerbox input
wave_data_dict = DataLoader(input_parameter_dict={'wave_data_loc': [f"{self.root_directory}{os.sep}audio{os.sep}original"],
- 'load_wavefile_data': {'library': 'scipy', 'conditional_arg': []}}).load_wavefile_data()
+ 'load_wavefile_data': {'library': 'scipy',
+ 'conditional_arg': [f"_ch{self.input_parameter_dict['crop_wav_files_to_video']['ch_receiving_input']:02d}"]}}).load_wavefile_data()
- # determine device ID that gets camera frame trigger pulses
- device_id = self.input_parameter_dict['crop_wav_files_to_video']['device_receiving_input']
+ # determine device ID(s) that get(s) camera frame trigger pulses
+ if self.input_parameter_dict['crop_wav_files_to_video']['device_receiving_input'] == 'both':
+ device_ids = ['m', 's']
+ else:
+ device_ids = [self.input_parameter_dict['crop_wav_files_to_video']['device_receiving_input']]
# find camera frame trigger pulses and IPIs in channel file
- start_first_recorded_frame = 0
- end_last_recorded_frame = 0
-
- for audio_file in wave_data_dict.keys():
- if f"_ch{self.input_parameter_dict['crop_wav_files_to_video']['ch_receiving_input']:02d}" in audio_file and 'm_' in audio_file:
+ start_end_video = {device: {'start_first_recorded_frame': 0, 'end_last_recorded_frame': 0,
+ 'duration_samples': 0, 'duration_seconds': 0, 'audio_tracking_diff_seconds': 0} for device in device_ids}
+
+ for device in device_ids:
+ for audio_file in wave_data_dict.keys():
+ if f'{device}_' in audio_file:
+
+ (start_end_video[device]['start_first_recorded_frame'],
+ start_end_video[device]['end_last_recorded_frame']) = self.find_lsb_changes(relevant_array=wave_data_dict[audio_file]['wav_data'],
+ lsb_bool=True,
+ total_frame_number=total_frame_number)
+
+ start_end_video[device]['duration_samples'] = int(start_end_video[device]['end_last_recorded_frame'] - start_end_video[device]['start_first_recorded_frame'] + 1)
+ start_end_video[device]['duration_seconds'] = round(start_end_video[device]['duration_samples'] / wave_data_dict[audio_file]['sampling_rate'], 4)
+ start_end_video[device]['audio_tracking_diff_seconds'] = round(start_end_video[device]['duration_seconds'] - total_video_time, 4)
+
+                    self.message_output(f"On device {device}, the first tracking frame started at {start_end_video[device]['start_first_recorded_frame']} samples, and the last joint frame ended at "
+                                        f"{start_end_video[device]['end_last_recorded_frame']} samples, giving a total audio recording time of {start_end_video[device]['duration_seconds']} seconds, "
+                                        f"which is {start_end_video[device]['audio_tracking_diff_seconds']} seconds off relative to tracking.")
+
+ break
+
+        # save triggerbox sync info and create a new directory for the cropped files
+ with open(f"{self.root_directory}{os.sep}audio{os.sep}audio_triggerbox_sync_info.json", 'w') as audio_dict_outfile:
+ json.dump(start_end_video, audio_dict_outfile, indent=4)
+ new_directory_cropped_files = f"{self.root_directory}{os.sep}audio{os.sep}cropped_to_video"
+ pathlib.Path(new_directory_cropped_files).mkdir(parents=True, exist_ok=True)
+
+ # find all audio files
+ all_audio_files = sorted(glob.glob(f"{self.root_directory}{os.sep}audio{os.sep}original{os.sep}*.wav"))
+
+ m_longer = False
+ s_longer = False
+ if len(device_ids) > 1:
+ if start_end_video['m']['duration_samples'] > start_end_video['s']['duration_samples']:
+ m_longer = True
+ m_original_arr_indices = np.arange(0, start_end_video['m']['duration_samples'])
+ m_new_arr_indices = np.linspace(start=0, stop=start_end_video['m']['duration_samples'] - 1, num=start_end_video['s']['duration_samples'])
+ if start_end_video['m']['duration_samples'] < start_end_video['s']['duration_samples']:
+ s_longer = True
+ s_original_arr_indices = np.arange(0, start_end_video['s']['duration_samples'])
+ s_new_arr_indices = np.linspace(start=0, stop=start_end_video['s']['duration_samples'] - 1, num=start_end_video['m']['duration_samples'])
- start_first_recorded_frame, end_last_recorded_frame = self.find_lsb_changes(relevant_array=wave_data_dict[audio_file]['wav_data'],
- lsb_bool=True,
- total_frame_number=total_frame_number)
+ QTest.qWait(1000)
- total_audio_recording_during_tracking = (end_last_recorded_frame - start_first_recorded_frame + 1) / wave_data_dict[audio_file]['sampling_rate']
- audio_tracking_difference = total_audio_recording_during_tracking - total_video_time
- self.message_output(f"On device {device_id}, the first tracking frame started at {start_first_recorded_frame} samples, and the last joint one ended at "
- f"{end_last_recorded_frame} samples, giving a total audio recording time of {total_audio_recording_during_tracking:.4f} seconds, "
- f"which is {audio_tracking_difference:.4f} seconds off relative to tracking.")
- break
+ cut_audio_subprocesses = []
+ for audio_file in all_audio_files:
+ outfile_loc = f"{self.root_directory}{os.sep}audio{os.sep}cropped_to_video{os.sep}{os.path.basename(audio_file)[:-4]}_cropped_to_video.wav"
+
+ if len(device_ids) == 1:
+ start_cut_sample = start_end_video[device_ids[0]]['start_first_recorded_frame']
+ cut_duration_samples = start_end_video[device_ids[0]]['duration_samples']
+ cut_audio_subp = subprocess.Popen(args=f'''{self.command_addition}sox {os.path.basename(audio_file)} {outfile_loc} trim {start_cut_sample}s {cut_duration_samples}s''',
+ cwd=f"{self.root_directory}{os.sep}audio{os.sep}original",
+ shell=self.shell_usage_bool)
+ cut_audio_subprocesses.append(cut_audio_subp)
+ else:
+ if 'm_' in audio_file:
+ m_start_cut_sample = start_end_video['m']['start_first_recorded_frame']
+ m_cut_duration_samples = start_end_video['m']['duration_samples']
+ if m_longer:
+ # extract original LSB data
+ m_sr_original, m_data_original = wavfile.read(f'{audio_file}')
+ m_lsb_original = m_data_original[start_end_video['m']['start_first_recorded_frame']:start_end_video['m']['end_last_recorded_frame']+1] & 1
+
+ # resample the LSB data
+                            m_lsb_modified = np.where(np.interp(x=m_new_arr_indices, xp=m_original_arr_indices, fp=m_lsb_original) > 0.5, 1, 0).astype(np.int16)
+
+ # trim and adjust tempo
+ tempo_adjustment_factor = start_end_video['m']['duration_samples'] / start_end_video['s']['duration_samples']
+ subprocess.Popen(args=f'''{self.command_addition}sox {os.path.basename(audio_file)} {outfile_loc} trim {m_start_cut_sample}s {m_cut_duration_samples}s tempo -s {tempo_adjustment_factor}''',
+ cwd=f"{self.root_directory}{os.sep}audio{os.sep}original",
+ shell=self.shell_usage_bool).wait()
+
+ # load data again and overwrite the LSB
+ m_sr_tempo_adjusted, m_data_tempo_adjusted = wavfile.read(f'{outfile_loc}')
+ if m_data_tempo_adjusted.size == start_end_video['s']['duration_samples']:
+ m_data_modified = (m_data_tempo_adjusted & ~1) ^ m_lsb_modified
+ else:
+ m_data_modified = (m_data_tempo_adjusted[:start_end_video['s']['duration_samples']] & ~1) ^ m_lsb_modified
+ wavfile.write(filename=outfile_loc, rate=m_sr_original, data=m_data_modified)
- QTest.qWait(1000)
+ else:
+ cut_audio_subp = subprocess.Popen(args=f'''{self.command_addition}sox {os.path.basename(audio_file)} {outfile_loc} trim {m_start_cut_sample}s {m_cut_duration_samples}s''',
+ cwd=f"{self.root_directory}{os.sep}audio{os.sep}original",
+ shell=self.shell_usage_bool)
+ cut_audio_subprocesses.append(cut_audio_subp)
+ else:
+ s_start_cut_sample = start_end_video['s']['start_first_recorded_frame']
+ s_cut_duration_samples = start_end_video['s']['duration_samples']
+ if s_longer:
+ # extract original LSB data
+ s_sr_original, s_data_original = wavfile.read(f'{audio_file}')
+ s_lsb_original = s_data_original[start_end_video['s']['start_first_recorded_frame']:start_end_video['s']['end_last_recorded_frame'] + 1] & 1
+
+ # resample the LSB data
+                            s_lsb_modified = np.where(np.interp(x=s_new_arr_indices, xp=s_original_arr_indices, fp=s_lsb_original) > 0.5, 1, 0).astype(np.int16)
+
+ # trim and adjust tempo
+ tempo_adjustment_factor = start_end_video['s']['duration_samples'] / start_end_video['m']['duration_samples']
+ subprocess.Popen(args=f'''{self.command_addition}sox {os.path.basename(audio_file)} {outfile_loc} trim {s_start_cut_sample}s {s_cut_duration_samples}s tempo -s {tempo_adjustment_factor}''',
+ cwd=f"{self.root_directory}{os.sep}audio{os.sep}original",
+ shell=self.shell_usage_bool).wait()
+
+ # load data again and overwrite the LSB
+ s_sr_tempo_adjusted, s_data_tempo_adjusted = wavfile.read(f'{outfile_loc}')
+ if s_data_tempo_adjusted.size == start_end_video['m']['duration_samples']:
+ s_data_modified = (s_data_tempo_adjusted & ~1) ^ s_lsb_modified
+ else:
+ s_data_modified = (s_data_tempo_adjusted[:start_end_video['m']['duration_samples']] & ~1) ^ s_lsb_modified
- for audio_idx, audio_file in enumerate(wave_data_dict.keys()):
+ wavfile.write(filename=outfile_loc, rate=s_sr_original, data=s_data_modified)
- if wave_data_dict[audio_file]['wav_data'].ndim == 1:
- resized_wav_file = wave_data_dict[audio_file]['wav_data'][start_first_recorded_frame:end_last_recorded_frame + 1]
+ else:
+ cut_audio_subp = subprocess.Popen(args=f'''{self.command_addition}sox {os.path.basename(audio_file)} {outfile_loc} trim {s_start_cut_sample}s {s_cut_duration_samples}s''',
+ cwd=f"{self.root_directory}{os.sep}audio{os.sep}original",
+ shell=self.shell_usage_bool)
+ cut_audio_subprocesses.append(cut_audio_subp)
+
+ while True:
+ status_poll = [query_subp.poll() for query_subp in cut_audio_subprocesses]
+ if any(elem is None for elem in status_poll):
+ QTest.qWait(5000)
else:
- resized_wav_file = wave_data_dict[audio_file]['wav_data'][start_first_recorded_frame:end_last_recorded_frame + 1, :]
-
- # create new directory for cropped files and HPSS files
- new_directory_cropped_files = f"{self.root_directory}{os.sep}audio{os.sep}cropped_to_video"
- pathlib.Path(new_directory_cropped_files).mkdir(parents=True, exist_ok=True)
- pathlib.Path(f"{self.root_directory}{os.sep}audio{os.sep}hpss").mkdir(parents=True, exist_ok=True)
-
- # write to file
- DataWriter(wav_data=resized_wav_file,
- input_parameter_dict={'wave_write_loc': new_directory_cropped_files,
- 'write_wavefile_data': {
- 'file_name': f"{audio_file[:-4]}_cropped_to_video",
- 'sampling_rate': wave_data_dict[audio_file]['sampling_rate'] / 1e3,
- 'library': 'scipy'
- }}).write_wavefile_data()
-
- # delete original directory
- shutil.rmtree(f"{self.root_directory}{os.sep}audio{os.sep}original")
+ break
- return total_video_time, total_frame_number
+ # create HPSS directory and delete original directory
+ pathlib.Path(f"{self.root_directory}{os.sep}audio{os.sep}hpss").mkdir(parents=True, exist_ok=True)
+ shutil.rmtree(f"{self.root_directory}{os.sep}audio{os.sep}original")
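The core trick in the two-device branch above is that the camera trigger train lives in the least significant bit of each int16 sample, so it cannot survive SoX's `tempo` resampling and has to be carried over by hand. A minimal sketch of that carry-over, under the same assumptions as the hunk (int16 WAV data, 0/1 LSB train, scipy available); the helper names are hypothetical:

```python
import numpy as np
from scipy.io import wavfile

def resample_lsb(lsb_original: np.ndarray, target_num_samples: int) -> np.ndarray:
    """Linearly interpolate a 0/1 trigger train onto a shorter sample grid,
    then re-binarize it with a 0.5 threshold."""
    original_indices = np.arange(lsb_original.size)
    new_indices = np.linspace(0, lsb_original.size - 1, num=target_num_samples)
    return (np.interp(new_indices, original_indices, lsb_original) > 0.5).astype(np.int16)

def embed_lsb(wav_path: str, lsb_resampled: np.ndarray) -> None:
    """Clear the LSB of every sample in the tempo-adjusted file and XOR the
    resampled trigger bits back in, as the hunk above does."""
    sampling_rate, data = wavfile.read(wav_path)
    data = (data[:lsb_resampled.size] & ~1) ^ lsb_resampled
    wavfile.write(wav_path, sampling_rate, data)
```

The plain-trim branches stay asynchronous: each `sox ... trim` call is collected as a `Popen` handle and polled every five seconds until none of them returns `None`, at which point the original directory is deleted.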
diff --git a/src/usv_playpen_gui.py b/src/usv_playpen_gui.py
index d729890..3da77ce 100644
--- a/src/usv_playpen_gui.py
+++ b/src/usv_playpen_gui.py
@@ -1,6 +1,6 @@
"""
@author: bartulem
-GUI to run behavioral experiments.
+GUI to run behavioral experiments and perform data processing and analyses.
"""
import ast
@@ -50,7 +50,7 @@
my_app_id = 'mycompany.myproduct.subproduct.version'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(my_app_id)
-app_name = 'USV Playpen v0.5.0'
+app_name = 'USV Playpen v0.6.0'
experimenter_id = 'bartulem'
cup_directory_name = 'Bartul'
email_list_global = ''
@@ -58,6 +58,7 @@
das_model_base_global ='20240325_073951'
camera_ids_global = ['21372315', '21372316', '21369048', '22085397', '21241563']
camera_colors_global = ['white', 'orange', 'red', 'cyan', 'yellow']
+usgh_flags_global = '1574' # change to '1862' for NO SYNC mode
if platform.system() == 'Windows':
config_dir_global = 'C:\\experiment_running_docs'
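The new module-level constant is threaded into the Avisoft recorder settings in `record_two` below, so toggling between synchronized and NO SYNC recordings becomes a one-line edit; a hypothetical illustration of the two modes named in the comment above:

```python
# '1574' = synchronized mode, '1862' = NO SYNC mode (per the comment above);
# the recorder settings dict picks the value up instead of a hard-coded string.
usgh_flags_global = '1574'
recorder_settings = {'outtype': '7', 'usghflags': usgh_flags_global, 'diff': '0'}
```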
@@ -75,7 +76,7 @@
avisoft_rec_dir_global = 'C:\\Program Files (x86)\\Avisoft Bioacoustics\\RECORDER USGH'
avisoft_base_dir_global = 'C:\\Users\\bartulem\\Documents\\Avisoft Bioacoustics\\'
coolterm_base_dir_global = 'D:\\CoolTermWin'
-destination_linux_global = f'/home/labadmin/falkner/{cup_directory_name}/Data,/home/labadmin/murthy/{cup_directory_name}/Data'
+destination_linux_global = f'/home/labadmin/falkner/{cup_directory_name}/Data'
destination_win_global = f'F:\\{cup_directory_name}\\Data,M:\\{cup_directory_name}\\Data'
gui_font_global = 'segoeui.ttf'
@@ -284,8 +285,8 @@ def __init__(self, **kwargs):
'relative_intensity_threshold': 0.6,
'millisecond_divergence_tolerance': 10},
'crop_wav_files_to_video': {
- 'device_receiving_input': 'm',
- 'ch_receiving_input': 1}}},
+ 'device_receiving_input': 'both',
+ 'ch_receiving_input': 4}}},
'usv_inference': {
'FindMouseVocalizations': {
'das_command_line_inference': {
@@ -492,7 +493,7 @@ def record_two(self):
'delay': '0.0', 'center': '40000', 'bandwidth': '5', 'fd': '5',
'decimation': '-1', 'device': '0', 'mode': '0', 'outfovertaps': '32',
'outfoverabtast': '2000000', 'outformat': '2', 'outfabtast': '-22050', 'outdeviceid': '0',
- 'outtype': '7', 'usghflags': '1574', 'diff': '0', 'format': '1',
+ 'outtype': '7', 'usghflags': usgh_flags_global, 'diff': '0', 'format': '1',
'type': '0', 'nbrwavehdr': '32', 'devbuffer': '0.032', 'ntaps': '32',
'filtercutoff': '15.0', 'filter': '0', 'fabtast': '250000', 'y2': '1322',
'x2': '2557', 'y1': '10', 'x1': '1653', 'fftlength': '256',
@@ -1003,11 +1004,11 @@ def process_one(self):
self.crop_wav_cam_cb.activated.connect(partial(self._combo_box_prior_false, variable_id='crop_wav_cam_cb_bool'))
self.crop_wav_cam_cb.move(column_two_x2, 70)
- device_receiving_input_cb_label = QLabel('Trgbox-USGH device (m|s):', self.ProcessSettings)
+ device_receiving_input_cb_label = QLabel('Trgbox-USGH device(s):', self.ProcessSettings)
device_receiving_input_cb_label.setFont(QFont(self.font_id, 12))
device_receiving_input_cb_label.move(column_two_x1, 100)
self.device_receiving_input_cb = QComboBox(self.ProcessSettings)
- self.device_receiving_input_cb.addItems(['m', 's'])
+ self.device_receiving_input_cb.addItems(['both', 'm', 's'])
self.device_receiving_input_cb.setStyleSheet('QComboBox { width: 80px; }')
self.device_receiving_input_cb.activated.connect(partial(self._combo_box_prior_audio_device_camera_input, variable_id='device_receiving_input'))
self.device_receiving_input_cb.move(column_two_x2, 100)
@@ -1015,7 +1016,7 @@ def process_one(self):
ch_receiving_input_label = QLabel('Trgbox-USGH ch (1-12):', self.ProcessSettings)
ch_receiving_input_label.setFont(QFont(self.font_id, 12))
ch_receiving_input_label.move(column_two_x1, 130)
- self.ch_receiving_input = QLineEdit('1', self.ProcessSettings)
+ self.ch_receiving_input = QLineEdit('4', self.ProcessSettings)
self.ch_receiving_input.setFont(QFont(self.font_id, 10))
self.ch_receiving_input.setStyleSheet('QLineEdit { width: 108px; }')
self.ch_receiving_input.move(column_two_x2, 130)
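Downstream, the channel number entered here is zero-padded into a `_chNN` filename filter that `crop_wav_files_to_video` uses to select the trigger channel file (see the `conditional_arg` in the synchronize_files hunk above). A hypothetical illustration:

```python
# '4' typed into the GUI becomes the filter '_ch04', matching e.g.
# 'm_250101_ch04.wav' in the audio/original directory (example filename).
ch_receiving_input = 4
filename_filter = f"_ch{ch_receiving_input:02d}"
assert filename_filter == "_ch04"
```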
@@ -1030,7 +1031,7 @@ def process_one(self):
self.conduct_hpss_cb.activated.connect(partial(self._combo_box_prior_false, variable_id='conduct_hpss_cb_bool'))
self.conduct_hpss_cb.move(column_two_x2, 160)
- stft_label = QLabel('STFT window and hop size:', self.ProcessSettings)
+ stft_label = QLabel('STFT window & hop size:', self.ProcessSettings)
stft_label.setFont(QFont(self.font_id, 12))
stft_label.move(column_two_x1, 190)
self.stft_window_hop = QLineEdit('512,128', self.ProcessSettings)
@@ -1150,7 +1151,7 @@ def process_one(self):
ev_sync_label.setStyleSheet('QLabel { font-weight: bold;}')
ev_sync_label.move(column_two_x1, 630)
- conduct_ephys_file_chaining_label = QLabel('Conduct e-phys file concat:', self.ProcessSettings)
+ conduct_ephys_file_chaining_label = QLabel('Conduct e-phys concat:', self.ProcessSettings)
conduct_ephys_file_chaining_label.setFont(QFont(self.font_id, 12))
conduct_ephys_file_chaining_label.setStyleSheet('QLabel { color: #F58025; }')
conduct_ephys_file_chaining_label.move(column_two_x1, 660)
@@ -1474,12 +1475,12 @@ def process_two(self):
self.ConductProcess = ConductProcess(self)
self.setWindowTitle(f'{app_name} (Conduct Processing)')
self.setCentralWidget(self.ConductProcess)
- record_four_x, record_four_y = (860, 1100)
+ record_four_x, record_four_y = (870, 1100)
self.setFixedSize(record_four_x, record_four_y)
self.txt_edit_process = QPlainTextEdit(self.ConductProcess)
self.txt_edit_process.move(5, 5)
- self.txt_edit_process.setFixedSize(845, 1040)
+ self.txt_edit_process.setFixedSize(855, 1040)
self.txt_edit_process.setReadOnly(True)
self._save_modified_values_to_toml(run_exp_bool=False, message_func=self._process_message)
@@ -1702,7 +1703,7 @@ def _save_process_labels_func(self):
self.settings_dir_btn_clicked_flag = False
self.processing_input_dict['synchronize_files']['Synchronizer']['crop_wav_files_to_video']['device_receiving_input'] = str(getattr(self, 'device_receiving_input'))
- self.device_receiving_input = 'm'
+ self.device_receiving_input = 'both'
self.processing_input_dict['send_email']['Messenger']['processing_pc_choice'] = str(getattr(self, 'processing_pc_choice'))
self.processing_pc_choice = 'A84E Backup'
@@ -1947,6 +1948,8 @@ def _combo_box_prior_processing_pc_choice(self, index, variable_id=None):
def _combo_box_prior_audio_device_camera_input(self, index, variable_id=None):
if index == 0:
+ self.__dict__[variable_id] = 'both'
+ elif index == 1:
self.__dict__[variable_id] = 'm'
else:
self.__dict__[variable_id] = 's'
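Since this index chain grows with every new device option, a table-driven variant may age better; the following is a hypothetical refactor sketch, not part of this change set:

```python
# Map combobox indices to device codes in one place; .get() falls back to
# 's' for any unexpected index, matching the else branch above.
DEVICE_BY_INDEX = {0: 'both', 1: 'm', 2: 's'}

def _combo_box_prior_audio_device_camera_input(self, index, variable_id=None):
    self.__dict__[variable_id] = DEVICE_BY_INDEX.get(index, 's')
```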
@@ -2334,7 +2337,7 @@ def main():
'conduct_audio_cb_bool': True, 'conduct_tracking_calibration_cb_bool': False, 'modify_audio_config': False, 'conduct_video_concatenation_cb_bool': False,
'conduct_video_fps_change_cb_bool': False, 'delete_con_file_cb_bool': True, 'conduct_multichannel_conversion_cb_bool': False, 'crop_wav_cam_cb_bool': False,
'conc_audio_cb_bool': False, 'filter_audio_cb_bool': False, 'conduct_sync_cb_bool': False, 'conduct_nv_sync_cb_bool': False, 'recording_codec': 'hq',
- 'npx_file_type': 'ap', 'device_receiving_input': 'm', 'kilosort_version': '4', 'conduct_hpss_cb_bool': False, 'conduct_ephys_file_chaining_cb_bool': False,
+ 'npx_file_type': 'ap', 'device_receiving_input': 'both', 'kilosort_version': '4', 'conduct_hpss_cb_bool': False, 'conduct_ephys_file_chaining_cb_bool': False,
'split_cluster_spikes_cb_bool': False, 'anipose_calibration_cb_bool': False, 'sleap_file_conversion_cb_bool': False, 'board_provided_cb_bool': False,
'anipose_triangulation_cb_bool': False, 'triangulate_arena_points_cb_bool': False, 'display_progress_cb_bool': False, 'translate_rotate_metric_cb_bool': False,
'save_arena_data_cb_bool': False, 'save_mouse_data_cb_bool': True, 'delete_original_h5_cb_bool': True, 'das_inference_cb_bool': False, 'sleap_cluster_cb_bool': False,