• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

int-brain-lab / iblrig / 15738036488

18 Jun 2025 04:10PM UTC coverage: 48.249% (+1.5%) from 46.79%
15738036488

Pull #815

github

9b495a
web-flow
Merge fd70c12e3 into 5c537cbb7
Pull Request #815: extended tests for photometry copier

23 of 32 new or added lines in 1 file covered. (71.88%)

1106 existing lines in 22 files now uncovered.

4408 of 9136 relevant lines covered (48.25%)

0.96 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.23
/iblrig/path_helper.py
1
import logging
2✔
2
import os
2✔
3
import re
2✔
4
import shutil
2✔
5
import subprocess
2✔
6
from pathlib import Path
2✔
7
from typing import TypeVar
2✔
8

9
import numpy as np
2✔
10
import yaml
2✔
11
from packaging import version
2✔
12
from pydantic import BaseModel, ValidationError
2✔
13

14
import iblrig
2✔
15
from ibllib.io import session_params
2✔
16
from ibllib.io.raw_data_loaders import load_settings
2✔
17
from iblrig.constants import HARDWARE_SETTINGS_YAML, RIG_SETTINGS_YAML
2✔
18
from iblrig.pydantic_definitions import HardwareSettings, RigSettings
2✔
19
from iblutil.util import Bunch
2✔
20
from one.alf.spec import is_session_path
2✔
21

22
log = logging.getLogger(__name__)
2✔
23
T = TypeVar('T', bound=BaseModel)
2✔
24

25

26
def iterate_previous_sessions(subject_name: str, task_name: str, n: int = 1, **kwargs) -> list[dict]:
    """
    Search local and remote subject folders for the most recent sessions running a given protocol.

    Both the local and the remote subjects folders (as resolved by
    :func:`get_local_and_remote_paths`) are scanned and the information of up to n
    matching protocols per location is collected, de-duplicated and returned with
    the most recent session first.

    Parameters
    ----------
    subject_name : str
        Name of the subject.
    task_name : str
        Name of the protocol to look for in experiment description.
    n : int, optional
        maximum number of protocols to return
    **kwargs
        Optional arguments to be passed to iblrig.path_helper.get_local_and_remote_paths
        If not used, will use the arguments from iblrig/settings/iblrig_settings.yaml

    Returns
    -------
    list[dict]
        List of dictionaries with keys: session_path, experiment_description, task_settings, file_task_data
    """
    paths = get_local_and_remote_paths(**kwargs)
    matches = _iterate_protocols(paths['local_subjects_folder'].joinpath(subject_name), task_name=task_name, n=n)
    remote_folder = paths['remote_subjects_folder']
    if remote_folder is not None:
        remote_matches = _iterate_protocols(remote_folder.joinpath(subject_name), task_name=task_name, n=n)
        if remote_matches is not None:
            matches.extend(remote_matches)
        # np.unique sorts the stubs and returns the index of each first occurrence;
        # flipping that index de-duplicates while putting the latest session first
        stubs = [m['session_stub'] for m in matches]
        _, first_occurrence = np.unique(stubs, return_index=True)
        matches = [matches[i] for i in np.flipud(first_occurrence)]
    return matches
60

61

62
def _iterate_protocols(subject_folder: Path, task_name: str, n: int = 1, min_trials: int = 43) -> list[dict]:
    """
    Return information on the last n sessions with matching protocol.

    This function iterates over the sessions of a given subject and searches for a given protocol name.

    Parameters
    ----------
    subject_folder : Path
        A subject folder containing dated folders.
    task_name : str
        The task protocol name to look for.
    n : int
        The number of previous protocols to return.
    min_trials : int
        Skips sessions with fewer than this number of trials.

    Returns
    -------
    list[dict]
        list of dictionaries with keys: session_stub, session_path, experiment_description,
        task_settings, file_task_data.
    """

    def proc_num(x):
        """Return protocol number.

        Use 'protocol_number' key if present (unlikely), otherwise use collection name.
        """
        # collection names are expected to end in '_NN'; any other suffix counts as 0
        i = (x or {}).get('collection', '00').split('_')
        collection_int = int(i[-1]) if i[-1].isnumeric() else 0
        return x.get('protocol_number', collection_int)

    protocols = []
    if subject_folder is None or Path(subject_folder).exists() is False:
        return protocols
    sessions = subject_folder.glob('????-??-??/*/_ibl_experiment.description*.yaml')  # seq may be X or XXX
    # Make extra sure to only include valid sessions
    sessions = filter(lambda x: is_session_path(x.relative_to(subject_folder.parent).parent), sessions)
    # sorted(..., reverse=True) visits the most recent date/sequence folders first
    for file_experiment in sorted(sessions, reverse=True):
        session_path = file_experiment.parent
        ad = session_params.read_params(file_experiment)
        # keep only the task entries matching the requested protocol name
        # reversed: we look for the last task first if the protocol ran twice
        tasks = filter(None, map(lambda x: x.get(task_name), ad.get('tasks', [])))
        for adt in sorted(tasks, key=proc_num, reverse=True):
            # skip collections without loadable task settings (e.g. incomplete sessions)
            if not (task_settings := load_settings(session_path, task_collection=adt['collection'])):
                continue
            if task_settings.get('NTRIALS', min_trials + 1) < min_trials:  # ignore sessions with too few trials
                continue
            protocols.append(
                Bunch(
                    {
                        'session_stub': '_'.join(file_experiment.parent.parts[-2:]),  # 2019-01-01_001
                        'session_path': file_experiment.parent,
                        'task_collection': adt['collection'],
                        'experiment_description': ad,
                        'task_settings': task_settings,
                        'file_task_data': session_path.joinpath(adt['collection'], '_iblrig_taskData.raw.jsonable'),
                    }
                )
            )
            if len(protocols) >= n:  # stop early once enough matches were gathered
                return protocols
    return protocols
126

127

128
def get_local_and_remote_paths(
    local_path: str | Path | None = None, remote_path: str | Path | None = None, lab: str | None = None, iblrig_settings=None
) -> dict:
    """
    Function used to parse input arguments to transfer commands.

    If the arguments are None, reads in the settings and returns the values from the files.
    local_subjects_path always has a fallback on the home directory / iblrig_data
    remote_subjects_path has no fallback and will return None when all options are exhausted

    :param local_path: optional override for the local data folder; takes precedence over the settings
    :param remote_path: optional override for the remote data folder; takes precedence over the settings
    :param lab: optional lab name used to build the local subjects folder; falls back on the
        ALYX_LAB entry of the settings
    :param iblrig_settings: if provided, settings dictionary, otherwise will load the default settings files
    :return: dictionary, with following keys (example output)
       {'local_data_folder': PosixPath('C:/iblrigv8_data'),
        'remote_data_folder': PosixPath('Y:/'),
        'local_subjects_folder': PosixPath('C:/iblrigv8_data/mainenlab/Subjects'),
        'remote_subjects_folder': PosixPath('Y:/Subjects')}
    """
    # we only want to attempt to load the settings file if necessary
    if iblrig_settings is None and ((local_path is None) or (remote_path is None) or (lab is None)):
        iblrig_settings = load_pydantic_yaml(RigSettings)
    if isinstance(iblrig_settings, RigSettings):
        # normalize the pydantic model to a plain dict so all lookups below are uniform
        iblrig_settings = iblrig_settings.model_dump()

    paths = Bunch({'local_data_folder': local_path, 'remote_data_folder': remote_path})
    if paths.local_data_folder is None:
        # settings value when set, otherwise fall back on ~/iblrig_data
        paths.local_data_folder = (
            Path(p) if (p := iblrig_settings['iblrig_local_data_path']) else Path.home().joinpath('iblrig_data')
        )
    elif isinstance(paths.local_data_folder, str):
        paths.local_data_folder = Path(paths.local_data_folder)
    if paths.remote_data_folder is None:
        # NB: no fallback for the remote folder — remains None when unset in the settings
        paths.remote_data_folder = Path(p) if (p := iblrig_settings['iblrig_remote_data_path']) else None
    elif isinstance(paths.remote_data_folder, str):
        paths.remote_data_folder = Path(paths.remote_data_folder)

    # Get the subjects folders. If not defined in the settings, assume local_data_folder + /Subjects
    paths.local_subjects_folder = (iblrig_settings or {}).get('iblrig_local_subjects_path', None)
    lab = lab or (iblrig_settings or {}).get('ALYX_LAB', None)
    if paths.local_subjects_folder is None:
        if paths.local_data_folder.name == 'Subjects':
            paths.local_subjects_folder = paths.local_data_folder
        elif lab:  # append lab/Subjects part
            paths.local_subjects_folder = paths.local_data_folder.joinpath(lab, 'Subjects')
        else:  # NB: case is important here. ALF spec expects lab folder before 'Subjects' (capitalized)
            paths.local_subjects_folder = paths.local_data_folder.joinpath('subjects')
    else:
        paths.local_subjects_folder = Path(paths.local_subjects_folder)

    #  Get the remote subjects folders. If not defined in the settings, assume remote_data_folder + /Subjects
    paths.remote_subjects_folder = (iblrig_settings or {}).get('iblrig_remote_subjects_path', None)
    if paths.remote_subjects_folder is None:
        if paths.remote_data_folder:
            if paths.remote_data_folder.name == 'Subjects':
                paths.remote_subjects_folder = paths.remote_data_folder
            else:
                paths.remote_subjects_folder = paths.remote_data_folder.joinpath('Subjects')
    else:
        paths.remote_subjects_folder = Path(paths.remote_subjects_folder)
    return paths
189

190

191
def _load_settings_yaml(filename: Path | str = RIG_SETTINGS_YAML, do_raise: bool = True) -> Bunch:
    """
    Load an IBLRIG settings YAML file and apply compatibility patches.

    Parameters
    ----------
    filename : Path | str
        Path to the YAML file. Relative paths are resolved against the
        repository's ``settings`` directory.
    do_raise : bool
        If True (default), a missing file raises FileNotFoundError (via open);
        if False, the error is logged and an empty Bunch is returned.

    Returns
    -------
    Bunch
        The loaded settings, patched for compatibility with the latest version.
    """
    filename = Path(filename)
    if not filename.is_absolute():
        # relative paths refer to the settings folder at the repository root
        filename = Path(iblrig.__file__).parents[1].joinpath('settings', filename)
    if not filename.exists() and not do_raise:
        # fix: the message previously contained no placeholder and never reported the path
        log.error(f'File not found: {filename}')
        return Bunch()
    with open(filename) as fp:
        rs = yaml.safe_load(fp)
    rs = patch_settings(rs, filename.stem)
    return Bunch(rs)
202

203

204
def load_pydantic_yaml(model: type[T], filename: Path | str | None = None, do_raise: bool = True) -> T:
    """
    Read a YAML settings file and validate its contents against a pydantic model.

    Parameters
    ----------
    model : Type[T]
        The pydantic model class used for validation.
    filename : Path | str | None, optional
        Path to the YAML file. When None (default), the standard IBLRIG
        settings file is deduced from the model class.
    do_raise : bool, optional
        When True (default), validation failures raise a ValidationError.
        When False, the error is logged and a model instance is constructed
        from the raw data without validation.

    Returns
    -------
    T
        A validated instance of the given pydantic model.

    Raises
    ------
    ValidationError
        If validation fails and do_raise is set to True.
        The raised exception contains details about the validation error.
    TypeError
        If the filename is None and the model class is not recognized as
        HardwareSettings or RigSettings.
    """
    if filename is None:
        # deduce the standard settings file from the model class
        standard_files = {HardwareSettings: HARDWARE_SETTINGS_YAML, RigSettings: RIG_SETTINGS_YAML}
        if model not in standard_files:
            raise TypeError(f'Cannot deduce filename for model `{model.__name__}`.')
        filename = standard_files[model]
    if filename not in (HARDWARE_SETTINGS_YAML, RIG_SETTINGS_YAML):
        # TODO: We currently skip validation of pydantic models if an extra
        #       filename is provided that does NOT correspond to the standard
        #       settings files of IBLRIG. This should be re-evaluated.
        do_raise = False
    settings = _load_settings_yaml(filename=filename, do_raise=do_raise)
    try:
        return model.model_validate(settings)
    except ValidationError as e:
        if do_raise:
            raise
        log.exception(e)
        return model.model_construct(**settings)
258

259

260
def save_pydantic_yaml(data: T, filename: Path | str | None = None) -> None:
    """
    Serialize a pydantic settings model to a YAML file.

    Parameters
    ----------
    data : T
        The pydantic model instance to write out.
    filename : Path | str | None, optional
        Destination file. When None (default), the standard IBLRIG settings
        file is deduced from the type of `data`.

    Raises
    ------
    TypeError
        If filename is None and `data` is neither a HardwareSettings nor a
        RigSettings instance.
    """
    if filename is not None:
        filename = Path(filename)
    elif isinstance(data, HardwareSettings):
        filename = HARDWARE_SETTINGS_YAML
    elif isinstance(data, RigSettings):
        filename = RIG_SETTINGS_YAML
    else:
        raise TypeError(f'Cannot deduce filename for model `{type(data).__name__}`.')
    yaml_data = data.model_dump()
    # round-trip validation guards against writing an invalid settings file
    data.model_validate(yaml_data)
    with open(filename, 'w') as f:
        log.debug(f'Dumping {type(data).__name__} to {filename.name}')
        yaml.dump(yaml_data, f, sort_keys=False)
275

276

277
def patch_settings(rs: dict, filename: str | Path) -> dict:
    """
    Update loaded settings files to ensure compatibility with latest version.

    Parameters
    ----------
    rs : dict
        A loaded settings file.
    filename : str | Path
        The filename of the settings file.

    Returns
    -------
    dict
        The updated settings.
    """
    filename = Path(filename)
    # settings without a VERSION key are treated as the oldest possible format
    settings_version = version.parse(rs.get('VERSION', '0.0.0'))
    if filename.stem.startswith('hardware'):
        # v1.0.0 migration: the single 'device_camera' entry became a
        # 'device_cameras' mapping keyed by camera label
        if settings_version < version.Version('1.0.0') and 'device_camera' in rs:
            log.info('Patching hardware settings; assuming left camera label')
            rs['device_cameras'] = {'left': rs.pop('device_camera')}
            rs['VERSION'] = '1.0.0'
        if 'device_cameras' in rs and rs['device_cameras'] is not None:
            rs['device_cameras'] = {k: v for k, v in rs['device_cameras'].items() if v}  # remove empty keys
            # v1.1.0 migration: cameras are grouped per workflow; the old flat layout
            # is detected by a lone 'left' entry that lacks an INDEX field
            idx_missing = set(rs['device_cameras']) == {'left'} and 'INDEX' not in rs['device_cameras']['left']
            if settings_version < version.Version('1.1.0') and idx_missing:
                log.info('Patching hardware settings; assuming left camera index and training workflow')
                # NB: BONSAI_WORKFLOW must be popped before the mapping is rebuilt below
                workflow = rs['device_cameras']['left'].pop('BONSAI_WORKFLOW', None)
                bonsai_workflows = {'setup': 'devices/camera_setup/setup_video.bonsai', 'recording': workflow}
                rs['device_cameras'] = {
                    'training': {'BONSAI_WORKFLOW': bonsai_workflows, 'left': {'INDEX': 1, 'SYNC_LABEL': 'audio'}}
                }
                rs['VERSION'] = '1.1.0'
        # normalize an explicit null entry to an empty mapping
        if rs.get('device_cameras') is None:
            rs['device_cameras'] = {}
    return rs
314

315

316
def get_commit_hash(folder: str) -> str:
    """
    Return the hash of the latest commit of the git repository in `folder`.

    Parameters
    ----------
    folder : str
        Path to a git repository.

    Returns
    -------
    str
        The full hash of HEAD, as reported by `git rev-parse`.
    """
    # run git in the target folder via the cwd argument rather than os.chdir:
    # the previous implementation did not restore the working directory if
    # check_output raised (no try/finally around the chdir)
    out = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=folder).decode().strip()
    if not out:
        log.debug('Commit hash is empty string')
    log.debug(f'Found commit hash {out}')
    return out
325

326

327
def iterate_collection(session_path: str, collection_name='raw_task_data') -> str:
    """
    Given a session path returns the next numbered collection name.

    Parameters
    ----------
    session_path : str
        The session path containing zero or more numbered collections.
    collection_name : str
        The collection name without the _NN suffix.

    Returns
    -------
    str
        The next numbered collection name.

    Examples
    --------
    In a folder where there are no raw task data folders

    >>> iterate_collection('./subject/2020-01-01/001')
    'raw_task_data_00'

    In a folder where there is one raw_imaging_data_00 folder

    >>> iterate_collection('./subject/2020-01-01/001', collection_name='raw_imaging_data')
    'raw_imaging_data_01'
    """
    session = Path(session_path)
    if not session.exists():
        return f'{collection_name}_00'
    # fix: the previous pattern used match() without anchoring the end, so a folder
    # such as 'raw_task_data_00_backup' was wrongly counted (and then crashed the
    # int() conversion); re.escape also protects against metacharacters in the name
    pattern = re.compile(re.escape(collection_name) + r'_[0-9]{2}')
    numbered = sorted(d.name for d in session.iterdir() if d.is_dir() and pattern.fullmatch(d.name))
    if not numbered:
        return f'{collection_name}_00'
    return f'{collection_name}_{int(numbered[-1][-2:]) + 1:02}'
363

364

365
def create_bonsai_layout_from_template(workflow_file: Path) -> None:
    """
    Ensure a Bonsai layout file exists for a workflow, seeding it from the template.

    Bonsai persists dialog settings (window position, control visibility, etc.)
    in an XML file with the suffix `.bonsai.layout` next to the workflow. These
    files are user-specific and may be overwritten locally according to user
    preference, so repositories provide a default as a file with the suffix
    `.bonsai.layout_template`. If no layout file exists yet for the given
    workflow, this function copies the template into place; if no template
    exists either, it merely logs that none is available.

    Parameters
    ----------
    workflow_file : Path
        The path to the Bonsai workflow for which the layout is to be created.

    Raises
    ------
    FileNotFoundError
        If the provided workflow_file does not exist.
    """
    if not workflow_file.exists():
        raise FileNotFoundError(workflow_file)
    layout_file = workflow_file.with_suffix('.bonsai.layout')
    if layout_file.exists():
        return  # user already has a layout; never overwrite it
    template_file = workflow_file.with_suffix('.bonsai.layout_template')
    if not template_file.exists():
        log.debug(f'No template layout for {workflow_file.name}')
        return
    log.info(f'Creating default {layout_file.name}')
    shutil.copy(template_file, layout_file)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc