• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

int-brain-lab / iblrig / 10568073180

26 Aug 2024 10:13PM UTC coverage: 47.538% (+0.7%) from 46.79%
10568073180

Pull #711

github

eeff82
web-flow
Merge 599c9edfb into ad41db25f
Pull Request #711: 8.23.2

121 of 135 new or added lines in 8 files covered. (89.63%)

1025 existing lines in 22 files now uncovered.

4084 of 8591 relevant lines covered (47.54%)

0.95 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.12
/iblrig/path_helper.py
1
import logging
2✔
2
import os
2✔
3
import re
2✔
4
import shutil
2✔
5
import subprocess
2✔
6
from pathlib import Path
2✔
7
from typing import TypeVar
2✔
8

9
import numpy as np
2✔
10
import yaml
2✔
11
from packaging import version
2✔
12
from pydantic import BaseModel, ValidationError
2✔
13

14
import iblrig
2✔
15
from ibllib.io import session_params
2✔
16
from ibllib.io.raw_data_loaders import load_settings
2✔
17
from iblrig.constants import HARDWARE_SETTINGS_YAML, RIG_SETTINGS_YAML
2✔
18
from iblrig.pydantic_definitions import HardwareSettings, RigSettings
2✔
19
from iblutil.util import Bunch
2✔
20
from one.alf.spec import is_session_path
2✔
21

22
# Module-level logger for path-helper diagnostics.
log = logging.getLogger(__name__)
# Generic type variable bound to pydantic models; used by load_pydantic_yaml /
# save_pydantic_yaml so the validated instance keeps its concrete type.
T = TypeVar('T', bound=BaseModel)
24

25

26
def iterate_previous_sessions(subject_name: str, task_name: str, n: int = 1, **kwargs) -> list[dict]:
    """
    Search the local and remote subjects folders for previous sessions of a subject
    that ran a given protocol, returning details of the most recent matches.

    Parameters
    ----------
    subject_name : str
        Name of the subject.
    task_name : str
        Name of the protocol to look for in experiment description.
    n : int, optional
        maximum number of protocols to return
    **kwargs
        Optional arguments to be passed to iblrig.path_helper.get_local_and_remote_paths
        If not used, will use the arguments from iblrig/settings/iblrig_settings.yaml

    Returns
    -------
    list[dict]
        List of dictionaries with keys: session_path, experiment_description, task_settings, file_task_data
    """
    paths = get_local_and_remote_paths(**kwargs)
    found = _iterate_protocols(paths['local_subjects_folder'].joinpath(subject_name), task_name=task_name, n=n)
    remote_root = paths['remote_subjects_folder']
    if remote_root is not None:
        from_remote = _iterate_protocols(remote_root.joinpath(subject_name), task_name=task_name, n=n)
        if from_remote is not None:
            found.extend(from_remote)
        # np.unique sorts the session stubs and returns the first index of each;
        # flipping that index array yields unique sessions, most recent first
        _, first_idx = np.unique([s['session_stub'] for s in found], return_index=True)
        found = [found[i] for i in np.flipud(first_idx)]
    return found
60

61

62
def _iterate_protocols(subject_folder: Path, task_name: str, n: int = 1, min_trials: int = 43) -> list[dict]:
    """
    Return information on the last n sessions with matching protocol.

    This function iterates over the sessions of a given subject and searches for a given protocol name.

    Parameters
    ----------
    subject_folder : Path
        A subject folder containing dated folders.
    task_name : str
        The task protocol name to look for.
    n : int
        The number of previous protocols to return.
    min_trials : int
        Skips sessions with fewer than this number of trials.

    Returns
    -------
    list[dict]
        list of dictionaries with keys: session_stub, session_path, experiment_description,
        task_settings, file_task_data.
    """

    def proc_num(x):
        """Return protocol number.

        Use 'protocol_number' key if present (unlikely), otherwise use collection name.
        """
        # e.g. a collection named 'raw_task_data_01' yields 1; a non-numeric
        # suffix (or missing 'collection' key) falls back to 0
        i = (x or {}).get('collection', '00').split('_')
        collection_int = int(i[-1]) if i[-1].isnumeric() else 0
        return x.get('protocol_number', collection_int)

    protocols = []
    if subject_folder is None or Path(subject_folder).exists() is False:
        return protocols
    sessions = subject_folder.glob('????-??-??/*/_ibl_experiment.description*.yaml')  # seq may be X or XXX
    # Make extra sure to only include valid sessions
    sessions = filter(lambda x: is_session_path(x.relative_to(subject_folder.parent).parent), sessions)
    # sorted(..., reverse=True) iterates the most recent session dates first
    for file_experiment in sorted(sessions, reverse=True):
        session_path = file_experiment.parent
        ad = session_params.read_params(file_experiment)
        # keep only the task entries matching the requested protocol name
        tasks = filter(None, map(lambda x: x.get(task_name), ad.get('tasks', [])))
        # reversed: we look for the last task first if the protocol ran twice
        for adt in sorted(tasks, key=proc_num, reverse=True):
            if not (task_settings := load_settings(session_path, task_collection=adt['collection'])):
                continue
            # a missing NTRIALS key defaults to min_trials + 1, i.e. is treated as sufficient
            if task_settings.get('NTRIALS', min_trials + 1) < min_trials:  # ignore sessions with too few trials
                continue
            protocols.append(
                Bunch(
                    {
                        'session_stub': '_'.join(file_experiment.parent.parts[-2:]),  # 2019-01-01_001
                        'session_path': file_experiment.parent,
                        'task_collection': adt['collection'],
                        'experiment_description': ad,
                        'task_settings': task_settings,
                        'file_task_data': session_path.joinpath(adt['collection'], '_iblrig_taskData.raw.jsonable'),
                    }
                )
            )
            # early exit as soon as enough matching protocols were collected
            if len(protocols) >= n:
                return protocols
    return protocols
126

127

128
def get_local_and_remote_paths(
    local_path: str | Path | None = None, remote_path: str | Path | None = None, lab: str | None = None, iblrig_settings=None
) -> dict:
    """
    Function used to parse input arguments to transfer commands.

    If the arguments are None, reads in the settings and returns the values from the files.
    local_subjects_path always has a fallback on the home directory / iblrig_data
    remote_subjects_path has no fallback and will return None when all options are exhausted
    :param local_path:
    :param remote_path:
    :param lab:
    :param iblrig_settings: if provided, settings dictionary, otherwise will load the default settings files
    :return: dictionary, with following keys (example output)
       {'local_data_folder': PosixPath('C:/iblrigv8_data'),
        'remote_data_folder': PosixPath('Y:/'),
        'local_subjects_folder': PosixPath('C:/iblrigv8_data/mainenlab/Subjects'),
        'remote_subjects_folder': PosixPath('Y:/Subjects')}
    """
    # we only want to attempt to load the settings file if necessary
    if (local_path is None) or (remote_path is None) or (lab is None):
        # NOTE(review): the loaded RigSettings object is used below with both item
        # access (iblrig_settings[...]) and .get(...) — assumes the pydantic model
        # supports mapping-style access; confirm against RigSettings' definition
        iblrig_settings = load_pydantic_yaml(RigSettings) if iblrig_settings is None else iblrig_settings

    paths = Bunch({'local_data_folder': local_path, 'remote_data_folder': remote_path})
    # local data folder: explicit argument > settings value > ~/iblrig_data fallback
    if paths.local_data_folder is None:
        paths.local_data_folder = (
            Path(p) if (p := iblrig_settings['iblrig_local_data_path']) else Path.home().joinpath('iblrig_data')
        )
    elif isinstance(paths.local_data_folder, str):
        paths.local_data_folder = Path(paths.local_data_folder)
    # remote data folder: explicit argument > settings value > None (no fallback)
    if paths.remote_data_folder is None:
        paths.remote_data_folder = Path(p) if (p := iblrig_settings['iblrig_remote_data_path']) else None
    elif isinstance(paths.remote_data_folder, str):
        paths.remote_data_folder = Path(paths.remote_data_folder)

    # Get the subjects folders. If not defined in the settings, assume local_data_folder + /Subjects
    paths.local_subjects_folder = (iblrig_settings or {}).get('iblrig_local_subjects_path', None)
    lab = lab or (iblrig_settings or {}).get('ALYX_LAB', None)
    if paths.local_subjects_folder is None:
        if paths.local_data_folder.name == 'Subjects':
            paths.local_subjects_folder = paths.local_data_folder
        elif lab:  # append lab/Subjects part
            paths.local_subjects_folder = paths.local_data_folder.joinpath(lab, 'Subjects')
        else:  # NB: case is important here. ALF spec expects lab folder before 'Subjects' (capitalized)
            paths.local_subjects_folder = paths.local_data_folder.joinpath('subjects')
    else:
        paths.local_subjects_folder = Path(paths.local_subjects_folder)

    #  Get the remote subjects folders. If not defined in the settings, assume remote_data_folder + /Subjects
    paths.remote_subjects_folder = (iblrig_settings or {}).get('iblrig_remote_subjects_path', None)
    if paths.remote_subjects_folder is None:
        if paths.remote_data_folder:
            if paths.remote_data_folder.name == 'Subjects':
                paths.remote_subjects_folder = paths.remote_data_folder
            else:
                paths.remote_subjects_folder = paths.remote_data_folder.joinpath('Subjects')
    else:
        paths.remote_subjects_folder = Path(paths.remote_subjects_folder)
    return paths
187

188

189
def _load_settings_yaml(filename: Path | str = RIG_SETTINGS_YAML, do_raise: bool = True) -> Bunch:
    """
    Load an IBLRIG settings YAML file and apply compatibility patches.

    Parameters
    ----------
    filename : Path | str, optional
        Path to the YAML file. Relative paths are resolved against the
        ``settings`` directory next to the iblrig package.
    do_raise : bool, optional
        If True (default), a missing file raises (via ``open``). If False, a
        missing file is logged as an error and an empty Bunch is returned.

    Returns
    -------
    Bunch
        The loaded settings, passed through patch_settings for backward
        compatibility.
    """
    filename = Path(filename)
    if not filename.is_absolute():
        # bare filenames refer to the repository's settings folder
        filename = Path(iblrig.__file__).parents[1].joinpath('settings', filename)
    if not filename.exists() and not do_raise:
        # bug fix: the message previously contained a literal placeholder
        # instead of the actual missing path
        log.error(f'File not found: {filename}')
        return Bunch()
    with open(filename) as fp:
        rs = yaml.safe_load(fp)
    rs = patch_settings(rs, filename.stem)
    return Bunch(rs)
200

201

202
def load_pydantic_yaml(model: type[T], filename: Path | str | None = None, do_raise: bool = True) -> T:
    """
    Load YAML data from a specified file or a standard IBLRIG settings file,
    validate it using a Pydantic model, and return the validated Pydantic model
    instance.

    Parameters
    ----------
    model : Type[T]
        The Pydantic model class to validate the YAML data against.
    filename : Path | str | None, optional
        The path to the YAML file.
        If None (default), the function deduces the appropriate standard IBLRIG
        settings file based on the model.
    do_raise : bool, optional
        If True (default), raise a ValidationError if validation fails.
        If False, log the validation error and construct a model instance
        with the provided data. Defaults to True.

    Returns
    -------
    T
        An instance of the Pydantic model, validated against the YAML data.

    Raises
    ------
    ValidationError
        If validation fails and do_raise is set to True.
        The raised exception contains details about the validation error.
    TypeError
        If the filename is None and the model class is not recognized as
        HardwareSettings or RigSettings.
    """
    if filename is None:
        # deduce the standard settings file from the model class
        if model is HardwareSettings:
            filename = HARDWARE_SETTINGS_YAML
        elif model is RigSettings:
            filename = RIG_SETTINGS_YAML
        else:
            raise TypeError(f'Cannot deduce filename for model `{model.__name__}`.')
    if filename not in (HARDWARE_SETTINGS_YAML, RIG_SETTINGS_YAML):
        # TODO: We currently skip validation of pydantic models if an extra
        #       filename is provided that does NOT correspond to the standard
        #       settings files of IBLRIG. This should be re-evaluated.
        do_raise = False
    settings = _load_settings_yaml(filename=filename, do_raise=do_raise)
    try:
        return model.model_validate(settings)
    except ValidationError as e:
        if do_raise:
            raise e
        # best effort: log the failure and build an unvalidated instance
        log.exception(e)
        return model.model_construct(**settings)
256

257

258
def save_pydantic_yaml(data: T, filename: Path | str | None = None) -> None:
    """
    Validate a pydantic settings instance and write it to a YAML file.

    Parameters
    ----------
    data : T
        The pydantic model instance to serialize.
    filename : Path | str | None, optional
        Destination file. If None (default), the standard IBLRIG settings
        file matching the instance's type is used.

    Raises
    ------
    TypeError
        If filename is None and the instance is neither a HardwareSettings
        nor a RigSettings.
    """
    if filename is not None:
        filename = Path(filename)
    elif isinstance(data, HardwareSettings):
        filename = HARDWARE_SETTINGS_YAML
    elif isinstance(data, RigSettings):
        filename = RIG_SETTINGS_YAML
    else:
        raise TypeError(f'Cannot deduce filename for model `{type(data).__name__}`.')
    dumped = data.model_dump()
    # round-trip through validation to catch schema violations before writing
    data.model_validate(dumped)
    with open(filename, 'w') as f:
        log.debug(f'Dumping {type(data).__name__} to {filename.name}')
        yaml.dump(dumped, f, sort_keys=False)
273

274

275
def patch_settings(rs: dict, filename: str | Path) -> dict:
    """
    Update loaded settings files to ensure compatibility with latest version.

    Parameters
    ----------
    rs : dict
        A loaded settings file.
    filename : str | Path
        The filename of the settings file.

    Returns
    -------
    dict
        The updated settings.
    """
    filename = Path(filename)
    settings_version = version.parse(rs.get('VERSION', '0.0.0'))
    # only hardware settings files need patching; everything else passes through
    if not filename.stem.startswith('hardware'):
        return rs
    # v1.0.0: the singular 'device_camera' entry became the 'device_cameras' mapping
    if settings_version < version.Version('1.0.0') and 'device_camera' in rs:
        log.info('Patching hardware settings; assuming left camera label')
        rs['device_cameras'] = {'left': rs.pop('device_camera')}
        rs['VERSION'] = '1.0.0'
    if rs.get('device_cameras') is not None:
        # drop falsy entries (empty camera definitions)
        cameras = {k: v for k, v in rs['device_cameras'].items() if v}
        rs['device_cameras'] = cameras
        # v1.1.0: cameras were regrouped per workflow; a lone 'left' camera
        # without an INDEX indicates the pre-1.1.0 layout
        left_only_no_index = set(cameras) == {'left'} and 'INDEX' not in cameras['left']
        if settings_version < version.Version('1.1.0') and left_only_no_index:
            log.info('Patching hardware settings; assuming left camera index and training workflow')
            recording = cameras['left'].pop('BONSAI_WORKFLOW', None)
            workflows = {'setup': 'devices/camera_setup/setup_video.bonsai', 'recording': recording}
            rs['device_cameras'] = {
                'training': {'BONSAI_WORKFLOW': workflows, 'left': {'INDEX': 1, 'SYNC_LABEL': 'audio'}}
            }
            rs['VERSION'] = '1.1.0'
    if rs.get('device_cameras') is None:
        rs['device_cameras'] = {}
    return rs
312

313

314
def get_commit_hash(folder: str):
    """
    Return the hash of the HEAD commit of the git repository in ``folder``.

    Parameters
    ----------
    folder : str
        Path to the root of a git repository.

    Returns
    -------
    str
        The SHA-1 hash of HEAD (empty string if git prints nothing).
    """
    # run git with an explicit working directory rather than os.chdir: the
    # previous chdir mutated process-global state and was never restored if
    # check_output raised
    out = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=folder).decode().strip()
    if not out:
        log.debug('Commit hash is empty string')
    log.debug(f'Found commit hash {out}')
    return out
323

324

325
def iterate_collection(session_path: str, collection_name='raw_task_data') -> str:
    """
    Given a session path returns the next numbered collection name.

    Parameters
    ----------
    session_path : str
        The session path containing zero or more numbered collections.
    collection_name : str
        The collection name without the _NN suffix.

    Returns
    -------
    str
        The next numbered collection name.

    Examples
    --------
    In a folder where there are no raw task data folders

    >>> iterate_collection('./subject/2020-01-01/001')
    'raw_task_data_00'

    In a folder where there is one raw_imaging_data_00 folder

    >>> iterate_collection('./subject/2020-01-01/001', collection_name='raw_imaging_data')
    'raw_imaging_data_01'
    """
    if not Path(session_path).exists():
        return f'{collection_name}_00'
    # escape the name and fully anchor the pattern: the previous prefix match
    # let folders such as 'raw_task_data_00_backup' (or regex metacharacters in
    # collection_name) corrupt the numbering
    pattern = re.compile(re.escape(collection_name) + r'_([0-9]{2})')
    matches = (pattern.fullmatch(p.name) for p in Path(session_path).iterdir() if p.is_dir())
    numbers = [int(m.group(1)) for m in matches if m]
    if not numbers:
        return f'{collection_name}_00'
    return f'{collection_name}_{max(numbers) + 1:02}'
361

362

363
def create_bonsai_layout_from_template(workflow_file: Path) -> None:
    """
    Create a Bonsai layout file from a template if it does not already exist.

    If the file with the suffix `.bonsai.layout` does not exist for the given
    workflow file, this function will attempt to create it from a template
    file with the suffix `.bonsai.layout_template`. If the template file also
    does not exist, the function logs that no template layout is available.

    Background: Bonsai stores dialog settings (window position, control
    visibility, etc.) in an XML file with the suffix `.bonsai.layout`. These
    layout files are user-specific and may be overwritten locally by the user
    according to their preferences. To ensure that a default layout is
    available, a template file with the suffix `.bonsai.layout_template` can
    be provided as a starting point.

    Parameters
    ----------
    workflow_file : Path
        The path to the Bonsai workflow for which the layout is to be created.

    Raises
    ------
    FileNotFoundError
        If the provided workflow_file does not exist.
    """
    if not workflow_file.exists():
        raise FileNotFoundError(workflow_file)
    layout_file = workflow_file.with_suffix('.bonsai.layout')
    if layout_file.exists():
        # a user-specific layout is already in place; leave it untouched
        return
    template_file = workflow_file.with_suffix('.bonsai.layout_template')
    if template_file.exists():
        log.info(f'Creating default {layout_file.name}')
        shutil.copy(template_file, layout_file)
    else:
        log.debug(f'No template layout for {workflow_file.name}')
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc