
int-brain-lab / iblrig / 14196118657

01 Apr 2025 12:52PM UTC coverage: 47.634% (+0.8%) from 46.79%

Pull Request #795: fixes for habituation CW
Commit cfb5bd by web-flow (via github): Merge 5ba5d5f25 into 58cf64236

11 of 12 new or added lines in 1 file covered. (91.67%)

1083 existing lines in 22 files now uncovered.

4288 of 9002 relevant lines covered (47.63%)

0.95 hits per line
Source File
/iblrig/choiceworld.py

"""
Choice World Task related logic and functions that translate the task description in
Appendix 2 of the paper into code.
"""

import logging
from typing import Literal

import numpy as np
import numpy.typing as npt
import pandas as pd

import iblrig.raw_data_loaders
from iblrig.path_helper import iterate_previous_sessions

log = logging.getLogger(__name__)

# signed contrast set: ±1, ±0.5, ±0.25, ±0.125, ±0.0625 and 0 (from 1 / inf)
CONTRASTS = 1 / np.array([-1, -2, -4, -8, -16, np.inf, 16, 8, 4, 2, 1])
DEFAULT_TRAINING_PHASE = 0
DEFAULT_REWARD_VOLUME = 3.0


def compute_adaptive_reward_volume(subject_weight_g, reward_volume_ul, delivered_volume_ul, ntrials):
    """
    If the mouse completed over 200 trials in the previous session, the reward volume is automatically
    lowered by 0.1 microliters for the next session, but cannot go lower than a floor of 1.5 microliters.
    If the mouse received less than its minimum required daily dose (~1 milliliter/25 grams of body weight)
    during the previous session, the reward volume is increased by 0.1 microliters for the next session,
    but cannot go above a ceiling of 3 microliters.

    :param subject_weight_g: subject weight in grams
    :param reward_volume_ul: the last reward volume setting in uL
    :param delivered_volume_ul: the cumulative water delivered during the last session in uL
    :param ntrials: number of trials completed during the last session
    :return: adaptive_reward_ul
    """
    if subject_weight_g > (delivered_volume_ul / 1000 * 25):
        reward_volume_ul += 0.1
    elif ntrials > 200:
        reward_volume_ul -= 0.1
    return np.maximum(np.minimum(reward_volume_ul, 3), 1.5)
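
# Illustrative example (not part of the module source): how the adaptive reward rule above plays
# out, using made-up numbers. A 25 g mouse needs ~1000 uL per day; if it only received 900 uL in
# the last session, the reward volume is bumped up by 0.1 uL:
#     compute_adaptive_reward_volume(subject_weight_g=25, reward_volume_ul=2.0, delivered_volume_ul=900, ntrials=150)   -> 2.1
# If the dose was met and the mouse completed more than 200 trials, the volume drops by 0.1 uL,
# floored at 1.5 uL:
#     compute_adaptive_reward_volume(subject_weight_g=25, reward_volume_ul=1.5, delivered_volume_ul=1200, ntrials=350)  -> 1.5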


def get_subject_training_info(
    subject_name: str,
    task_name: str = '_iblrig_tasks_trainingChoiceWorld',
    stim_gain: float | None = None,
    stim_gain_on_error: float | None = None,
    default_reward: float = DEFAULT_REWARD_VOLUME,
    mode: Literal['silent', 'raise'] = 'silent',
    **kwargs,
) -> tuple[dict, dict | None]:
    """
    Goes through a subject's history and gets the latest training phase and adaptive reward volume.

    Parameters
    ----------
    subject_name : str
        Name of the subject.
    task_name : str, optional
        Name of the protocol to look for in the experiment description, defaults to '_iblrig_tasks_trainingChoiceWorld'.
    stim_gain : float, optional
        Default stimulus gain if no previous session is available, defaults to None.
    stim_gain_on_error : float, optional
        Default stimulus gain if an exception occurred while obtaining the previous session's info, defaults to None.
    default_reward : float, optional
        Default reward volume in uL if no previous session is available.
    mode : str, optional
        If 'silent', returns default values when no history is found; if 'raise', raises a ValueError.
    **kwargs
        Optional arguments to be passed to get_local_and_remote_paths.

    Returns
    -------
    training_info : dict
        Dictionary with keys: training_phase, adaptive_reward, adaptive_gain
    session_info : dict or None
        Dictionary with keys: session_path, experiment_description, task_settings, file_task_data
    """
    # default values (if no previous session is available)
    training_info = {
        'training_phase': DEFAULT_TRAINING_PHASE,
        'adaptive_reward': default_reward,
        'adaptive_gain': stim_gain,
    }

    # try to obtain the subject's previous session's info
    try:
        session_info = iterate_previous_sessions(subject_name, task_name=task_name, n=1, **kwargs)
        if len(session_info) > 0:
            session_info = session_info[0]
            task_settings = session_info.get('task_settings')
            trials_data, _ = iblrig.raw_data_loaders.load_task_jsonable(session_info.get('file_task_data'))
    except Exception as e:
        log.exception(msg='Error obtaining training information from previous session!', exc_info=e)
        training_info['adaptive_gain'] = stim_gain_on_error
        session_info = []

    # handle lack of previous sessions
    if len(session_info) == 0:
        if mode == 'silent':
            log.warning(
                f"Could not determine training status for subject '{subject_name}' - returning default values "
                f'(training phase: {training_info["training_phase"]}, adaptive reward: '
                f'{training_info["adaptive_reward"]}, adaptive gain: {training_info["adaptive_gain"]})'
            )
            return training_info, None
        else:
            raise ValueError(f'The training status for {subject_name} could not be determined as no previous sessions were found')

    # compute reward volume from previous session
    prev_reward_vol = task_settings.get('ADAPTIVE_REWARD_AMOUNT_UL') or task_settings.get('REWARD_AMOUNT_UL')
    training_info['adaptive_reward'] = compute_adaptive_reward_volume(
        subject_weight_g=task_settings.get('SUBJECT_WEIGHT'),
        reward_volume_ul=prev_reward_vol,
        delivered_volume_ul=trials_data.get('reward_amount').sum(),
        ntrials=trials_data.shape[0],
    )

    # retrieve training_phase from the previous session's trials table
    if 'training_phase' in trials_data:
        training_info['training_phase'] = trials_data['training_phase'].values[-1]

    # set adaptive gain depending on the number of correct trials in the previous session;
    # also fix negative adaptive gain values (due to a bug in the GUI prior to v8.21.0)
    if np.sum(trials_data['response_side'] != 0) > 200:
        training_info['adaptive_gain'] = task_settings.get('STIM_GAIN')
    elif task_settings.get('ADAPTIVE_GAIN_VALUE', 1) < 0:
        training_info['adaptive_gain'] = task_settings.get('AG_INIT_VALUE')
    else:
        training_info['adaptive_gain'] = task_settings.get('ADAPTIVE_GAIN_VALUE', task_settings.get('AG_INIT_VALUE'))

    return training_info, session_info
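
# Illustrative usage (not part of the module source); the subject name and returned values below
# are made up. On a rig where previous trainingChoiceWorld sessions exist on disk, one would call:
#     training_info, session_info = get_subject_training_info('example_subject')
# and training_info would look like {'training_phase': 3, 'adaptive_reward': 1.9, 'adaptive_gain': 4.0}.
# With no session history and mode='silent' (the default), the default values are returned and
# session_info is None; with mode='raise', a ValueError is raised instead.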


def training_contrasts_probabilities(phase=1):
    """Return the probability of drawing each contrast in CONTRASTS for a given training phase."""
    match phase:
        case 0:  # Starts with only 100% and 50% contrasts.
            frequencies = np.abs(CONTRASTS) >= 0.5
        case 1:  # The 25% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.25
        case 2:  # The 12.5% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.125
        case 3:  # The 6.25% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.0625
        case 4:  # The 0% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0
        case 5:  # The 50% contrast is removed from the set.
            frequencies = np.abs(CONTRASTS) != 0.5
        case _:
            raise ValueError(f'{phase} is not a valid value for training phase')
    return frequencies / np.sum(frequencies)
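
# Illustrative example (not part of the module source): in phase 0 only the 100% and 50% contrasts
# have non-zero probability, distributed uniformly over the four signed contrasts:
#     training_contrasts_probabilities(0)
#     -> array([0.25, 0.25, 0., 0., 0., 0., 0., 0., 0., 0.25, 0.25])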


def draw_training_contrast(phase: int) -> float:
    probabilities = training_contrasts_probabilities(phase)
    return np.random.choice(CONTRASTS, p=probabilities)


def contrasts_set(phase: int) -> np.array:
    probabilities = training_contrasts_probabilities(phase)
    return CONTRASTS[probabilities > 0]
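
# Illustrative example (not part of the module source): contrasts_set returns the contrasts with
# non-zero probability for a phase, and draw_training_contrast samples one of them:
#     contrasts_set(0)           -> array([-1. , -0.5,  0.5,  1. ])
#     draw_training_contrast(0)  -> one of -1.0, -0.5, 0.5 or 1.0, each with probability 0.25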


def training_phase_from_contrast_set(contrast_set: npt.ArrayLike) -> int:
    """Return the training phase whose contrast set matches the given (signed or absolute) contrast values."""
    contrast_set = np.unique(np.abs(contrast_set)).astype(float)
    contrast_mask = CONTRASTS >= 0
    for phase in range(6):
        expected_set = CONTRASTS[np.logical_and(training_contrasts_probabilities(phase) > 0, contrast_mask)]
        if np.array_equal(contrast_set, expected_set):
            return phase
    raise ValueError(f'Could not determine training phase from contrast set {contrast_set}')
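
# Illustrative example (not part of the module source): training_phase_from_contrast_set inverts
# contrasts_set, so the round trip recovers the phase for every valid training phase:
#     all(training_phase_from_contrast_set(contrasts_set(p)) == p for p in range(6))   -> True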


def compute_performance(
    trials_table: pd.DataFrame,
) -> pd.DataFrame:
    """
    Aggregates performance metrics for each signed contrast in the trials table.

    :param trials_table: pandas DataFrame of trials data with at least the columns
        'position', 'contrast' and 'trial_correct'
    :return: DataFrame indexed by signed_contrast with columns last_50_perf and ntrials
    """
    trials_table = trials_table.loc[trials_table.position.notna(), :].copy()
    trials_table['signed_contrast'] = trials_table['contrast'] * np.sign(trials_table['position'])
    df_performance = trials_table.groupby(['signed_contrast']).agg(
        # fraction of correct trials among the last 50 (denominator is fixed at 50)
        last_50_perf=pd.NamedAgg(column='trial_correct', aggfunc=lambda x: np.sum(x[np.maximum(-50, -x.size) :]) / 50),
        ntrials=pd.NamedAgg(column='trial_correct', aggfunc='count'),
    )
    return df_performance
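
# Illustrative example (not part of the module source): computing performance from a minimal,
# made-up trials table containing the columns used above:
#     trials = pd.DataFrame({
#         'position': [-35, 35, 35, -35],
#         'contrast': [1.0, 1.0, 0.5, 0.5],
#         'trial_correct': [True, False, True, True],
#     })
#     compute_performance(trials)
# returns a DataFrame indexed by signed_contrast (-1.0, -0.5, 0.5, 1.0) with columns
# last_50_perf (correct trials among the last 50, divided by 50) and ntrials.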