int-brain-lab / iblrig, build 10568073180 (GitHub)
26 Aug 2024 10:13PM UTC, coverage: 47.538% (+0.7%) from 46.79%
Pull Request #711: 8.23.2 (merge 599c9edfb into ad41db25f, committed by web-flow, eeff82)

121 of 135 new or added lines in 8 files covered (89.63%)
1025 existing lines in 22 files now uncovered
4084 of 8591 relevant lines covered (47.54%)
0.95 hits per line

Source file: /iblrig/choiceworld.py (88.57% covered)

"""
Choice World Task-related logic and functions that translate the task description in
Appendix 2 of the paper into code.
"""

import logging
from typing import Literal

import numpy as np

import iblrig.raw_data_loaders
from iblrig.path_helper import iterate_previous_sessions

log = logging.getLogger(__name__)

CONTRASTS = 1 / np.array([-1, -2, -4, -8, -16, np.inf, 16, 8, 4, 2, 1])
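# Note (added for clarity; not part of the original module): the expression above yields
# the signed contrast levels [-1, -0.5, -0.25, -0.125, -0.0625, 0, 0.0625, 0.125, 0.25, 0.5, 1],
# with np.inf mapping to the 0% contrast and the sign presumably encoding the stimulus side.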
DEFAULT_TRAINING_PHASE = 0
DEFAULT_REWARD_VOLUME = 3.0


def compute_adaptive_reward_volume(subject_weight_g, reward_volume_ul, delivered_volume_ul, ntrials):
    """
    If the mouse completed over 200 trials in the previous session, the reward volume is automatically
    lowered by 0.1 microliters for the next session, but cannot go lower than a floor of 1.5 microliters.
    If the mouse received less than its minimum required daily dose (~1 milliliter/25 grams of body weight)
    during the previous session, the reward volume is increased by 0.1 microliters for the next session,
    but cannot go above a ceiling of 3 microliters.

    :param subject_weight_g: the subject's weight in grams
    :param reward_volume_ul: the last reward volume setting in uL
    :param delivered_volume_ul: the cumulative water delivered during the last session in uL
    :param ntrials: the number of trials completed during the last session
    :return: adaptive_reward_ul
    """
    if subject_weight_g > (delivered_volume_ul / 1000 * 25):
        reward_volume_ul += 0.1
    elif ntrials > 200:
        reward_volume_ul -= 0.1
    return np.maximum(np.minimum(reward_volume_ul, 3), 1.5)
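
# Example (added for illustration; not part of the original module): with sufficient water
# delivered and more than 200 trials completed, the reward volume is stepped down by 0.1 uL:
#     compute_adaptive_reward_volume(subject_weight_g=25, reward_volume_ul=3.0,
#                                    delivered_volume_ul=2500, ntrials=250)  # -> 2.9
# A session with too little water delivered instead steps the volume up by 0.1 uL; the
# result is always clamped to the 1.5-3.0 uL range.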


def get_subject_training_info(
    subject_name: str,
    task_name: str = '_iblrig_tasks_trainingChoiceWorld',
    stim_gain: float | None = None,
    stim_gain_on_error: float | None = None,
    default_reward: float = DEFAULT_REWARD_VOLUME,
    mode: Literal['silent', 'raise'] = 'silent',
    **kwargs,
) -> tuple[dict, dict | None]:
    """
    Goes through a subject's history and gets the latest training phase and adaptive reward volume.

    Parameters
    ----------
    subject_name : str
        Name of the subject.
    task_name : str, optional
        Name of the protocol to look for in the experiment description;
        defaults to '_iblrig_tasks_trainingChoiceWorld'.
    stim_gain : float, optional
        Default stimulus gain if no previous session is available; defaults to None.
    stim_gain_on_error : float, optional
        Default stimulus gain if there was an exception whilst obtaining the previous session's info;
        defaults to None.
    default_reward : float, optional
        Default reward volume in uL if no previous session is available.
    mode : str, optional
        If 'silent', returns default values when no history is found; if 'raise', raises a ValueError.
    **kwargs
        Optional arguments to be passed to get_local_and_remote_paths.

    Returns
    -------
    training_info : dict
        Dictionary with keys: training_phase, adaptive_reward, adaptive_gain
    session_info : dict or None
        Dictionary with keys: session_path, experiment_description, task_settings, file_task_data
    """
    # default values (if no previous session is available)
    training_info = {
        'training_phase': DEFAULT_TRAINING_PHASE,
        'adaptive_reward': default_reward,
        'adaptive_gain': stim_gain,
    }

    # try to obtain the subject's previous session's info
    try:
        session_info = iterate_previous_sessions(subject_name, task_name=task_name, n=1, **kwargs)
        if len(session_info) > 0:
            session_info = session_info[0]
            task_settings = session_info.get('task_settings')
            trials_data, _ = iblrig.raw_data_loaders.load_task_jsonable(session_info.get('file_task_data'))
    except Exception as e:
        log.exception(msg='Error obtaining training information from previous session!', exc_info=e)
        training_info['adaptive_gain'] = stim_gain_on_error
        session_info = []

    # handle lack of previous sessions
    if len(session_info) == 0:
        if mode == 'silent':
            log.warning(
                f"Could not determine training status for subject '{subject_name}' - returning default values "
                f'(training phase: {training_info["training_phase"]}, adaptive reward: '
                f'{training_info["adaptive_reward"]}, adaptive gain: {training_info["adaptive_gain"]})'
            )
            return training_info, None
        else:
            raise ValueError(f'The training status for {subject_name} could not be determined as no previous sessions were found')

    # compute reward volume from previous session
    prev_reward_vol = task_settings.get('ADAPTIVE_REWARD_AMOUNT_UL') or task_settings.get('REWARD_AMOUNT_UL')
    training_info['adaptive_reward'] = compute_adaptive_reward_volume(
        subject_weight_g=task_settings.get('SUBJECT_WEIGHT'),
        reward_volume_ul=prev_reward_vol,
        delivered_volume_ul=trials_data.get('reward_amount').sum(),
        ntrials=trials_data.shape[0],
    )

    # retrieve training_phase from the previous session's trials table
    if 'training_phase' in trials_data:
        training_info['training_phase'] = trials_data['training_phase'].values[-1]

    # set adaptive gain depending on the number of correct trials in the previous session.
    # also fix negative adaptive gain values (due to a bug in the GUI prior to v8.21.0)
    if np.sum(trials_data['response_side'] != 0) > 200:
        training_info['adaptive_gain'] = task_settings.get('STIM_GAIN')
    elif task_settings.get('ADAPTIVE_GAIN_VALUE', 1) < 0:
        training_info['adaptive_gain'] = task_settings.get('AG_INIT_VALUE')
    else:
        training_info['adaptive_gain'] = task_settings.get('ADAPTIVE_GAIN_VALUE', task_settings.get('AG_INIT_VALUE'))

    return training_info, session_info
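
# Example (added for illustration; the subject name below is hypothetical):
#     training_info, session_info = get_subject_training_info('SWC_001')
#     training_info['training_phase']   # latest phase, or DEFAULT_TRAINING_PHASE if no history
#     training_info['adaptive_reward']  # reward volume in uL to use for the next session
#     training_info['adaptive_gain']    # stimulus gain to use for the next session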


def training_contrasts_probabilities(phase=1):
    match phase:
        case 0:  # Starts with only 100% and 50% contrasts.
            frequencies = np.abs(CONTRASTS) >= 0.5
        case 1:  # The 25% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.25
        case 2:  # The 12.5% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.125
        case 3:  # The 6.25% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.0625
        case 4:  # The 0% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0
        case 5:  # The 50% contrast is removed from the set.
            frequencies = np.abs(CONTRASTS) != 0.5
    return frequencies / np.sum(frequencies)
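
# Example (added for illustration; not part of the original module): at phase 0 only the
# +/-100% and +/-50% contrasts have non-zero probability, each drawn with probability 0.25:
#     training_contrasts_probabilities(0)
#     # -> array([0.25, 0.25, 0., 0., 0., 0., 0., 0., 0., 0.25, 0.25])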


def draw_training_contrast(phase: int) -> float:
    probabilities = training_contrasts_probabilities(phase)
    return np.random.choice(CONTRASTS, p=probabilities)
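
# Example (added for illustration; not part of the original module):
#     draw_training_contrast(0)  # -> one of -1, -0.5, 0.5 or 1, each with probability 0.25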


def contrasts_set(phase: int) -> np.ndarray:
    probabilities = training_contrasts_probabilities(phase)
    return CONTRASTS[probabilities > 0]
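
# Example (added for illustration; not part of the original module): the signed contrast
# set grows with the training phase, e.g.
#     contrasts_set(0)  # -> array([-1., -0.5, 0.5, 1.])
#     contrasts_set(4)  # -> all eleven signed contrasts, including 0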


def training_phase_from_contrast_set(contrast_set: list[float]) -> int | None:
    contrast_set = sorted(contrast_set)
    for phase in range(6):
        expected_set = CONTRASTS[np.logical_and(training_contrasts_probabilities(phase) > 0, CONTRASTS >= 0)]
        if np.array_equal(contrast_set, expected_set):
            return phase
    raise Exception(f'Could not determine training phase from contrast set {contrast_set}')
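
# Example (added for illustration; not part of the original module): maps an unsigned
# contrast set back to its training phase, e.g.
#     training_phase_from_contrast_set([0.5, 1.0])        # -> 0
#     training_phase_from_contrast_set([0.25, 0.5, 1.0])  # -> 1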