int-brain-lab / iblrig — build 9031936551

10 May 2024 12:05PM UTC · coverage: 48.538% (+1.7%) from 46.79%

Pull Request #643: 8.19.0 (github · web-flow · 53c3e3)
Merge 3c8214f78 into ec2d8e4fe

377 of 1073 new or added lines in 38 files covered (35.14%)
977 existing lines in 19 files now uncovered
3253 of 6702 relevant lines covered (48.54%)
0.97 hits per line

Source File: /iblrig/choiceworld.py — 84.75% of lines covered

"""
Choice World Task related logic and functions that translate the task description in
Appendix 2 of the paper into code.
"""

import logging

import numpy as np

import iblrig.raw_data_loaders
from iblrig.path_helper import iterate_previous_sessions

log = logging.getLogger(__name__)

CONTRASTS = 1 / np.array([-1, -2, -4, -8, -16, np.inf, 16, 8, 4, 2, 1])
DEFAULT_TRAINING_PHASE = 0
DEFAULT_REWARD_VOLUME = 3


def compute_adaptive_reward_volume(subject_weight_g, reward_volume_ul, delivered_volume_ul, ntrials):
    """
    If the mouse completed over 200 trials in the previous session, the reward volume is automatically
    lowered by 0.1 microliters for the next session, but cannot go lower than a floor of 1.5 microliters.
    If the mouse received less than its minimum required daily dose (~1 milliliter per 25 grams of body
    weight) during the previous session, the reward volume is increased by 0.1 microliters for the next
    session, but cannot go above a ceiling of 3 microliters.

    :param subject_weight_g: subject weight in grams
    :param reward_volume_ul: the last reward volume setting in uL
    :param delivered_volume_ul: the cumulative water delivered during the last session in uL
    :param ntrials: the number of trials completed during the last session
    :return: adaptive_reward_ul
    """
    if subject_weight_g > (delivered_volume_ul / 1000 * 25):
        reward_volume_ul += 0.1
    elif ntrials > 200:
        reward_volume_ul -= 0.1
    return np.maximum(np.minimum(reward_volume_ul, 3), 1.5)
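
As a quick sanity check of the rule above, a hedged example with hypothetical values (not part of the module): a 25 g mouse needs roughly 1000 uL per day, so if it drank only 800 uL over 250 trials, the under-dose branch takes precedence and the volume is raised by 0.1 uL before clipping to the [1.5, 3] uL range.

compute_adaptive_reward_volume(
    subject_weight_g=25, reward_volume_ul=2.0, delivered_volume_ul=800, ntrials=250
)  # -> 2.1 (the >200-trials decrement is skipped because the under-dose branch fires first)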


def get_subject_training_info(
    subject_name,
    task_name='_iblrig_tasks_trainingChoiceWorld',
    stim_gain=None,
    default_reward=DEFAULT_REWARD_VOLUME,
    mode='silent',
    **kwargs,
) -> tuple[dict, dict]:
    """
    Goes through the history of a subject and gets the latest training phase,
    adaptive reward volume and adaptive gain value for this subject.

    :param subject_name: name of the subject
    :param task_name: name of the protocol to look for in the experiment description,
     defaults to '_iblrig_tasks_trainingChoiceWorld'
    :param stim_gain: default stimulus gain returned if no previous session is found
    :param default_reward: default reward volume in uL if no previous session is available
    :param mode: 'silent' or 'raise': if 'silent', returns default values when no history is found;
     if 'raise', raises a ValueError
    :param **kwargs: optional arguments passed to iblrig.path_helper.get_local_and_remote_paths;
     if not used, the arguments from iblrig/settings/iblrig_settings.yaml apply
    :return: a training_info dictionary with keys training_phase, adaptive_reward (float between
     1.5 and 3 uL) and adaptive_gain, and a session_info dictionary with keys session_path,
     experiment_description, task_settings and file_task_data
    """
    session_info = iterate_previous_sessions(subject_name, task_name=task_name, n=1, **kwargs)
    if len(session_info) == 0:
        if mode == 'silent':
            log.warning(f'The training status for {subject_name} could not be determined - returning default values')
            return dict(training_phase=DEFAULT_TRAINING_PHASE, adaptive_reward=default_reward, adaptive_gain=stim_gain), None
        elif mode == 'raise':  # this branch is uncovered in this build
            raise ValueError(f'The training status for {subject_name} could not be determined as no previous sessions were found')
    else:
        session_info = session_info[0]
    trials_data, _ = iblrig.raw_data_loaders.load_task_jsonable(session_info.file_task_data)
    # gets the reward volume from the previous session
    previous_reward_volume = session_info.task_settings.get('ADAPTIVE_REWARD_AMOUNT_UL') or session_info.task_settings.get(
        'REWARD_AMOUNT_UL'
    )
    adaptive_reward = compute_adaptive_reward_volume(
        subject_weight_g=session_info.task_settings['SUBJECT_WEIGHT'],
        reward_volume_ul=previous_reward_volume,
        delivered_volume_ul=trials_data['reward_amount'].sum(),
        ntrials=trials_data.shape[0],
    )
    # gets the training_phase by looking at the trials table
    training_phase = trials_data['training_phase'].values[-1] if 'training_phase' in trials_data else DEFAULT_TRAINING_PHASE
    # gets the adaptive gain
    adaptive_gain = session_info.task_settings.get('ADAPTIVE_GAIN_VALUE', session_info.task_settings.get('AG_INIT_VALUE'))
    if np.sum(trials_data['response_side'] != 0) > 200:
        adaptive_gain = session_info.task_settings.get('STIM_GAIN')  # uncovered in this build
    return dict(training_phase=training_phase, adaptive_reward=adaptive_reward, adaptive_gain=adaptive_gain), session_info
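
A hedged usage sketch for the function above ('SW_023' is a hypothetical subject name; with no kwargs, the paths from iblrig/settings/iblrig_settings.yaml apply):

training_info, session_info = get_subject_training_info('SW_023', mode='silent')
print(training_info['training_phase'], training_info['adaptive_reward'], training_info['adaptive_gain'])
# session_info is None when no previous session is found in 'silent' mode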


def training_contrasts_probabilities(phase=1):
    match phase:
        case 0:  # Starts with only 100% and 50% contrasts.
            frequencies = np.abs(CONTRASTS) >= 0.5
        case 1:  # The 25% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.25
        case 2:  # The 12.5% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.125
        case 3:  # The 6.25% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0.0625
        case 4:  # The 0% contrast is added to the set.
            frequencies = np.abs(CONTRASTS) >= 0
        case 5:  # The 50% contrast is removed from the set.
            frequencies = np.abs(CONTRASTS) != 0.5
        case _:  # guard against undefined phases, which would otherwise raise a NameError below
            raise ValueError(f'Training phase must be between 0 and 5, got {phase}')
    return frequencies / np.sum(frequencies)
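
For intuition, a worked example: at phase 0 only |contrast| >= 0.5 qualifies, i.e. four of the eleven CONTRASTS entries (-1, -0.5, 0.5 and 1), so each is drawn with probability 0.25.

p = training_contrasts_probabilities(phase=0)
assert np.isclose(p.sum(), 1.0)  # probabilities are normalised
# p is 0.25 at the four qualifying positions and 0 elsewhere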


def draw_training_contrast(phase: int) -> float:
    probabilities = training_contrasts_probabilities(phase)
    return np.random.choice(CONTRASTS, p=probabilities)


def contrasts_set(phase: int) -> np.ndarray:
    probabilities = training_contrasts_probabilities(phase)
    return CONTRASTS[probabilities > 0]
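
A hedged example combining the two helpers above: the phase-2 set adds the 25% and 12.5% contrasts on both sides, and draws are uniform over that set.

contrasts_set(2)           # -> array([-1., -0.5, -0.25, -0.125, 0.125, 0.25, 0.5, 1.])
draw_training_contrast(2)  # e.g. 0.25, sampled uniformly from the set above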


def training_phase_from_contrast_set(contrast_set: list[float]) -> int:
    # NOTE: the body of this function is uncovered in this build
    contrast_set = sorted(contrast_set)
    for phase in range(6):
        expected_set = CONTRASTS[np.logical_and(training_contrasts_probabilities(phase) > 0, CONTRASTS >= 0)]
        if np.array_equal(contrast_set, expected_set):
            return phase
    raise Exception(f'Could not determine training phase from contrast set {contrast_set}')
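
A hedged round trip for the function above (note that it compares the input against the non-negative half of each phase's contrast set):

cs = contrasts_set(3)
assert training_phase_from_contrast_set(sorted(cs[cs >= 0])) == 3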