• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

int-brain-lab / iblrig / 9032957364

10 May 2024 01:25PM UTC coverage: 48.538% (+1.7%) from 46.79%
9032957364

Pull #643

github

74d2ec
web-flow
Merge aebf2c9af into ec2d8e4fe
Pull Request #643: 8.19.0

377 of 1074 new or added lines in 38 files covered. (35.1%)

977 existing lines in 19 files now uncovered.

3253 of 6702 relevant lines covered (48.54%)

0.97 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.92
/iblrig_tasks/_iblrig_tasks_advancedChoiceWorld/task.py
1
from pathlib import Path

import numpy as np
import pandas as pd
import yaml

import iblrig.misc
from iblrig.base_choice_world import NTRIALS_INIT, ActiveChoiceWorldSession

# read defaults from task_parameters.yaml (sits next to this module).
# These values seed both the Session constructor defaults and the CLI parser defaults.
with open(Path(__file__).parent.joinpath('task_parameters.yaml')) as f:
    DEFAULTS = yaml.safe_load(f)
13

14

15
class Session(ActiveChoiceWorldSession):
    """
    Advanced Choice World is the ChoiceWorld task using fixed 50/50 probability for the side
    and contrasts defined in the parameters.
    It differs from TrainingChoiceWorld in that it does not implement adaptive contrasts or debiasing,
    and it differs from BiasedChoiceWorld in that it does not implement biased blocks.
    """

    protocol_name = '_iblrig_tasks_advancedChoiceWorld'

    def __init__(
        self,
        *args,
        contrast_set: list[float] = DEFAULTS['CONTRAST_SET'],
        probability_set: list[float] = DEFAULTS['PROBABILITY_SET'],
        reward_set_ul: list[float] = DEFAULTS['REWARD_SET_UL'],
        position_set: list[float] = DEFAULTS['POSITION_SET'],
        stim_gain: float = DEFAULTS['STIM_GAIN'],
        **kwargs,
    ):
        """
        :param contrast_set: set of contrasts to present
        :param probability_set: relative weight of each contrast; a single value makes all
            contrasts equiprobable (weights are normalized to sum to 1 below)
        :param reward_set_ul: reward amount (µl) per contrast; a single value applies to all
        :param position_set: stimulus position per contrast (negative values = left side)
        :param stim_gain: visual angle per wheel displacement (deg/mm)
        """
        super().__init__(*args, **kwargs)
        nc = len(contrast_set)
        # NOTE(review): asserts are stripped under `python -O`; kept for parity with the code base
        assert len(probability_set) in [nc, 1], 'probability_set must be a scalar or have the same length as contrast_set'
        assert len(reward_set_ul) in [nc, 1], 'reward_set_ul must be a scalar or have the same length as contrast_set'
        assert len(position_set) == nc, 'position_set must have the same length as contrast_set'
        self.task_params['CONTRAST_SET'] = contrast_set
        self.task_params['PROBABILITY_SET'] = probability_set
        self.task_params['REWARD_SET_UL'] = reward_set_ul
        self.task_params['POSITION_SET'] = position_set
        self.task_params['STIM_GAIN'] = stim_gain
        # it is easier to work with parameters as a dataframe: one row per contingency
        self.df_contingencies = pd.DataFrame(columns=['contrast', 'probability', 'reward_amount_ul', 'position'])
        self.df_contingencies['contrast'] = contrast_set
        # scalar (length-1) probability/reward sets broadcast to every contrast row
        self.df_contingencies['probability'] = probability_set if len(probability_set) == nc else probability_set[0]
        self.df_contingencies['reward_amount_ul'] = reward_set_ul if len(reward_set_ul) == nc else reward_set_ul[0]
        self.df_contingencies['position'] = position_set
        # normalize the probabilities so they form a valid distribution for np.random.choice
        self.df_contingencies.loc[:, 'probability'] = self.df_contingencies.loc[:, 'probability'] / np.sum(
            self.df_contingencies.loc[:, 'probability']
        )
        # update the PROBABILITY LEFT field to reflect the probabilities in the parameters above
        # (sum of probabilities of all contingencies whose position is on the left, i.e. negative)
        self.task_params['PROBABILITY_LEFT'] = np.sum(
            self.df_contingencies['probability'] * (self.df_contingencies['position'] < 0)
        )
        self.trials_table['debias_trial'] = np.zeros(NTRIALS_INIT, dtype=bool)

    def draw_next_trial_info(self, **kwargs):
        """Draw one contingency row at random (weighted by its probability) and forward it to the base class."""
        nc = self.df_contingencies.shape[0]
        ic = np.random.choice(np.arange(nc), p=self.df_contingencies['probability'])
        # now calling the super class with the proper parameters
        super().draw_next_trial_info(
            pleft=self.task_params.PROBABILITY_LEFT,
            contrast=self.df_contingencies.at[ic, 'contrast'],
            position=self.df_contingencies.at[ic, 'position'],
            reward_amount=self.df_contingencies.at[ic, 'reward_amount_ul'],
        )

    @property
    def reward_amount(self):
        """Default reward amount (µl): the first entry of the reward set."""
        # BUGFIX: __init__ stores this parameter under the key 'REWARD_SET_UL';
        # the previous 'REWARD_AMOUNTS_UL' is defined nowhere in this module and
        # raised AttributeError on access (the line was flagged as never covered).
        return self.task_params.REWARD_SET_UL[0]

    @staticmethod
    def extra_parser():
        """:return: argparse.parser()"""
        parser = super(Session, Session).extra_parser()
        # NOTE: the redundant `option_strings=` kwarg previously passed to each
        # add_argument call is silently overridden by argparse and has been removed.
        parser.add_argument(
            '--contrast_set',
            dest='contrast_set',
            default=DEFAULTS['CONTRAST_SET'],
            nargs='+',
            type=float,
            help='Set of contrasts to present',
        )
        parser.add_argument(
            '--probability_set',
            dest='probability_set',
            default=DEFAULTS['PROBABILITY_SET'],
            nargs='+',
            type=float,
            help='Probabilities of each contrast in contrast_set. If scalar all contrasts are equiprobable',
        )
        parser.add_argument(
            '--reward_set_ul',
            dest='reward_set_ul',
            default=DEFAULTS['REWARD_SET_UL'],
            nargs='+',
            type=float,
            help='Reward for contrast in contrast set.',
        )
        parser.add_argument(
            '--position_set',
            dest='position_set',
            default=DEFAULTS['POSITION_SET'],
            nargs='+',
            type=float,
            help='Position for each contrast in contrast set.',
        )
        parser.add_argument(
            '--stim_gain',
            dest='stim_gain',
            default=DEFAULTS['STIM_GAIN'],
            type=float,
            help=f'Visual angle/wheel displacement (deg/mm, default: {DEFAULTS["STIM_GAIN"]})',
        )
        return parser

    def next_trial(self):
        """Advance the trial counter and draw the next trial's parameters."""
        # update counters
        self.trial_num += 1
        # save and send trial info to bonsai
        # (the pleft kwarg is accepted but unused by draw_next_trial_info above; kept for interface parity)
        self.draw_next_trial_info(pleft=self.task_params.PROBABILITY_LEFT)
131

132

133
if __name__ == '__main__':  # pragma: no cover
    # Parse the CLI arguments (base task options plus this task's extra parser)
    # and launch the session.
    task_kwargs = iblrig.misc.get_task_arguments(parents=[Session.extra_parser()])
    session = Session(**task_kwargs)
    session.run()
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc