
int-brain-lab / iblrig, build 10568073180

26 Aug 2024 10:13PM UTC. Coverage: 47.538% (+0.7%) from 46.79%

Pull Request #711: 8.23.2 (github / web-flow, commit eeff82)
Merge 599c9edfb into ad41db25f

121 of 135 new or added lines in 8 files covered (89.63%)
1025 existing lines in 22 files now uncovered
4084 of 8591 relevant lines covered (47.54%)
0.95 hits per line
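
The headline percentages follow directly from the raw counts; a quick check in plain Python (illustrative only, not part of iblrig or Coveralls):

covered, relevant = 4084, 8591
print(f'{covered / relevant:.2%}')       # 47.54% -> "relevant lines covered"
new_covered, new_added = 121, 135
print(f'{new_covered / new_added:.2%}')  # 89.63% -> "new or added lines covered"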

Source File

/iblrig_tasks/_iblrig_tasks_advancedChoiceWorld/task.py (98.0% covered)
from pathlib import Path

import numpy as np
import pandas as pd
import yaml

import iblrig.misc
from iblrig.base_choice_world import NTRIALS_INIT, ActiveChoiceWorldSession

# read defaults from task_parameters.yaml
with open(Path(__file__).parent.joinpath('task_parameters.yaml')) as f:
    DEFAULTS = yaml.safe_load(f)


class Session(ActiveChoiceWorldSession):
    """
    Advanced Choice World is the ChoiceWorld task using fixed 50/50 probability for the side
    and contrasts defined in the parameters.
    It differs from TrainingChoiceWorld in that it does not implement adaptive contrasts or debiasing,
    and it differs from BiasedChoiceWorld in that it does not implement biased blocks.
    """

    protocol_name = '_iblrig_tasks_advancedChoiceWorld'

    def __init__(
        self,
        *args,
        contrast_set: list[float] = DEFAULTS['CONTRAST_SET'],
        probability_set: list[float] = DEFAULTS['PROBABILITY_SET'],
        reward_set_ul: list[float] = DEFAULTS['REWARD_SET_UL'],
        position_set: list[float] = DEFAULTS['POSITION_SET'],
        stim_gain: float = DEFAULTS['STIM_GAIN'],
        stim_reverse: bool = DEFAULTS['STIM_REVERSE'],
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        nc = len(contrast_set)
        assert len(probability_set) in [nc, 1], 'probability_set must be a scalar or have the same length as contrast_set'
        assert len(reward_set_ul) in [nc, 1], 'reward_set_ul must be a scalar or have the same length as contrast_set'
        assert len(position_set) == nc, 'position_set must have the same length as contrast_set'
        self.task_params['CONTRAST_SET'] = contrast_set
        self.task_params['PROBABILITY_SET'] = probability_set
        self.task_params['REWARD_SET_UL'] = reward_set_ul
        self.task_params['POSITION_SET'] = position_set
        self.task_params['STIM_GAIN'] = stim_gain
        self.task_params['STIM_REVERSE'] = stim_reverse
        # it is easier to work with the parameters as a dataframe
        self.df_contingencies = pd.DataFrame(columns=['contrast', 'probability', 'reward_amount_ul', 'position'])
        self.df_contingencies['contrast'] = contrast_set
        self.df_contingencies['probability'] = np.float64(probability_set if len(probability_set) == nc else probability_set[0])
        self.df_contingencies['reward_amount_ul'] = reward_set_ul if len(reward_set_ul) == nc else reward_set_ul[0]
        self.df_contingencies['position'] = position_set
        # normalize the probabilities
        self.df_contingencies.loc[:, 'probability'] = self.df_contingencies.loc[:, 'probability'] / np.sum(
            self.df_contingencies.loc[:, 'probability']
        )
        # update the PROBABILITY_LEFT field to reflect the probabilities in the parameters above
        self.task_params['PROBABILITY_LEFT'] = np.sum(
            self.df_contingencies['probability'] * (self.df_contingencies['position'] < 0)
        )
        self.trials_table['debias_trial'] = np.zeros(NTRIALS_INIT, dtype=bool)

    def draw_next_trial_info(self, **kwargs):
        nc = self.df_contingencies.shape[0]
        ic = np.random.choice(np.arange(nc), p=self.df_contingencies['probability'])
        # now call the superclass with the proper parameters
        super().draw_next_trial_info(
            pleft=self.task_params.PROBABILITY_LEFT,
            contrast=self.df_contingencies.at[ic, 'contrast'],
            position=self.df_contingencies.at[ic, 'position'],
            reward_amount=self.df_contingencies.at[ic, 'reward_amount_ul'],
        )

    @property
    def reward_amount(self):
        # the only line in this file flagged as uncovered in this report
        return self.task_params.REWARD_AMOUNTS_UL[0]

    @staticmethod
    def extra_parser():
        """:return: argparse.ArgumentParser"""
        parser = super(Session, Session).extra_parser()
        parser.add_argument(
            '--contrast_set',
            option_strings=['--contrast_set'],
            dest='contrast_set',
            default=DEFAULTS['CONTRAST_SET'],
            nargs='+',
            type=float,
            help='Set of contrasts to present',
        )
        parser.add_argument(
            '--probability_set',
            option_strings=['--probability_set'],
            dest='probability_set',
            default=DEFAULTS['PROBABILITY_SET'],
            nargs='+',
            type=float,
            help='Probabilities of each contrast in contrast_set. If scalar, all contrasts are equiprobable',
        )
        parser.add_argument(
            '--reward_set_ul',
            option_strings=['--reward_set_ul'],
            dest='reward_set_ul',
            default=DEFAULTS['REWARD_SET_UL'],
            nargs='+',
            type=float,
            help='Reward (µl) for each contrast in contrast_set.',
        )
        parser.add_argument(
            '--position_set',
            option_strings=['--position_set'],
            dest='position_set',
            default=DEFAULTS['POSITION_SET'],
            nargs='+',
            type=float,
            help='Position for each contrast in contrast_set.',
        )
        parser.add_argument(
            '--stim_gain',
            option_strings=['--stim_gain'],
            dest='stim_gain',
            default=DEFAULTS['STIM_GAIN'],
            type=float,
            help=f'Visual angle/wheel displacement (deg/mm, default: {DEFAULTS["STIM_GAIN"]})',
        )
        parser.add_argument(
            '--stim_reverse',
            option_strings=['--stim_reverse'],
            action='store_true',
            dest='stim_reverse',
            help='Invert the relationship of wheel to stimulus movement',
        )
        return parser

    def next_trial(self):
        # update counters
        self.trial_num += 1
        # save and send trial info to bonsai
        self.draw_next_trial_info(pleft=self.task_params.PROBABILITY_LEFT)


if __name__ == '__main__':  # pragma: no cover
    kwargs = iblrig.misc.get_task_arguments(parents=[Session.extra_parser()])
    sess = Session(**kwargs)
    sess.run()
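
For orientation, the contingency logic in __init__ can be exercised on its own. A minimal sketch, assuming only numpy and pandas; the parameter values below are made up for illustration (the real defaults live in task_parameters.yaml), and a full Session additionally needs the rig configuration supplied by get_task_arguments:

import numpy as np
import pandas as pd

contrast_set = [1.0, 0.5, 0.25, 0.0]       # hypothetical values for illustration
probability_set = [1.0]                    # scalar -> all contrasts equiprobable
position_set = [-35.0, 35.0, -35.0, 35.0]  # negative positions are on the left

nc = len(contrast_set)
df = pd.DataFrame({'contrast': contrast_set, 'position': position_set})
df['probability'] = np.float64(probability_set if len(probability_set) == nc else probability_set[0])
df['probability'] = df['probability'] / df['probability'].sum()  # normalize to sum to 1
p_left = np.sum(df['probability'] * (df['position'] < 0))        # PROBABILITY_LEFT, as in __init__
print(p_left)  # 0.5 for these values

From the command line, the flags added by extra_parser override the YAML defaults, for example: python task.py --contrast_set 1.0 0.5 0.25 0.0 --probability_set 0.2 0.3 0.3 0.2 --stim_reverse (any rig-specific arguments required by get_task_arguments are omitted here).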