int-brain-lab / iblrig: /iblrig/base_choice_world.py

"""Extends the base_tasks modules by providing task logic around the Choice World protocol."""

import abc
import json
import logging
import math
import random
import subprocess
import time
from pathlib import Path
from string import ascii_letters

import numpy as np
import pandas as pd

import iblrig.base_tasks
import iblrig.graphic
from iblrig import choiceworld, misc
from iblrig.hardware import SOFTCODE
from iblutil.io import jsonable
from iblutil.util import Bunch
from pybpodapi.com.messaging.trial import Trial
from pybpodapi.protocol import StateMachine

log = logging.getLogger(__name__)

NTRIALS_INIT = 2000
NBLOCKS_INIT = 100

# TODO: task parameters should be verified through a pydantic model
#
# Probability = Annotated[float, Field(ge=0.0, le=1.0)]
#
# class ChoiceWorldParams(BaseModel):
#     AUTOMATIC_CALIBRATION: bool = True
#     ADAPTIVE_REWARD: bool = False
#     BONSAI_EDITOR: bool = False
#     CALIBRATION_VALUE: float = 0.067
#     CONTRAST_SET: list[Probability] = Field([1.0, 0.25, 0.125, 0.0625, 0.0], min_length=1)
#     CONTRAST_SET_PROBABILITY_TYPE: Literal['uniform', 'skew_zero'] = 'uniform'
#     GO_TONE_AMPLITUDE: float = 0.0272
#     GO_TONE_DURATION: float = 0.11
#     GO_TONE_IDX: int = Field(2, ge=0)
#     GO_TONE_FREQUENCY: float = Field(5000, gt=0)
#     FEEDBACK_CORRECT_DELAY_SECS: float = 1
#     FEEDBACK_ERROR_DELAY_SECS: float = 2
#     FEEDBACK_NOGO_DELAY_SECS: float = 2
#     INTERACTIVE_DELAY: float = 0.0
#     ITI_DELAY_SECS: float = 0.5
#     NTRIALS: int = Field(2000, gt=0)
#     PROBABILITY_LEFT: Probability = 0.5
#     QUIESCENCE_THRESHOLDS: list[float] = Field(default=[-2, 2], min_length=2, max_length=2)
#     QUIESCENT_PERIOD: float = 0.2
#     RECORD_AMBIENT_SENSOR_DATA: bool = True
#     RECORD_SOUND: bool = True
#     RESPONSE_WINDOW: float = 60
#     REWARD_AMOUNT_UL: float = 1.5
#     REWARD_TYPE: str = 'Water 10% Sucrose'
#     STIM_ANGLE: float = 0.0
#     STIM_FREQ: float = 0.1
#     STIM_GAIN: float = 4.0  # wheel to stimulus relationship (degrees visual angle per mm of wheel displacement)
#     STIM_POSITIONS: list[float] = [-35, 35]
#     STIM_SIGMA: float = 7.0
#     STIM_TRANSLATION_Z: Literal[7, 8] = 7  # 7 for ephys, 8 otherwise. -p:Stim.TranslationZ-{STIM_TRANSLATION_Z} bonsai param
#     STIM_REVERSE: bool = False
#     SYNC_SQUARE_X: float = 1.33
#     SYNC_SQUARE_Y: float = -1.03
#     USE_AUTOMATIC_STOPPING_CRITERIONS: bool = True
#     VISUAL_STIMULUS: str = 'GaborIBLTask / Gabor2D.bonsai'  # null / passiveChoiceWorld_passive.bonsai
#     WHITE_NOISE_AMPLITUDE: float = 0.05
#     WHITE_NOISE_DURATION: float = 0.5
#     WHITE_NOISE_IDX: int = 3

class ChoiceWorldSession(
    iblrig.base_tasks.BonsaiRecordingMixin,
    iblrig.base_tasks.BonsaiVisualStimulusMixin,
    iblrig.base_tasks.BpodMixin,
    iblrig.base_tasks.Frame2TTLMixin,
    iblrig.base_tasks.RotaryEncoderMixin,
    iblrig.base_tasks.SoundMixin,
    iblrig.base_tasks.ValveMixin,
    iblrig.base_tasks.NetworkSession,
):
    # task_params = ChoiceWorldParams()
    base_parameters_file = Path(__file__).parent.joinpath('base_choice_world_params.yaml')

    def __init__(self, *args, delay_secs=0, **kwargs):
        super().__init__(**kwargs)
        self.task_params['SESSION_DELAY_START'] = delay_secs
        # init behaviour data
        self.movement_left = self.device_rotary_encoder.THRESHOLD_EVENTS[self.task_params.QUIESCENCE_THRESHOLDS[0]]
        self.movement_right = self.device_rotary_encoder.THRESHOLD_EVENTS[self.task_params.QUIESCENCE_THRESHOLDS[1]]
        # init counter variables
        self.trial_num = -1
        self.block_num = -1
        self.block_trial_num = -1
        # init the tables, there are 2 of them: a trials table and an ambient sensor data table
        self.trials_table = pd.DataFrame(
            {
                'contrast': np.zeros(NTRIALS_INIT) * np.NaN,
                'position': np.zeros(NTRIALS_INIT) * np.NaN,
                'quiescent_period': np.zeros(NTRIALS_INIT) * np.NaN,
                'response_side': np.zeros(NTRIALS_INIT, dtype=np.int8),
                'response_time': np.zeros(NTRIALS_INIT) * np.NaN,
                'reward_amount': np.zeros(NTRIALS_INIT) * np.NaN,
                'reward_valve_time': np.zeros(NTRIALS_INIT) * np.NaN,
                'stim_angle': np.zeros(NTRIALS_INIT) * np.NaN,
                'stim_freq': np.zeros(NTRIALS_INIT) * np.NaN,
                'stim_gain': np.zeros(NTRIALS_INIT) * np.NaN,
                'stim_phase': np.zeros(NTRIALS_INIT) * np.NaN,
                'stim_reverse': np.zeros(NTRIALS_INIT, dtype=bool),
                'stim_sigma': np.zeros(NTRIALS_INIT) * np.NaN,
                'trial_correct': np.zeros(NTRIALS_INIT, dtype=bool),
                'trial_num': np.zeros(NTRIALS_INIT, dtype=np.int16),
                'pause_duration': np.zeros(NTRIALS_INIT, dtype=float),
            }
        )

        self.ambient_sensor_table = pd.DataFrame(
            {
                'Temperature_C': np.zeros(NTRIALS_INIT) * np.NaN,
                'AirPressure_mb': np.zeros(NTRIALS_INIT) * np.NaN,
                'RelativeHumidity': np.zeros(NTRIALS_INIT) * np.NaN,
            }
        )

    @staticmethod
    def extra_parser():
        """:return: argparse.parser()"""
        parser = super(ChoiceWorldSession, ChoiceWorldSession).extra_parser()
        parser.add_argument(
            '--delay_secs',
            dest='delay_secs',
            default=0,
            type=int,
            required=False,
            help='initial delay before starting the first trial (default: 0s)',
        )
        parser.add_argument(
            '--remote',
            dest='remote_rigs',
            type=str,
            required=False,
            action='append',
            nargs='+',
            help='specify one of the remote rigs to interact with over the network',
        )
        return parser
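
    # Hypothetical command-line sketch of the two arguments added above (the actual entry point
    # depends on the task script wrapping this session class):
    #   <task_script> --delay_secs 60 --remote rig_widefield
    # With action='append' and nargs='+', repeated --remote flags accumulate as a list of lists
    # under args.remote_rigs.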
    def start_hardware(self):
        """
        Explicitly run the start methods of the various hardware mixins.

        The superclass start method is overloaded because the different hardware pieces need to be started in order.
        """
        if not self.is_mock:
            self.start_mixin_frame2ttl()
            self.start_mixin_bpod()
            self.start_mixin_valve()
            self.start_mixin_sound()
            self.start_mixin_rotary_encoder()
            self.start_mixin_bonsai_cameras()
            self.start_mixin_bonsai_microphone()
            self.start_mixin_bonsai_visual_stimulus()
            self.bpod.register_softcodes(self.softcode_dictionary())

    def _run(self):
        """Run the task with the actual state machine."""
        time_last_trial_end = time.time()
        for i in range(self.task_params.NTRIALS):  # Main loop
            # t_overhead = time.time()
            self.next_trial()
            log.info(f'Starting trial: {i}')
            # =============================================================================
            #     Start state machine definition
            # =============================================================================
            sma = self.get_state_machine_trial(i)
            log.debug('Sending state machine to bpod')
            # Send state machine description to Bpod device
            self.bpod.send_state_machine(sma)
            # t_overhead = time.time() - t_overhead
            # The ITI_DELAY_SECS defines the grey screen period within the state machine, where the
            # Bpod TTL is HIGH. The DEAD_TIME param defines the time between the last trial and the next.
            dead_time = self.task_params.get('DEAD_TIME', 0.5)
            dt = self.task_params.ITI_DELAY_SECS - dead_time - (time.time() - time_last_trial_end)
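            # Worked example (assuming ITI_DELAY_SECS = 0.5, the value in the commented parameter
            # model above, and the 0.5 s DEAD_TIME fallback): if 0.2 s elapsed since the last
            # trial ended, dt = 0.5 - 0.5 - 0.2 = -0.2 and no extra wait occurs; a positive dt,
            # and hence a sleep, only arises if DEAD_TIME is configured smaller than
            # ITI_DELAY_SECS.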
            # wait to achieve the desired ITI duration
            if dt > 0:
                time.sleep(dt)
            # Run state machine
            log.debug('running state machine')
            self.bpod.run_state_machine(sma)  # Locks until state machine 'exit' is reached
            time_last_trial_end = time.time()
            # handle pause event
            flag_pause = self.paths.SESSION_FOLDER.joinpath('.pause')
            flag_stop = self.paths.SESSION_FOLDER.joinpath('.stop')
            if flag_pause.exists() and i < (self.task_params.NTRIALS - 1):
                log.info(f'Pausing session between trials {i} and {i + 1}')
                while flag_pause.exists() and not flag_stop.exists():
                    time.sleep(1)
                self.trials_table.at[self.trial_num, 'pause_duration'] = time.time() - time_last_trial_end
                if not flag_stop.exists():
                    log.info('Resuming session')
            # save trial and update log
            self.trial_completed(self.bpod.session.current_trial.export())
            self.ambient_sensor_table.loc[i] = self.bpod.get_ambient_sensor_reading()
            self.show_trial_log()

            # handle stop event
            if flag_stop.exists():
                log.info('Stopping session after trial %d', i)
                flag_stop.unlink()
                break

    def mock(self, file_jsonable_fixture=None):
        """
        Instantiate a state machine and Bpod object to simulate a task's run.

        This is useful to test or display the state machine flow.
        """
        super().mock()

        if file_jsonable_fixture is not None:
            task_data = jsonable.read(file_jsonable_fixture)
            # pop out the bpod data from the table
            bpod_data = []
            for td in task_data:
                bpod_data.append(td.pop('behavior_data'))

            class MockTrial(Trial):
                def export(self):
                    return np.random.choice(bpod_data)
        else:

            class MockTrial(Trial):
                def export(self):
                    return {}

        self.bpod.session.trials = [MockTrial()]
        self.bpod.send_state_machine = lambda k: None
        self.bpod.run_state_machine = lambda k: time.sleep(1.2)

        daction = ('dummy', 'action')
        self.sound = Bunch({'GO_TONE': daction, 'WHITE_NOISE': daction})

        self.bpod.actions.update(
            {
                'play_tone': daction,
                'play_noise': daction,
                'stop_sound': daction,
                'rotary_encoder_reset': daction,
                'bonsai_hide_stim': daction,
                'bonsai_show_stim': daction,
                'bonsai_closed_loop': daction,
                'bonsai_freeze_stim': daction,
                'bonsai_show_center': daction,
            }
        )
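
    # Hypothetical usage sketch: simulate a run without hardware by replaying Bpod data from a
    # previously recorded jsonable file (subject name and path are illustrative):
    #   session = HabituationChoiceWorldSession(subject='mock_subject')
    #   session.mock(file_jsonable_fixture='fixture_task_data.jsonable')
    #   session.run()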

    def get_graphviz_task(self, output_file=None, view=True):
        """
        Get the state machine's states diagram in Digraph format.

        :param output_file:
        :return:
        """
        import graphviz

        self.next_trial()
        sma = self.get_state_machine_trial(0)
        if sma is None:
            return
        states_indices = {i: k for i, k in enumerate(sma.state_names)}
        states_indices.update({(i + 10000): k for i, k in enumerate(sma.undeclared)})
        states_letters = {k: ascii_letters[i] for i, k in enumerate(sma.state_names)}
        dot = graphviz.Digraph(comment='The Great IBL Task')
        edges = []

        for i in range(len(sma.state_names)):
            letter = states_letters[sma.state_names[i]]
            dot.node(letter, sma.state_names[i])
            if ~np.isnan(sma.state_timer_matrix[i]):
                out_state = states_indices[sma.state_timer_matrix[i]]
                edges.append(f'{letter}{states_letters[out_state]}')
            for input in sma.input_matrix[i]:
                if input[0] == 0:
                    edges.append(f'{letter}{states_letters[states_indices[input[1]]]}')
        dot.edges(edges)
        if output_file is not None:
            try:
                dot.render(output_file, view=view)
            except graphviz.exceptions.ExecutableNotFound:
                log.info('Graphviz system executable not found, cannot render the graph')
        return dot
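
    # Example (hypothetical): render the diagram to disk without opening a viewer; this needs
    # both the graphviz Python package and the system Graphviz executables.
    #   dot = session.get_graphviz_task(output_file='choice_world_states', view=False)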
    def _instantiate_state_machine(self, *args, **kwargs):
        return StateMachine(self.bpod)

    def get_state_machine_trial(self, i):
        # we define the trial number here for subclasses that may need it
        sma = self._instantiate_state_machine(trial_number=i)
        if i == 0:  # First trial exception: start the camera
            session_delay_start = self.task_params.get('SESSION_DELAY_START', 0)
            log.info('First trial initializing, will move to next trial only if:')
            log.info('1. camera is detected')
            log.info(f'2. {session_delay_start} sec have elapsed')
            sma.add_state(
                state_name='trial_start',
                state_timer=0,
                state_change_conditions={'Port1In': 'delay_initiation'},
                output_actions=[('SoftCode', SOFTCODE.TRIGGER_CAMERA), ('BNC1', 255)],
            )  # start camera
            sma.add_state(
                state_name='delay_initiation',
                state_timer=session_delay_start,
                output_actions=[],
                state_change_conditions={'Tup': 'reset_rotary_encoder'},
            )
        else:
            sma.add_state(
                state_name='trial_start',
                state_timer=0,  # ~100µs hardware irreducible delay
                state_change_conditions={'Tup': 'reset_rotary_encoder'},
                output_actions=[self.bpod.actions.stop_sound, ('BNC1', 255)],
            )  # stop all sounds

        sma.add_state(
            state_name='reset_rotary_encoder',
            state_timer=0,
            output_actions=[self.bpod.actions.rotary_encoder_reset],
            state_change_conditions={'Tup': 'quiescent_period'},
        )

        sma.add_state(  # '>back' | '>reset_timer'
            state_name='quiescent_period',
            state_timer=self.quiescent_period,
            output_actions=[],
            state_change_conditions={
                'Tup': 'stim_on',
                self.movement_left: 'reset_rotary_encoder',
                self.movement_right: 'reset_rotary_encoder',
            },
        )
        # show the stimulus, move on to the next state if a frame2ttl pulse is detected, with a time-out of 0.1 s
        sma.add_state(
            state_name='stim_on',
            state_timer=0.1,
            output_actions=[self.bpod.actions.bonsai_show_stim],
            state_change_conditions={'Tup': 'interactive_delay', 'BNC1High': 'interactive_delay', 'BNC1Low': 'interactive_delay'},
        )
        # this is a feature that can eventually add a delay between the visual and the auditory cue
        sma.add_state(
            state_name='interactive_delay',
            state_timer=self.task_params.INTERACTIVE_DELAY,
            output_actions=[],
            state_change_conditions={'Tup': 'play_tone'},
        )
        # play the tone, move on to the next state if sound is detected, with a time-out of 0.1 s
        sma.add_state(
            state_name='play_tone',
            state_timer=0.1,
            output_actions=[self.bpod.actions.play_tone],
            state_change_conditions={'Tup': 'reset2_rotary_encoder', 'BNC2High': 'reset2_rotary_encoder'},
        )

        sma.add_state(
            state_name='reset2_rotary_encoder',
            state_timer=0.05,  # the delay here is to avoid race conditions in the bonsai flow
            output_actions=[self.bpod.actions.rotary_encoder_reset],
            state_change_conditions={'Tup': 'closed_loop'},
        )

        sma.add_state(
            state_name='closed_loop',
            state_timer=self.task_params.RESPONSE_WINDOW,
            output_actions=[self.bpod.actions.bonsai_closed_loop],
            state_change_conditions={'Tup': 'no_go', self.event_error: 'freeze_error', self.event_reward: 'freeze_reward'},
        )

        sma.add_state(
            state_name='no_go',
            state_timer=self.task_params.FEEDBACK_NOGO_DELAY_SECS,
            output_actions=[self.bpod.actions.bonsai_hide_stim, self.bpod.actions.play_noise],
            state_change_conditions={'Tup': 'exit_state'},
        )

        sma.add_state(
            state_name='freeze_error',
            state_timer=0,
            output_actions=[self.bpod.actions.bonsai_freeze_stim],
            state_change_conditions={'Tup': 'error'},
        )

        sma.add_state(
            state_name='error',
            state_timer=self.task_params.FEEDBACK_ERROR_DELAY_SECS,
            output_actions=[self.bpod.actions.play_noise],
            state_change_conditions={'Tup': 'hide_stim'},
        )

        sma.add_state(
            state_name='freeze_reward',
            state_timer=0,
            output_actions=[self.bpod.actions.bonsai_show_center],
            state_change_conditions={'Tup': 'reward'},
        )

        sma.add_state(
            state_name='reward',
            state_timer=self.reward_time,
            output_actions=[('Valve1', 255), ('BNC1', 255)],
            state_change_conditions={'Tup': 'correct'},
        )

        sma.add_state(
            state_name='correct',
            state_timer=self.task_params.FEEDBACK_CORRECT_DELAY_SECS - self.reward_time,
            output_actions=[],
            state_change_conditions={'Tup': 'hide_stim'},
        )

        sma.add_state(
            state_name='hide_stim',
            state_timer=0.1,
            output_actions=[self.bpod.actions.bonsai_hide_stim],
            state_change_conditions={'Tup': 'exit_state', 'BNC1High': 'exit_state', 'BNC1Low': 'exit_state'},
        )

        sma.add_state(
            state_name='exit_state',
            state_timer=self.task_params.ITI_DELAY_SECS,
            output_actions=[('BNC1', 255)],
            state_change_conditions={'Tup': 'exit'},
        )
        return sma
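
    # State flow summary for the machine built above (happy path): trial_start ->
    # reset_rotary_encoder -> quiescent_period -> stim_on -> interactive_delay -> play_tone ->
    # reset2_rotary_encoder -> closed_loop -> freeze_reward -> reward -> correct -> hide_stim ->
    # exit_state -> exit. An incorrect response branches closed_loop -> freeze_error -> error ->
    # hide_stim, and a response timeout branches closed_loop -> no_go -> exit_state.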
    @abc.abstractmethod
    def next_trial(self):
        pass

    @property
    def default_reward_amount(self):
        return self.task_params.REWARD_AMOUNT_UL

    def draw_next_trial_info(self, pleft=0.5, contrast=None, position=None, reward_amount=None):
        """Draw the next trial variables.

        Calls :meth:`send_trial_info_to_bonsai`.
        This is called by the `next_trial` method before updating the Bpod state machine.
        """
        if contrast is None:
            contrast = misc.draw_contrast(self.task_params.CONTRAST_SET, self.task_params.CONTRAST_SET_PROBABILITY_TYPE)
        assert len(self.task_params.STIM_POSITIONS) == 2, 'Only two positions are supported'
        position = position or int(np.random.choice(self.task_params.STIM_POSITIONS, p=[pleft, 1 - pleft]))
        quiescent_period = self.task_params.QUIESCENT_PERIOD + misc.truncated_exponential(
            scale=0.35, min_value=0.2, max_value=0.5
        )
        reward_amount = self.default_reward_amount if reward_amount is None else reward_amount
        stim_gain = (
            self.session_info.ADAPTIVE_GAIN_VALUE if self.task_params.get('ADAPTIVE_GAIN', False) else self.task_params.STIM_GAIN
        )
        self.trials_table.at[self.trial_num, 'quiescent_period'] = quiescent_period
        self.trials_table.at[self.trial_num, 'contrast'] = contrast
        self.trials_table.at[self.trial_num, 'stim_phase'] = random.uniform(0, 2 * math.pi)
        self.trials_table.at[self.trial_num, 'stim_sigma'] = self.task_params.STIM_SIGMA
        self.trials_table.at[self.trial_num, 'stim_angle'] = self.task_params.STIM_ANGLE
        self.trials_table.at[self.trial_num, 'stim_gain'] = stim_gain
        self.trials_table.at[self.trial_num, 'stim_freq'] = self.task_params.STIM_FREQ
        self.trials_table.at[self.trial_num, 'stim_reverse'] = self.task_params.STIM_REVERSE
        self.trials_table.at[self.trial_num, 'trial_num'] = self.trial_num
        self.trials_table.at[self.trial_num, 'position'] = position
        self.trials_table.at[self.trial_num, 'reward_amount'] = reward_amount
        self.trials_table.at[self.trial_num, 'stim_probability_left'] = pleft
        self.send_trial_info_to_bonsai()
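
    # Note on the quiescence draw above: with the QUIESCENT_PERIOD value of 0.2 s from the
    # commented parameter model, the truncated exponential (scale 0.35, bounds [0.2, 0.5])
    # yields a total quiescent period between 0.4 and 0.7 s, resampled on every trial.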
    def trial_completed(self, bpod_data):
        # if the reward state has not been triggered, null the reward
        if np.isnan(bpod_data['States timestamps']['reward'][0][0]):
            self.trials_table.at[self.trial_num, 'reward_amount'] = 0
        self.trials_table.at[self.trial_num, 'reward_valve_time'] = self.reward_time
        # update cumulative reward value
        self.session_info.TOTAL_WATER_DELIVERED += self.trials_table.at[self.trial_num, 'reward_amount']
        self.session_info.NTRIALS += 1
        # SAVE TRIAL DATA
        save_dict = self.trials_table.iloc[self.trial_num].to_dict()
        save_dict['behavior_data'] = bpod_data
        # Dump and save
        with open(self.paths['DATA_FILE_PATH'], 'a') as fp:
            fp.write(json.dumps(save_dict) + '\n')
        # this is a flag for the online plots; if the online plots were in pyqt5, a file watcher could be used instead
        Path(self.paths['DATA_FILE_PATH']).parent.joinpath('new_trial.flag').touch()
        self.paths.SESSION_FOLDER.joinpath('transfer_me.flag').touch()
        self.check_sync_pulses(bpod_data=bpod_data)

    def check_sync_pulses(self, bpod_data):
        # todo: move this to the post-trial logic once we have a task flow
        if not self.bpod.is_connected:
            return
        events = bpod_data['Events timestamps']
        if not misc.get_port_events(events, name='BNC1'):
            log.warning("NO FRAME2TTL PULSES RECEIVED ON BPOD'S TTL INPUT 1")
        if not misc.get_port_events(events, name='BNC2'):
            log.warning("NO SOUND SYNC PULSES RECEIVED ON BPOD'S TTL INPUT 2")
        if not misc.get_port_events(events, name='Port1'):
            log.warning("NO CAMERA SYNC PULSES RECEIVED ON BPOD'S BEHAVIOR PORT 1")

    def show_trial_log(self, extra_info='', log_level: int = logging.INFO):
        trial_info = self.trials_table.iloc[self.trial_num]

        log.log(log_level, f'Outcome of Trial #{trial_info.trial_num}:')
        log.log(log_level, f'- Stim. Position:  {trial_info.position}')
        log.log(log_level, f'- Stim. Contrast:  {trial_info.contrast}')
        log.log(log_level, f'- Stim. Phase:     {trial_info.stim_phase}')
        log.log(log_level, f'- Stim. p Left:    {trial_info.stim_probability_left}')
        log.log(log_level, f'- Water delivered: {self.session_info.TOTAL_WATER_DELIVERED:.1f} µl')
        log.log(log_level, f'- Time from Start: {self.time_elapsed}')
        log.log(log_level, f'- Temperature:     {self.ambient_sensor_table.loc[self.trial_num, "Temperature_C"]:.1f} °C')
        log.log(log_level, f'- Air Pressure:    {self.ambient_sensor_table.loc[self.trial_num, "AirPressure_mb"]:.1f} mb')
        log.log(log_level, f'- Rel. Humidity:   {self.ambient_sensor_table.loc[self.trial_num, "RelativeHumidity"]:.1f} %\n')

    @property
    def iti_reward(self):
        """
        Returns the ITI time that needs to be set in order to achieve the desired ITI,
        by subtracting the time it takes to give a reward from the desired ITI.
        """
        return self.task_params.ITI_CORRECT - self.calibration.get('REWARD_VALVE_TIME', None)

    """
    These are the properties that are used in the state machine code
    """

    @property
    def reward_time(self):
        return self.compute_reward_time(amount_ul=self.trials_table.at[self.trial_num, 'reward_amount'])

    @property
    def quiescent_period(self):
        return self.trials_table.at[self.trial_num, 'quiescent_period']

    @property
    def position(self):
        return self.trials_table.at[self.trial_num, 'position']

    @property
    def event_error(self):
        return self.device_rotary_encoder.THRESHOLD_EVENTS[(-1 if self.task_params.STIM_REVERSE else 1) * self.position]

    @property
    def event_reward(self):
        return self.device_rotary_encoder.THRESHOLD_EVENTS[(1 if self.task_params.STIM_REVERSE else -1) * self.position]
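
    # Illustration of the two properties above (assuming STIM_POSITIONS = [-35, 35] and
    # STIM_REVERSE = False): for a stimulus at +35 degrees, event_error is the rotary-encoder
    # threshold event at +35 (the wheel moved the stimulus further out) and event_reward the
    # one at -35 (the wheel brought the stimulus to the centre); STIM_REVERSE swaps the two.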


class HabituationChoiceWorldSession(ChoiceWorldSession):
    protocol_name = '_iblrig_tasks_habituationChoiceWorld'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.trials_table['delay_to_stim_center'] = np.zeros(NTRIALS_INIT) * np.NaN

    def next_trial(self):
        self.trial_num += 1
        self.draw_next_trial_info()

    def draw_next_trial_info(self, *args, **kwargs):
        # update trial table fields specific to habituation choice world
        self.trials_table.at[self.trial_num, 'delay_to_stim_center'] = np.random.normal(self.task_params.DELAY_TO_STIM_CENTER, 2)
        super().draw_next_trial_info(*args, **kwargs)

    def get_state_machine_trial(self, i):
        sma = StateMachine(self.bpod)

        if i == 0:  # First trial exception: start the camera
            log.info('Waiting for camera pulses...')
            sma.add_state(
                state_name='iti',
                state_timer=3600,
                state_change_conditions={'Port1In': 'stim_on'},
                output_actions=[self.bpod.actions.bonsai_hide_stim, ('SoftCode', SOFTCODE.TRIGGER_CAMERA), ('BNC1', 255)],
            )  # start camera
        else:
            # NB: This state is actually the inter-trial interval, i.e. the period of grey screen between stim off
            # and stim on. During this period the Bpod TTL is HIGH and there are no stimuli. The onset of this state
            # is trial end; the offset of this state is trial start!
            sma.add_state(
                state_name='iti',
                state_timer=1,  # Stim off for 1 sec
                state_change_conditions={'Tup': 'stim_on'},
                output_actions=[self.bpod.actions.bonsai_hide_stim, ('BNC1', 255)],
            )
        # This stim_on state is considered the actual trial start
        sma.add_state(
            state_name='stim_on',
            state_timer=self.trials_table.at[self.trial_num, 'delay_to_stim_center'],
            state_change_conditions={'Tup': 'stim_center'},
            output_actions=[self.bpod.actions.bonsai_show_stim, self.bpod.actions.play_tone],
        )

        sma.add_state(
            state_name='stim_center',
            state_timer=0.5,
            state_change_conditions={'Tup': 'reward'},
            output_actions=[self.bpod.actions.bonsai_show_center],
        )

        sma.add_state(
            state_name='reward',
            state_timer=self.reward_time,  # the length of time to leave reward valve open, i.e. reward size
            state_change_conditions={'Tup': 'post_reward'},
            output_actions=[('Valve1', 255), ('BNC1', 255)],
        )
        # This state defines the period after reward where Bpod TTL is LOW.
        # NB: The stimulus is on throughout this period. The stim off trigger occurs upon exit.
        # The stimulus thus remains in the screen centre for 0.5 + ITI_DELAY_SECS seconds.
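        # Worked example (assuming ITI_DELAY_SECS = 0.5 and a reward_time of 0.1 s): post_reward
        # times out after 0.5 - 0.1 = 0.4 s, so the stimulus remains centred for
        # 0.5 + 0.1 + 0.4 = 1.0 s in total (stim_center + reward + post_reward), matching the
        # 0.5 + ITI_DELAY_SECS figure quoted above.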
        sma.add_state(
            state_name='post_reward',
            state_timer=self.task_params.ITI_DELAY_SECS - self.reward_time,
            state_change_conditions={'Tup': 'exit'},
            output_actions=[],
        )
        return sma


class ActiveChoiceWorldSession(ChoiceWorldSession):
    """
    The ActiveChoiceWorldSession is a base class for protocols where the mouse is actively making decisions
    by turning the wheel. It has the following characteristics:

    -   it is trial based
    -   it is decision based
    -   left and right stimuli are equiprobable: there is no biased block
    -   a trial can either be correct / error / no_go depending on the side of the stimulus and the response
    -   it has a quantifiable performance, the proportion of correct trials, unlike passive stimulation or
        habituation protocols.

    TrainingChoiceWorld and BiasedChoiceWorld are subclasses of this class.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.trials_table['stim_probability_left'] = np.zeros(NTRIALS_INIT, dtype=np.float64)

    def _run(self):
        # starts online plotting
        if self.interactive:
            subprocess.Popen(
                ['view_session', str(self.paths['DATA_FILE_PATH']), str(self.paths['SETTINGS_FILE_PATH'])],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
        super()._run()

    def show_trial_log(self, extra_info=''):
        trial_info = self.trials_table.iloc[self.trial_num]
        extra_info = f"""
RESPONSE TIME:        {trial_info.response_time}
{extra_info}

TRIAL CORRECT:        {trial_info.trial_correct}
NTRIALS CORRECT:      {self.session_info.NTRIALS_CORRECT}
NTRIALS ERROR:        {self.trial_num - self.session_info.NTRIALS_CORRECT}
        """
        super().show_trial_log(extra_info=extra_info)

    def trial_completed(self, bpod_data):
        """
        Update the trials table with information about the behaviour coming from the Bpod.

        Constraints on the state machine data:
        - mandatory states: ['correct', 'error', 'no_go', 'reward']
        - optional states : ['omit_correct', 'omit_error', 'omit_no_go']
        :param bpod_data:
        :return:
        """
        # get the response time from the behaviour data
        response_time = bpod_data['States timestamps']['closed_loop'][0][1] - bpod_data['States timestamps']['stim_on'][0][0]
        self.trials_table.at[self.trial_num, 'response_time'] = response_time
        # get the trial outcome
        state_names = ['correct', 'error', 'no_go', 'omit_correct', 'omit_error', 'omit_no_go']
        raw_outcome = {sn: ~np.isnan(bpod_data['States timestamps'].get(sn, [[np.NaN]])[0][0]) for sn in state_names}
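        # Illustrative raw_outcome for a rewarded trial (hypothetical values): {'correct': True,
        # 'error': False, 'no_go': False, 'omit_correct': False, 'omit_error': False,
        # 'omit_no_go': False}; the generator below then resolves outcome to 'correct'.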
        try:
            outcome = next(k for k in raw_outcome if raw_outcome[k])
            # Update the response buffer: -1 for leftward, 0 for no-go, and 1 for rightward responses
            position = self.trials_table.at[self.trial_num, 'position']
            if 'correct' in outcome:
                self.trials_table.at[self.trial_num, 'trial_correct'] = True
                self.session_info.NTRIALS_CORRECT += 1
                self.trials_table.at[self.trial_num, 'response_side'] = -np.sign(position)
            elif 'error' in outcome:
                self.trials_table.at[self.trial_num, 'response_side'] = np.sign(position)
            elif 'no_go' in outcome:
                self.trials_table.at[self.trial_num, 'response_side'] = 0
            super().trial_completed(bpod_data)
            # here we throw potential errors after having written the trial to disk
            assert np.sum(list(raw_outcome.values())) == 1
            assert position != 0, 'the position value should be either 35 or -35'
        except StopIteration as e:
            log.error(f'No outcome detected for trial {self.trial_num}.')
            log.error(f'raw_outcome: {raw_outcome}')
            log.error('State names: ' + ', '.join(bpod_data['States timestamps'].keys()))
            raise e
        except AssertionError as e:
            log.error(f'Assertion Error in trial {self.trial_num}.')
            log.error(f'raw_outcome: {raw_outcome}')
            log.error('State names: ' + ', '.join(bpod_data['States timestamps'].keys()))
            raise e


class BiasedChoiceWorldSession(ActiveChoiceWorldSession):
    """
    Biased choice world session is the instantiation of ActiveChoiceWorld where the notion of biased
    blocks is introduced.
    """

    base_parameters_file = Path(__file__).parent.joinpath('base_biased_choice_world_params.yaml')
    protocol_name = '_iblrig_tasks_biasedChoiceWorld'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.blocks_table = pd.DataFrame(
            {'probability_left': np.zeros(NBLOCKS_INIT) * np.NaN, 'block_length': np.full(NBLOCKS_INIT, -1, dtype=np.int16)}
        )
        self.trials_table['block_num'] = np.zeros(NTRIALS_INIT, dtype=np.int16)
        self.trials_table['block_trial_num'] = np.zeros(NTRIALS_INIT, dtype=np.int16)

    def new_block(self):
        """
        Draw the next block of trials.

        If BLOCK_INIT_5050 is set, the first block has a 50/50 probability of leftward stimulus
        and is 90 trials long.
        """
        self.block_num += 1  # the block number is zero based
        self.block_trial_num = 0

        # handles the block length logic
        if self.task_params.BLOCK_INIT_5050 and self.block_num == 0:
            block_len = 90
        else:
            block_len = int(
                misc.truncated_exponential(
                    scale=self.task_params.BLOCK_LEN_FACTOR,
                    min_value=self.task_params.BLOCK_LEN_MIN,
                    max_value=self.task_params.BLOCK_LEN_MAX,
                )
            )
        if self.block_num == 0:
            pleft = 0.5 if self.task_params.BLOCK_INIT_5050 else np.random.choice(self.task_params.BLOCK_PROBABILITY_SET)
        elif self.block_num == 1 and self.task_params.BLOCK_INIT_5050:
            pleft = np.random.choice(self.task_params.BLOCK_PROBABILITY_SET)
        else:
            # this switches the probability of leftward stim for the next block
            pleft = round(abs(1 - self.blocks_table.loc[self.block_num - 1, 'probability_left']), 1)
        self.blocks_table.at[self.block_num, 'block_length'] = block_len
        self.blocks_table.at[self.block_num, 'probability_left'] = pleft
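        # Example sequence (illustrative, assuming BLOCK_INIT_5050 = True and a hypothetical
        # BLOCK_PROBABILITY_SET of [0.2, 0.8]): block 0 runs 90 trials at pleft = 0.5, block 1
        # draws pleft from {0.2, 0.8}, and subsequent blocks alternate 0.2 <-> 0.8 with lengths
        # drawn from the truncated exponential above.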

    def next_trial(self):
        self.trial_num += 1
        # if necessary update the block number
        self.block_trial_num += 1
        if self.block_num < 0 or self.block_trial_num > (self.blocks_table.loc[self.block_num, 'block_length'] - 1):
            self.new_block()
        # get and store probability left
        pleft = self.blocks_table.loc[self.block_num, 'probability_left']
        # update trial table fields specific to biased choice world task
        self.trials_table.at[self.trial_num, 'block_num'] = self.block_num
        self.trials_table.at[self.trial_num, 'block_trial_num'] = self.block_trial_num
        # save and send trial info to bonsai
        self.draw_next_trial_info(pleft=pleft)

    def show_trial_log(self):
        trial_info = self.trials_table.iloc[self.trial_num]
        extra_info = f"""
BLOCK NUMBER:         {trial_info.block_num}
BLOCK LENGTH:         {self.blocks_table.loc[self.block_num, 'block_length']}
TRIALS IN BLOCK:      {trial_info.block_trial_num}
        """
        super().show_trial_log(extra_info=extra_info)


class TrainingChoiceWorldSession(ActiveChoiceWorldSession):
    """
    The TrainingChoiceWorldSession corresponds to the first training protocol of the choice world task.
    This protocol has a complicated adaptation of the number of contrasts (embodied by the training_phase
    property) and of the reward amount (embodied by the adaptive_reward property).
    """

    protocol_name = '_iblrig_tasks_trainingChoiceWorld'

    def __init__(self, training_phase=-1, adaptive_reward=-1.0, adaptive_gain=None, **kwargs):
        super().__init__(**kwargs)
        inferred_training_phase, inferred_adaptive_reward, inferred_adaptive_gain = self.get_subject_training_info()
        if training_phase == -1:
            log.critical(f'Got training phase: {inferred_training_phase}')
            self.training_phase = inferred_training_phase
        else:
            log.critical(f'Training phase manually set to: {training_phase}')
            self.training_phase = training_phase
        if adaptive_reward == -1:
            log.critical(f'Got Adaptive reward {inferred_adaptive_reward} uL')
            self.session_info['ADAPTIVE_REWARD_AMOUNT_UL'] = inferred_adaptive_reward
        else:
            log.critical(f'Adaptive reward manually set to {adaptive_reward} uL')
            self.session_info['ADAPTIVE_REWARD_AMOUNT_UL'] = adaptive_reward
        if adaptive_gain is None:
            log.critical(f'Got Adaptive gain {inferred_adaptive_gain} degrees/mm')
            self.session_info['ADAPTIVE_GAIN_VALUE'] = inferred_adaptive_gain
        else:
            log.critical(f'Adaptive gain manually set to {adaptive_gain} degrees/mm')
            self.session_info['ADAPTIVE_GAIN_VALUE'] = adaptive_gain
        self.var = {'training_phase_trial_counts': np.zeros(6), 'last_10_responses_sides': np.zeros(10)}
        self.trials_table['training_phase'] = np.zeros(NTRIALS_INIT, dtype=np.int8)
        self.trials_table['debias_trial'] = np.zeros(NTRIALS_INIT, dtype=bool)

    @property
    def default_reward_amount(self):
        return self.session_info.get('ADAPTIVE_REWARD_AMOUNT_UL', self.task_params.REWARD_AMOUNT_UL)

    def get_subject_training_info(self):
        """
        Get the previous session's training information according to this session's parameters and
        deduce the training level, adaptive reward amount and adaptive gain value.

        Returns
        -------
        tuple
            (training_phase, adaptive_reward, adaptive_gain)
        """
        training_info, _ = choiceworld.get_subject_training_info(
            subject_name=self.session_info.SUBJECT_NAME,
            task_name=self.protocol_name,
            stim_gain=self.task_params.AG_INIT_VALUE,
            stim_gain_on_error=self.task_params.STIM_GAIN,
            default_reward=self.task_params.REWARD_AMOUNT_UL,
            local_path=self.iblrig_settings['iblrig_local_data_path'],
            remote_path=self.iblrig_settings['iblrig_remote_data_path'],
            lab=self.iblrig_settings['ALYX_LAB'],
            iblrig_settings=self.iblrig_settings,
        )
        return training_info['training_phase'], training_info['adaptive_reward'], training_info['adaptive_gain']

    def compute_performance(self):
        """Aggregate the trials table to compute the performance of the mouse on each contrast."""
        self.trials_table['signed_contrast'] = self.trials_table['contrast'] * np.sign(self.trials_table['position'])
        performance = self.trials_table.groupby(['signed_contrast']).agg(
            last_50_perf=pd.NamedAgg(column='trial_correct', aggfunc=lambda x: np.sum(x[np.maximum(-50, -x.size) :]) / 50),
            ntrials=pd.NamedAgg(column='trial_correct', aggfunc='count'),
        )
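        # Note on last_50_perf (as computed above): correct trials are summed over at most the
        # last 50 trials of each signed contrast but divided by a fixed 50, so contrasts seen
        # fewer than 50 times are penalised rather than scored on a smaller denominator.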
        return performance

    def check_training_phase(self):
        """Check if the mouse is ready to move to the next training phase."""
        move_on = False
        if self.training_phase == 0:  # each of the -1, -.5, .5, 1 contrasts should be above 80% perf to switch
            performance = self.compute_performance()
            passing = performance[np.abs(performance.index) >= 0.5]['last_50_perf']
            if np.all(passing > 0.8) and passing.size == 4:
                move_on = True
        elif self.training_phase == 1:  # each of the -.25, .25 contrasts should be above 80% perf to switch
            performance = self.compute_performance()
            passing = performance[np.abs(performance.index) == 0.25]['last_50_perf']
            if np.all(passing > 0.8) and passing.size == 2:
                move_on = True
        elif 5 > self.training_phase >= 2:  # for the next phases, always switch after 200 trials
            if self.var['training_phase_trial_counts'][self.training_phase] >= 200:
                move_on = True
        if move_on:
            self.training_phase = np.minimum(5, self.training_phase + 1)
            log.warning(f'Moving on to training phase {self.training_phase}, {self.trial_num}')

    def next_trial(self):
        # update counters
        self.trial_num += 1
        self.var['training_phase_trial_counts'][self.training_phase] += 1
        # check if the subject graduates to a new training phase
        self.check_training_phase()
        # draw the next trial
        signed_contrast = choiceworld.draw_training_contrast(self.training_phase)
        position = self.task_params.STIM_POSITIONS[int(np.sign(signed_contrast) == 1)]
        contrast = np.abs(signed_contrast)
        # debiasing: if the previous trial was incorrect and easy, repeat the trial
        if self.task_params.DEBIAS and self.trial_num >= 1 and self.training_phase < 5:
            last_contrast = self.trials_table.loc[self.trial_num - 1, 'contrast']
            do_debias_trial = (self.trials_table.loc[self.trial_num - 1, 'trial_correct'] != 1) and last_contrast >= 0.5
            self.trials_table.at[self.trial_num, 'debias_trial'] = do_debias_trial
            if do_debias_trial:
                iresponse = self.trials_table['response_side'] != 0  # trials that had a response
                # take the average of rightward responses over the last 10 trials that had a response
                average_right = np.mean(self.trials_table['response_side'][iresponse].iloc[-10:] == 1)
                # the probability of the next stimulus being on the left is a draw from a normal distribution
                # centered on average_right with sigma 0.5: if it is less than 0.5 the next stimulus will be on the left
                position = self.task_params.STIM_POSITIONS[int(np.random.normal(average_right, 0.5) >= 0.5)]
                # the contrast is repeated from the last trial
                contrast = last_contrast
        # save and send trial info to bonsai
        self.draw_next_trial_info(pleft=self.task_params.PROBABILITY_LEFT, position=position, contrast=contrast)
        self.trials_table.at[self.trial_num, 'training_phase'] = self.training_phase
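
    # Debias illustration for next_trial (hypothetical numbers): if trial n-1 showed contrast 1.0
    # and was answered incorrectly, trial n repeats contrast 1.0; with average_right = 0.9 the
    # draw from np.random.normal(0.9, 0.5) usually lands >= 0.5, so the stimulus is placed at
    # STIM_POSITIONS[1], the side on which the mouse's favoured rightward response is incorrect.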
    def show_trial_log(self):
        extra_info = f"""
CONTRAST SET:         {np.unique(np.abs(choiceworld.contrasts_set(self.training_phase)))}
SUBJECT TRAINING PHASE (0-5):         {self.training_phase}
            """
        super().show_trial_log(extra_info=extra_info)