• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

int-brain-lab / ibllib / 7961675356254463

pending completion
7961675356254463

Pull #557

continuous-integration/UCL

olivier
add test
Pull Request #557: Chained protocols

718 of 718 new or added lines in 27 files covered. (100.0%)

12554 of 18072 relevant lines covered (69.47%)

0.69 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

93.52
/ibllib/io/extractors/ephys_fpga.py
1
"""Data extraction from raw FPGA output
1✔
2
Complete FPGA data extraction depends on Bpod extraction
3
"""
4
from collections import OrderedDict
1✔
5
import logging
1✔
6
from pathlib import Path
1✔
7
import uuid
1✔
8

9
import matplotlib.pyplot as plt
1✔
10
import numpy as np
1✔
11

12
import spikeglx
1✔
13
import neurodsp.utils
1✔
14
import one.alf.io as alfio
1✔
15
from iblutil.util import Bunch
1✔
16
from iblutil.spacer import Spacer
1✔
17

18
import ibllib.exceptions as err
1✔
19
from ibllib.io import raw_data_loaders, session_params
1✔
20
from ibllib.io.extractors.bpod_trials import extract_all as bpod_extract_all
1✔
21
from ibllib.io.extractors.opto_trials import LaserBool
1✔
22
import ibllib.io.extractors.base as extractors_base
1✔
23
from ibllib.io.extractors.training_wheel import extract_wheel_moves
1✔
24
import ibllib.plots as plots
1✔
25
from ibllib.io.extractors.default_channel_maps import DEFAULT_MAPS
1✔
26

27
_logger = logging.getLogger(__name__)

SYNC_BATCH_SIZE_SECS = 100  # number of samples to read at once in bin file for sync
WHEEL_RADIUS_CM = 1  # stay in radians
WHEEL_TICKS = 1024  # rotary encoder ticks per full revolution

BPOD_FPGA_DRIFT_THRESHOLD_PPM = 150  # throws an error if bpod to fpga clock drift is higher
F2TTL_THRESH = 0.01  # consecutive pulses with less than this threshold ignored

# Default sync channel maps, keyed first by neuropixel version ('3A' / '3B'),
# then by binary file type ('ap' or 'nidq'); values are sync channel indices.
CHMAPS = {
    '3A': {
        'ap': {
            'left_camera': 2,
            'right_camera': 3,
            'body_camera': 4,
            'bpod': 7,
            'frame2ttl': 12,
            'rotary_encoder_0': 13,
            'rotary_encoder_1': 14,
            'audio': 15,
        },
    },
    '3B': {
        'nidq': {
            'left_camera': 0,
            'right_camera': 1,
            'body_camera': 2,
            'imec_sync': 3,
            'frame2ttl': 4,
            'rotary_encoder_0': 5,
            'rotary_encoder_1': 6,
            'audio': 7,
            'bpod': 16,
            'laser': 17,
            'laser_ttl': 18,
        },
        'ap': {
            'imec_sync': 6,
        },
    },
}
65

66

67
def data_for_keys(keys, data):
    """Return True if `data` is a dict containing every key in `keys` with a non-None value."""
    if data is None:
        return False
    # dict.get returns None both for missing keys and explicit None values
    return all(data.get(k) is not None for k in keys)
70

71

72
def get_ibl_sync_map(ef, version):
    """
    Gets default channel map for the version/binary file type combination

    :param ef: ibllib.io.spikeglx.glob_ephys_file dictionary with field 'ap' or 'nidq'
    :param version: neuropixel version string, '3A' or '3B'
    :return: channel map dictionary
    :raises ValueError: if the version is not recognised or no binary field is present
    """
    # Determine default channel map; previously an unknown version or a 3B file
    # with neither 'nidq' nor 'ap' fell through to an UnboundLocalError below,
    # so fail explicitly with a meaningful message instead
    if version == '3A':
        default_chmap = CHMAPS['3A']['ap']
    elif version == '3B':
        if ef.get('nidq', None):
            default_chmap = CHMAPS['3B']['nidq']
        elif ef.get('ap', None):
            default_chmap = CHMAPS['3B']['ap']
        else:
            raise ValueError("ephys file dict must contain an 'nidq' or 'ap' field for a 3B session")
    else:
        raise ValueError(f'Unsupported neuropixel version: {version}')
    # Try to load channel map from file
    chmap = spikeglx.get_sync_map(ef['path'])
    # If chmap provided but not with all keys, fill up with default values
    if not chmap:
        return default_chmap
    else:
        if data_for_keys(default_chmap.keys(), chmap):
            return chmap
        else:
            _logger.warning("Keys missing from provided channel map, "
                            "setting missing keys from default channel map")
            return {**default_chmap, **chmap}
98

99

100
def _sync_to_alf(raw_ephys_apfile, output_path=None, save=False, parts=''):
    """
    Extracts sync.times, sync.channels and sync.polarities from binary ephys dataset

    :param raw_ephys_apfile: bin file containing ephys data or spike
    :param output_path: output directory
    :param save: bool write to disk only if True
    :param parts: string or list of strings that will be appended to the filename before extension
    :return: Bunch of sync arrays, plus the list of output files if save=True
    """
    # handles input argument: support ibllib.io.spikeglx.Reader, str and pathlib.Path
    if isinstance(raw_ephys_apfile, spikeglx.Reader):
        sr = raw_ephys_apfile
    else:
        raw_ephys_apfile = Path(raw_ephys_apfile)
        sr = spikeglx.Reader(raw_ephys_apfile)
    opened = sr.is_open
    if not opened:  # if not (opened := sr.is_open)  # py3.8
        sr.open()
    # if no output, need a temp folder to swap for big files
    if not output_path:
        output_path = raw_ephys_apfile.parent
    file_ftcp = Path(output_path).joinpath(f'fronts_times_channel_polarity{str(uuid.uuid4())}.bin')

    # loop over chunks of the raw ephys file; the context manager guarantees the
    # temp file is closed even if a chunk read fails (the original leaked the handle)
    wg = neurodsp.utils.WindowGenerator(sr.ns, int(SYNC_BATCH_SIZE_SECS * sr.fs), overlap=1)
    with open(file_ftcp, 'wb') as fid_ftcp:
        for sl in wg.slice:
            ss = sr.read_sync(sl)
            ind, fronts = neurodsp.utils.fronts(ss, axis=0)
            # each row: time (s), channel number, polarity (+1/-1)
            sav = np.c_[(ind[0, :] + sl.start) / sr.fs, ind[1, :], fronts.astype(np.double)]
            sav.tofile(fid_ftcp)
    # read from the temp file and delete it
    tim_chan_pol = np.fromfile(str(file_ftcp))
    tim_chan_pol = tim_chan_pol.reshape((int(tim_chan_pol.size / 3), 3))
    file_ftcp.unlink()
    sync = {'times': tim_chan_pol[:, 0],
            'channels': tim_chan_pol[:, 1],
            'polarities': tim_chan_pol[:, 2]}
    # If opened Reader was passed into function, leave open
    if not opened:
        sr.close()
    if save:
        out_files = alfio.save_object_npy(output_path, sync, 'sync',
                                          namespace='spikeglx', parts=parts)
        return Bunch(sync), out_files
    else:
        return Bunch(sync)
150

151

152
def _assign_events_bpod(bpod_t, bpod_polarities, ignore_first_valve=True):
1✔
153
    """
154
    From detected fronts on the bpod sync traces, outputs the synchronisation events
155
    related to trial start and valve opening
156
    :param bpod_t: numpy vector containing times of fronts
157
    :param bpod_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
158
    :param ignore_first_valve (True): removes detected valve events at indices le 2
159
    :return: numpy arrays of times t_trial_start, t_valve_open and t_iti_in
160
    """
161
    TRIAL_START_TTL_LEN = 2.33e-4  # the TTL length is 0.1ms but this has proven to drift on
1✔
162
    # some bpods and this is the highest possible value that discriminates trial start from valve
163
    ITI_TTL_LEN = 0.4
1✔
164
    # make sure that there are no 2 consecutive fall or consecutive rise events
165
    assert np.all(np.abs(np.diff(bpod_polarities)) == 2)
1✔
166
    if bpod_polarities[0] == -1:
1✔
167
        bpod_t = np.delete(bpod_t, 0)
1✔
168
    # take only even time differences: ie. from rising to falling fronts
169
    dt = np.diff(bpod_t)[::2]
1✔
170
    # detect start trials event assuming length is 0.23 ms except the first trial
171
    i_trial_start = np.r_[0, np.where(dt <= TRIAL_START_TTL_LEN)[0] * 2]
1✔
172
    t_trial_start = bpod_t[i_trial_start]
1✔
173
    # the last trial is a dud and should be removed
174
    t_trial_start = t_trial_start[:-1]
1✔
175
    # valve open events are between 50ms to 300 ms
176
    i_valve_open = np.where(np.logical_and(dt > TRIAL_START_TTL_LEN,
1✔
177
                                           dt < ITI_TTL_LEN))[0] * 2
178
    if ignore_first_valve:
1✔
179
        i_valve_open = np.delete(i_valve_open, np.where(i_valve_open < 2))
1✔
180
    t_valve_open = bpod_t[i_valve_open]
1✔
181
    # ITI events are above 400 ms
182
    i_iti_in = np.where(dt > ITI_TTL_LEN)[0] * 2
1✔
183
    i_iti_in = np.delete(i_iti_in, np.where(i_valve_open < 2))
1✔
184
    t_iti_in = bpod_t[i_iti_in]
1✔
185
    ## some debug plots when needed
186
    # import matplotlib.pyplot as plt
187
    # import ibllib.plots as plots
188
    # events = {'id': np.zeros(bpod_t.shape), 't': bpod_t, 'p': bpod_polarities}
189
    # events['id'][i_trial_start] = 1
190
    # events['id'][i_valve_open] = 2
191
    # events['id'][i_iti_in] = 3
192
    # i_abnormal = np.where(np.diff(events['id'][bpod_polarities != -1]) == 0)
193
    # t_abnormal = events['t'][bpod_polarities != -1][i_abnormal]
194
    # assert np.all(events != 0)
195
    # plt.figure()
196
    # plots.squares(bpod_t, bpod_polarities, label='raw fronts')
197
    # plots.vertical_lines(t_trial_start, ymin=-0.2, ymax=1.1, linewidth=0.5, label='trial start')
198
    # plots.vertical_lines(t_valve_open, ymin=-0.2, ymax=1.1, linewidth=0.5, label='valve open')
199
    # plots.vertical_lines(t_iti_in, ymin=-0.2, ymax=1.1, linewidth=0.5, label='iti_in')
200
    # plt.plot(t_abnormal, t_abnormal * 0 + .5, 'k*')
201
    # plt.legend()
202

203
    return t_trial_start, t_valve_open, t_iti_in
1✔
204

205

206
def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=WHEEL_TICKS, radius=1,
                                          coding='x4'):
    """
    Extracts the rotary encoder absolute position as function of time from fronts detected
    on the 2 channels. Outputs in units of radius parameters, by default radians
    Coding options detailed here: http://www.ni.com/tutorial/7109/pt/
    Here output is clockwise from subject perspective

    :param ta: time of fronts on channel A
    :param pa: polarity of fronts on channel A
    :param tb: time of fronts on channel B
    :param pb: polarity of fronts on channel B
    :param ticks: number of ticks corresponding to a full revolution (1024 for IBL rotary encoder)
    :param radius: radius of the wheel. Defaults to 1 for an output in radians
    :param coding: x1, x2 or x4 coding (IBL default is x4)
    :return: indices vector (ta) and position vector
    """
    if coding == 'x1':
        # x1: one count per quadrature cycle; rising edges of each channel are qualified
        # by the interleaving of rising edges on the other channel to determine direction
        ia = np.searchsorted(tb, ta[pa == 1])
        # NOTE(review): `ia` holds indices into tb from the searchsorted above, yet is
        # bounded by ta.size and then used to index pa — looks like it may have been
        # intended as tb.size / pb; confirm before touching, kept byte-identical here
        ia = ia[ia < ta.size]
        ia = ia[pa[ia] == 1]
        ib = np.searchsorted(ta, tb[pb == 1])
        ib = ib[ib < tb.size]
        ib = ib[pb[ib] == 1]
        # +1 counts from channel A edges, -1 counts from channel B edges
        t = np.r_[ta[ia], tb[ib]]
        p = np.r_[ia * 0 + 1, ib * 0 - 1]
        ordre = np.argsort(t)
        t = t[ordre]
        p = p[ordre]
        # integrate the counts and convert ticks to output units (radians when radius=1)
        p = np.cumsum(p) / ticks * np.pi * 2 * radius
        return t, p
    elif coding == 'x2':
        # x2: both edges of channel A are counted; direction given by the state of B
        # (polarity of the last B front preceding each A front)
        p = pb[np.searchsorted(tb, ta) - 1] * pa
        # divide by 2 because two counts per tick in x2 coding
        p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 2
        return ta, p
    elif coding == 'x4':
        # x4: both edges of both channels are counted; each channel's direction is given
        # by the state of the other channel at that instant
        p = np.r_[pb[np.searchsorted(tb, ta) - 1] * pa, -pa[np.searchsorted(ta, tb) - 1] * pb]
        t = np.r_[ta, tb]
        ordre = np.argsort(t)
        t = t[ordre]
        p = p[ordre]
        # divide by 4 because four counts per tick in x4 coding
        p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 4
        return t, p
249

250

251
def _assign_events_audio(audio_t, audio_polarities, return_indices=False, display=False):
1✔
252
    """
253
    From detected fronts on the audio sync traces, outputs the synchronisation events
254
    related to tone in
255

256
    :param audio_t: numpy vector containing times of fronts
257
    :param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
258
    :param return_indices (False): returns indices of tones
259
    :param display (False): for debug mode, displays the raw fronts overlaid with detections
260
    :return: numpy arrays t_ready_tone_in, t_error_tone_in
261
    :return: numpy arrays ind_ready_tone_in, ind_error_tone_in if return_indices=True
262
    """
263
    # make sure that there are no 2 consecutive fall or consecutive rise events
264
    assert np.all(np.abs(np.diff(audio_polarities)) == 2)
1✔
265
    # take only even time differences: ie. from rising to falling fronts
266
    dt = np.diff(audio_t)
1✔
267
    # detect ready tone by length below 110 ms
268
    i_ready_tone_in = np.where(np.logical_and(dt <= 0.11, audio_polarities[:-1] == 1))[0]
1✔
269
    t_ready_tone_in = audio_t[i_ready_tone_in]
1✔
270
    # error tones are events lasting from 400ms to 1200ms
271
    i_error_tone_in = np.where(np.logical_and(np.logical_and(0.4 < dt, dt < 1.2), audio_polarities[:-1] == 1))[0]
1✔
272
    t_error_tone_in = audio_t[i_error_tone_in]
1✔
273
    if display:  # pragma: no cover
1✔
274
        from ibllib.plots import squares, vertical_lines
275
        squares(audio_t, audio_polarities, yrange=[-1, 1],)
276
        vertical_lines(t_ready_tone_in, ymin=-.8, ymax=.8)
277
        vertical_lines(t_error_tone_in, ymin=-.8, ymax=.8)
278

279
    if return_indices:
1✔
280
        return t_ready_tone_in, t_error_tone_in, i_ready_tone_in, i_error_tone_in
×
281
    else:
282
        return t_ready_tone_in, t_error_tone_in
1✔
283

284

285
def _assign_events_to_trial(t_trial_start, t_event, take='last'):
1✔
286
    """
287
    Assign events to a trial given trial start times and event times.
288

289
    Trials without an event
290
    result in nan value in output time vector.
291
    The output has a consistent size with t_trial_start and ready to output to alf.
292
    :param t_trial_start: numpy vector of trial start times
293
    :param t_event: numpy vector of event times to assign to trials
294
    :param take: 'last' or 'first' (optional, default 'last'): index to take in case of duplicates
295
    :return: numpy array of event times with the same shape of trial start.
296
    """
297
    # make sure the events are sorted
298
    try:
1✔
299
        assert np.all(np.diff(t_trial_start) >= 0)
1✔
300
    except AssertionError:
×
301
        raise ValueError('Trial starts vector not sorted')
×
302
    try:
1✔
303
        assert np.all(np.diff(t_event) >= 0)
1✔
304
    except AssertionError:
×
305
        raise ValueError('Events vector is not sorted')
×
306
    # remove events that happened before the first trial start
307
    t_event = t_event[t_event >= t_trial_start[0]]
1✔
308
    ind = np.searchsorted(t_trial_start, t_event) - 1
1✔
309
    t_event_nans = np.zeros_like(t_trial_start) * np.nan
1✔
310
    # select first or last element matching each trial start
311
    if take == 'last':
1✔
312
        iall, iu = np.unique(np.flip(ind), return_index=True)
1✔
313
        t_event_nans[iall] = t_event[- (iu - ind.size + 1)]
1✔
314
    elif take == 'first':
1✔
315
        iall, iu = np.unique(ind, return_index=True)
1✔
316
        t_event_nans[iall] = t_event[iu]
1✔
317
    else:  # if the index is arbitrary, needs to be numeric (could be negative if from the end)
318
        iall = np.unique(ind)
1✔
319
        minsize = take + 1 if take >= 0 else - take
1✔
320
        # for each trial, take the takenth element if there are enough values in trial
321
        for iu in iall:
1✔
322
            match = t_event[iu == ind]
1✔
323
            if len(match) >= minsize:
1✔
324
                t_event_nans[iu] = match[take]
1✔
325
    return t_event_nans
1✔
326

327

328
def get_sync_fronts(sync, channel_nb, tmin=None, tmax=None):
    """
    Return the sync front polarities and times for a given channel.

    Parameters
    ----------
    sync : dict
        'polarities' of fronts detected on sync trace for all 16 channels and their 'times'.
    channel_nb : int
        The integer corresponding to the desired sync channel.
    tmin : float
        The minimum time from which to extract the sync pulses.
    tmax : float
        The maximum time up to which we extract the sync pulses.

    Returns
    -------
    Bunch
        Channel times and polarities.
    """
    selection = sync['channels'] == channel_nb
    # `is not None` rather than truthiness: a bound of 0 is a valid time and was
    # previously (silently) treated the same as passing None
    selection = np.logical_and(selection, sync['times'] <= tmax) if tmax is not None else selection
    selection = np.logical_and(selection, sync['times'] >= tmin) if tmin is not None else selection
    return Bunch({'times': sync['times'][selection],
                  'polarities': sync['polarities'][selection]})
353

354

355
def _clean_audio(audio, display=False):
1✔
356
    """
357
    one guy wired the 150 Hz camera output onto the soundcard. The effect is to get 150 Hz periodic
358
    square pulses, 2ms up and 4.666 ms down. When this happens we remove all of the intermediate
359
    pulses to repair the audio trace
360
    Here is some helper code
361
        dd = np.diff(audio['times'])
362
        1 / np.median(dd[::2]) # 2ms up
363
        1 / np.median(dd[1::2])  # 4.666 ms down
364
        1 / (np.median(dd[::2]) + np.median(dd[1::2])) # both sum to 150 Hx
365
    This only runs on sessions when the bug is detected and leaves others untouched
366
    """
367
    DISCARD_THRESHOLD = 0.01
1✔
368
    average_150_hz = np.mean(1 / np.diff(audio['times'][audio['polarities'] == 1]) > 140)
1✔
369
    naudio = audio['times'].size
1✔
370
    if average_150_hz > 0.7 and naudio > 100:
1✔
371
        _logger.warning("Soundcard signal on FPGA seems to have been mixed with 150Hz camera")
1✔
372
        keep_ind = np.r_[np.diff(audio['times']) > DISCARD_THRESHOLD, False]
1✔
373
        keep_ind = np.logical_and(keep_ind, audio['polarities'] == -1)
1✔
374
        keep_ind = np.where(keep_ind)[0]
1✔
375
        keep_ind = np.sort(np.r_[0, keep_ind, keep_ind + 1, naudio - 1])
1✔
376

377
        if display:  # pragma: no cover
1✔
378
            from ibllib.plots import squares
379
            squares(audio['times'], audio['polarities'], ax=None, yrange=[-1, 1])
380
            squares(audio['times'][keep_ind], audio['polarities'][keep_ind], yrange=[-1, 1])
381
        audio = {'times': audio['times'][keep_ind],
1✔
382
                 'polarities': audio['polarities'][keep_ind]}
383
    return audio
1✔
384

385

386
def _clean_frame2ttl(frame2ttl, display=False):
    """
    Frame 2ttl calibration can be unstable and the fronts may be flickering at an unrealistic
    pace. This removes the consecutive frame2ttl pulses happening too fast, below a threshold
    of F2TTL_THRESH

    :param frame2ttl: dict with numpy arrays under keys 'times' and 'polarities'
    :param display: if True, shows the raw and cleaned traces (debug only)
    :return: dict of 'times' and 'polarities' with the flickering fronts removed
    """
    dt = np.diff(frame2ttl['times'])
    # flag falling fronts followed too quickly by the next front
    iko = np.where(np.logical_and(dt < F2TTL_THRESH, frame2ttl['polarities'][:-1] == -1))[0]
    # remove both the flagged front and the one immediately after it
    iko = np.unique(np.r_[iko, iko + 1])
    frame2ttl_ = {'times': np.delete(frame2ttl['times'], iko),
                  'polarities': np.delete(frame2ttl['polarities'], iko)}
    if iko.size > (0.1 * frame2ttl['times'].size):
        # bugfix: ':.2%' already renders a percent sign; the old trailing ' %' doubled it
        _logger.warning(f'{iko.size} ({iko.size / frame2ttl["times"].size:.2%}) '
                        f'frame to TTL polarity switches below {F2TTL_THRESH} secs')
    if display:  # pragma: no cover
        from ibllib.plots import squares
        plt.figure()
        squares(frame2ttl['times'] * 1000, frame2ttl['polarities'], yrange=[0.1, 0.9])
        squares(frame2ttl_['times'] * 1000, frame2ttl_['polarities'], yrange=[1.1, 1.9])
        import seaborn as sns
        sns.displot(dt[dt < 0.05], binwidth=0.0005)

    return frame2ttl_
409

410

411
def extract_wheel_sync(sync, chmap=None, tmin=None, tmax=None):
    """
    Extract wheel positions and times from sync fronts dictionary for all 16 channels.
    Output position is in radians, mathematical convention.

    Parameters
    ----------
    sync : dict
        'polarities' of fronts detected on sync trace for all 16 chans and their 'times'
    chmap : dict
        Map of channel names and their corresponding index.  Default to constant.
    tmin : float
        The minimum time from which to extract the sync pulses.
    tmax : float
        The maximum time up to which we extract the sync pulses.

    Returns
    -------
    np.array
        Wheel timestamps in seconds.
    np.array
        Wheel positions in radians.
    """
    # the two quadrature channels of the rotary encoder
    channel_a = get_sync_fronts(sync, chmap['rotary_encoder_0'], tmin=tmin, tmax=tmax)
    channel_b = get_sync_fronts(sync, chmap['rotary_encoder_1'], tmin=tmin, tmax=tmax)
    # radius=1 keeps the output in radians; IBL encoders use x4 coding
    re_ts, re_pos = _rotary_encoder_positions_from_fronts(
        channel_a['times'], channel_a['polarities'],
        channel_b['times'], channel_b['polarities'],
        ticks=WHEEL_TICKS, radius=1, coding='x4')
    return re_ts, re_pos
441

442

443
def extract_behaviour_sync(sync, chmap=None, display=False, bpod_trials=None, tmin=None, tmax=None):
    """
    Extract task related event times from the sync.

    Parameters
    ----------
    sync : dict
        'polarities' of fronts detected on sync trace for all 16 chans and their 'times'
    chmap : dict
        Map of channel names and their corresponding index.  Default to constant.
    display : bool, matplotlib.pyplot.Axes
        Show the full session sync pulses display
    bpod_trials : dict
        The same trial events as recorded through Bpod. Assumed to contain an 'intervals_bpod' key.
    tmin : float
        The minimum time from which to extract the sync pulses.
    tmax : float
        The maximum time up to which we extract the sync pulses.

    Returns
    -------
    dict
        A map of trial event timestamps.
    """
    bpod = get_sync_fronts(sync, chmap['bpod'], tmin=tmin, tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException('No Bpod event found in FPGA. No behaviour extraction. '
                                        'Check channel maps.')
    # remove spurious flicker / camera bleed-through before event detection
    frame2ttl = get_sync_fronts(sync, chmap['frame2ttl'], tmin=tmin, tmax=tmax)
    frame2ttl = _clean_frame2ttl(frame2ttl)
    audio = get_sync_fronts(sync, chmap['audio'], tmin=tmin, tmax=tmax)
    audio = _clean_audio(audio)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(bpod['times'], bpod['polarities'])
    # one issue is that sometimes bpod pulses may not have been detected, in this case
    # perform the sync bpod/FPGA, and add the start that have not been detected
    if bpod_trials:
        bpod_start = bpod_trials['intervals_bpod'][:, 0]
        fcn, drift, ibpod, ifpga = neurodsp.utils.sync_timestamps(
            bpod_start, t_trial_start, return_indices=True)
        # if it's drifting too much
        if drift > 200 and bpod_start.size != t_trial_start.size:
            raise err.SyncBpodFpgaException('sync cluster f*ck')
        # Bpod starts with no matching FPGA pulse are interpolated onto the FPGA clock
        missing_bpod = fcn(bpod_start[np.setxor1d(ibpod, np.arange(len(bpod_start)))])
        t_trial_start = np.sort(np.r_[t_trial_start, missing_bpod])
    else:
        _logger.warning('Deprecation Warning: calling FPGA trials extraction without a bpod trials'
                        ' dictionary will result in an error.')
    t_ready_tone_in, t_error_tone_in = _assign_events_audio(
        audio['times'], audio['polarities'])
    # assign each detected event to its trial; trials without the event get NaN
    # NOTE(review): take=-2 presumably targets the second-to-last frame2ttl change of
    # each trial as the stimulus freeze — confirm against the task TTL specification
    trials = Bunch({
        'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valveOpen_times': _assign_events_to_trial(t_trial_start, t_valve_open),
        'stimFreeze_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
        'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
        'stimOff_times': _assign_events_to_trial(t_trial_start, frame2ttl['times']),
        'itiIn_times': _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valveOpen_times'])
    ind_err = np.isnan(trials['valveOpen_times'])
    trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]

    if display:  # pragma: no cover
        width = 0.5
        ymax = 5
        # `display` may be a boolean (create a new figure) or an existing Axes
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        # raw traces stacked at integer offsets, detections as vertical lines
        r0 = get_sync_fronts(sync, chmap['rotary_encoder_0'], tmin=tmin, tmax=tmax)
        plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
        plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
        plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
        plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
        plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='goCue_times', color='b', linewidth=width)
        plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax,
                             ax=ax, label='start_trial', color='m', linewidth=width)
        plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='error tone', color='r', linewidth=width)
        plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax,
                             ax=ax, label='valveOpen_times', color='g', linewidth=width)
        plots.vertical_lines(trials['stimFreeze_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stimFreeze_times', color='y', linewidth=width)
        plots.vertical_lines(trials['stimOff_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stim off', color='c', linewidth=width)
        plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stimOn_times', color='tab:orange', linewidth=width)
        c = get_sync_fronts(sync, chmap['left_camera'], tmin=tmin, tmax=tmax)
        plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
        c = get_sync_fronts(sync, chmap['right_camera'], tmin=tmin, tmax=tmax)
        plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
        c = get_sync_fronts(sync, chmap['body_camera'], tmin=tmin, tmax=tmax)
        plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_yticks([0, 1, 2, 3, 4, 5])
        ax.set_ylim([0, 5])

    return trials
547

548

549
def extract_sync(session_path, overwrite=False, ephys_files=None, namespace='spikeglx'):
    """
    Reads ephys binary file (s) and extract sync within the binary file folder
    Assumes ephys data is within a `raw_ephys_data` folder

    :param session_path: '/path/to/subject/yyyy-mm-dd/001'
    :param overwrite: Bool on re-extraction, forces overwrite instead of loading existing files
    :param ephys_files: optional list of ephys file dicts; globbed from the session if None
    :param namespace: ALF namespace for the sync object (default 'spikeglx')
    :return: list of sync dictionaries
    """
    session_path = Path(session_path)
    if not ephys_files:
        ephys_files = spikeglx.glob_ephys_files(session_path)
    syncs = []
    outputs = []
    for ef in ephys_files:
        # prefer the AP band binary, fall back on the nidq one
        bin_file = ef.get('ap', ef.get('nidq', None))
        if not bin_file:
            continue
        alfname = dict(object='sync', namespace=namespace)
        if ef.label:
            alfname['extra'] = ef.label
        # reuse an existing extraction unless an overwrite is requested
        if not overwrite and alfio.exists(bin_file.parent, **alfname):
            _logger.warning(f'Skipping raw sync: SGLX sync found for {ef.label}!')
            sync = alfio.load_object(bin_file.parent, **alfname)
            out_files, _ = alfio._ls(bin_file.parent, **alfname)
        else:
            sr = spikeglx.Reader(bin_file)
            sync, out_files = _sync_to_alf(sr, bin_file.parent, save=True, parts=ef.label)
            sr.close()
        outputs.extend(out_files)
        syncs.append(sync)

    return syncs, outputs
583

584

585
def _get_all_probes_sync(session_path, bin_exists=True):
    """Glob all binary ephys files of a session, infer the neuropixel version and
    attach the loaded 'sync' object and its channel map to each file record."""
    ephys_files = spikeglx.glob_ephys_files(session_path, bin_exists=bin_exists)
    version = spikeglx.get_neuropixel_version_from_files(ephys_files)
    # attach the sync information to each binary file found
    for efile in ephys_files:
        efile['sync'] = alfio.load_object(efile.path, 'sync', namespace='spikeglx', short_keys=True)
        efile['sync_map'] = get_ibl_sync_map(efile, version)
    return ephys_files
594

595

596
def get_wheel_positions(sync, chmap, tmin=None, tmax=None):
    """
    Gets the wheel position from synchronisation pulses

    Parameters
    ----------
    sync : dict
        A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
        the corresponding channel numbers.
    chmap : dict[str, int]
        A map of channel names and their corresponding indices.
    tmin : float
        The minimum time from which to extract the sync pulses.
    tmax : float
        The maximum time up to which we extract the sync pulses.

    Returns
    -------
    Bunch
        A dictionary with keys ('timestamps', 'position'), containing the wheel event timestamps and
        position in radians
    Bunch
        A dictionary of detected movement times with keys ('intervals', 'peakAmplitude', 'peakVelocity_times').
    """
    timestamps, position = extract_wheel_sync(sync=sync, chmap=chmap, tmin=tmin, tmax=tmax)
    wheel = Bunch({'timestamps': timestamps, 'position': position})
    # detect wheel movement periods from the decoded trace
    moves = Bunch(extract_wheel_moves(timestamps, position))
    return wheel, moves
624

625

626
def get_main_probe_sync(session_path, bin_exists=False):
    """
    From 3A or 3B multiprobe session, returns the main probe (3A) or nidq sync pulses
    with the attached channel map (default chmap if none)

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    bin_exists : bool
        Whether there is a .bin file present.

    Returns
    -------
    one.alf.io.AlfBunch
        A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
        the corresponding channel numbers.
    dict
        A map of channel names and their corresponding indices.

    Raises
    ------
    FileNotFoundError
        If no ephys files are found in the session path.
    ValueError
        If the neuropixel version is not recognised.
    """
    ephys_files = _get_all_probes_sync(session_path, bin_exists=bin_exists)
    if not ephys_files:
        raise FileNotFoundError(f"No ephys files found in {session_path}")
    version = spikeglx.get_neuropixel_version_from_files(ephys_files)
    if version == '3A':
        # the sync master is the probe with the most sync pulses
        sync_box_ind = np.argmax([ef.sync.times.size for ef in ephys_files])
    elif version == '3B':
        # the sync master is the nidq breakout box
        sync_box_ind = np.argmax([1 if ef.get('nidq') else 0 for ef in ephys_files])
    else:
        # previously fell through to an UnboundLocalError; fail explicitly instead
        raise ValueError(f'Unsupported neuropixel version: {version}')
    sync = ephys_files[sync_box_ind].sync
    sync_chmap = ephys_files[sync_box_ind].sync_map
    return sync, sync_chmap
659

660

661
def get_protocol_period(session_path, protocol_number, bpod_sync):
    """
    Return the FPGA time window covering a given protocol in a chained session.

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    protocol_number : int
        The order that the protocol was run in.
    bpod_sync : dict
        The sync times and polarities for Bpod BNC1.

    Returns
    -------
    float
        The time of the detected spacer for the protocol number.
    float, None
        The time of the next detected spacer or None if this is the last protocol run.
    """
    # Each protocol is preceded by a spacer: a signature TTL pattern emitted by Bpod
    spacer_times = Spacer().find_spacers_from_fronts(bpod_sync)
    # Sanity-check the detected spacer count against the experiment description, if present
    acquisition_description = session_params.read_params(session_path)
    if acquisition_description:
        n_tasks = len(acquisition_description.get('tasks', []))
        assert n_tasks == len(spacer_times), f'expected {n_tasks} spacers, found {len(spacer_times)}'
        assert n_tasks > protocol_number >= 0, f'protocol number must be between 0 and {n_tasks}'
    else:
        assert protocol_number < len(spacer_times)
    # The period runs from this protocol's spacer to the next one (None if last)
    is_last_protocol = protocol_number == len(spacer_times) - 1
    start = spacer_times[int(protocol_number)]
    end = None if is_last_protocol else spacer_times[int(protocol_number + 1)]
    return start, end
692

693

694
class FpgaTrials(extractors_base.BaseExtractor):
    """Extractor for all ephys trial data, in FPGA (main sync clock) time.

    Runs the Bpod trials extraction, then re-synchronizes the Bpod event times
    onto the FPGA clock using the trial interval start times common to both
    streams.  Wheel data are extracted directly from the FPGA sync channel.
    """
    # Output filenames, index-matched to var_names; None entries are not saved to disk
    save_names = ('_ibl_trials.intervals_bpod.npy',
                  '_ibl_trials.goCueTrigger_times.npy', None, None, None, None, None, None, None,
                  '_ibl_trials.stimOff_times.npy', None, None, None, None,
                  '_ibl_trials.table.pqt', '_ibl_wheel.timestamps.npy',
                  '_ibl_wheel.position.npy', '_ibl_wheelMoves.intervals.npy',
                  '_ibl_wheelMoves.peakAmplitude.npy')
    # Names of the extracted variables, in the order returned by _extract
    var_names = ('intervals_bpod',
                 'goCueTrigger_times', 'stimOnTrigger_times',
                 'stimOffTrigger_times', 'stimFreezeTrigger_times', 'errorCueTrigger_times',
                 'errorCue_times', 'itiIn_times', 'stimFreeze_times', 'stimOff_times',
                 'valveOpen_times', 'phase', 'position', 'quiescence', 'table',
                 'wheel_timestamps', 'wheel_position',
                 'wheelMoves_intervals', 'wheelMoves_peakAmplitude')

    # Fields from bpod extractor that we want to resync to FPGA
    bpod_rsync_fields = ('intervals', 'response_times', 'goCueTrigger_times',
                         'stimOnTrigger_times', 'stimOffTrigger_times',
                         'stimFreezeTrigger_times', 'errorCueTrigger_times')

    # Fields from bpod extractor that we want to save
    bpod_fields = ('feedbackType', 'choice', 'rewardVolume', 'contrastLeft', 'contrastRight', 'probabilityLeft',
                   'intervals_bpod', 'phase', 'position', 'quiescence')

    def __init__(self, *args, **kwargs):
        """An extractor for all ephys trial data, in FPGA time"""
        super().__init__(*args, **kwargs)
        # Callable mapping Bpod times -> FPGA times; set during _extract by sync_timestamps
        self.bpod2fpga = None

    def _extract(self, sync=None, chmap=None, sync_collection='raw_ephys_data', task_collection='raw_behavior_data', **kwargs):
        """Extracts ephys trials by combining Bpod and FPGA sync pulses"""
        # extract the behaviour data from bpod
        if sync is None or chmap is None:
            _sync, _chmap = get_sync_and_chn_map(self.session_path, sync_collection)
            sync = sync or _sync
            chmap = chmap or _chmap
        # load the bpod data and performs a biased choice world training extraction
        # TODO these all need to pass in the collection so we can load for different protocols in different folders
        bpod_raw = raw_data_loaders.load_data(self.session_path, task_collection=task_collection)
        assert bpod_raw is not None, "No task trials data in raw_behavior_data - Exit"

        bpod_trials = self._extract_bpod(bpod_raw, task_collection=task_collection, save=False)
        # Explode trials table df
        # (the column names are kept so the table can be re-assembled after syncing)
        trials_table = alfio.AlfBunch.from_df(bpod_trials.pop('table'))
        table_columns = trials_table.keys()
        bpod_trials.update(trials_table)
        # synchronize
        # keep the original Bpod-clock intervals; 'intervals' itself gets resynced to FPGA below
        bpod_trials['intervals_bpod'] = np.copy(bpod_trials['intervals'])

        # Get the spacer times for this protocol
        if (protocol_number := kwargs.get('protocol_number')) is not None:  # look for spacer
            # The spacers are TTLs generated by Bpod at the start of each protocol
            bpod = get_sync_fronts(sync, chmap['bpod'])
            tmin, tmax = get_protocol_period(self.session_path, protocol_number, bpod)
        else:
            tmin = tmax = None

        fpga_trials = extract_behaviour_sync(
            sync=sync, chmap=chmap, bpod_trials=bpod_trials, tmin=tmin, tmax=tmax)
        # checks consistency and compute dt with bpod
        # bpod2fpga is the interpolation function; ibpod/ifpga index the matched trials
        self.bpod2fpga, drift_ppm, ibpod, ifpga = neurodsp.utils.sync_timestamps(
            bpod_trials['intervals_bpod'][:, 0], fpga_trials.pop('intervals')[:, 0],
            return_indices=True)
        nbpod = bpod_trials['intervals_bpod'].shape[0]
        npfga = fpga_trials['feedback_times'].shape[0]
        nsync = len(ibpod)
        _logger.info(f"N trials: {nbpod} bpod, {npfga} FPGA, {nsync} merged, sync {drift_ppm} ppm")
        if drift_ppm > BPOD_FPGA_DRIFT_THRESHOLD_PPM:
            _logger.warning('BPOD/FPGA synchronization shows values greater than %i ppm',
                            BPOD_FPGA_DRIFT_THRESHOLD_PPM)
        # Assemble outputs restricted to the matched trials:
        # Bpod-only fields as-is, Bpod fields resynced through bpod2fpga, then FPGA fields
        out = OrderedDict()
        out.update({k: bpod_trials[k][ibpod] for k in self.bpod_fields})
        out.update({k: self.bpod2fpga(bpod_trials[k][ibpod]) for k in self.bpod_rsync_fields})
        out.update({k: fpga_trials[k][ifpga] for k in sorted(fpga_trials.keys())})
        # extract the wheel data
        wheel, moves = get_wheel_positions(sync=sync, chmap=chmap, tmin=tmin, tmax=tmax)
        from ibllib.io.extractors.training_wheel import extract_first_movement_times
        settings = raw_data_loaders.load_settings(session_path=self.session_path, task_collection=task_collection)
        min_qt = settings.get('QUIESCENT_PERIOD', None)
        first_move_onsets, *_ = extract_first_movement_times(moves, out, min_qt=min_qt)
        out.update({'firstMovement_times': first_move_onsets})
        # Re-create trials table
        trials_table = alfio.AlfBunch({x: out.pop(x) for x in table_columns})
        out['table'] = trials_table.to_df()

        out = {k: out[k] for k in self.var_names if k in out}  # Reorder output
        # Every non-wheel var_name must be present, in order, or saving would mismatch save_names
        assert tuple(filter(lambda x: 'wheel' not in x, self.var_names)) == tuple(out.keys())
        return [out[k] for k in out] + [wheel['timestamps'], wheel['position'],
                                        moves['intervals'], moves['peakAmplitude']]

    def _extract_bpod(self, bpod_trials, task_collection='raw_behavior_data', save=False):
        """Run the Bpod trials extraction on the raw Bpod data.

        Parameters
        ----------
        bpod_trials : list of dict
            The raw Bpod trial data, as returned by raw_data_loaders.load_data.
        task_collection : str
            The session subdirectory containing the behaviour protocol data.
        save : bool
            If true, save the extracted Bpod datasets to disk.

        Returns
        -------
        dict
            The extracted Bpod trials, in Bpod time.
        """
        bpod_trials, *_ = bpod_extract_all(
            session_path=self.session_path, save=save, bpod_trials=bpod_trials, task_collection=task_collection)

        return bpod_trials
789

790

791
def extract_all(session_path, sync_collection='raw_ephys_data', save=True, task_collection='raw_behavior_data', save_path=None,
                protocol_number=None, **kwargs):
    """
    For the IBL ephys task, reads ephys binary file and extract:
        -   sync
        -   wheel
        -   behaviour
        -   video time stamps

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    sync_collection : str
        The session subdirectory where the sync data are located.
    save : bool
        If true, save the extracted files to save_path.
    task_collection : str
        The location of the behaviour protocol data.
    save_path : str, pathlib.Path
        The save location of the extracted files, defaults to the alf directory of the session path.
    protocol_number : int
        The order that the protocol was run in.
    **kwargs
        Optional extractor keyword arguments.

    Returns
    -------
    list
        The extracted data.
    list of pathlib.Path, None
        If save is True, a list of file paths to the extracted data.
    """
    extractor_type = extractors_base.get_session_extractor_type(session_path, task_collection=task_collection)
    _logger.info(f"Extracting {session_path} as {extractor_type}")
    sync, chmap = get_sync_and_chn_map(session_path, sync_collection)
    # Build the list of extractor classes to run; opto sessions additionally get the laser extractor
    extractor_classes = [FpgaTrials]
    if extractor_type == 'ephys_biased_opto':
        extractor_classes.append(LaserBool)
    outputs, files = extractors_base.run_extractor_classes(
        extractor_classes, session_path=session_path, save=save, sync=sync, chmap=chmap, path_out=save_path,
        task_collection=task_collection, protocol_number=protocol_number, **kwargs)
    return outputs, files
835

836

837
def get_sync_and_chn_map(session_path, sync_collection):
    """
    Return sync and channel map for session based on collection where main sync is stored.

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    sync_collection : str
        The session subdirectory where the sync data are located.

    Returns
    -------
    one.alf.io.AlfBunch
        A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
        the corresponding channel numbers.
    dict
        A map of channel names and their corresponding indices.
    """
    # The docstring allows a str session_path but joinpath below requires a Path object
    session_path = Path(session_path)
    if sync_collection == 'raw_ephys_data':
        # Check to see if we have nidq files, if we do just go with this otherwise go into other function that deals with
        # 3A probes
        nidq_meta = next(session_path.joinpath(sync_collection).glob('*nidq.meta'), None)
        if not nidq_meta:
            sync, chmap = get_main_probe_sync(session_path)
        else:
            sync = load_sync(session_path, sync_collection)
            ef = Bunch()
            ef['path'] = session_path.joinpath(sync_collection)
            ef['nidq'] = nidq_meta
            chmap = get_ibl_sync_map(ef, '3B')

    else:
        sync = load_sync(session_path, sync_collection)
        chmap = load_channel_map(session_path, sync_collection)

    return sync, chmap
874

875

876
def load_channel_map(session_path, sync_collection):
    """
    Load syncing channel map for session path and collection

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    sync_collection : str
        The session subdirectory where the sync data are located.

    Returns
    -------
    dict
        A map of channel names and their corresponding indices.
    """
    # The docstring allows a str session_path but joinpath below requires a Path object
    session_path = Path(session_path)
    # e.g. 'raw_ephys_data' -> 'ephys'; assumes collections follow the raw_<device>_data
    # convention — TODO confirm against callers
    device = sync_collection.split('_')[1]
    default_chmap = DEFAULT_MAPS[device]['nidq']

    # Try to load channel map from file
    chmap = spikeglx.get_sync_map(session_path.joinpath(sync_collection))
    if not chmap:
        return default_chmap
    if data_for_keys(default_chmap.keys(), chmap):
        return chmap
    # If chmap provided but not with all keys, fill up with default values
    _logger.warning("Keys missing from provided channel map, "
                    "setting missing keys from default channel map")
    return {**default_chmap, **chmap}
908

909

910
def load_sync(session_path, sync_collection):
    """
    Load sync files from session path and collection.

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    sync_collection : str
        The session subdirectory where the sync data are located.

    Returns
    -------
    one.alf.io.AlfBunch
        A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
        the corresponding channel numbers.
    """
    # Sync pulses are stored as a spikeglx-namespaced ALF object within the collection
    collection_path = session_path.joinpath(sync_collection)
    return alfio.load_object(collection_path, 'sync', namespace='spikeglx', short_keys=True)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc