
vocdoni / vocdoni-node / build 13793199355

11 Mar 2025 04:28PM UTC coverage: 62.51% (-0.03%) from 62.542%

Pull Request #1405 (github): vochain: allow faucet package on NewProcessTx

Committer: p4u
Signed-off-by: p4u <pau@dabax.net>

18 of 27 new or added lines in 3 files covered. (66.67%)

19 existing lines in 3 files now uncovered.

16832 of 26927 relevant lines covered (62.51%)

37950.15 hits per line

Source File: /vochain/indexer/indexer.go (76.36% covered)
package indexer

import (
	"bytes"
	"context"
	"database/sql"
	"embed"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"maps"
	"math/big"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"sync"
	"time"

	"go.vocdoni.io/dvote/log"
	"go.vocdoni.io/dvote/statedb"
	"go.vocdoni.io/dvote/types"
	"go.vocdoni.io/dvote/vochain"
	indexerdb "go.vocdoni.io/dvote/vochain/indexer/db"
	"go.vocdoni.io/dvote/vochain/indexer/indexertypes"
	"go.vocdoni.io/dvote/vochain/results"
	"go.vocdoni.io/dvote/vochain/state"
	"go.vocdoni.io/dvote/vochain/transaction/vochaintx"
	"go.vocdoni.io/proto/build/go/models"

	"github.com/pressly/goose/v3"

	// modernc is a pure-Go version, but its errors have less useful info.
	// We use mattn while developing and testing, and we can swap them later.
	// _ "modernc.org/sqlite"
	_ "github.com/mattn/go-sqlite3"
)

//go:generate go run github.com/sqlc-dev/sqlc/cmd/sqlc@v1.27.0 generate

//go:embed migrations/*.sql
var embedMigrations embed.FS

const dbFilename = "db.sqlite"

// EventListener is an interface used for executing custom functions during the
// events of the tally of a process.
type EventListener interface {
	OnComputeResults(results *results.Results, process *indexertypes.Process, height uint32)
}

// AddEventListener adds a new event listener, to receive method calls on block
// events as documented in EventListener.
func (idx *Indexer) AddEventListener(l EventListener) {
	idx.eventOnResults = append(idx.eventOnResults, l)
}

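// Editorial example (not part of the original indexer.go): a minimal sketch of
// how a caller could satisfy EventListener and register it via AddEventListener.
// The type name logListener and the log message are assumptions made for
// illustration only; the interface and registration call are taken from above.
type logListener struct{}

func (logListener) OnComputeResults(r *results.Results, _ *indexertypes.Process, height uint32) {
	log.Infow("results computed", "process", fmt.Sprintf("%x", r.ProcessID), "height", height)
}

// Typical registration, assuming idx is an *Indexer returned by New:
//
//	idx.AddEventListener(logListener{})
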
// Indexer is the component which makes the accounting of the voting processes
// and keeps it indexed in a local database.
type Indexer struct {
	App *vochain.BaseApplication

	// votePool is the set of votes that should be live counted,
	// first grouped by processId, then keyed by nullifier.
	// Only keeping one vote per nullifier is important for "overwrite" votes,
	// so that we only count the last one in the live results.
	// TODO: try using blockTx directly, after some more refactors?
	votePool map[string]map[string]*state.Vote

	dbPath      string
	readOnlyDB  *sql.DB
	readWriteDB *sql.DB

	readOnlyQuery *indexerdb.Queries

	// blockMu protects blockTx, blockQueries, and blockUpdateProcs.
	blockMu sync.Mutex
	// blockTx is an in-progress SQL transaction which is committed or rolled
	// back along with the current block.
	blockTx *sql.Tx
	// blockQueries wraps blockTx. Note that it is kept between multiple transactions
	// so that we can reuse the same prepared statements.
	blockQueries *indexerdb.Queries
	// blockUpdateProcs is the list of process IDs that require sync with the state database.
	// The key is a types.ProcessID as a string, so that it can be used as a map key.
	blockUpdateProcs          map[string]bool
	blockUpdateProcVoteCounts map[string]bool

	// list of live processes (those on which the votes will be computed on arrival)
	// TODO: we could query the procs table, perhaps memoizing to avoid querying the same over and over again?
	liveResultsProcs sync.Map

	// eventOnResults is the list of external callbacks that will be executed by the indexer
	eventOnResults []EventListener

	// ignoreLiveResults if true, partial/live results won't be calculated (only final results)
	ignoreLiveResults bool
}

type Options struct {
	DataDir string

	// ExpectBackupRestore should be set to true if a call to Indexer.RestoreBackup
	// will be made shortly after New is called, and before any indexing or queries happen.
	// If the DB file on disk exists, this flag will be ignored and the existing DB will be loaded.
	ExpectBackupRestore bool

	IgnoreLiveResults bool
}

// New returns an instance of the Indexer
// using the local storage database in DataDir and integrated into the state vochain instance.
func New(app *vochain.BaseApplication, opts Options) (*Indexer, error) {
	idx := &Indexer{
		App:               app,
		ignoreLiveResults: opts.IgnoreLiveResults,

		// TODO(mvdan): these three maps are all keyed by process ID,
		// and each of them needs to query existing data from the DB.
		// Since the map keys very often overlap, consider joining the maps
		// so that we can also reuse queries to the DB.
		votePool:                  make(map[string]map[string]*state.Vote),
		blockUpdateProcs:          make(map[string]bool),
		blockUpdateProcVoteCounts: make(map[string]bool),
	}
	log.Infow("indexer initialization", "dataDir", opts.DataDir, "liveResults", !opts.IgnoreLiveResults)

	// The DB itself is opened in "rwc" mode, so it is created if it does not yet exist.
	// Create the parent directory as well if it doesn't exist.
	if err := os.MkdirAll(opts.DataDir, os.ModePerm); err != nil {
		return nil, err
	}
	idx.dbPath = filepath.Join(opts.DataDir, dbFilename)

	// if dbPath exists, always startDB (ExpectBackupRestore is ignored)
	// if dbPath doesn't exist, and we're not expecting a BackupRestore, startDB
	// if dbPath doesn't exist and we're expecting a backup, skip startDB, it will be triggered after the restore
	if _, err := os.Stat(idx.dbPath); err == nil ||
		(os.IsNotExist(err) && !opts.ExpectBackupRestore) {
		if err := idx.startDB(); err != nil {
			return nil, err
		}
	}

	// Subscribe to events
	idx.App.State.AddEventListener(idx)

	return idx, nil
}

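// Editorial example (not part of the original indexer.go): a sketch of wiring
// the Indexer into an application. The helper name newIndexerForApp and its
// parameters are assumptions; New, Options and AfterSyncBootstrap are taken
// from the source, which documents that AfterSyncBootstrap may run on a goroutine.
func newIndexerForApp(app *vochain.BaseApplication, dataDir string) (*Indexer, error) {
	idx, err := New(app, Options{
		DataDir:           dataDir,
		IgnoreLiveResults: false, // keep live (partial) results enabled
	})
	if err != nil {
		return nil, err
	}
	// Recover live results for still-open processes once the chain is synced.
	go idx.AfterSyncBootstrap(false)
	return idx, nil
}
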
func (idx *Indexer) startDB() error {
	if idx.readWriteDB != nil {
		panic("Indexer.startDB called twice")
	}

	var err error

	// sqlite doesn't support multiple concurrent writers.
	// For that reason, readWriteDB is limited to one open connection.
	// Per https://github.com/mattn/go-sqlite3/issues/1022#issuecomment-1067353980,
	// we use WAL to allow multiple concurrent readers at the same time.
	idx.readWriteDB, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&_journal_mode=wal&_txlock=immediate&_synchronous=normal&_foreign_keys=true", idx.dbPath))
	if err != nil {
		return err
	}
	idx.readWriteDB.SetMaxOpenConns(1)
	idx.readWriteDB.SetMaxIdleConns(1)
	idx.readWriteDB.SetConnMaxIdleTime(10 * time.Minute)

	if err := goose.SetDialect("sqlite3"); err != nil {
		return err
	}
	goose.SetLogger(log.GooseLogger())
	goose.SetBaseFS(embedMigrations)

	if gooseMigrationsPending(idx.readWriteDB, "migrations") {
		log.Info("indexer db needs migration, scheduling a reindex after sync")
		defer func() { go idx.ReindexBlocks(false) }()
	}

	if err := goose.Up(idx.readWriteDB, "migrations"); err != nil {
		return fmt.Errorf("goose up: %w", err)
	}

	// Analyze the tables and indices and store information in internal tables
	// so that the query optimizer can make better choices.
	if _, err := idx.readWriteDB.Exec("PRAGMA analysis_limit=1000; ANALYZE"); err != nil {
		return err
	}

	idx.readOnlyDB, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro&_journal_mode=wal", idx.dbPath))
	if err != nil {
		return err
	}
	// Increasing these numbers can allow for more queries to run concurrently,
	// but it also increases the memory used by sqlite and our connection pool.
	// Most read-only queries we run are quick enough, so a small number seems OK.
	idx.readOnlyDB.SetMaxOpenConns(16)
	idx.readOnlyDB.SetMaxIdleConns(4)
	idx.readOnlyDB.SetConnMaxIdleTime(30 * time.Minute)

	idx.readOnlyQuery, err = indexerdb.Prepare(context.TODO(), idx.readOnlyDB)
	if err != nil {
		return err
	}
	idx.blockQueries, err = indexerdb.Prepare(context.TODO(), idx.readWriteDB)
	if err != nil {
		return err
	}
	return nil
}

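// Editorial example (not part of the original indexer.go): the same
// single-writer/WAL sqlite pattern used by startDB, shown in isolation. The
// function name openSplitSQLite is an assumption; the DSN parameters and
// connection limits mirror the ones in startDB above.
func openSplitSQLite(path string) (rw, ro *sql.DB, err error) {
	// One read-write connection only: sqlite allows a single concurrent writer.
	rw, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&_journal_mode=wal&_txlock=immediate&_synchronous=normal&_foreign_keys=true", path))
	if err != nil {
		return nil, nil, err
	}
	rw.SetMaxOpenConns(1)

	// A separate read-only pool: WAL lets readers run alongside the writer.
	ro, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro&_journal_mode=wal", path))
	if err != nil {
		return nil, nil, err
	}
	ro.SetMaxOpenConns(16)
	return rw, ro, nil
}
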
func copyFile(dst, src string) error {
	srcf, err := os.Open(src)
	if err != nil {
		return err
	}
	defer srcf.Close()

	// For now, we don't care about permissions
	dstf, err := os.Create(dst)
	if err != nil {
		return err
	}
	_, err = io.Copy(dstf, srcf)
	if err2 := dstf.Close(); err == nil {
		err = err2
	}
	return err
}

func (idx *Indexer) Close() error {
	if err := idx.readOnlyDB.Close(); err != nil {
		return err
	}
	if err := idx.readWriteDB.Close(); err != nil {
		return err
	}
	return nil
}

// RestoreBackup restores the database from a backup created via SaveBackup.
// Note that this must be called with ExpectBackupRestore set to true,
// and before any indexing or queries happen.
func (idx *Indexer) RestoreBackup(path string) error {
	if idx.readWriteDB != nil {
		panic("Indexer.RestoreBackup called after the database was initialized")
	}
	if err := copyFile(idx.dbPath, path); err != nil {
		return fmt.Errorf("could not restore indexer backup: %w", err)
	}
	if err := idx.startDB(); err != nil {
		return err
	}
	return nil
}

func gooseMigrationsPending(db *sql.DB, dir string) bool {
	// Get the latest applied migration version
	currentVersion, err := goose.GetDBVersion(db)
	if err != nil {
		log.Errorf("failed to get current database version: %v", err)
		return false
	}

	// Collect migrations after the current version
	migrations, err := goose.CollectMigrations(dir, currentVersion, goose.MaxVersion)
	if err != nil {
		if errors.Is(err, goose.ErrNoMigrationFiles) {
			return false
		}
		log.Errorf("failed to collect migrations: %v", err)
		return false
	}

	return len(migrations) > 0
}

// SaveBackup backs up the database to a file on disk.
// Note that writes to the database may be blocked until the backup finishes,
// and an error may occur if a file at path already exists.
//
// For sqlite, this is done via "VACUUM INTO", so the resulting file is also a database.
func (idx *Indexer) SaveBackup(ctx context.Context, path string) error {
	_, err := idx.readOnlyDB.ExecContext(ctx, `VACUUM INTO ?`, path)
	return err
}

// ExportBackupAsBytes backs up the database, and returns the contents as []byte.
//
// Note that writes to the database may be blocked until the backup finishes.
//
// For sqlite, this is done via "VACUUM INTO", so the resulting file is also a database.
func (idx *Indexer) ExportBackupAsBytes(ctx context.Context) ([]byte, error) {
	tmpDir, err := os.MkdirTemp("", "indexer")
	if err != nil {
		return nil, fmt.Errorf("error creating tmpDir: %w", err)
	}
	tmpFilePath := filepath.Join(tmpDir, "indexer.sqlite3")
	if err := idx.SaveBackup(ctx, tmpFilePath); err != nil {
		return nil, fmt.Errorf("error saving indexer backup: %w", err)
	}
	defer func() {
		if err := os.Remove(tmpFilePath); err != nil {
			log.Warnw("error removing indexer backup file", "path", tmpFilePath, "err", err)
		}
	}()
	return os.ReadFile(tmpFilePath)
}

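// Editorial example (not part of the original indexer.go): taking a backup with
// SaveBackup and restoring it into a fresh Indexer. The function name, paths and
// variable names are assumptions; note that RestoreBackup requires
// ExpectBackupRestore to have been set in New, as documented above.
func backupAndRestore(ctx context.Context, idx *Indexer, app *vochain.BaseApplication, backupPath, newDataDir string) (*Indexer, error) {
	// "VACUUM INTO" writes a consistent copy of the sqlite database to backupPath.
	if err := idx.SaveBackup(ctx, backupPath); err != nil {
		return nil, err
	}
	// A new Indexer that expects a restore: its DB is not opened yet.
	restored, err := New(app, Options{DataDir: newDataDir, ExpectBackupRestore: true})
	if err != nil {
		return nil, err
	}
	// RestoreBackup copies the file into place and then starts the DB.
	if err := restored.RestoreBackup(backupPath); err != nil {
		return nil, err
	}
	return restored, nil
}
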
// blockTxQueries assumes that blockMu is locked.
func (idx *Indexer) blockTxQueries() *indexerdb.Queries {
	if idx.blockMu.TryLock() {
		panic("Indexer.blockTxQueries was called without locking Indexer.blockMu")
	}
	if idx.blockTx == nil {
		tx, err := idx.readWriteDB.Begin()
		if err != nil {
			panic(err) // shouldn't happen, use an error return if it ever does
		}
		idx.blockTx = tx
		idx.blockQueries = idx.blockQueries.WithTx(tx)
	}
	return idx.blockQueries
}

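// Editorial example (not part of the original indexer.go): the calling contract
// that blockTxQueries expects, mirroring the OnVote/OnSetAccount handlers below.
// The method name onExampleEvent and its body are placeholders for illustration.
func (idx *Indexer) onExampleEvent(pid []byte) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	// blockTxQueries lazily begins blockTx; the transaction is committed in
	// Commit or discarded in Rollback together with the rest of the block.
	queries := idx.blockTxQueries()
	_ = queries // placeholder: run indexerdb queries against the block transaction here
	idx.blockUpdateProcs[string(pid)] = true
}
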
// AfterSyncBootstrap is a blocking function that waits until the Vochain is synchronized
// and then executes a set of recovery actions. It mainly checks for those processes which are
// still open (live) and updates all temporary data (current voting weight and live results
// if unencrypted). This method might be called on a goroutine after initializing the Indexer.
// TODO: refactor and use blockHeight for reusing existing live results
func (idx *Indexer) AfterSyncBootstrap(inTest bool) {
	// if no live results, we don't need the bootstrapping
	if idx.ignoreLiveResults {
		return
	}

	if !inTest {
		<-idx.App.WaitUntilSynced()
	}

	log.Infof("running indexer after-sync bootstrap")

	// Note that holding blockMu means new votes aren't added until the recovery finishes.
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()

	queries := idx.blockTxQueries()
	ctx := context.TODO()

	prcIDs, err := queries.GetProcessIDsByFinalResults(ctx, false)
	if err != nil {
		log.Error(err)
		return // no point in continuing further
	}

	log.Infof("recovered %d live results processes", len(prcIDs))
	log.Infof("starting live results recovery computation")
	startTime := time.Now()
	for _, p := range prcIDs {
		// In order to recover the full list of live results, we need
		// to reset the existing Results and count them again from scratch.
		// Since we cannot be sure if there are votes missing, we need to
		// perform the full computation.
		log.Debugf("recovering live process %x", p)
		process, err := idx.ProcessInfo(p)
		if err != nil {
			log.Errorf("cannot fetch process: %v", err)
			continue
		}
		options := process.VoteOpts

		indxR := &results.Results{
			ProcessID: p,
			// MaxValue requires +1 since 0 is also an option
			Votes:        results.NewEmptyVotes(options),
			Weight:       new(types.BigInt).SetUint64(0),
			VoteOpts:     options,
			EnvelopeType: process.Envelope,
		}

		if _, err := queries.UpdateProcessResultByID(ctx, indexerdb.UpdateProcessResultByIDParams{
			ID:       indxR.ProcessID,
			Votes:    indexertypes.EncodeJSON(indxR.Votes),
			Weight:   indexertypes.EncodeJSON(indxR.Weight),
			VoteOpts: indexertypes.EncodeProto(indxR.VoteOpts),
			Envelope: indexertypes.EncodeProto(indxR.EnvelopeType),
		}); err != nil {
			log.Errorw(err, "cannot UpdateProcessResultByID sql")
			continue
		}

		// Count the votes, add them to partialResults (in memory, without any db transaction)
		partialResults := &results.Results{
			Weight:       new(types.BigInt).SetUint64(0),
			VoteOpts:     options,
			EnvelopeType: process.Envelope,
		}
		// Get the votes from the state
		if err := idx.App.State.IterateVotes(p, true, func(vote *models.StateDBVote) bool {
			if err := idx.addLiveVote(process, vote.VotePackage, new(big.Int).SetBytes(vote.Weight), partialResults); err != nil {
				log.Errorw(err, "could not add live vote")
			}
			return false
		}); err != nil {
			if errors.Is(err, statedb.ErrEmptyTree) {
				log.Debugf("process %x doesn't have any votes yet, skipping", p)
				continue
			}
			log.Errorw(err, "unexpected error during iterate votes")
			continue
		}

		// Store the results on the persistent database
		if err := idx.commitVotesUnsafe(queries, p, indxR, partialResults, nil, idx.App.Height()); err != nil {
			log.Errorw(err, "could not commit live votes")
			continue
		}
		// Add process to live results so new votes will be added
		idx.addProcessToLiveResults(p)
	}

	// don't wait until the next Commit call to commit blockTx
	if err := idx.blockTx.Commit(); err != nil {
		log.Errorw(err, "could not commit tx")
	}
	idx.blockTx = nil

	log.Infof("live results recovery computation finished, took %s", time.Since(startTime))
}

// ReindexBlocks reindexes all blocks found in blockstore
func (idx *Indexer) ReindexBlocks(inTest bool) {
	if !inTest {
		<-idx.App.WaitUntilSynced()
	}

	// Note that holding blockMu means new votes aren't added until the reindex finishes.
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()

	if idx.App.Node == nil || idx.App.Node.BlockStore() == nil {
		return
	}

	idxBlockCount, err := idx.CountBlocks("", "", "")
	if err != nil {
		log.Warnf("indexer CountBlocks returned error: %s", err)
	}
	log.Infow("start reindexing",
		"blockStoreBase", idx.App.Node.BlockStore().Base(),
		"blockStoreHeight", idx.App.Node.BlockStore().Height(),
		"indexerBlockCount", idxBlockCount,
	)
	queries := idx.blockTxQueries()
	for height := idx.App.Node.BlockStore().Base(); height <= idx.App.Node.BlockStore().Height(); height++ {
		if b := idx.App.GetBlockByHeight(int64(height)); b != nil {
			// Blocks
			func() {
				idxBlock, err := idx.readOnlyQuery.GetBlockByHeight(context.TODO(), b.Height)
				if height%10000 == 1 {
					log.Infof("reindexing height %d, updating values (%s, %x, %x, %x) on current row %+v",
						height, b.ChainID, b.Hash(), b.ProposerAddress, b.LastBlockID.Hash, idxBlock)
					if err := idx.blockTx.Commit(); err != nil {
						log.Errorw(err, "could not commit tx")
					}
					idx.blockTx = nil
					queries = idx.blockTxQueries()
				}
				if err == nil && idxBlock.Time != b.Time {
					log.Errorf("while reindexing blocks, block %d timestamp in db (%s) differs from blockstore (%s), leaving untouched", height, idxBlock.Time, b.Time)
					return
				}
				if _, err := queries.CreateBlock(context.TODO(), indexerdb.CreateBlockParams{
					ChainID:         b.ChainID,
					Height:          b.Height,
					Time:            b.Time,
					Hash:            nonNullBytes(b.Hash()),
					ProposerAddress: nonNullBytes(b.ProposerAddress),
					LastBlockHash:   nonNullBytes(b.LastBlockID.Hash),
				}); err != nil {
					log.Errorw(err, "cannot index new block")
				}
			}()

			// Transactions
			func() {
				for index, tx := range b.Data.Txs {
					idxTx, err := idx.readOnlyQuery.GetTransactionByHeightAndIndex(context.TODO(), indexerdb.GetTransactionByHeightAndIndexParams{
						BlockHeight: b.Height,
						BlockIndex:  int64(index),
					})
					if err == nil && !bytes.Equal(idxTx.Hash, tx.Hash()) {
						log.Errorf("while reindexing txs, tx %d/%d hash in db (%x) differs from blockstore (%x), leaving untouched", b.Height, index, idxTx.Hash, tx.Hash())
						return
					}
					vtx := new(vochaintx.Tx)
					if err := vtx.Unmarshal(tx, b.ChainID); err != nil {
						log.Errorw(err, fmt.Sprintf("cannot unmarshal tx %d/%d", b.Height, index))
						continue
					}
					idx.indexTx(vtx, uint32(b.Height), int32(index))
				}
			}()
		}
	}

	if err := idx.blockTx.Commit(); err != nil {
		log.Errorw(err, "could not commit tx")
	}
	idx.blockTx = nil

	log.Infow("finished reindexing",
		"blockStoreBase", idx.App.Node.BlockStore().Base(),
		"blockStoreHeight", idx.App.Node.BlockStore().Height(),
		"indexerBlockCount", idxBlockCount,
	)
}

// Commit is called by the APP when a block is confirmed and included into the chain
func (idx *Indexer) Commit(height uint32) error {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()

	// Update existing processes
	updateProcs := slices.Sorted(maps.Keys(idx.blockUpdateProcs))

	queries := idx.blockTxQueries()
	ctx := context.TODO()

	// index the new block
	if b := idx.App.GetBlockByHeight(int64(height)); b != nil {
		if _, err := queries.CreateBlock(context.TODO(), indexerdb.CreateBlockParams{
			ChainID:         b.ChainID,
			Height:          b.Height,
			Time:            b.Time,
			Hash:            nonNullBytes(b.Hash()),
			ProposerAddress: nonNullBytes(b.ProposerAddress),
			LastBlockHash:   nonNullBytes(b.LastBlockID.Hash),
		}); err != nil {
			log.Errorw(err, "cannot index new block")
		}
	}

	for _, pidStr := range updateProcs {
		pid := types.ProcessID(pidStr)
		if err := idx.updateProcess(ctx, queries, pid); err != nil {
			log.Errorw(err, "commit: cannot update process")
			continue
		}
		log.Debugw("updated process", "processID", hex.EncodeToString(pid))
	}
	clear(idx.blockUpdateProcs)

	// Add votes collected by onVote (live results)
	newVotes := 0
	overwritedVotes := 0
	startTime := time.Now()

	for pidStr, votesByNullifier := range idx.votePool {
		pid := []byte(pidStr)
		// Get the process information while reusing blockTx
		procInner, err := queries.GetProcess(ctx, pid)
		if err != nil {
			log.Warnf("cannot get process %x", pid)
			continue
		}
		proc := indexertypes.ProcessFromDB(&procInner)

		// results is used to accumulate the new votes for a process
		addedResults := &results.Results{
			Weight:       new(types.BigInt).SetUint64(0),
			VoteOpts:     proc.VoteOpts,
			EnvelopeType: proc.Envelope,
		}
		// subtractedResults is used to subtract votes that are overwritten
		subtractedResults := &results.Results{
			Weight:       new(types.BigInt).SetUint64(0),
			VoteOpts:     proc.VoteOpts,
			EnvelopeType: proc.Envelope,
		}
		// The order here isn't deterministic, but we assume that to be OK.
		for _, v := range votesByNullifier {
			// If overwrite is 1 or more, we need to update the vote (remove the previous
			// one and add the new) to results.
			// We fetch the previous vote from the state by setting committed=true.
			// Note that if there wasn't a previous vote in the committed state,
			// then it wasn't counted in the results yet, so don't add it to subtractedResults.
			// TODO: can we get previousVote from sqlite via blockTx?
			var previousVote *models.StateDBVote
			if v.Overwrites > 0 {
				previousVote, err = idx.App.State.Vote(v.ProcessID, v.Nullifier, true)
				if err != nil {
					log.Warnw("cannot get previous vote",
						"nullifier", hex.EncodeToString(v.Nullifier),
						"processID", hex.EncodeToString(v.ProcessID),
						"error", err.Error())
				}
			}
			if previousVote != nil {
				log.Debugw("vote overwrite, previous vote",
					"overwrites", v.Overwrites,
					"package", string(previousVote.VotePackage))
				// ensure that overwriteCounter has increased
				if v.Overwrites <= previousVote.GetOverwriteCount() {
					log.Errorw(fmt.Errorf(
						"state stored overwrite count is equal or smaller than current vote overwrite count (%d <= %d)",
						v.Overwrites, previousVote.GetOverwriteCount()),
						"check vote overwrite failed")
					continue
				}
				// add the live vote to subtracted results
				if err := idx.addLiveVote(proc, previousVote.VotePackage,
					new(big.Int).SetBytes(previousVote.Weight), subtractedResults); err != nil {
					log.Errorw(err, "vote cannot be added to subtracted results")
					continue
				}
				overwritedVotes++
			} else {
				newVotes++
			}
			// add the new vote to results
			if err := idx.addLiveVote(proc, v.VotePackage, v.Weight, addedResults); err != nil {
				log.Errorw(err, "vote cannot be added to results")
				continue
			}
		}
		// Commit votes (store to disk)
		if err := idx.commitVotesUnsafe(queries, pid, proc.Results(), addedResults, subtractedResults, idx.App.Height()); err != nil {
			log.Errorf("cannot commit live votes from block %d: (%v)", height, err)
		}
	}
	clear(idx.votePool)

	// Note that we re-compute each process vote count from the votes table,
	// since simply incrementing the vote count would break with vote overwrites.
	for pidStr := range idx.blockUpdateProcVoteCounts {
		pid := []byte(pidStr)
		if _, err := queries.ComputeProcessVoteCount(ctx, pid); err != nil {
			log.Errorw(err, "could not compute process vote count")
		}
	}
	clear(idx.blockUpdateProcVoteCounts)

	if err := idx.blockTx.Commit(); err != nil {
		log.Errorw(err, "could not commit tx")
	}
	idx.blockTx = nil
	if height%1000 == 0 {
		// Regularly see if sqlite thinks another optimization analysis would be useful.
		// Block times tend to be in the order of seconds like 10s,
		// so a thousand blocks will tend to be in the order of hours.
		if _, err := idx.readWriteDB.Exec("PRAGMA optimize"); err != nil {
			return err
		}
	}

	if newVotes+overwritedVotes > 0 {
		log.Infow("add live votes to results",
			"block", height, "newVotes", newVotes, "overwritedVotes",
			overwritedVotes, "time", time.Since(startTime))
	}

	return nil
}

// Rollback removes the non-committed pending operations
func (idx *Indexer) Rollback() {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	clear(idx.votePool)
	clear(idx.blockUpdateProcs)
	clear(idx.blockUpdateProcVoteCounts)
	if idx.blockTx != nil {
		if err := idx.blockTx.Rollback(); err != nil {
			log.Errorw(err, "could not rollback tx")
		}
		idx.blockTx = nil
	}
}

// OnProcess indexer stores the processID
func (idx *Indexer) OnProcess(p *models.Process, _ int32) {
	pid := p.GetProcessId()
	if err := idx.newEmptyProcess(pid); err != nil {
		log.Errorw(err, "commit: cannot create new empty process")
	}
	if idx.App.IsSynced() {
		idx.addProcessToLiveResults(pid)
	}
	log.Debugw("new process", "processID", hex.EncodeToString(pid))
}

// OnVote indexer stores the votes if the processId is live results (ongoing)
// and the blockchain is not synchronizing.
// voterID is the identifier of the voter, the most common case is an ethereum address
// but can be any kind of id expressed as bytes.
func (idx *Indexer) OnVote(vote *state.Vote, txIndex int32) {
	pid := string(vote.ProcessID)
	if !idx.ignoreLiveResults && idx.isProcessLiveResults(vote.ProcessID) {
		// []byte in Go isn't comparable, so we convert the bytes to a string
		// to use them as a map key.
		nullifier := string(vote.Nullifier)
		if idx.votePool[pid] == nil {
			idx.votePool[pid] = make(map[string]*state.Vote)
		}
		prevVote := idx.votePool[pid][nullifier]
		if prevVote != nil && vote.Overwrites < prevVote.Overwrites {
			log.Warnw("OnVote called with a lower overwrite value than before",
				"previous", prevVote.Overwrites, "latest", vote.Overwrites)
		}
		idx.votePool[pid][nullifier] = vote
	}

	ctx := context.TODO()
	weightStr := `"1"`
	if vote.Weight != nil {
		weightStr = indexertypes.EncodeJSON((*types.BigInt)(vote.Weight))
	}
	keyIndexes := indexertypes.EncodeJSON(vote.EncryptionKeyIndexes)

	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	queries := idx.blockTxQueries()
	if _, err := queries.CreateVote(ctx, indexerdb.CreateVoteParams{
		Nullifier:            vote.Nullifier,
		ProcessID:            vote.ProcessID,
		BlockHeight:          int64(vote.Height),
		BlockIndex:           int64(txIndex),
		Weight:               weightStr,
		OverwriteCount:       int64(vote.Overwrites),
		VoterID:              nonNullBytes(vote.VoterID),
		EncryptionKeyIndexes: keyIndexes,
		Package:              string(vote.VotePackage),
	}); err != nil {
		log.Errorw(err, "could not index vote")
	}
	idx.blockUpdateProcVoteCounts[pid] = true
}

// OnCancel indexer stores the processID and entityID
func (idx *Indexer) OnCancel(pid []byte, _ int32) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	idx.blockUpdateProcs[string(pid)] = true
}

// OnProcessKeys adds the process to blockUpdateProcs
func (idx *Indexer) OnProcessKeys(pid []byte, _ string, _ int32) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	idx.blockUpdateProcs[string(pid)] = true
}

// OnProcessStatusChange adds the process to blockUpdateProcs and, if ended, the resultsPool
func (idx *Indexer) OnProcessStatusChange(pid []byte, _ models.ProcessStatus, _ int32) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	idx.blockUpdateProcs[string(pid)] = true
}

// OnProcessDurationChange adds the process to blockUpdateProcs and, if ended, the resultsPool
func (idx *Indexer) OnProcessDurationChange(pid []byte, _ uint32, _ int32) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	idx.blockUpdateProcs[string(pid)] = true
}

// OnRevealKeys checks if all keys have been revealed and in such case adds the
// process to the results queue
func (idx *Indexer) OnRevealKeys(pid []byte, _ string, _ int32) {
	// TODO: can we get KeyIndex from ProcessInfo? perhaps len(PublicKeys), or adding a new sqlite column?
	p, err := idx.App.State.Process(pid, false)
	if err != nil {
		log.Errorf("cannot fetch process %s from state: (%s)", pid, err)
		return
	}
	if p.KeyIndex == nil {
		log.Errorf("keyindex is nil")
		return
	}
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	idx.blockUpdateProcs[string(pid)] = true
}

// OnProcessResults verifies the results for a process and appends it to blockUpdateProcs
func (idx *Indexer) OnProcessResults(pid []byte, _ *models.ProcessResult, _ int32) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	idx.blockUpdateProcs[string(pid)] = true
}

// OnProcessesStart adds the processes to blockUpdateProcs.
// This is required to update potential changes when a process is started, such as the census root.
func (idx *Indexer) OnProcessesStart(pids [][]byte) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	for _, pid := range pids {
		idx.blockUpdateProcs[string(pid)] = true
	}
}

func (idx *Indexer) OnSetAccount(accountAddress []byte, account *state.Account) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	queries := idx.blockTxQueries()
	if _, err := queries.CreateAccount(context.TODO(), indexerdb.CreateAccountParams{
		Account: accountAddress,
		Balance: int64(account.Balance),
		Nonce:   int64(account.Nonce),
	}); err != nil {
		log.Errorw(err, "cannot index new account")
	}
}

func (idx *Indexer) OnTransferTokens(tx *vochaintx.TokenTransfer) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	queries := idx.blockTxQueries()
	if _, err := queries.CreateTokenTransfer(context.TODO(), indexerdb.CreateTokenTransferParams{
		TxHash:       tx.TxHash,
		BlockHeight:  int64(idx.App.Height()),
		FromAccount:  tx.FromAddress.Bytes(),
		ToAccount:    tx.ToAddress.Bytes(),
		Amount:       int64(tx.Amount),
		TransferTime: time.Unix(idx.App.Timestamp(), 0),
	}); err != nil {
		log.Errorw(err, "cannot index new transaction")
	}
}

// OnCensusUpdate adds the process to blockUpdateProcs in order to update the census.
// This function call is triggered by the SET_PROCESS_CENSUS tx.
func (idx *Indexer) OnCensusUpdate(pid, _ []byte, _ string, _ uint64) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	idx.blockUpdateProcs[string(pid)] = true
}

// OnSpendTokens indexes a token spending event.
func (idx *Indexer) OnSpendTokens(address []byte, txType models.TxType, cost uint64, reference string) {
	idx.blockMu.Lock()
	defer idx.blockMu.Unlock()
	queries := idx.blockTxQueries()
	if _, err := queries.CreateTokenFee(context.TODO(), indexerdb.CreateTokenFeeParams{
		FromAccount: address,
		TxType:      strings.ToLower(txType.String()),
		Cost:        int64(cost),
		Reference:   reference,
		SpendTime:   time.Unix(idx.App.Timestamp(), 0),
		BlockHeight: int64(idx.App.Height()),
	}); err != nil {
		log.Errorw(err, "cannot index new token spending")
	}
}

// TokenFeesList returns all the token fees associated with a given transaction type, reference and fromAccount
// (all optional filters), ordered by timestamp and paginated by limit and offset
func (idx *Indexer) TokenFeesList(limit, offset int, txType, reference, fromAccount string) (
	[]*indexertypes.TokenFeeMeta, uint64, error,
) {
	if offset < 0 {
		return nil, 0, fmt.Errorf("invalid value: offset cannot be %d", offset)
	}
	if limit <= 0 {
		return nil, 0, fmt.Errorf("invalid value: limit cannot be %d", limit)
	}
	results, err := idx.readOnlyQuery.SearchTokenFees(context.TODO(), indexerdb.SearchTokenFeesParams{
		Limit:       int64(limit),
		Offset:      int64(offset),
		TxType:      txType,
		Reference:   reference,
		FromAccount: fromAccount,
	})
	if err != nil {
		return nil, 0, err
	}
	list := []*indexertypes.TokenFeeMeta{}
	for _, row := range results {
		list = append(list, &indexertypes.TokenFeeMeta{
			Cost:      uint64(row.Cost),
			From:      row.FromAccount,
			TxType:    row.TxType,
			Height:    uint64(row.BlockHeight),
			Reference: row.Reference,
			Timestamp: row.SpendTime,
		})
	}
	if len(results) == 0 {
		return list, 0, nil
	}
	return list, uint64(results[0].TotalCount), nil
}

// TokenTransfersList returns all the token transfers, made to and/or from a given account
// (all optional filters), ordered by timestamp and paginated by limit and offset
func (idx *Indexer) TokenTransfersList(limit, offset int, fromOrToAccount, fromAccount, toAccount string) (
	[]*indexertypes.TokenTransferMeta, uint64, error,
) {
	if offset < 0 {
		return nil, 0, fmt.Errorf("invalid value: offset cannot be %d", offset)
	}
	if limit <= 0 {
		return nil, 0, fmt.Errorf("invalid value: limit cannot be %d", limit)
	}
	results, err := idx.readOnlyQuery.SearchTokenTransfers(context.TODO(), indexerdb.SearchTokenTransfersParams{
		Limit:           int64(limit),
		Offset:          int64(offset),
		FromOrToAccount: fromOrToAccount,
		FromAccount:     fromAccount,
		ToAccount:       toAccount,
	})
	if err != nil {
		return nil, 0, err
	}
	list := []*indexertypes.TokenTransferMeta{}
	for _, row := range results {
		list = append(list, &indexertypes.TokenTransferMeta{
			Amount:    uint64(row.Amount),
			From:      row.FromAccount,
			To:        row.ToAccount,
			Height:    uint64(row.BlockHeight),
			TxHash:    row.TxHash,
			Timestamp: row.TransferTime,
		})
	}
	if len(results) == 0 {
		return list, 0, nil
	}
	return list, uint64(results[0].TotalCount), nil
}

// CountTokenTransfersByAccount returns the count of all the token transfers made from a given account
func (idx *Indexer) CountTokenTransfersByAccount(acc []byte) (uint64, error) {
	count, err := idx.readOnlyQuery.CountTokenTransfersByAccount(context.TODO(), acc)
	return uint64(count), err
}

// CountTotalAccounts returns the total number of accounts indexed.
func (idx *Indexer) CountTotalAccounts() (uint64, error) {
	count, err := idx.readOnlyQuery.CountAccounts(context.TODO())
	return uint64(count), err
}

// AccountList returns a list of accounts, accountID is a partial or full hex string,
// and is optional (declared as zero-value will be ignored).
func (idx *Indexer) AccountList(limit, offset int, accountID string) ([]*indexertypes.Account, uint64, error) {
	if offset < 0 {
		return nil, 0, fmt.Errorf("invalid value: offset cannot be %d", offset)
	}
	if limit <= 0 {
		return nil, 0, fmt.Errorf("invalid value: limit cannot be %d", limit)
	}
	results, err := idx.readOnlyQuery.SearchAccounts(context.TODO(), indexerdb.SearchAccountsParams{
		Limit:           int64(limit),
		Offset:          int64(offset),
		AccountIDSubstr: accountID,
	})
	if err != nil {
		return nil, 0, err
	}
	list := []*indexertypes.Account{}
	for _, row := range results {
		list = append(list, &indexertypes.Account{
			Address: row.Account,
			Balance: uint64(row.Balance),
			Nonce:   uint32(row.Nonce),
		})
	}
	if len(results) == 0 {
		return list, 0, nil
	}
	return list, uint64(results[0].TotalCount), nil
}

// AccountExists returns whether the passed accountID exists in the db.
// If passed arg is not the full hex string, returns false (i.e. no substring matching)
func (idx *Indexer) AccountExists(accountID string) bool {
	if len(accountID) != 40 {
		return false
	}
	_, count, err := idx.AccountList(1, 0, accountID)
	if err != nil {
		log.Errorw(err, "indexer query failed")
	}
	return count > 0
}
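
// Editorial example (not part of the original indexer.go): paging through
// AccountList with the limit/offset convention used by the list APIs above.
// The function name dumpAccounts and the page size are assumptions; limit must
// be > 0 and offset >= 0, as validated by AccountList.
func dumpAccounts(idx *Indexer) error {
	const pageSize = 50
	for offset := 0; ; offset += pageSize {
		accounts, total, err := idx.AccountList(pageSize, offset, "")
		if err != nil {
			return err
		}
		for _, acc := range accounts {
			log.Infow("account",
				"address", fmt.Sprintf("%x", acc.Address),
				"balance", acc.Balance,
				"nonce", acc.Nonce)
		}
		// Stop once the reported total has been paged through, or the page is empty.
		if len(accounts) == 0 || uint64(offset+pageSize) >= total {
			return nil
		}
	}
}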