• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

codenotary / immudb / 24841644892

23 Apr 2026 02:44PM UTC coverage: 85.279% (-4.0%) from 89.306%
24841644892

push

gh-ci

web-flow
feat: v1.11.0 PostgreSQL compatibility and SQL feature expansion (#2090)

* Add structured audit logging with immutable audit trail

Introduces a new --audit-log flag that records all gRPC operations as
structured JSON events in immudb's tamper-proof KV store. Events are
stored under the audit: key prefix in systemdb, queryable via Scan and
verifiable via VerifiableGet. An async buffered writer ensures minimal
latency impact. Configurable event filtering (all/write/admin) via
--audit-log-events flag.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* Add PostgreSQL ORM compatibility layer and verification functions

Extend the pgsql wire protocol with immudb verification functions
(immudb_state, immudb_verify_row, immudb_verify_tx, immudb_history,
immudb_tx) accessible via standard SQL SELECT statements.

Add pg_catalog resolvers (pg_attribute, pg_index, pg_constraint,
pg_type, pg_settings, pg_description) and information_schema
resolvers (tables, columns, schemata, key_column_usage) to support
ORM introspection from Django, SQLAlchemy, GORM, and ActiveRecord.

Add PostgreSQL compatibility functions: current_database,
current_schema, current_user, format_type, pg_encoding_to_char,
pg_get_expr, pg_get_constraintdef, obj_description, col_description,
has_table_privilege, has_schema_privilege, and others.

Add SHOW statement emulation for common ORM config queries and
schema-qualified name stripping for information_schema and public
schema references.

* Implement EXISTS and IN subquery support in SQL engine

Replace the previously stubbed ExistsBoolExp and InSubQueryExp
implementations with working non-correlated subquery execution.

EXISTS subqueries resolve the inner SELECT and check if any rows
are returned. IN subqueries resolve the inner SELECT, iterate the
result set, and compare each value against the outer expression.
Both support NOT variants (NOT EXISTS, NOT IN).

Correlated subqueries (referencing outer query columns) ar... (continued)

7254 of 10471 new or added lines in 124 files covered. (69.28%)

115 existing lines in 18 files now uncovered.

44599 of 52298 relevant lines covered (85.28%)

127676.6 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.79
/pkg/database/sql.go
1
/*
2
Copyright 2026 Codenotary Inc. All rights reserved.
3

4
SPDX-License-Identifier: BUSL-1.1
5
you may not use this file except in compliance with the License.
6
You may obtain a copy of the License at
7

8
    https://mariadb.com/bsl11/
9

10
Unless required by applicable law or agreed to in writing, software
11
distributed under the License is distributed on an "AS IS" BASIS,
12
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
See the License for the specific language governing permissions and
14
limitations under the License.
15
*/
16

17
package database
18

19
import (
20
        "bytes"
21
        "context"
22
        "fmt"
23
        "strings"
24

25
        "github.com/codenotary/immudb/embedded/sql"
26
        "github.com/codenotary/immudb/embedded/store"
27
        "github.com/codenotary/immudb/pkg/api/schema"
28
)
29

30
// VerifiableSQLGet retrieves the SQL row identified by the primary-key values
// in req, together with cryptographic proofs: a key-value inclusion proof for
// the entry within its transaction and a dual (consistency) proof linking
// that transaction to the state the client last verified (req.ProveSinceTx).
// Returns ErrIllegalArguments on a nil/incomplete request or a wrong number
// of primary-key values, and ErrIllegalState when ProveSinceTx is newer than
// the last committed transaction.
func (d *db) VerifiableSQLGet(ctx context.Context, req *schema.VerifiableSQLGetRequest) (*schema.VerifiableSQLEntry, error) {
	if req == nil || req.SqlGetRequest == nil {
		return nil, ErrIllegalArguments
	}

	// the client cannot ask to prove consistency since a tx that does not exist yet
	lastTxID, _ := d.st.CommittedAlh()
	if lastTxID < req.ProveSinceTx {
		return nil, ErrIllegalState
	}

	d.mutex.Lock()
	defer d.mutex.Unlock()

	// read-only SQL tx used solely to resolve the table from the catalog
	sqlTx, err := d.sqlEngine.NewTx(ctx, sql.DefaultTxOptions().WithReadOnly(true))
	if err != nil {
		return nil, err
	}
	defer sqlTx.Cancel()

	table, err := sqlTx.Catalog().GetTableByName(req.SqlGetRequest.Table)
	if err != nil {
		return nil, err
	}

	valbuf := bytes.Buffer{}

	if len(req.SqlGetRequest.PkValues) != len(table.PrimaryIndex().Cols()) {
		return nil, fmt.Errorf(
			"%w: incorrect number of primary key values, expected %d, got %d",
			ErrIllegalArguments,
			len(table.PrimaryIndex().Cols()),
			len(req.SqlGetRequest.PkValues),
		)
	}

	// encode each pk value in key order and concatenate into valbuf
	for i, pkCol := range table.PrimaryIndex().Cols() {
		pkEncVal, _, err := sql.EncodeRawValueAsKey(schema.RawValue(req.SqlGetRequest.PkValues[i]), pkCol.Type(), pkCol.MaxLen())
		if err != nil {
			return nil, err
		}

		_, err = valbuf.Write(pkEncVal)
		if err != nil {
			return nil, err
		}
	}

	// build the encoded key for the pk
	// NOTE(review): valbuf.Bytes() is passed twice here while sourceKey below
	// passes it once — presumably required by the mapped-key layout; confirm
	// against sql.MapKey's expected segments.
	pkKey := sql.MapKey(
		[]byte{SQLPrefix},
		sql.MappedPrefix,
		sql.EncodeID(table.ID()),
		sql.EncodeID(sql.PKIndexID),
		valbuf.Bytes(),
		valbuf.Bytes())

	e, err := d.sqlGetAt(ctx, pkKey, req.SqlGetRequest.AtTx, d.st, true)
	if err != nil {
		return nil, err
	}

	tx, err := d.allocTx()
	if err != nil {
		return nil, err
	}
	defer d.releaseTx(tx)

	// key-value inclusion proof
	err = d.st.ReadTx(e.Tx, false, tx)
	if err != nil {
		return nil, err
	}

	sourceKey := sql.MapKey(
		[]byte{SQLPrefix},
		sql.RowPrefix,
		sql.EncodeID(1), // fixed database identifier
		sql.EncodeID(table.ID()),
		sql.EncodeID(sql.PKIndexID),
		valbuf.Bytes())

	inclusionProof, err := tx.Proof(sourceKey)
	if err != nil {
		return nil, err
	}

	var rootTxHdr *store.TxHeader

	// ProveSinceTx == 0 means the client has no prior state: anchor the dual
	// proof at the entry's own transaction header
	if req.ProveSinceTx == 0 {
		rootTxHdr = tx.Header()
	} else {
		rootTxHdr, err = d.st.ReadTxHeader(req.ProveSinceTx, false, false)
		if err != nil {
			return nil, err
		}
	}

	var sourceTxHdr, targetTxHdr *store.TxHeader

	// the dual proof always goes from the older tx to the newer one
	if req.ProveSinceTx <= e.Tx {
		sourceTxHdr = rootTxHdr
		targetTxHdr = tx.Header()
	} else {
		sourceTxHdr = tx.Header()
		targetTxHdr = rootTxHdr
	}

	dualProof, err := d.st.DualProof(sourceTxHdr, targetTxHdr)
	if err != nil {
		return nil, err
	}

	verifiableTx := &schema.VerifiableTx{
		Tx:        schema.TxToProto(tx),
		DualProof: schema.DualProofToProto(dualProof),
	}

	// column metadata maps let the client decode and verify the raw row value
	colNamesByID := make(map[uint32]string, len(table.Cols()))
	colIdsByName := make(map[string]uint32, len(table.ColsByName()))
	colTypesByID := make(map[uint32]string, len(table.Cols()))
	colLenByID := make(map[uint32]int32, len(table.Cols()))

	for _, col := range table.Cols() {
		colNamesByID[col.ID()] = col.Name()
		colIdsByName[sql.EncodeSelector("", table.Name(), col.Name())] = col.ID()
		colTypesByID[col.ID()] = col.Type()
		colLenByID[col.ID()] = int32(col.MaxLen())
	}

	pkIDs := make([]uint32, len(table.PrimaryIndex().Cols()))

	for i, col := range table.PrimaryIndex().Cols() {
		pkIDs[i] = col.ID()
	}

	return &schema.VerifiableSQLEntry{
		SqlEntry:       e,
		VerifiableTx:   verifiableTx,
		InclusionProof: schema.InclusionProofToProto(inclusionProof),
		DatabaseId:     1,
		TableId:        table.ID(),
		PKIDs:          pkIDs,
		ColNamesById:   colNamesByID,
		ColIdsByName:   colIdsByName,
		ColTypesById:   colTypesByID,
		ColLenById:     colLenByID,
		MaxColId:       table.GetMaxColID(),
	}, nil
}
179

180
func (d *db) sqlGetAt(ctx context.Context, key []byte, atTx uint64, index store.KeyIndex, skipIntegrityCheck bool) (entry *schema.SQLEntry, err error) {
52✔
181
        var valRef store.ValueRef
52✔
182

52✔
183
        if atTx == 0 {
103✔
184
                valRef, err = index.Get(ctx, key)
51✔
185
        } else {
52✔
186
                valRef, err = index.GetBetween(ctx, key, atTx, atTx)
1✔
187
        }
1✔
188
        if err != nil {
54✔
189
                return nil, err
2✔
190
        }
2✔
191

192
        val, err := valRef.Resolve()
50✔
193
        if err != nil {
50✔
194
                return nil, err
×
195
        }
×
196

197
        return &schema.SQLEntry{
50✔
198
                Tx:       valRef.Tx(),
50✔
199
                Key:      key,
50✔
200
                Metadata: schema.KVMetadataToProto(valRef.KVMetadata()),
50✔
201
                Value:    val,
50✔
202
        }, err
50✔
203
}
204

205
func (d *db) ListTables(ctx context.Context, tx *sql.SQLTx) (*schema.SQLQueryResult, error) {
5✔
206
        d.mutex.RLock()
5✔
207
        defer d.mutex.RUnlock()
5✔
208

5✔
209
        catalog, err := d.sqlEngine.Catalog(ctx, tx)
5✔
210
        if err != nil {
5✔
211
                return nil, err
×
212
        }
×
213

214
        res := &schema.SQLQueryResult{Columns: []*schema.Column{{Name: "TABLE", Type: sql.VarcharType}}}
5✔
215

5✔
216
        for _, t := range catalog.GetTables() {
8✔
217
                res.Rows = append(res.Rows, &schema.Row{Values: []*schema.SQLValue{{Value: &schema.SQLValue_S{S: t.Name()}}}})
3✔
218
        }
3✔
219

220
        return res, nil
5✔
221
}
222

223
func (d *db) DescribeTable(ctx context.Context, tx *sql.SQLTx, tableName string) (*schema.SQLQueryResult, error) {
6✔
224
        d.mutex.RLock()
6✔
225
        defer d.mutex.RUnlock()
6✔
226

6✔
227
        catalog, err := d.sqlEngine.Catalog(ctx, tx)
6✔
228
        if err != nil {
6✔
229
                return nil, err
×
230
        }
×
231

232
        table, err := catalog.GetTableByName(tableName)
6✔
233
        if err != nil {
8✔
234
                return nil, err
2✔
235
        }
2✔
236

237
        res := &schema.SQLQueryResult{Columns: []*schema.Column{
4✔
238
                {Name: "COLUMN", Type: sql.VarcharType},
4✔
239
                {Name: "TYPE", Type: sql.VarcharType},
4✔
240
                {Name: "NULLABLE", Type: sql.BooleanType},
4✔
241
                {Name: "INDEX", Type: sql.VarcharType},
4✔
242
                {Name: "AUTO_INCREMENT", Type: sql.BooleanType},
4✔
243
                {Name: "UNIQUE", Type: sql.BooleanType},
4✔
244
        }}
4✔
245

4✔
246
        for _, c := range table.Cols() {
15✔
247
                index := "NO"
11✔
248

11✔
249
                indexed, err := table.IsIndexed(c.Name())
11✔
250
                if err != nil {
11✔
251
                        return nil, err
×
252
                }
×
253
                if indexed {
15✔
254
                        index = "YES"
4✔
255
                }
4✔
256

257
                if table.PrimaryIndex().IncludesCol(c.ID()) {
15✔
258
                        index = "PRIMARY KEY"
4✔
259
                }
4✔
260

261
                var unique bool
11✔
262
                for _, index := range table.GetIndexesByColID(c.ID()) {
15✔
263
                        if index.IsUnique() && len(index.Cols()) == 1 {
8✔
264
                                unique = true
4✔
265
                                break
4✔
266
                        }
267
                }
268

269
                var maxLen string
11✔
270

11✔
271
                if c.MaxLen() > 0 && (c.Type() == sql.VarcharType || c.Type() == sql.BLOBType) {
11✔
272
                        maxLen = fmt.Sprintf("(%d)", c.MaxLen())
×
273
                }
×
274

275
                res.Rows = append(res.Rows, &schema.Row{
11✔
276
                        Values: []*schema.SQLValue{
11✔
277
                                {Value: &schema.SQLValue_S{S: c.Name()}},
11✔
278
                                {Value: &schema.SQLValue_S{S: c.Type() + maxLen}},
11✔
279
                                {Value: &schema.SQLValue_B{B: c.IsNullable()}},
11✔
280
                                {Value: &schema.SQLValue_S{S: index}},
11✔
281
                                {Value: &schema.SQLValue_B{B: c.IsAutoIncremental()}},
11✔
282
                                {Value: &schema.SQLValue_B{B: unique}},
11✔
283
                        },
11✔
284
                })
11✔
285
        }
286

287
        return res, nil
4✔
288
}
289

290
// NewSQLTx starts a new SQL transaction whose lifetime is decoupled from the
// caller's ctx (see the txCtx comment below): the returned tx must be ended
// explicitly via Commit or Cancel on a later RPC. The caller's ctx only
// bounds the creation itself — if it is cancelled before the engine returns,
// the in-progress NewTx is aborted and ctx.Err() is returned.
func (d *db) NewSQLTx(ctx context.Context, opts *sql.TxOptions) (tx *sql.SQLTx, err error) {
	// txCtx is intentionally derived from context.Background(), NOT from the
	// caller's ctx.  The OngoingTx returned by NewTx stores txCtx in its ctx
	// field and uses it for snapshot reads that may happen on *subsequent*
	// RPCs (TxSQLQuery, TxSQLExec, etc.) bound to the same session.  If we
	// derived txCtx from the caller's ctx, gRPC would cancel the caller's ctx
	// as soon as the NewTx RPC returned, which would poison the stored ctx
	// and make every follow-up read on the transaction fail with
	// "context canceled".  Transaction lifetime is explicit (Commit/Rollback)
	// and independent of any single RPC.
	//
	// txCancel is still useful for the caller's ctx.Done() branch below: it
	// aborts an in-progress NewTx call inside the goroutine so the goroutine
	// exits promptly instead of leaking.
	txCtx, txCancel := context.WithCancel(context.Background())

	// Buffered channels (capacity 1) ensure the goroutine can always send its
	// result and exit, even when the outer select has already returned on
	// ctx.Done() — preventing a goroutine leak.
	txChan := make(chan *sql.SQLTx, 1)
	errChan := make(chan error, 1)

	// On any error path, release txCtx and roll back a tx that may have been
	// created (the named results make both visible to this defer).
	defer func() {
		if err != nil {
			txCancel()

			if tx != nil {
				tx.Cancel()
			}
		}
	}()

	go func() {
		// Do NOT defer txCancel() here: the OngoingTx returned by NewTx
		// stores txCtx in its ctx field and uses it for all subsequent
		// snapshot reads.  Cancelling txCtx in this goroutine's defer
		// would fire immediately after txChan <- t, making every read on
		// the freshly-created transaction fail with "context canceled".
		// txCancel() is called by the outer defer (on error) or by the
		// ctx.Done() branch below (to interrupt an in-progress NewTx).
		md := schema.MetadataFromContext(ctx)
		if len(md) > 0 {
			data, e := md.Marshal()
			if e != nil {
				errChan <- e
				return
			}
			opts = opts.WithExtra(data)
		}

		t, e := d.sqlEngine.NewTx(txCtx, opts)
		if e != nil {
			errChan <- e
		} else {
			txChan <- t
		}
	}()

	select {
	case <-ctx.Done():
		txCancel() // interrupt the in-progress NewTx call inside the goroutine
		return nil, ctx.Err()
	case tx = <-txChan:
		return tx, nil
	case err = <-errChan:
		return nil, err
	}
}
358

359
func (d *db) SQLExec(ctx context.Context, tx *sql.SQLTx, req *schema.SQLExecRequest) (ntx *sql.SQLTx, ctxs []*sql.SQLTx, err error) {
278✔
360
        if req == nil {
279✔
361
                return nil, nil, ErrIllegalArguments
1✔
362
        }
1✔
363

364
        stmts, err := sql.ParseSQL(strings.NewReader(req.Sql))
277✔
365
        if err != nil {
279✔
366
                return nil, nil, err
2✔
367
        }
2✔
368

369
        params := make(map[string]interface{})
275✔
370

275✔
371
        for _, p := range req.Params {
380✔
372
                params[p.Name] = schema.RawValue(p.Value)
105✔
373
        }
105✔
374

375
        return d.SQLExecPrepared(ctx, tx, stmts, params)
275✔
376
}
377

378
func (d *db) SQLExecPrepared(ctx context.Context, tx *sql.SQLTx, stmts []sql.SQLStmt, params map[string]interface{}) (ntx *sql.SQLTx, ctxs []*sql.SQLTx, err error) {
548✔
379
        if len(stmts) == 0 {
549✔
380
                return nil, nil, ErrIllegalArguments
1✔
381
        }
1✔
382

383
        d.mutex.RLock()
547✔
384
        defer d.mutex.RUnlock()
547✔
385

547✔
386
        if d.isReplica() {
548✔
387
                return nil, nil, ErrIsReplica
1✔
388
        }
1✔
389

390
        return d.sqlEngine.ExecPreparedStmts(ctx, tx, stmts, params)
546✔
391
}
392

393
func (d *db) SQLQuery(ctx context.Context, tx *sql.SQLTx, req *schema.SQLQueryRequest) (sql.RowReader, error) {
75✔
394
        if req == nil {
77✔
395
                return nil, ErrIllegalArguments
2✔
396
        }
2✔
397

398
        stmts, err := sql.ParseSQL(strings.NewReader(req.Sql))
73✔
399
        if err != nil {
75✔
400
                return nil, err
2✔
401
        }
2✔
402

403
        stmt, ok := stmts[0].(sql.DataSource)
71✔
404
        if !ok {
72✔
405
                return nil, sql.ErrExpectingDQLStmt
1✔
406
        }
1✔
407
        reader, err := d.SQLQueryPrepared(ctx, tx, stmt, schema.NamedParamsFromProto(req.Params))
70✔
408
        if !req.AcceptStream {
124✔
409
                reader = &limitRowReader{RowReader: reader, maxRows: d.maxResultSize}
54✔
410
        }
54✔
411
        return reader, err
70✔
412
}
413

414
func (d *db) SQLQueryAll(ctx context.Context, tx *sql.SQLTx, req *schema.SQLQueryRequest) ([]*sql.Row, error) {
13✔
415
        reader, err := d.SQLQuery(ctx, tx, req)
13✔
416
        if err != nil {
18✔
417
                return nil, err
5✔
418
        }
5✔
419
        defer reader.Close()
8✔
420
        return sql.ReadAllRows(ctx, reader)
8✔
421
}
422

423
func (d *db) SQLQueryPrepared(ctx context.Context, tx *sql.SQLTx, stmt sql.DataSource, params map[string]interface{}) (sql.RowReader, error) {
397✔
424
        if stmt == nil {
397✔
425
                return nil, ErrIllegalArguments
×
426
        }
×
427

428
        d.mutex.RLock()
397✔
429
        defer d.mutex.RUnlock()
397✔
430

397✔
431
        return d.sqlEngine.QueryPreparedStmt(ctx, tx, stmt, params)
397✔
432
}
433

434
func (d *db) InferParameters(ctx context.Context, tx *sql.SQLTx, sql string) (map[string]sql.SQLValueType, error) {
2✔
435
        d.mutex.RLock()
2✔
436
        defer d.mutex.RUnlock()
2✔
437

2✔
438
        return d.sqlEngine.InferParameters(ctx, tx, sql)
2✔
439
}
2✔
440

441
func (d *db) InferParametersPrepared(ctx context.Context, tx *sql.SQLTx, stmt sql.SQLStmt) (map[string]sql.SQLValueType, error) {
147✔
442
        d.mutex.RLock()
147✔
443
        defer d.mutex.RUnlock()
147✔
444

147✔
445
        return d.sqlEngine.InferParametersPreparedStmts(ctx, tx, []sql.SQLStmt{stmt})
147✔
446
}
147✔
447

448
func (d *db) CopySQLCatalog(ctx context.Context, txID uint64) (uint64, error) {
14✔
449
        // copy sql catalogue
14✔
450
        tx, err := d.st.NewTx(ctx, store.DefaultTxOptions())
14✔
451
        if err != nil {
14✔
452
                return 0, err
×
453
        }
×
454

455
        err = d.CopyCatalogToTx(ctx, tx)
14✔
456
        if err != nil {
14✔
457
                d.Logger.Errorf("error during truncation for database '%s' {err = %v, id = %v, type=sql_catalogue_copy}", d.name, err, txID)
×
458
                return 0, err
×
459
        }
×
460
        defer tx.Cancel()
14✔
461

14✔
462
        // setting the metadata to record the transaction upto which the log was truncated
14✔
463
        tx.WithMetadata(store.NewTxMetadata().WithTruncatedTxID(txID))
14✔
464

14✔
465
        tx.RequireMVCCOnFollowingTxs(true)
14✔
466

14✔
467
        // commit catalogue as a new transaction
14✔
468
        hdr, err := tx.Commit(ctx)
14✔
469
        if err != nil {
14✔
470
                return 0, err
×
471
        }
×
472
        return hdr.ID, nil
14✔
473
}
474

475
// limitRowReader wraps a sql.RowReader and makes Read fail once more than
// maxRows rows have been delivered, protecting non-streaming clients from
// unbounded result sets.
type limitRowReader struct {
	sql.RowReader
	nRead   int // rows successfully returned so far
	maxRows int // maximum rows allowed before Read errors out
}
480

481
func (r *limitRowReader) Read(ctx context.Context) (*sql.Row, error) {
140✔
482
        row, err := r.RowReader.Read(ctx)
140✔
483
        if err != nil {
186✔
484
                return nil, err
46✔
485
        }
46✔
486

487
        if r.nRead == r.maxRows {
97✔
488
                return nil, fmt.Errorf("%w: found more than %d rows (the maximum limit). "+
3✔
489
                        "Query constraints can be applied using the LIMIT clause",
3✔
490
                        ErrResultSizeLimitReached, r.maxRows)
3✔
491
        }
3✔
492

493
        r.nRead++
91✔
494
        return row, nil
91✔
495
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc