• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / tarantool / 11477469556

23 Oct 2024 09:52AM CUT coverage: 87.279% (+0.02%) from 87.263%
11477469556

push

github

locker
box: build fix

I got a compile error for a release build with gcc version 14.2.1 20240910.

```
In function ‘char* mp_store_double(char*, double)’,
    inlined from ‘char* mp_encode_double(char*, double)’ at /home/shiny/dev/tarantool-ee/tarantool/src/lib/msgpuck/msgpuck.h:2409:24,
    inlined from ‘uint32_t tuple_hash_field(uint32_t*, uint32_t*, const char**, field_type, coll*)’ at /home/shiny/dev/tarantool-ee/tarantool/src/box/tuple_hash.cc:317:46:
/home/shiny/dev/tarantool-ee/tarantool/src/lib/msgpuck/msgpuck.h:340:16: error: ‘value’ may be used uninitialized [-Werror=maybe-uninitialized]
  340 |         cast.d = val;
      |         ~~~~~~~^~~~~
/home/shiny/dev/tarantool-ee/tarantool/src/box/tuple_hash.cc: In function ‘uint32_t tuple_hash_field(uint32_t*, uint32_t*, const char**, field_type, coll*)’:
/home/shiny/dev/tarantool-ee/tarantool/src/box/tuple_hash.cc:311:24: note: ‘value’ was declared here
  311 |                 double value;
      |
```

NO_TEST=build fix
NO_CHANGELOG=build fix
NO_DOC=build fix

(cherry picked from commit 1129c758d)

68506 of 121736 branches covered (56.27%)

2 of 2 new or added lines in 1 file covered. (100.0%)

43 existing lines in 16 files now uncovered.

101140 of 115881 relevant lines covered (87.28%)

2424775.69 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

94.55
/src/box/vy_read_iterator.c
1
/*
2
 * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file.
3
 *
4
 * Redistribution and use in source and binary forms, with or
5
 * without modification, are permitted provided that the following
6
 * conditions are met:
7
 *
8
 * 1. Redistributions of source code must retain the above
9
 *    copyright notice, this list of conditions and the
10
 *    following disclaimer.
11
 *
12
 * 2. Redistributions in binary form must reproduce the above
13
 *    copyright notice, this list of conditions and the following
14
 *    disclaimer in the documentation and/or other materials
15
 *    provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21
 * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
22
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
25
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
28
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
 * SUCH DAMAGE.
30
 */
31
#include "vy_read_iterator.h"
32
#include "vy_run.h"
33
#include "vy_mem.h"
34
#include "vy_cache.h"
35
#include "vy_tx.h"
36
#include "fiber.h"
37
#include "vy_history.h"
38
#include "vy_lsm.h"
39
#include "vy_stat.h"
40

41
/**
42
 * Merge source, support structure for vy_read_iterator.
43
 * Contains source iterator and merge state.
44
 */
45
struct vy_read_src {
46
        /** Source iterator. */
47
        union {
48
                struct vy_run_iterator run_iterator;
49
                struct vy_mem_iterator mem_iterator;
50
                struct vy_txw_iterator txw_iterator;
51
                struct vy_cache_iterator cache_iterator;
52
        };
53
        /** Set if the iterator was started. */
54
        bool is_started;
55
        /**
56
         * Set if this is the last (deepest) source that may store tuples
57
         * matching the search criteria.
58
         */
59
        bool is_last;
60
        /** See vy_read_iterator->front_id. */
61
        uint32_t front_id;
62
        /** History of the key the iterator is positioned at. */
63
        struct vy_history history;
64
};
65

66
/**
67
 * Extend internal source array capacity to fit capacity sources.
68
 * Not necessary to call is but calling it allows to optimize internal memory
69
 * allocation
70
 */
71
static NODISCARD int
72
vy_read_iterator_reserve(struct vy_read_iterator *itr, uint32_t capacity)
1,631,180✔
73
{
74
        if (itr->src_capacity >= capacity)
1,631,180!
75
                return 0;
×
76
        struct vy_read_src *new_src = calloc(capacity, sizeof(*new_src));
1,631,180✔
77
        if (new_src == NULL) {
1,631,180!
78
                diag_set(OutOfMemory, capacity * sizeof(*new_src),
×
79
                         "calloc", "new_src");
80
                return -1;
×
81
        }
82
        memcpy(new_src, itr->src, itr->src_count * sizeof(*new_src));
1,631,180✔
83
        for (uint32_t i = 0; i < itr->src_count; i++) {
4,233,340✔
84
                vy_history_create(&new_src[i].history,
2,602,170✔
85
                                  &itr->lsm->env->history_node_pool);
2,602,170✔
86
                vy_history_splice(&new_src[i].history, &itr->src[i].history);
2,602,170✔
87
        }
88
        free(itr->src);
1,631,180✔
89
        itr->src = new_src;
1,631,180✔
90
        itr->src_capacity = capacity;
1,631,180✔
91
        return 0;
1,631,180✔
92
}
93

94
/**
95
 * Add another source to read iterator. Must be called before actual
96
 * iteration start and must not be called after.
97
 */
98
static struct vy_read_src *
99
vy_read_iterator_add_src(struct vy_read_iterator *itr)
1,633,000✔
100
{
101
        if (itr->src_count == itr->src_capacity) {
1,633,000✔
102
                if (vy_read_iterator_reserve(itr, itr->src_count + 1) != 0)
1,631,180!
103
                        return NULL;
×
104
        }
105
        struct vy_read_src *src = &itr->src[itr->src_count++];
1,633,000✔
106
        memset(src, 0, sizeof(*src));
1,633,000✔
107
        vy_history_create(&src->history, &itr->lsm->env->history_node_pool);
1,633,000✔
108
        return src;
1,633,000✔
109
}
110

111
/**
112
 * Pin all slices open by the read iterator.
113
 * Used to make sure no run slice is invalidated by
114
 * compaction while we are fetching data from disk.
115
 */
116
static void
117
vy_read_iterator_pin_slices(struct vy_read_iterator *itr)
2,174,690✔
118
{
119
        for (uint32_t i = itr->disk_src; i < itr->src_count; i++) {
2,897,640✔
120
                struct vy_read_src *src = &itr->src[i];
722,952✔
121
                vy_slice_pin(src->run_iterator.slice);
722,952✔
122
        }
123
}
2,174,690✔
124

125
/**
126
 * Unpin all slices open by the read iterator.
127
 * See also: vy_read_iterator_pin_slices().
128
 */
129
static void
130
vy_read_iterator_unpin_slices(struct vy_read_iterator *itr)
2,174,690✔
131
{
132
        for (uint32_t i = itr->disk_src; i < itr->src_count; i++) {
2,897,640✔
133
                struct vy_read_src *src = &itr->src[i];
722,952✔
134
                vy_slice_unpin(src->run_iterator.slice);
722,952✔
135
        }
136
}
2,174,690✔
137

138
/**
139
 * Return true if the current candidate for the next key is outside
140
 * the current range and hence we should move to the next range.
141
 *
142
 * If we are looking for a match (EQ, REQ) and the search key
143
 * doesn't intersect with the current range's boundary, the next
144
 * range can't contain statements matching the search criteria
145
 * and hence there's no point in iterating to it.
146
 */
147
static bool
148
vy_read_iterator_range_is_done(struct vy_read_iterator *itr,
2,174,320✔
149
                               struct vy_entry next)
150
{
151
        struct vy_range *range = itr->curr_range;
2,174,320✔
152
        struct key_def *cmp_def = itr->lsm->cmp_def;
2,174,320✔
153
        int dir = iterator_direction(itr->iterator_type);
2,174,320✔
154

155
        if (dir > 0 && range->end.stmt != NULL &&
2,174,320✔
156
            (next.stmt == NULL || vy_entry_compare(next, range->end,
81,385✔
157
                                                   cmp_def) >= 0) &&
18,362✔
158
            (itr->iterator_type != ITER_EQ ||
18,362✔
159
             vy_entry_compare(itr->key, range->end, cmp_def) >= 0))
18,279✔
160
                return true;
6,770✔
161

162
        if (dir < 0 && range->begin.stmt != NULL &&
2,167,550✔
163
            (next.stmt == NULL || vy_entry_compare(next, range->begin,
9,564!
UNCOV
164
                                                   cmp_def) < 0) &&
×
UNCOV
165
            (itr->iterator_type != ITER_REQ ||
×
UNCOV
166
             vy_entry_compare(itr->key, range->begin, cmp_def) <= 0))
×
UNCOV
167
                return true;
×
168

169
        return false;
2,167,550✔
170
}
171

172
/**
173
 * Compare two tuples from the read iterator perspective.
174
 *
175
 * Returns:
176
 *  < 0 if statement @a precedes statement @b in the iterator output
177
 * == 0 if statements @a and @b are at the same position
178
 *  > 0 if statement @a supersedes statement @b
179
 *
180
 * NULL denotes the statement following the last one.
181
 */
182
static inline int
183
vy_read_iterator_cmp_stmt(struct vy_read_iterator *itr,
14,945,600✔
184
                          struct vy_entry a, struct vy_entry b)
185
{
186
        if (a.stmt == NULL && b.stmt != NULL)
14,945,600✔
187
                return 1;
2,167,000✔
188
        if (a.stmt != NULL && b.stmt == NULL)
12,778,600✔
189
                return -1;
3,337,960✔
190
        if (a.stmt == NULL && b.stmt == NULL)
9,440,650!
191
                return 0;
5,695,600✔
192
        return iterator_direction(itr->iterator_type) *
3,745,050✔
193
                vy_entry_compare(a, b, itr->lsm->cmp_def);
3,745,050✔
194
}
195

196
/**
197
 * Check if the statement at which the given read source
198
 * is positioned precedes the current candidate for the
199
 * next key ('next') and update the latter if so.
200
 * The 'stop' flag is set if the next key is found and
201
 * older sources don't need to be evaluated.
202
 */
203
static void
204
vy_read_iterator_evaluate_src(struct vy_read_iterator *itr,
11,841,500✔
205
                              struct vy_read_src *src,
206
                              struct vy_entry *next, bool *stop)
207
{
208
        uint32_t src_id = src - itr->src;
11,841,500✔
209
        struct vy_entry entry = vy_history_last_stmt(&src->history);
11,841,500!
210
        int cmp = vy_read_iterator_cmp_stmt(itr, entry, *next);
11,841,500!
211
        if (cmp < 0) {
11,841,500✔
212
                assert(entry.stmt != NULL);
3,721,340!
213
                *next = entry;
3,721,340✔
214
                itr->front_id++;
3,721,340✔
215
        }
216
        if (cmp <= 0)
11,841,500✔
217
                src->front_id = itr->front_id;
9,459,370✔
218

219
        if (src->is_last)
11,841,500✔
220
                goto stop;
17,652✔
221

222
        if (itr->check_exact_match &&
11,823,900✔
223
            cmp < 0 && vy_history_is_terminal(&src->history)) {
60,517!
224
                /*
225
                 * So this is a terminal statement that might be the first one
226
                 * in the output and the iterator may return at most one tuple
227
                 * equal to the search key. Let's check if this statement
228
                 * equals the search key. If it is, there cannot be a better
229
                 * candidate in deeper sources so we may skip them.
230
                 *
231
                 * No need to check for equality if it's EQ iterator because
232
                 * it must have been already checked by the source iterator.
233
                 * Sic: for REQ the check is still required (see need_check_eq).
234
                 */
235
                if (itr->iterator_type == ITER_EQ ||
60,029✔
236
                    vy_entry_compare(entry, itr->key, itr->lsm->cmp_def) == 0) {
16,655!
237
                        /*
238
                         * If we get an exact match for EQ/REQ search, we don't
239
                         * need to check deeper sources on next iterations so
240
                         * mark this source last. Note that we might still need
241
                         * to scan this source again though - if we encounter
242
                         * a DELETE statement - because in this case there may
243
                         * be a newer REPLACE statement for the same key in it.
244
                         */
245
                        if (itr->iterator_type == ITER_EQ ||
48,229✔
246
                            itr->iterator_type == ITER_REQ)
4,855✔
247
                                src->is_last = true;
43,418✔
248
                        goto stop;
48,229✔
249
                }
250
        }
251

252
        itr->skipped_src = MAX(itr->skipped_src, src_id + 1);
11,775,700✔
253
        return;
11,775,700✔
254
stop:
65,881✔
255
        itr->skipped_src = src_id + 1;
65,881✔
256
        *stop = true;
65,881✔
257
}
258

259
/**
260
 * Reevaluate scanned (not skipped) read sources and position 'next' to
261
 * the statement that is minimal from this read iterator's perspective.
262
 * This function assumes that all scanned read sources are up-to-date.
263
 * See also vy_read_iterator_evaluate_src().
264
 */
265
static void
266
vy_read_iterator_reevaluate_srcs(struct vy_read_iterator *itr,
1✔
267
                                 struct vy_entry *next)
268
{
269
        *next = vy_entry_none();
1✔
270
        for (uint32_t i = 0; i < itr->src_count; i++) {
5✔
271
                if (i >= itr->skipped_src)
4!
272
                        break;
×
273
                struct vy_read_src *src = &itr->src[i];
4✔
274
                struct vy_entry entry = vy_history_last_stmt(&src->history);
4!
275
                int cmp = vy_read_iterator_cmp_stmt(itr, entry, *next);
4!
276
                if (cmp < 0) {
4✔
277
                        *next = entry;
1✔
278
                        itr->front_id++;
1✔
279
                }
280
                if (cmp <= 0)
4!
281
                        src->front_id = itr->front_id;
4✔
282
        }
283
}
1✔
284

285
/*
286
 * Each of the functions from the vy_read_iterator_scan_* family
287
 * is used by vy_read_iterator_advance() to:
288
 *
289
 * 1. Update the position of a read source, which implies:
290
 *
291
 *    - Starting iteration over the source if it has not been done
292
 *      yet or restoring the iterator position in case the source
293
 *      has been modified since the last iteration.
294
 *
295
 *    - Advancing the iterator position to the first statement
296
 *      following the one returned on the previous iteration.
297
 *      To avoid an extra tuple comparison, we maintain front_id
298
 *      for each source: all sources with front_id equal to the
299
 *      front_id of the read iterator were used on the previous
300
 *      iteration and hence need to be advanced.
301
 *
302
 * 2. Update the candidate for the next key ('next') if the
303
 *    statement at which the source is positioned precedes it.
304
 *    The 'stop' flag is set if older sources do not need to be
305
 *    scanned (e.g. because a chain was found in the cache).
306
 *    See also vy_read_iterator_evaluate_src().
307
 */
308

309
static NODISCARD int
310
vy_read_iterator_scan_txw(struct vy_read_iterator *itr,
3,693,740✔
311
                          struct vy_entry *next, bool *stop)
312
{
313
        struct vy_read_src *src = &itr->src[itr->txw_src];
3,693,740✔
314
        struct vy_txw_iterator *src_itr = &src->txw_iterator;
3,693,740✔
315

316
        if (itr->tx == NULL)
3,693,740✔
317
                return 0;
250,600✔
318

319
        assert(itr->txw_src < itr->skipped_src);
3,443,140!
320

321
        int rc = vy_txw_iterator_restore(src_itr, itr->last, &src->history);
3,443,140✔
322
        if (rc == 0) {
3,443,140✔
323
                if (!src->is_started) {
3,442,970✔
324
                        rc = vy_txw_iterator_skip(src_itr, itr->last,
372,893✔
325
                                                  &src->history);
326
                } else if (src->front_id == itr->prev_front_id) {
3,070,080✔
327
                        rc = vy_txw_iterator_next(src_itr, &src->history);
174,807✔
328
                }
329
                src->is_started = true;
3,442,970✔
330
        }
331
        if (rc < 0)
3,443,140!
332
                return -1;
×
333

334
        vy_read_iterator_evaluate_src(itr, src, next, stop);
3,443,140✔
335
        return 0;
3,443,140✔
336
}
337

338
static NODISCARD int
339
vy_read_iterator_scan_cache(struct vy_read_iterator *itr,
3,682,360✔
340
                            struct vy_entry *next, bool *stop)
341
{
342
        bool is_interval = false;
3,682,360✔
343
        struct vy_read_src *src = &itr->src[itr->cache_src];
3,682,360✔
344
        struct vy_cache_iterator *src_itr = &src->cache_iterator;
3,682,360✔
345

346
        int rc = vy_cache_iterator_restore(src_itr, itr->last,
3,682,360!
347
                                           &src->history, &is_interval);
348
        if (rc == 0) {
3,682,360✔
349
                if (!src->is_started || itr->cache_src >= itr->skipped_src) {
3,634,640!
350
                        rc = vy_cache_iterator_skip(src_itr, itr->last,
399,175!
351
                                                &src->history, &is_interval);
352
                } else if (src->front_id == itr->prev_front_id) {
3,235,470✔
353
                        rc = vy_cache_iterator_next(src_itr, &src->history,
1,314,860!
354
                                                    &is_interval);
355
                }
356
                src->is_started = true;
3,634,640✔
357
        }
358
        if (rc < 0)
3,682,360!
359
                return -1;
3,682,360✔
360

361
        vy_read_iterator_evaluate_src(itr, src, next, stop);
3,682,360!
362
        if (is_interval) {
3,682,360✔
363
                itr->skipped_src = itr->cache_src + 1;
1,495,580✔
364
                *stop = true;
1,495,580✔
365
        }
366
        return 0;
3,682,360✔
367
}
368

369
static NODISCARD int
370
vy_read_iterator_scan_mem(struct vy_read_iterator *itr, uint32_t mem_src,
3,993,470✔
371
                          struct vy_entry *next, bool *stop)
372
{
373
        int rc;
374
        struct vy_read_src *src = &itr->src[mem_src];
3,993,470✔
375
        struct vy_mem_iterator *src_itr = &src->mem_iterator;
3,993,470✔
376

377
        assert(mem_src >= itr->mem_src && mem_src < itr->disk_src);
3,993,470!
378

379
        rc = vy_mem_iterator_restore(src_itr, itr->last, &src->history);
3,993,470✔
380
        if (rc == 0) {
3,993,470✔
381
                if (!src->is_started || mem_src >= itr->skipped_src) {
3,969,100✔
382
                        rc = vy_mem_iterator_skip(src_itr, itr->last,
333,474✔
383
                                                  &src->history);
384
                } else if (src->front_id == itr->prev_front_id) {
3,635,630✔
385
                        rc = vy_mem_iterator_next(src_itr, &src->history);
1,586,500✔
386
                }
387
                src->is_started = true;
3,969,100✔
388
        }
389
        if (rc < 0)
3,993,470!
390
                return -1;
×
391
        vy_read_iterator_evaluate_src(itr, src, next, stop);
3,993,470✔
392
        /*
393
         * Switch to read view if we skipped a prepared statement.
394
         */
395
        if (itr->tx != NULL && src_itr->min_skipped_plsn != INT64_MAX) {
3,993,470✔
396
                if (vy_tx_send_to_read_view(
495,375!
397
                                itr->tx, src_itr->min_skipped_plsn) != 0)
398
                        return -1;
×
399
                if (itr->tx->state == VINYL_TX_ABORT) {
495,375!
400
                        diag_set(ClientError, ER_TRANSACTION_CONFLICT);
×
401
                        return -1;
×
402
                }
403
        }
404
        return 0;
3,993,470✔
405
}
406

407
static NODISCARD int
408
vy_read_iterator_scan_disk(struct vy_read_iterator *itr, uint32_t disk_src,
722,866✔
409
                           struct vy_entry *next, bool *stop)
410
{
411
        int rc = 0;
722,866✔
412
        struct vy_read_src *src = &itr->src[disk_src];
722,866✔
413
        struct vy_run_iterator *src_itr = &src->run_iterator;
722,866✔
414

415
        assert(disk_src >= itr->disk_src && disk_src < itr->src_count);
722,866!
416

417
        if (!src->is_started || disk_src >= itr->skipped_src)
722,866✔
418
                rc = vy_run_iterator_skip(src_itr, itr->last,
35,010✔
419
                                          &src->history);
420
        else if (src->front_id == itr->prev_front_id)
687,856✔
421
                rc = vy_run_iterator_next(src_itr, &src->history);
235,660✔
422
        src->is_started = true;
722,866✔
423

424
        if (rc < 0)
722,866✔
425
                return -1;
306✔
426

427
        vy_read_iterator_evaluate_src(itr, src, next, stop);
722,560✔
428
        return 0;
722,560✔
429
}
430

431
/**
432
 * Restore the position of the active in-memory tree iterator
433
 * after a yield caused by a disk read and update 'next'
434
 * if necessary.
435
 */
436
static NODISCARD int
437
vy_read_iterator_restore_mem(struct vy_read_iterator *itr,
2,174,320✔
438
                             struct vy_entry *next)
439
{
440
        int rc;
441
        int cmp;
442
        struct vy_read_src *src = &itr->src[itr->mem_src];
2,174,320✔
443
        struct vy_mem_iterator *src_itr = &src->mem_iterator;
2,174,320✔
444

445
        /*
446
         * 'next' may refer to a statement in the memory source history,
447
         * which may be cleaned up by vy_mem_iterator_restore(), so we need
448
         * to take a reference to it.
449
         */
450
        struct tuple *next_stmt_ref = next->stmt;
2,174,320✔
451
        if (next_stmt_ref != NULL)
2,174,320✔
452
                tuple_ref(next_stmt_ref);
1,976,060!
453

454
        rc = vy_mem_iterator_restore(src_itr, itr->last, &src->history);
2,174,320!
455
        if (rc < 0)
2,174,320!
456
                goto out; /* memory allocation error */
×
457
        if (rc == 0)
2,174,320✔
458
                goto out; /* nothing changed */
2,173,000✔
459

460
        /* The memory source was updated. Reevaluate it for 'next'. */
461
        rc = 0;
1,318✔
462
        struct vy_entry entry = vy_history_last_stmt(&src->history);
1,318!
463
        cmp = vy_read_iterator_cmp_stmt(itr, entry, *next);
1,318!
464
        if (cmp > 0) {
1,318✔
465
                /*
466
                 * Normally, memory trees are append-only so if the source is
467
                 * not on top of the heap after restoration, it was not before.
468
                 * There's one exception to this rule though: a statement may
469
                 * be deleted from a memory tree on rollback after a WAL write
470
                 * failure. If the deleted statement was on top of the heap,
471
                 * we need to reevaluate all read sources to reposition the
472
                 * iterator to the minimal statement.
473
                 */
474
                if (src->front_id == itr->front_id)
1,132✔
475
                        vy_read_iterator_reevaluate_srcs(itr, next);
1!
476
                goto out;
1,132✔
477
        }
478
        /* The new statement is a better candidate for 'next'. */
479
        *next = entry;
186✔
480
        if (cmp < 0) {
186✔
481
                /*
482
                 * The new statement precedes the current
483
                 * candidate for the next key.
484
                 */
485
                itr->front_id++;
1✔
486
        } else {
487
                /*
488
                 * The new statement updates the next key.
489
                 * Make sure we don't read the old value
490
                 * from the cache while applying UPSERTs.
491
                 */
492
                struct vy_read_src *cache_src = &itr->src[itr->cache_src];
185✔
493
                if (cache_src->front_id == itr->front_id)
185✔
494
                        vy_history_cleanup(&cache_src->history);
18!
495
        }
496
        src->front_id = itr->front_id;
186✔
497
out:
2,174,320✔
498
        if (next_stmt_ref != NULL)
2,174,320✔
499
                tuple_unref(next_stmt_ref);
1,976,060!
500
        /*
501
         * Switch to read view if we skipped a prepared statement.
502
         */
503
        if (itr->tx != NULL && src_itr->min_skipped_plsn != INT64_MAX) {
2,174,320✔
504
                if (vy_tx_send_to_read_view(
493,900!
505
                                itr->tx, src_itr->min_skipped_plsn) != 0)
506
                        return -1;
2,174,320✔
507
                if (itr->tx->state == VINYL_TX_ABORT) {
493,900!
508
                        diag_set(ClientError, ER_TRANSACTION_CONFLICT);
×
509
                        return -1;
×
510
                }
511
        }
512
        return rc;
2,174,320✔
513
}
514

515
static void
516
vy_read_iterator_restore(struct vy_read_iterator *itr);
517

518
static void
519
vy_read_iterator_next_range(struct vy_read_iterator *itr);
520

521
/**
522
 * Advance the iterator to the next key.
523
 * Returns 0 on success, -1 on error.
524
 */
525
static NODISCARD int
526
vy_read_iterator_advance(struct vy_read_iterator *itr)
3,693,680✔
527
{
528
        /*
529
         * Restore the iterator position if the LSM tree has changed
530
         * since the last iteration or this is the first iteration.
531
         */
532
        if (!itr->is_started ||
3,693,680✔
533
            itr->mem_list_version != itr->lsm->mem_list_version ||
3,287,280✔
534
            itr->range_tree_version != itr->lsm->range_tree_version ||
3,287,170✔
535
            itr->range_version != itr->curr_range->version) {
3,287,090✔
536
                vy_read_iterator_restore(itr);
406,635!
537
        }
538
        itr->is_started = true;
3,693,680✔
539
restart:
3,693,740✔
540
        itr->prev_front_id = itr->front_id;
3,693,740✔
541
        itr->front_id++;
3,693,740✔
542

543
        /*
544
         * Look up the next key in read sources starting
545
         * from the one that stores newest data.
546
         */
547
        bool stop = false;
3,693,740✔
548
        struct vy_entry next = vy_entry_none();
3,693,740!
549
        if (vy_read_iterator_scan_txw(itr, &next, &stop) != 0)
3,693,740!
550
                return -1;
3,693,680✔
551
        if (stop)
3,693,740✔
552
                goto done;
11,376✔
553
        if (vy_read_iterator_scan_cache(itr, &next, &stop) != 0)
3,682,360!
554
                return -1;
×
555
        if (stop)
3,682,360✔
556
                goto done;
1,495,950✔
557

558
        for (uint32_t i = itr->mem_src; i < itr->disk_src && !stop; i++) {
6,179,890✔
559
                if (vy_read_iterator_scan_mem(itr, i, &next, &stop) != 0)
3,993,470!
560
                        return -1;
×
561
        }
562
        if (stop)
2,186,420✔
563
                goto done;
18,497✔
564
rescan_disk:
2,167,920✔
565
        /* The following code may yield as it needs to access disk. */
566
        vy_read_iterator_pin_slices(itr);
2,174,690!
567
        for (uint32_t i = itr->disk_src; i < itr->src_count; i++) {
2,892,880✔
568
                if (vy_read_iterator_scan_disk(itr, i, &next, &stop) != 0) {
722,866!
569
                        vy_read_iterator_unpin_slices(itr);
306!
570
                        return -1;
306✔
571
                }
572
                if (stop)
722,560✔
573
                        break;
4,365✔
574
        }
575
        vy_read_iterator_unpin_slices(itr);
2,174,380!
576
        /*
577
         * The transaction could have been aborted while we were
578
         * reading disk. We must stop now and return an error as
579
         * this function could be called by a DML request that
580
         * was aborted by a DDL operation: failing will prevent
581
         * it from dereferencing a destroyed space.
582
         */
583
        if (itr->tx != NULL && itr->tx->state == VINYL_TX_ABORT) {
2,174,380✔
584
                diag_set(ClientError, ER_TRANSACTION_CONFLICT);
2!
585
                return -1;
2✔
586
        }
587
        /*
588
         * The list of in-memory indexes and/or the range tree could
589
         * have been modified by dump/compaction while we were fetching
590
         * data from disk. Restart the iterator if this is the case.
591
         * Note, we don't need to check the current range's version,
592
         * because all slices were pinned and hence could not be
593
         * removed.
594
         */
595
        if (itr->mem_list_version != itr->lsm->mem_list_version ||
2,174,380✔
596
            itr->range_tree_version != itr->lsm->range_tree_version) {
2,174,350✔
597
                vy_read_iterator_restore(itr);
59!
598
                goto restart;
59✔
599
        }
600
        /*
601
         * The transaction write set couldn't change during the yield
602
         * as it is owned exclusively by the current fiber so the only
603
         * source to check is the active in-memory tree.
604
         */
605
        if (vy_read_iterator_restore_mem(itr, &next) != 0)
2,174,320!
606
                return -1;
×
607
        /*
608
         * Scan the next range in case we transgressed the current
609
         * range's boundaries.
610
         */
611
        if (vy_read_iterator_range_is_done(itr, next)) {
2,174,320!
612
                vy_read_iterator_next_range(itr);
6,770!
613
                goto rescan_disk;
6,770✔
614
        }
615
done:
2,167,550✔
616
#ifndef NDEBUG
617
        /* Check that the statement meets search criteria. */
618
        if (next.stmt != NULL) {
3,693,370✔
619
                int cmp = vy_entry_compare(next, itr->key, itr->lsm->cmp_def);
3,337,900!
620
                cmp *= iterator_direction(itr->iterator_type);
3,337,900!
621
                if (itr->iterator_type == ITER_GT ||
3,337,900✔
622
                    itr->iterator_type == ITER_LT)
3,319,690✔
623
                        assert(cmp > 0);
26,175!
624
                else
625
                        assert(cmp >= 0);
3,311,720!
626
        }
627
        /*
628
         * Ensure the read iterator does not return duplicates
629
         * and respects statement order.
630
         */
631
        if (itr->last.stmt != NULL && next.stmt != NULL) {
3,693,370✔
632
               assert(vy_read_iterator_cmp_stmt(itr, next, itr->last) > 0);
3,102,750!
633
        }
634
#endif
635
        if (itr->need_check_eq && next.stmt != NULL &&
3,693,370✔
636
            vy_entry_compare(next, itr->key, itr->lsm->cmp_def) != 0)
28,794!
637
                itr->front_id++;
2,384✔
638
        return 0;
3,693,370✔
639
}
640

641
/** Add the transaction source to the read iterator. */
642
static void
643
vy_read_iterator_add_tx(struct vy_read_iterator *itr)
372,893✔
644
{
645
        assert(itr->tx != NULL);
372,893!
646
        enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ?
745,786✔
647
                                            itr->iterator_type : ITER_LE);
372,893✔
648
        struct vy_txw_iterator_stat *stat = &itr->lsm->stat.txw.iterator;
372,893✔
649
        struct vy_read_src *sub_src = vy_read_iterator_add_src(itr);
372,893✔
650
        vy_txw_iterator_open(&sub_src->txw_iterator, stat, itr->tx, itr->lsm,
372,893✔
651
                             iterator_type, itr->key);
652
}
372,893✔
653

654
/** Add the cache source to the read iterator. */
655
static void
656
vy_read_iterator_add_cache(struct vy_read_iterator *itr, bool is_prepared_ok)
406,694✔
657
{
658
        enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ?
813,388✔
659
                                            itr->iterator_type : ITER_LE);
406,694✔
660
        struct vy_read_src *sub_src = vy_read_iterator_add_src(itr);
406,694✔
661
        vy_cache_iterator_open(&sub_src->cache_iterator, &itr->lsm->cache,
406,694✔
662
                               iterator_type, itr->key, itr->read_view,
663
                               is_prepared_ok);
664
}
406,694✔
665

666
/** Add the memory level source to the read iterator. */
667
static void
668
vy_read_iterator_add_mem(struct vy_read_iterator *itr, bool is_prepared_ok)
406,694✔
669
{
670
        enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ?
813,388✔
671
                                            itr->iterator_type : ITER_LE);
406,694✔
672
        struct vy_lsm *lsm = itr->lsm;
406,694✔
673
        struct vy_read_src *sub_src;
674

675
        /* Add the active in-memory index. */
676
        assert(lsm->mem != NULL);
406,694!
677
        sub_src = vy_read_iterator_add_src(itr);
406,694✔
678
        vy_mem_iterator_open(&sub_src->mem_iterator, &lsm->stat.memory.iterator,
406,694✔
679
                             lsm->mem, iterator_type, itr->key, itr->read_view,
680
                             is_prepared_ok);
681
        /* Add sealed in-memory indexes. */
682
        struct vy_mem *mem;
683
        rlist_foreach_entry(mem, &lsm->sealed, in_sealed) {
1,306,150✔
684
                sub_src = vy_read_iterator_add_src(itr);
246,383✔
685
                vy_mem_iterator_open(&sub_src->mem_iterator,
246,383✔
686
                                     &lsm->stat.memory.iterator,
687
                                     mem, iterator_type, itr->key,
688
                                     itr->read_view, is_prepared_ok);
689
        }
690
}
406,694✔
691

692
/** Add the disk level source to the read iterator. */
693
static void
694
vy_read_iterator_add_disk(struct vy_read_iterator *itr)
413,464✔
695
{
696
        assert(itr->curr_range != NULL);
413,464!
697
        enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ?
826,928✔
698
                                            itr->iterator_type : ITER_LE);
413,464✔
699
        struct vy_lsm *lsm = itr->lsm;
413,464✔
700
        struct vy_slice *slice;
701
        /*
702
         * The format of the statement must be exactly the space
703
         * format with the same identifier to fully match the
704
         * format in vy_mem.
705
         */
706
        rlist_foreach_entry(slice, &itr->curr_range->slices, in_range) {
1,227,590✔
707
                struct vy_read_src *sub_src = vy_read_iterator_add_src(itr);
200,333✔
708
                vy_run_iterator_open(&sub_src->run_iterator,
200,333✔
709
                                     &lsm->stat.disk.iterator, slice,
710
                                     iterator_type, itr->key,
711
                                     itr->read_view, lsm->cmp_def,
712
                                     lsm->key_def, lsm->disk_format);
713
        }
714
}
413,464✔
715

716
/**
717
 * Close all open sources and reset the merge state.
718
 */
719
static void
720
vy_read_iterator_cleanup(struct vy_read_iterator *itr)
813,098✔
721
{
722
        uint32_t i;
723
        struct vy_read_src *src;
724

725
        if (itr->txw_src < itr->src_count) {
813,098✔
726
                src = &itr->src[itr->txw_src];
372,891✔
727
                vy_history_cleanup(&src->history);
372,891✔
728
                vy_txw_iterator_close(&src->txw_iterator);
372,891✔
729
        }
730
        if (itr->cache_src < itr->src_count) {
813,098✔
731
                src = &itr->src[itr->cache_src];
406,692✔
732
                vy_history_cleanup(&src->history);
406,692✔
733
                vy_cache_iterator_close(&src->cache_iterator);
406,692✔
734
        }
735
        for (i = itr->mem_src; i < itr->disk_src; i++) {
1,466,170✔
736
                src = &itr->src[i];
653,075✔
737
                vy_history_cleanup(&src->history);
653,075✔
738
                vy_mem_iterator_close(&src->mem_iterator);
653,075✔
739
        }
740
        for (i = itr->disk_src; i < itr->src_count; i++) {
1,011,280✔
741
                src = &itr->src[i];
198,181✔
742
                vy_history_cleanup(&src->history);
198,181✔
743
                vy_run_iterator_close(&src->run_iterator);
198,181✔
744
        }
745

746
        itr->txw_src = UINT32_MAX;
813,098✔
747
        itr->cache_src = UINT32_MAX;
813,098✔
748
        itr->mem_src = UINT32_MAX;
813,098✔
749
        itr->disk_src = UINT32_MAX;
813,098✔
750
        itr->skipped_src = UINT32_MAX;
813,098✔
751
        itr->src_count = 0;
813,098✔
752
}
813,098✔
753

754
/**
 * Open the read iterator positioned after statement @a last (an entry
 * with a NULL stmt means start from the beginning). The iterator
 * merges statements from the transaction write set (@a tx, may be
 * NULL), the tuple cache, in-memory trees and disk runs of @a lsm,
 * as visible from the read view @a rv.
 *
 * NOTE(review): the normalization below deliberately tests the
 * *parameter* iterator_type, not itr->iterator_type, which may have
 * already been rewritten by the empty-key branch.
 */
void
vy_read_iterator_open_after(struct vy_read_iterator *itr, struct vy_lsm *lsm,
			    struct vy_tx *tx, enum iterator_type iterator_type,
			    struct vy_entry key, struct vy_entry last,
			    const struct vy_read_view **rv)
{
	memset(itr, 0, sizeof(*itr));

	itr->lsm = lsm;
	itr->tx = tx;
	itr->iterator_type = iterator_type;
	itr->key = key;
	itr->read_view = rv;
	itr->last = last;
	itr->last_cached = vy_entry_none();
	/* A cache chain may only be started from the first result. */
	itr->is_first_cached = (itr->last.stmt == NULL);

	if (vy_stmt_is_empty_key(key.stmt)) {
		/*
		 * Strictly speaking, a GT/LT iterator should return
		 * nothing if the key is empty, because every key is
		 * equal to the empty key, but historically we return
		 * all keys instead. So use GE/LE instead of GT/LT
		 * in this case.
		 */
		itr->iterator_type = iterator_direction(iterator_type) > 0 ?
				     ITER_GE : ITER_LE;
	}

	if (iterator_type == ITER_ALL)
		itr->iterator_type = ITER_GE;

	if (iterator_type == ITER_REQ) {
		/*
		 * Source iterators cannot handle ITER_REQ and
		 * use ITER_LE instead, so we need to enable EQ
		 * check in this case.
		 *
		 * See vy_read_iterator_add_{tx,cache,mem,run}.
		 */
		itr->need_check_eq = true;
	}

	/*
	 * An exact-match check is only meaningful for iterator types
	 * that can match the key exactly and if the key fully defines
	 * a statement.
	 */
	itr->check_exact_match =
		(iterator_type == ITER_EQ || iterator_type == ITER_REQ ||
		 iterator_type == ITER_GE || iterator_type == ITER_LE) &&
		vy_stmt_is_exact_key(key.stmt, lsm->cmp_def, lsm->key_def,
				     lsm->opts.is_unique);
}
803

804
/**
 * Restart the read iterator from the position following
 * the last statement returned to the user. Called when
 * the current range or the whole range tree is changed.
 * Also used for preparing the iterator for the first
 * iteration.
 */
static void
vy_read_iterator_restore(struct vy_read_iterator *itr)
{
	vy_read_iterator_cleanup(itr);

	/* Remember the versions we restore against for invalidation. */
	itr->mem_list_version = itr->lsm->mem_list_version;
	itr->range_tree_version = itr->lsm->range_tree_version;
	/*
	 * Look up the range by the last returned statement, if any,
	 * so that iteration resumes where it left off.
	 */
	itr->curr_range = vy_range_tree_find_by_key(&itr->lsm->range_tree,
						    itr->iterator_type,
						    itr->last.stmt != NULL ?
						    itr->last : itr->key);
	itr->range_version = itr->curr_range->version;

	/*
	 * Record each source's index before opening it - the order of
	 * the assignments and the add_* calls below matters.
	 */
	bool is_prepared_ok = true;
	if (itr->tx != NULL) {
		is_prepared_ok = vy_tx_is_prepared_ok(itr->tx);
		itr->txw_src = itr->src_count;
		vy_read_iterator_add_tx(itr);
	}

	itr->cache_src = itr->src_count;
	vy_read_iterator_add_cache(itr, is_prepared_ok);

	itr->mem_src = itr->src_count;
	vy_read_iterator_add_mem(itr, is_prepared_ok);

	itr->disk_src = itr->src_count;
	vy_read_iterator_add_disk(itr);
}
840

841
/**
 * Iterate to the next range.
 */
static void
vy_read_iterator_next_range(struct vy_read_iterator *itr)
{
	struct vy_range *range = itr->curr_range;
	struct key_def *cmp_def = itr->lsm->cmp_def;
	int dir = iterator_direction(itr->iterator_type);

	assert(range != NULL);
	while (true) {
		range = dir > 0 ?
			vy_range_tree_next(&itr->lsm->range_tree, range) :
			vy_range_tree_prev(&itr->lsm->range_tree, range);
		assert(range != NULL);

		/* No statement returned yet - any next range will do. */
		if (itr->last.stmt == NULL)
			break;
		/*
		 * We could skip an entire range due to the cache.
		 * Make sure the next statement falls in the range.
		 */
		if (dir > 0 && (range->end.stmt == NULL ||
				vy_entry_compare(itr->last, range->end,
						 cmp_def) < 0))
			break;
		if (dir < 0 && (range->begin.stmt == NULL ||
				vy_entry_compare(itr->last, range->begin,
						 cmp_def) > 0))
			break;
	}
	itr->curr_range = range;
	itr->range_version = range->version;

	/* Close the run iterators opened for the previous range. */
	for (uint32_t i = itr->disk_src; i < itr->src_count; i++) {
		struct vy_read_src *src = &itr->src[i];
		vy_run_iterator_close(&src->run_iterator);
	}
	itr->src_count = itr->disk_src;

	/* Open run iterators over the slices of the new range. */
	vy_read_iterator_add_disk(itr);
}
884

885
/**
886
 * Get a resultant statement for the current key.
887
 * Returns 0 on success, -1 on error.
888
 */
889
static NODISCARD int
890
vy_read_iterator_apply_history(struct vy_read_iterator *itr,
3,693,370✔
891
                               struct vy_entry *ret)
892
{
893
        struct vy_lsm *lsm = itr->lsm;
3,693,370✔
894
        struct vy_history history;
895
        vy_history_create(&history, &lsm->env->history_node_pool);
3,693,370!
896

897
        for (uint32_t i = 0; i < itr->src_count; i++) {
10,556,600✔
898
                struct vy_read_src *src = &itr->src[i];
10,157,900✔
899
                if (src->front_id == itr->front_id) {
10,157,900✔
900
                        vy_history_splice(&history, &src->history);
4,350,530!
901
                        if (vy_history_is_terminal(&history))
4,350,530!
902
                                break;
3,294,660✔
903
                }
904
        }
905

906
        int upserts_applied = 0;
3,693,370✔
907
        int rc = vy_history_apply(&history, lsm->cmp_def,
3,693,370!
908
                                  true, &upserts_applied, ret);
909

910
        lsm->stat.upsert.applied += upserts_applied;
3,693,370✔
911
        vy_history_cleanup(&history);
3,693,370!
912
        return rc;
3,693,370✔
913
}
914

915
/**
916
 * Track a read in the conflict manager.
917
 */
918
static int
919
vy_read_iterator_track_read(struct vy_read_iterator *itr, struct vy_entry entry)
3,693,370✔
920
{
921
        if (itr->tx == NULL)
3,693,370✔
922
                return 0;
250,589✔
923

924
        if (entry.stmt == NULL) {
3,442,780✔
925
                entry = (itr->iterator_type == ITER_EQ ||
268,765✔
926
                         itr->iterator_type == ITER_REQ ?
57,883✔
927
                         itr->key : itr->lsm->env->empty_key);
379,667✔
928
        }
929

930
        int rc;
931
        if (iterator_direction(itr->iterator_type) >= 0) {
3,442,780✔
932
                rc = vy_tx_track(itr->tx, itr->lsm, itr->key,
2,779,000✔
933
                                 itr->iterator_type != ITER_GT,
2,779,000✔
934
                                 entry, true);
935
        } else {
936
                rc = vy_tx_track(itr->tx, itr->lsm, entry, true,
663,782✔
937
                                 itr->key, itr->iterator_type != ITER_LT);
663,782✔
938
        }
939
        return rc;
3,442,780✔
940
}
941

942
/**
 * Advance the read iterator to the next key and store the resultant
 * statement in @a result (result->stmt is NULL when the iterator is
 * exhausted). DELETE statements are never returned to the user. The
 * returned statement stays referenced until the next call.
 * Returns 0 on success, -1 on error.
 */
NODISCARD int
vy_read_iterator_next(struct vy_read_iterator *itr, struct vy_entry *result)
{
	assert(itr->tx == NULL || itr->tx->state == VINYL_TX_READY);

	struct vy_entry entry;
next_key:
	if (vy_read_iterator_advance(itr) != 0)
		return -1;
	if (vy_read_iterator_apply_history(itr, &entry) != 0)
		return -1;
	if (vy_read_iterator_track_read(itr, entry) != 0)
		return -1;

	/* Replace the reference to the previously returned statement. */
	if (itr->last.stmt != NULL)
		tuple_unref(itr->last.stmt);
	itr->last = entry;

	if (entry.stmt != NULL && vy_stmt_type(entry.stmt) == IPROTO_DELETE) {
		/*
		 * We don't return DELETEs so skip to the next key.
		 * If the DELETE was read from TX write set, there
		 * is a good chance that the space actually has
		 * the deleted key and hence we must not consider
		 * previous + current tuple as an unbroken chain.
		 */
		if (vy_stmt_lsn(entry.stmt) == INT64_MAX) {
			if (itr->last_cached.stmt != NULL)
				tuple_unref(itr->last_cached.stmt);
			itr->last_cached = vy_entry_none();
		}
		goto next_key;
	}
	assert(entry.stmt == NULL ||
	       vy_stmt_type(entry.stmt) == IPROTO_INSERT ||
	       vy_stmt_type(entry.stmt) == IPROTO_REPLACE);

	itr->check_exact_match = false;
	*result = entry;
	return 0;
}
983

984
/**
 * Add @a entry, the statement just returned by the iterator, to the
 * tuple cache, chaining it to the previously cached statement.
 */
void
vy_read_iterator_cache_add(struct vy_read_iterator *itr, struct vy_entry entry)
{
	if ((**itr->read_view).vlsn != INT64_MAX) {
		/*
		 * Reading from a fixed (non-global) read view - do not
		 * populate the cache and break the current chain.
		 */
		if (itr->last_cached.stmt != NULL)
			tuple_unref(itr->last_cached.stmt);
		itr->last_cached = vy_entry_none();
		return;
	}
	vy_cache_add(&itr->lsm->cache, entry, itr->last_cached,
		     itr->is_first_cached, itr->key, itr->iterator_type);
	/* Ref the new entry before unreffing the old one. */
	if (entry.stmt != NULL)
		tuple_ref(entry.stmt);
	if (itr->last_cached.stmt != NULL)
		tuple_unref(itr->last_cached.stmt);
	itr->last_cached = entry;
	itr->is_first_cached = false;
}
1002

1003
/**
1004
 * Close the iterator and free resources
1005
 */
1006
void
1007
vy_read_iterator_close(struct vy_read_iterator *itr)
406,404✔
1008
{
1009
        if (itr->last.stmt != NULL)
406,404✔
1010
                tuple_unref(itr->last.stmt);
48,232✔
1011
        if (itr->last_cached.stmt != NULL)
406,404✔
1012
                tuple_unref(itr->last_cached.stmt);
47,995✔
1013
        vy_read_iterator_cleanup(itr);
406,404✔
1014
        free(itr->src);
406,404✔
1015
        TRASH(itr);
406,404✔
1016
}
406,404✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc