• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / jorgen.edelbo_385

12 Aug 2024 01:14PM UTC coverage: 91.1% (-0.007%) from 91.107%
jorgen.edelbo_385

Pull #7826

Evergreen

jedelbo
Merge tag 'v14.12.0' into next-major
Pull Request #7826: Merge Next major

103514 of 182200 branches covered (56.81%)

3132 of 3493 new or added lines in 52 files covered. (89.67%)

154 existing lines in 17 files now uncovered.

219973 of 241462 relevant lines covered (91.1%)

6545726.52 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

83.77
/src/realm/array.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <realm/array_with_find.hpp>
20
#include <realm/utilities.hpp>
21
#include <realm/impl/destroy_guard.hpp>
22
#include <realm/column_integer.hpp>
23
#include <realm/bplustree.hpp>
24
#include <realm/query_conditions.hpp>
25
#include <realm/array_integer.hpp>
26
#include <realm/array_key.hpp>
27
#include <realm/impl/array_writer.hpp>
28

29
#include <array>
30
#include <cstring> // std::memcpy
31
#include <iomanip>
32
#include <limits>
33
#include <tuple>
34

35
#ifdef REALM_DEBUG
36
#include <iostream>
37
#include <sstream>
38
#endif
39

40
#ifdef _MSC_VER
41
#include <intrin.h>
42
#pragma warning(disable : 4127) // Condition is constant warning
43
#endif
44

45
// Header format (8 bytes):
46
// ------------------------
47
//
48
// In mutable part / outside file:
49
//
50
// |--------|--------|--------|--------|--------|--------|--------|--------|
51
// |         capacity         |reserved|12344555|           size           |
52
//
53
//
54
// In immutable part / in file:
55
//
56
// |--------|--------|--------|--------|--------|--------|--------|--------|
57
// |             checksum              |12344555|           size           |
58
//
59
//
60
//  1: 'is_inner_bptree_node' (inner node of B+-tree).
61
//
62
//  2: 'has_refs' (elements whose first bit is zero are refs to subarrays).
63
//
64
//  3: 'context_flag' (meaning depends on context)
65
//
66
//  4: 'width_scheme' (2 bits)
67
//
68
//      value  |  meaning of 'width'  |  number of bytes used after header
69
//      -------|----------------------|------------------------------------
70
//        0    |  number of bits      |  ceil(width * size / 8)
71
//        1    |  number of bytes     |  width * size
72
//        2    |  ignored             |  size
73
//
74
//  5: 'width_ndx' (3 bits)
75
//
76
//      'width_ndx'       |  0 |  1 |  2 |  3 |  4 |  5 |  6 |  7 |
77
//      ------------------|----|----|----|----|----|----|----|----|
78
//      value of 'width'  |  0 |  1 |  2 |  4 |  8 | 16 | 32 | 64 |
79
//
80
//
81
// 'capacity' is the total number of bytes allocated for this array
82
// including the header.
83
//
84
// 'size' (aka length) is the number of elements in the array.
85
//
86
// 'checksum' (not yet implemented) is the checksum of the array
87
// including the header.
88
//
89
//
90
// Inner node of B+-tree:
91
// ----------------------
92
//
93
// An inner node of a B+-tree is has one of two forms: The 'compact'
94
// form which uses a single array node, or the 'general' form which
95
// uses two. The compact form is used by default but is converted to
96
// the general form when the corresponding subtree is modified in
97
// certain ways. There are two kinds of modification that require
98
// conversion to the general form:
99
//
100
//  - Insertion of an element into the corresponding subtree, except
101
//    when insertion occurs after the last element in the subtree
102
//    (append).
103
//
104
//  - Removal of an element from the corresponding subtree, except
105
//    when the removed element is the last element in the subtree.
106
//
107
// Compact form:
108
//
109
//   --> | N_c | r_1 | r_2 | ... | r_N | N_t |
110
//
111
// General form:
112
//
113
//   --> |  .  | r_1 | r_2 | ... | r_N | N_t |  (main array node)
114
//          |
115
//           ------> | o_2 | ... | o_N |  (offsets array node)
116
//
117
// Here,
118
//   `r_i` is the i'th child ref,
119
//   `o_i` is the total number of elements preceeding the i'th child,
120
//   `N`   is the number of children,
121
//   'M'   is one less than the number of children,
122
//   `N_c` is the fixed number of elements per child
123
//         (`elems_per_child`), and
124
//   `N_t` is the total number of elements in the subtree
125
//         (`total_elems_in_subtree`).
126
//
127
// `N_c` must always be a power of `REALM_MAX_BPNODE_SIZE`.
128
//
129
// It is expected that `N_t` will be removed in a future version of
130
// the file format. This will make it much more efficient to append
131
// elements to the B+-tree (or remove elements from the end).
132
//
133
// The last child of an inner node on the compact form, may have fewer
134
// elements than `N_c`. All other children must have exactly `N_c`
135
// elements in them.
136
//
137
// When an inner node is on the general form, and has only one child,
138
// it has an empty `offsets` array.
139
//
140
//
141
// B+-tree invariants:
142
//
143
//  - Every inner node must have at least one child
144
//    (invar:bptree-nonempty-inner).
145
//
146
//  - A leaf node, that is not also a root node, must contain at least
147
//    one element (invar:bptree-nonempty-leaf).
148
//
149
//  - All leaf nodes must reside at the same depth in the tree
150
//    (invar:bptree-leaf-depth).
151
//
152
//  - If an inner node is on the general form, and has a parent, the
153
//    parent must also be on the general form
154
//    (invar:bptree-node-form).
155
//
156
// It follows from invar:bptree-nonempty-leaf that the root of an
157
// empty tree (zero elements) is a leaf.
158
//
159
// It follows from invar:bptree-nonempty-inner and
160
// invar:bptree-nonempty-leaf that in a tree with precisely one
161
// element, every inner node has precisely one child, there is
162
// precisely one leaf node, and that leaf node has precisely one
163
// element.
164
//
165
// It follows from invar:bptree-node-form that if the root is on the
166
// compact form, then so is every other inner node in the tree.
167
//
168
// In general, when the root node is an inner node, it will have at
169
// least two children, because otherwise it would be
170
// superflous. However, to allow for exception safety during element
171
// insertion and removal, this shall not be guaranteed.
172

173
// LIMITATION: The code below makes the non-portable assumption that
174
// negative number are represented using two's complement. This is not
175
// guaranteed by C++03, but holds for all known target platforms.
176
//
177
// LIMITATION: The code below makes the non-portable assumption that
178
// the types `int8_t`, `int16_t`, `int32_t`, and `int64_t`
179
// exist. This is not guaranteed by C++03, but holds for all
180
// known target platforms.
181
//
182
// LIMITATION: The code below makes the assumption that a reference into
183
// a realm file will never grow in size above what can be represented in
184
// a size_t, which is 2^31-1 on a 32-bit platform, and 2^63-1 on a 64 bit
185
// platform.
186

187
using namespace realm;
188
using namespace realm::util;
189

190
// Intentionally empty out-of-line virtual; presumably serves as the class's
// "key function" so the vtable/RTTI data is emitted in this TU — confirm.
void QueryStateBase::dyncast() {}
191

192
// Return the smallest realm storage width (0, 1, 2, 4, 8, 16, 32 or 64
// bits) capable of holding the value `v`.
uint8_t Array::bit_width(int64_t v)
{
    // FIXME: Assuming there is a 64-bit CPU reverse bitscan
    // instruction and it is fast, then this function could be
    // implemented as a table lookup on the result of the scan
    if ((uint64_t(v) >> 4) == 0) {
        // Small non-negative values (0..15) are resolved via a lookup table.
        static const int8_t bits[] = {0, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
        return bits[int8_t(v)];
    }
    if (v < 0)
        v = ~v; // Two's complement: ~v has the same significant-bit count as v
    // Then check if bits 15-31 used (32b), 7-31 used (16b), else (8b)
    return uint64_t(v) >> 31 ? 64 : uint64_t(v) >> 15 ? 32 : uint64_t(v) >> 7 ? 16 : 8;
}
206

207
// Per-width vtable: one statically-initialized instance per element width,
// wiring the width-specialized getter/setter/chunk-getter and the four
// condition-specific finder entry points.
template <size_t width>
struct Array::VTableForWidth {
    struct PopulatedVTable : VTable {
        PopulatedVTable()
        {
            getter = &Array::get<width>;
            setter = &Array::set<width>;
            chunk_getter = &Array::get_chunk<width>;
            finder[cond_Equal] = &Array::find_vtable<Equal>;
            finder[cond_NotEqual] = &Array::find_vtable<NotEqual>;
            finder[cond_Greater] = &Array::find_vtable<Greater>;
            finder[cond_Less] = &Array::find_vtable<Less>;
        }
    };
    static const PopulatedVTable vtable;
};

// Definition of the static per-width instance (constructed once per width).
template <size_t width>
const typename Array::VTableForWidth<width>::PopulatedVTable Array::VTableForWidth<width>::vtable;
226

227
// Attach this accessor to the array node stored at `mem`, caching
// header-derived state (flags, size, width, value bounds, vtable).
void Array::init_from_mem(MemRef mem) noexcept
{
    // Header is the type of header that has been allocated, in case we are decompressing,
    // the header is of kind A, which is kind of deceiving the purpose of these checks.
    // Since we will try to fetch some data from the just initialised header, and never reset
    // important fields used for type A arrays, like width, lower, upper_bound which are used
    // for expanding the array, but also query the data.
    const auto header = mem.get_addr();
    // init() reports whether the header uses the extended (compressed) format.
    const auto is_extended = m_integer_compressor.init(header);

    m_is_inner_bptree_node = get_is_inner_bptree_node_from_header(header);
    m_has_refs = get_hasrefs_from_header(header);
    m_context_flag = get_context_flag_from_header(header);

    if (is_extended) {
        // Compressed array: size/width/bounds come from the compressor, and
        // the vtable is swapped for compression-aware accessors.
        m_ref = mem.get_ref();
        m_data = get_data_from_header(header);
        m_size = m_integer_compressor.size();
        m_width = m_integer_compressor.v_width();
        m_lbound = -m_integer_compressor.v_mask();
        m_ubound = m_integer_compressor.v_mask() - 1;
        m_integer_compressor.set_vtable(*this);
        m_getter = m_vtable->getter;
    }
    else {
        // Old init phase.
        Node::init_from_mem(mem);
        update_width_cache_from_header();
    }
}
257

258
// Reconstruct this node's MemRef from the cached data pointer and ref.
MemRef Array::get_mem() const noexcept
{
    auto header = get_header_from_data(m_data);
    return MemRef(header, m_ref, m_alloc);
}
262

263
void Array::update_from_parent() noexcept
264
{
11,428,704✔
265
    ArrayParent* parent = get_parent();
11,428,704✔
266
    REALM_ASSERT_DEBUG(parent);
11,428,704✔
267
    ref_type new_ref = get_ref_from_parent();
11,428,704✔
268
    init_from_ref(new_ref);
11,428,704✔
269
}
11,428,704✔
270

271
// Change the node's type flags ('inner B+-tree node' / 'has refs'), updating
// both the cached accessor state and the header.
void Array::set_type(Type type)
{
    REALM_ASSERT_DEBUG(is_attached());

    // Changing the header is a modification; the node must be writable.
    copy_on_write(); // Throws

    bool new_is_inner = false;
    bool new_has_refs = false;
    switch (type) {
        case type_Normal:
            break;
        case type_InnerBptreeNode:
            new_is_inner = true;
            new_has_refs = true;
            break;
        case type_HasRefs:
            new_has_refs = true;
            break;
    }

    // Keep the accessor cache and the on-disk header in sync.
    m_is_inner_bptree_node = new_is_inner;
    m_has_refs = new_has_refs;

    char* header = get_header();
    set_is_inner_bptree_node_in_header(new_is_inner, header);
    set_hasrefs_in_header(new_has_refs, header);
}
296

297
// Recursively free every subarray referenced from slot `offset` onwards.
// Entries that are not refs are skipped.
void Array::destroy_children(size_t offset) noexcept
{
    for (size_t ndx = offset; ndx != m_size; ++ndx) {
        const int64_t value = get(ndx);

        // A zero value denotes an empty subtree, and a set low bit means the
        // value is tagged data rather than an 8-byte aligned ref.
        const bool is_ref = value != 0 && (value & 1) == 0;
        if (!is_ref)
            continue;

        destroy_deep(to_ref(value), m_alloc);
    }
}
316

317
// size_t Array::get_byte_size() const noexcept
318
//{
319
//     const auto header = get_header();
320
//     auto num_bytes = get_byte_size_from_header(header);
321
//     auto read_only = m_alloc.is_read_only(m_ref) == true;
322
//     auto capacity = get_capacity_from_header(header);
323
//     auto bytes_ok = num_bytes <= capacity;
324
//     REALM_ASSERT(read_only || bytes_ok);
325
//     REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));
326
//     return num_bytes;
327
// }
328

329
// Write this node to `out` without visiting any children, returning the ref
// of the written copy.
ref_type Array::do_write_shallow(_impl::ArrayWriterBase& out) const
{
    const char* header = get_header_from_data(m_data);
    const size_t byte_size = get_byte_size();
    const bool compressed = is_compressed();

    // Placeholder checksum: 'BB' (2 bytes) for extended/compressed arrays,
    // 'AAAA' (4 bytes) otherwise.
    const uint32_t dummy_checksum = compressed ? 0x42424242UL : 0x41414141UL;
    const uint32_t dummy_checksum_bytes = compressed ? 2 : 4;

    ref_type new_ref = out.write_array(header, byte_size, dummy_checksum, dummy_checksum_bytes); // Throws
    REALM_ASSERT_3(new_ref % 8, ==, 0); // Refs are always 8-byte aligned
    return new_ref;
}
341

342

343
// Write this array and everything reachable from it to `out`, returning the
// ref of the written copy. Children are written first so their new refs can
// be recorded in the parent.
ref_type Array::do_write_deep(_impl::ArrayWriterBase& out, bool only_if_modified, bool compress) const
{
    // Temp array for updated refs
    Array new_array(Allocator::get_default());
    Type type = m_is_inner_bptree_node ? type_InnerBptreeNode : type_HasRefs;
    new_array.create(type, m_context_flag); // Throws
    // Shallow guard: on exception only the temp node itself needs cleanup.
    _impl::ShallowArrayDestroyGuard dg(&new_array);

    // First write out all sub-arrays
    size_t n = size();
    for (size_t i = 0; i < n; ++i) {
        int_fast64_t value = get(i);
        // Nonzero even values are refs; odd values are tagged integers.
        bool is_ref = (value != 0 && (value & 1) == 0);
        if (is_ref) {
            ref_type subref = to_ref(value);
            ref_type new_subref = write(subref, m_alloc, out, only_if_modified, compress); // Throws
            value = from_ref(new_subref);
        }
        new_array.add(value); // Throws
    }
    return new_array.do_write_shallow(out); // Throws
}
365

366

367
// Move the elements in [begin, end) to positions starting at dest_begin
// within this array. dest_begin must not lie inside [begin, end), as
// required for a forward copy.
void Array::move(size_t begin, size_t end, size_t dest_begin)
{
    REALM_ASSERT_3(begin, <=, end);
    REALM_ASSERT_3(end, <=, m_size);
    REALM_ASSERT_3(dest_begin, <=, m_size);
    REALM_ASSERT_3(end - begin, <=, m_size - dest_begin);
    REALM_ASSERT(!(dest_begin >= begin && dest_begin < end)); // Required by std::copy

    // Check if we need to copy before modifying
    copy_on_write(); // Throws

    size_t bits_per_elem = m_width;
    const char* header = get_header_from_data(m_data);
    if (get_wtype_from_header(header) == wtype_Multiply) {
        // Under wtype_Multiply the header's 'width' is in bytes, not bits.
        bits_per_elem *= 8;
    }

    if (bits_per_elem < 8) {
        // Sub-byte element widths cannot be moved bytewise; fall back to
        // element-by-element get/set.
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i) {
            int_fast64_t v = m_getter(*this, i);
            m_vtable->setter(*this, dest_begin++, v);
        }
        return;
    }

    // Whole-byte element widths: move the raw bytes in one operation.
    size_t bytes_per_elem = bits_per_elem / 8;
    const char* begin_2 = m_data + begin * bytes_per_elem;
    const char* end_2 = m_data + end * bytes_per_elem;
    char* dest_begin_2 = m_data + dest_begin * bytes_per_elem;
    realm::safe_copy_n(begin_2, end_2 - begin_2, dest_begin_2);
}
399

400
// Move the tail [ndx, m_size) of this array to the end of `dst`, then
// truncate this array to `ndx`.
void Array::move(Array& dst, size_t ndx)
{
    size_t dest_begin = dst.m_size;
    size_t nb_to_move = m_size - ndx;
    dst.copy_on_write();
    // Widen dst up front so a single cached setter suffices for the loop.
    // NOTE(review): this widens to m_ubound (max value storable at our
    // current width) rather than to the largest value actually moved —
    // confirm this over-approximation is intended.
    dst.ensure_minimum_width(this->m_ubound);
    dst.alloc(dst.m_size + nb_to_move, dst.m_width); // Make room for the new elements

    // cache variables used in tight loop
    auto getter = m_getter;
    auto setter = dst.m_vtable->setter;
    size_t sz = m_size;

    for (size_t i = ndx; i < sz; i++) {
        auto v = getter(*this, i);
        setter(dst, dest_begin++, v);
    }

    truncate(ndx);
}
420

421
// Store `value` at index `ndx`, widening the element width first if needed.
void Array::set(size_t ndx, int64_t value)
{
    REALM_ASSERT_3(ndx, <, m_size);

    // Early-out when the slot already holds the value; this avoids an
    // unnecessary copy-on-write of the underlying memory.
    if (m_vtable->getter(*this, ndx) == value)
        return;

    copy_on_write();             // Throws
    ensure_minimum_width(value); // Throws: grow width so the value fits
    m_vtable->setter(*this, ndx, value);
}
434

435
// Store `ref` at index `ndx`, converting it to its integer representation.
void Array::set_as_ref(size_t ndx, ref_type ref)
{
    set(ndx, from_ref(ref));
}
439

440
/*
441
// Optimization for the common case of adding positive values to a local array
442
// (happens a lot when returning results to TableViews)
443
void Array::add_positive_local(int64_t value)
444
{
445
    REALM_ASSERT(value >= 0);
446
    REALM_ASSERT(&m_alloc == &Allocator::get_default());
447

448
    if (value <= m_ubound) {
449
        if (m_size < m_capacity) {
450
            (this->*(m_vtable->setter))(m_size, value);
451
            ++m_size;
452
            set_header_size(m_size);
453
            return;
454
        }
455
    }
456

457
    insert(m_size, value);
458
}
459
*/
460

461
// Total number of bytes in this blob. When the context flag is set the blob
// is split across child arrays, so their sizes are summed; otherwise the
// blob is stored inline and m_size is the answer.
size_t Array::blob_size() const noexcept
{
    if (!get_context_flag())
        return m_size;

    size_t total_size = 0;
    for (size_t i = 0; i < size(); ++i) {
        char* header = m_alloc.translate(Array::get_as_ref(i));
        total_size += Array::get_size_from_header(header);
    }
    return total_size;
}
475

476
// Insert `value` at index `ndx`, shifting subsequent elements up by one
// position. Expands the element width first when `value` does not fit.
void Array::insert(size_t ndx, int_fast64_t value)
{
    REALM_ASSERT_DEBUG(ndx <= m_size);

    // Compressed arrays must be decompressed before mutation.
    decompress_array(*this);
    const auto old_width = m_width;
    const auto old_size = m_size;
    const Getter old_getter = m_getter; // Save old getter before potential width expansion

    bool do_expand = value < m_lbound || value > m_ubound;
    if (do_expand) {
        size_t width = bit_width(value);
        REALM_ASSERT_DEBUG(width > m_width);
        alloc(m_size + 1, width); // Throws
    }
    else {
        alloc(m_size + 1, m_width); // Throws
    }

    // Shift the elements at/above the insertion point up by one, back to
    // front, re-encoding at the new width (may expand)
    if (do_expand || old_width < 8) {
        size_t i = old_size;
        while (i > ndx) {
            --i;
            int64_t v = old_getter(*this, i);
            m_vtable->setter(*this, i + 1, v);
        }
    }
    else if (ndx != old_size) {
        // when byte sized and no expansion, use memmove
        // FIXME: Optimize by simply dividing by 8 (or shifting right by 3 bit positions)
        size_t w = (old_width == 64) ? 8 : (old_width == 32) ? 4 : (old_width == 16) ? 2 : 1;
        char* src_begin = m_data + ndx * w;
        char* src_end = m_data + old_size * w;
        char* dst_end = src_end + w;
        std::copy_backward(src_begin, src_end, dst_end);
    }

    // Insert the new value
    m_vtable->setter(*this, ndx, value);

    // Re-encode the elements below the insertion point at the new width;
    // they are still stored at the old width, so rewrite back to front.
    if (do_expand) {
        size_t i = ndx;
        while (i != 0) {
            --i;
            int64_t v = old_getter(*this, i);
            m_vtable->setter(*this, i, v);
        }
    }
}
527

528
void Array::copy_on_write()
529
{
276,486,417✔
530
    if (is_read_only() && !decompress_array(*this))
276,486,417✔
531
        Node::copy_on_write();
7,984,611✔
532
}
276,486,417✔
533

534
// As copy_on_write(), but guarantees at least `min_size` bytes of capacity
// in the writable copy.
void Array::copy_on_write(size_t min_size)
{
    if (!is_read_only())
        return;
    if (!decompress_array(*this))
        Node::copy_on_write(min_size);
}
539

540
// Shrink the array to `new_size` elements. Capacity is left unchanged; the
// element width is reset to zero when the array becomes empty.
void Array::truncate(size_t new_size)
{
    REALM_ASSERT(is_attached());
    REALM_ASSERT_3(new_size, <=, m_size);

    // Nothing to do when the size is unchanged.
    if (new_size == m_size)
        return;

    copy_on_write(); // Throws

    // Record the new size in both the accessor and the header.
    m_size = new_size;
    set_header_size(new_size);

    // A fully cleared array can drop its element width back to zero.
    if (new_size == 0) {
        set_width_in_header(0, get_header());
        update_width_cache_from_header();
    }
}
562

563
// Like truncate(), but first recursively destroys any subarrays referenced
// from the removed slots.
void Array::truncate_and_destroy_children(size_t new_size)
{
    REALM_ASSERT(is_attached());
    REALM_ASSERT_3(new_size, <=, m_size);

    // Nothing to do when the size is unchanged.
    if (new_size == m_size)
        return;

    copy_on_write(); // Throws

    // Free the subtrees hanging off the slots about to be removed.
    if (m_has_refs)
        destroy_children(new_size);

    // Record the new size in both the accessor and the header; capacity is
    // left unchanged.
    m_size = new_size;
    set_header_size(new_size);

    // A fully cleared array can drop its element width back to zero.
    if (new_size == 0) {
        set_width_in_header(0, get_header());
        update_width_cache_from_header();
    }
}
590

591
void Array::do_ensure_minimum_width(int_fast64_t value)
592
{
9,529,713✔
593
    // Make room for the new value
594
    const size_t width = bit_width(value);
9,529,713✔
595

596
    REALM_ASSERT_3(width, >, m_width);
9,529,713✔
597

598
    Getter old_getter = m_getter; // Save old getter before width expansion
9,529,713✔
599
    alloc(m_size, width);         // Throws
9,529,713✔
600

601
    // Expand the old values
602
    size_t i = m_size;
9,529,713✔
603
    while (i != 0) {
78,929,577✔
604
        --i;
69,399,864✔
605
        int64_t v = old_getter(*this, i);
69,399,864✔
606
        m_vtable->setter(*this, i, v);
69,399,864✔
607
    }
69,399,864✔
608
}
9,529,713✔
609

610
// Attempt to produce a compressed copy of this array in `arr`. Only plain
// (WTypBits-encoded) arrays are candidates; returns false otherwise.
bool Array::compress_array(Array& arr) const
{
    if (m_integer_compressor.get_encoding() != NodeHeader::Encoding::WTypBits)
        return false;
    return m_integer_compressor.compress(*this, arr);
}
617

618
// Decompress `arr` in place if it is compressed; returns false (and leaves
// the array untouched) otherwise.
bool Array::decompress_array(Array& arr) const
{
    if (!arr.is_compressed())
        return false;
    return m_integer_compressor.decompress(arr);
}
622

623
// Public wrapper around compress_array(); returns true on success.
bool Array::try_compress(Array& arr) const
{
    return compress_array(arr);
}
627

628
// Public wrapper: decompress this array in place if it is compressed.
bool Array::try_decompress()
{
    return decompress_array(*this);
}
632

633
size_t Array::calc_aligned_byte_size(size_t size, int width)
UNCOV
634
{
×
UNCOV
635
    REALM_ASSERT(width != 0 && (width & (width - 1)) == 0); // Is a power of two
×
UNCOV
636
    size_t max = std::numeric_limits<size_t>::max();
×
UNCOV
637
    size_t max_2 = max & ~size_t(7); // Allow for upwards 8-byte alignment
×
UNCOV
638
    bool overflow;
×
UNCOV
639
    size_t byte_size;
×
UNCOV
640
    if (width < 8) {
×
UNCOV
641
        size_t elems_per_byte = 8 / width;
×
UNCOV
642
        size_t byte_size_0 = size / elems_per_byte;
×
UNCOV
643
        if (size % elems_per_byte != 0)
×
UNCOV
644
            ++byte_size_0;
×
UNCOV
645
        overflow = byte_size_0 > max_2 - header_size;
×
UNCOV
646
        byte_size = header_size + byte_size_0;
×
UNCOV
647
    }
×
UNCOV
648
    else {
×
UNCOV
649
        size_t bytes_per_elem = width / 8;
×
UNCOV
650
        overflow = size > (max_2 - header_size) / bytes_per_elem;
×
UNCOV
651
        byte_size = header_size + size * bytes_per_elem;
×
UNCOV
652
    }
×
UNCOV
653
    if (overflow)
×
654
        throw std::overflow_error("Byte size overflow");
×
UNCOV
655
    REALM_ASSERT_3(byte_size, >, 0);
×
UNCOV
656
    size_t aligned_byte_size = ((byte_size - 1) | 7) + 1; // 8-byte alignment
×
UNCOV
657
    return aligned_byte_size;
×
UNCOV
658
}
×
659

660
// Make a deep copy (allocated from `target_alloc`) of the array node at
// `mem` and everything reachable from it through refs; returns the MemRef
// of the new root.
MemRef Array::clone(MemRef mem, Allocator& alloc, Allocator& target_alloc)
{
    const char* header = mem.get_addr();
    if (!get_hasrefs_from_header(header)) {
        // This array has no subarrays, so we can make a byte-for-byte
        // copy, which is more efficient.

        // Calculate size of new array in bytes
        size_t size = get_byte_size_from_header(header);

        // Create the new array
        MemRef clone_mem = target_alloc.alloc(size); // Throws
        char* clone_header = clone_mem.get_addr();

        // Copy contents
        const char* src_begin = header;
        const char* src_end = header + size;
        char* dst_begin = clone_header;
        realm::safe_copy_n(src_begin, src_end - src_begin, dst_begin);

        // Update with correct capacity
        set_capacity_in_header(size, clone_header);

        return clone_mem;
    }

    // Refs are integers, and integers arrays use wtype_Bits.
    REALM_ASSERT_3(get_wtype_from_header(header), ==, wtype_Bits);

    Array array{alloc};
    array.init_from_mem(mem);

    // Create new empty array of refs
    Array new_array(target_alloc);
    _impl::DeepArrayDestroyGuard dg(&new_array);
    Type type = get_type_from_header(header);
    bool context_flag = get_context_flag_from_header(header);
    new_array.create(type, context_flag); // Throws

    // Guards the most recently cloned child until ownership has been
    // handed over to new_array.
    _impl::DeepArrayRefDestroyGuard dg_2(target_alloc);
    size_t n = array.size();
    for (size_t i = 0; i != n; ++i) {
        int_fast64_t value = array.get(i);

        // Null-refs signify empty subtrees. Also, all refs are
        // 8-byte aligned, so the lowest bits cannot be set. If they
        // are, it means that it should not be interpreted as a ref.
        bool is_subarray = value != 0 && (value & 1) == 0;
        if (!is_subarray) {
            new_array.add(value); // Throws
            continue;
        }

        // Recursively clone the child, then record its new ref.
        ref_type ref = to_ref(value);
        MemRef new_mem = clone(MemRef(ref, alloc), alloc, target_alloc); // Throws
        dg_2.reset(new_mem.get_ref());
        value = from_ref(new_mem.get_ref());
        new_array.add(value); // Throws
        dg_2.release();
    }

    dg.release();
    return new_array.get_mem();
}
724

725
// Allocate a new array node in `alloc` and return its MemRef. When `value`
// is nonzero, all `size` slots are pre-filled with it (wtype_Bits only).
MemRef Array::create(Type type, bool context_flag, WidthType width_type, size_t size, int_fast64_t value,
                     Allocator& alloc)
{
    REALM_ASSERT_DEBUG(value == 0 || width_type == wtype_Bits);
    REALM_ASSERT_DEBUG(size == 0 || width_type != wtype_Ignore);
    uint8_t width = 0;
    if (value != 0)
        width = bit_width(value);
    auto mem = Node::create_node(size, alloc, context_flag, type, width_type, width);
    if (value != 0) {
        const auto header = mem.get_addr();
        char* data = get_data_from_header(header);
        size_t begin = 0, end = size;
        // Dispatch to the fill routine specialized for this width.
        REALM_TEMPEX(fill_direct, width, (data, begin, end, value));
    }
    return mem;
}
742

743
// This is the one installed into the m_vtable->finder slots.
// Thin trampoline: expands to the find_optimized instantiation specialized
// for both the condition and the array's current element width.
template <class cond>
bool Array::find_vtable(const Array& arr, int64_t value, size_t start, size_t end, size_t baseindex,
                        QueryStateBase* state)
{
    REALM_TEMPEX2(return ArrayWithFind(arr).find_optimized, cond, arr.m_width, (value, start, end, baseindex, state));
}
750

751
// Refresh the cached element width, value bounds and width-specialized
// vtable from the current header.
void Array::update_width_cache_from_header() noexcept
{
    m_width = get_width_from_header(get_header());
    m_lbound = lbound_for_width(m_width);
    m_ubound = ubound_for_width(m_width);
    REALM_ASSERT_DEBUG(m_lbound <= m_ubound);
    // NOTE(review): the next two asserts compare the *width* against the
    // value bounds; that looks like it was meant to range-check a value
    // rather than the width — confirm intent upstream.
    REALM_ASSERT_DEBUG(m_width >= m_lbound);
    REALM_ASSERT_DEBUG(m_width <= m_ubound);
    REALM_TEMPEX(m_vtable = &VTableForWidth, m_width, ::vtable);
    m_getter = m_vtable->getter;
}
762

763
// This method reads 8 consecutive values into res[8], starting from index 'ndx'. It's allowed for the 8 values to
764
// exceed array length; in this case, the remainder of res[8] will be set to 0.
765
template <size_t w>
766
void Array::get_chunk(const Array& arr, size_t ndx, int64_t res[8]) noexcept
767
{
2,231,124✔
768
    auto sz = arr.size();
2,231,124✔
769
    REALM_ASSERT_3(ndx, <, sz);
2,231,124✔
770
    size_t i = 0;
2,231,124✔
771

772
    // if constexpr to avoid producing spurious warnings resulting from
773
    // instantiating for too large w
774
    if constexpr (w > 0 && w <= 4) {
2,231,124✔
775
        // Calling get<w>() in a loop results in one load per call to get, but
776
        // for w < 8 we can do better than that
777
        constexpr size_t elements_per_byte = 8 / w;
1,258,176✔
778

779
        // Round m_size down to byte granularity as the trailing bits in the last
780
        // byte are uninitialized
781
        size_t bytes_available = sz / elements_per_byte;
1,258,176✔
782

783
        // Round start and end to be byte-aligned. Start is rounded down and
784
        // end is rounded up as we may read up to 7 unused bits at each end.
785
        size_t start = ndx / elements_per_byte;
1,258,176✔
786
        size_t end = std::min(bytes_available, (ndx + 8 + elements_per_byte - 1) / elements_per_byte);
1,258,176✔
787

788
        if (end > start) {
1,258,176✔
789
            // Loop in reverse order because data is stored in little endian order
790
            uint64_t c = 0;
1,256,658✔
791
            for (size_t i = end; i > start; --i) {
4,245,252✔
792
                c <<= 8;
2,988,594✔
793
                c += *reinterpret_cast<const uint8_t*>(arr.m_data + i - 1);
2,988,594✔
794
            }
2,988,594✔
795
            // Trim off leading bits which aren't part of the requested range
796
            c >>= (ndx - start * elements_per_byte) * w;
1,256,658✔
797

798
            uint64_t mask = (1ULL << w) - 1ULL;
1,256,658✔
799
            res[0] = (c >> 0 * w) & mask;
1,256,658✔
800
            res[1] = (c >> 1 * w) & mask;
1,256,658✔
801
            res[2] = (c >> 2 * w) & mask;
1,256,658✔
802
            res[3] = (c >> 3 * w) & mask;
1,256,658✔
803
            res[4] = (c >> 4 * w) & mask;
1,256,658✔
804
            res[5] = (c >> 5 * w) & mask;
1,256,658✔
805
            res[6] = (c >> 6 * w) & mask;
1,256,658✔
806
            res[7] = (c >> 7 * w) & mask;
1,256,658✔
807

808
            // Read the last few elements via get<w> if needed
809
            i = std::min<size_t>(8, end * elements_per_byte - ndx);
1,256,658✔
810
        }
1,256,658✔
811
    }
1,258,176✔
812

813
    for (; i + ndx < sz && i < 8; i++)
9,911,394✔
814
        res[i] = get<w>(arr, ndx + i);
7,680,270✔
815
    for (; i < 8; i++)
2,453,670✔
816
        res[i] = 0;
222,546✔
817

818
#ifdef REALM_DEBUG
2,231,124✔
819
    for (int j = 0; j + ndx < sz && j < 8; j++) {
19,857,570✔
820
        int64_t expected = Array::get_universal<w>(arr.m_data, ndx + j);
17,626,446✔
821
        REALM_ASSERT(res[j] == expected);
17,626,446✔
822
    }
17,626,446✔
823
#endif
2,231,124✔
824
}
2,231,124✔
825

826
template <>
827
void Array::get_chunk<0>(const Array& arr, size_t ndx, int64_t res[8]) noexcept
828
{
195,456✔
829
    REALM_ASSERT_3(ndx, <, arr.m_size);
195,456✔
830
    memset(res, 0, sizeof(int64_t) * 8);
195,456✔
831
}
195,456✔
832

833

834
// Width-specialized setter installed in the vtable. Writes `value` at
// position `ndx` assuming the array already stores `width`-bit elements;
// performs no bounds check and no width upgrade (callers are responsible).
template <size_t width>
void Array::set(Array& arr, size_t ndx, int64_t value)
{
    realm::set_direct<width>(arr.m_data, ndx, value);
}
1,328,559,315✔
839

840
// Recursively accumulates the byte size of this array and, when it holds
// refs, of every child array reachable from it.
void Array::_mem_usage(size_t& mem) const noexcept
{
    mem += get_byte_size();
    if (!m_has_refs)
        return;
    for (size_t slot = 0; slot < m_size; ++slot) {
        const int64_t val = get(slot);
        // Zero is a null ref; an odd value is a tagged integer, not a ref.
        if (val == 0 || (val & 1) != 0)
            continue;
        Array child(m_alloc);
        child.init_from_ref(to_ref(val));
        child._mem_usage(mem);
    }
}
432✔
854

855
#ifdef REALM_DEBUG
856
namespace {
857

858
class MemStatsHandler : public Array::MemUsageHandler {
859
public:
860
    MemStatsHandler(MemStats& stats) noexcept
861
        : m_stats(stats)
862
    {
×
863
    }
×
864
    void handle(ref_type, size_t allocated, size_t used) noexcept override
865
    {
×
866
        m_stats.allocated += allocated;
×
867
        m_stats.used += used;
×
868
        m_stats.array_count += 1;
×
869
    }
×
870

871
private:
872
    MemStats& m_stats;
873
};
874

875
} // anonymous namespace
876

877

878
// Collects memory-usage statistics for this array tree into `stats_dest`.
void Array::stats(MemStats& stats_dest) const noexcept
{
    MemStatsHandler collector(stats_dest);
    report_memory_usage(collector);
}
×
883

884

885
// Reports this array's memory footprint to `handler`, after first reporting
// every child array (when this array holds refs).
void Array::report_memory_usage(MemUsageHandler& handler) const
{
    if (m_has_refs)
        report_memory_usage_2(handler); // Throws

    const size_t used = get_byte_size();
    // Read-only (file-mapped) memory has no slack capacity; for mutable
    // allocations report the full allocated capacity from the header.
    const size_t allocated = m_alloc.is_read_only(m_ref)
                                 ? used
                                 : get_capacity_from_header(get_header_from_data(m_data));
    handler.handle(m_ref, allocated, used); // Throws
}
860,691✔
901

902

903
// Walks every slot of this array, reporting memory usage for each child ref
// (recursing into children that themselves hold refs).
void Array::report_memory_usage_2(MemUsageHandler& handler) const
{
    // One Array instance is reused for all interior children to avoid
    // repeated construction inside the loop.
    Array child(m_alloc);
    for (size_t slot = 0; slot < m_size; ++slot) {
        const int_fast64_t value = get(slot);
        // Skip null refs and values that are not refs. Values are not refs
        // when the least significant bit is set.
        if (value == 0 || (value & 1) == 1)
            continue;

        const ref_type ref = to_ref(value);
        char* header = m_alloc.translate(ref);
        size_t used;
        if (get_hasrefs_from_header(header)) {
            // Interior node: report its children before reporting it.
            MemRef mem(header, ref, m_alloc);
            child.init_from_mem(mem);
            child.report_memory_usage_2(handler); // Throws
            used = child.get_byte_size();
        }
        else {
            used = get_byte_size_from_header(header);
        }

        // Read-only memory has no slack; mutable allocations report capacity.
        const size_t allocated = m_alloc.is_read_only(ref) ? used : get_capacity_from_header(header);
        handler.handle(ref, allocated, used); // Throws
    }
}
10,165,626✔
937
#endif
938

939
// Debug-only consistency check: validates the cached width against the
// header's width type and verifies the parent's ref points at this array.
// Compiles to a no-op when REALM_DEBUG is not defined.
void Array::verify() const
{
#ifdef REALM_DEBUG

    REALM_ASSERT(is_attached());
    if (!wtype_is_extended(get_header())) {
        // Classic (non-extended) arrays only use power-of-two bit widths.
        REALM_ASSERT(m_width == 0 || m_width == 1 || m_width == 2 || m_width == 4 || m_width == 8 || m_width == 16 ||
                     m_width == 32 || m_width == 64);
    }
    else {
        // Extended (compressed) arrays may use any width up to 64 bits.
        REALM_ASSERT(m_width <= 64);
    }

    // A root array without a parent has nothing more to verify.
    if (!get_parent())
        return;

    // Check that parent is set correctly
    ref_type ref_in_parent = get_ref_from_parent();
    REALM_ASSERT_3(ref_in_parent, ==, m_ref);
#endif
}
929,652✔
960

961
// Returns the index of the first element not less than `value` (standard
// lower_bound semantics; the array content is expected to be sorted).
size_t Array::lower_bound_int(int64_t value) const noexcept
{
    // Compressed arrays cannot be scanned directly over m_data.
    if (is_compressed())
        return lower_bound_int_compressed(value);
    // Dispatch to the width-specialized lower_bound implementation.
    REALM_TEMPEX(return lower_bound, m_width, (m_data, m_size, value));
}
×
967

968
// Returns the index of the first element greater than `value` (standard
// upper_bound semantics; the array content is expected to be sorted).
size_t Array::upper_bound_int(int64_t value) const noexcept
{
    // Compressed arrays cannot be scanned directly over m_data.
    if (is_compressed())
        return upper_bound_int_compressed(value);
    // Dispatch to the width-specialized upper_bound implementation.
    REALM_TEMPEX(return upper_bound, m_width, (m_data, m_size, value));
}
×
974

975
// lower_bound for compressed arrays: binary search that fetches elements
// through the integer compressor instead of reading m_data directly.
size_t Array::lower_bound_int_compressed(int64_t value) const noexcept
{
    // NOTE: this fetcher must be a stack local. A function-local `static`
    // here would be a single mutable object shared by all threads; two
    // threads searching different compressed arrays concurrently would race
    // on `fetcher.ptr` and read through the wrong compressor — unacceptable
    // in a const, lock-free read path. The fetcher only wraps a pointer, so
    // constructing it per call is free.
    impl::CompressedDataFetcher<IntegerCompressor> fetcher;
    fetcher.ptr = &m_integer_compressor;
    return lower_bound(m_data, m_size, value, fetcher);
}
×
981

982
// upper_bound for compressed arrays: binary search that fetches elements
// through the integer compressor instead of reading m_data directly.
size_t Array::upper_bound_int_compressed(int64_t value) const noexcept
{
    // NOTE: this fetcher must be a stack local. A function-local `static`
    // here would be a single mutable object shared by all threads; two
    // threads searching different compressed arrays concurrently would race
    // on `fetcher.ptr` and read through the wrong compressor — unacceptable
    // in a const, lock-free read path. The fetcher only wraps a pointer, so
    // constructing it per call is free.
    impl::CompressedDataFetcher<IntegerCompressor> fetcher;
    fetcher.ptr = &m_integer_compressor;
    return upper_bound(m_data, m_size, value, fetcher);
}
×
988

989
// Header-only element accessor: reads element `ndx` given just the node
// header, without an attached Array instance.
int_fast64_t Array::get(const char* header, size_t ndx) noexcept
{
    // This is very important. Most of the time we end up here because we are
    // traversing the cluster; the keys/refs in the cluster are not compressed
    // (because there is almost no gain), so the intent is to keep cluster
    // traversal as cheap as possible. We need to check the header wtype and
    // only initialize the integer compressor if needed. Otherwise we should
    // just call get_direct. On average there should be one more access to the
    // header while traversing the cluster tree.
    if (REALM_LIKELY(!NodeHeader::wtype_is_extended(header))) {
        const char* data = get_data_from_header(header);
        uint_least8_t width = get_width_from_header(header);
        return get_direct(data, width, ndx);
    }
    // Ideally, we would not want to construct a compressor every time we end
    // up here. However, the compressor initialization should be fast enough.
    // Creating an array, which owns a compressor internally, is the better
    // approach if we intend to access the same data over and over again. The
    // compressor basically caches the most important information about the
    // layout of the data itself.
    IntegerCompressor s_compressor;
    s_compressor.init(header);
    return s_compressor.get(ndx);
}
505,247,496✔
1013

1014
std::pair<int64_t, int64_t> Array::get_two(const char* header, size_t ndx) noexcept
1015
{
618,957✔
1016
    return std::make_pair(get(header, ndx), get(header, ndx + 1));
618,957✔
1017
}
618,957✔
1018

1019
// Counting match (value-carrying overload): bumps the counter and keeps
// searching while the limit has not been reached.
bool QueryStateCount::match(size_t, Mixed) noexcept
{
    m_match_count++;
    return m_match_count < m_limit;
}
31,920✔
1024

1025
// Counting match (index-only overload): bumps the counter and keeps
// searching while the limit has not been reached.
bool QueryStateCount::match(size_t) noexcept
{
    m_match_count++;
    return m_match_count < m_limit;
}
22,033,620✔
1030

1031
// Find-first match (value-carrying overload): records the index and returns
// false to stop the search immediately.
bool QueryStateFindFirst::match(size_t index, Mixed) noexcept
{
    ++m_match_count;
    m_state = index;
    return false;
}
149,562✔
1037

1038
// Find-first match (index-only overload): records the index and returns
// false to stop the search immediately.
bool QueryStateFindFirst::match(size_t index) noexcept
{
    m_match_count++;
    m_state = index;
    return false;
}
10,676,655✔
1044

1045
template <>
1046
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index, Mixed) noexcept
1047
{
106,480,482✔
1048
    ++m_match_count;
106,480,482✔
1049

1050
    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
106,480,482✔
1051
    m_keys.push_back(ObjKey(key_value));
106,480,482✔
1052

1053
    return (m_limit > m_match_count);
106,480,482✔
1054
}
106,480,482✔
1055

1056
template <>
1057
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index) noexcept
1058
{
24,147,450✔
1059
    ++m_match_count;
24,147,450✔
1060
    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
24,147,450✔
1061
    m_keys.push_back(ObjKey(key_value));
24,147,450✔
1062
    return (m_limit > m_match_count);
24,147,450✔
1063
}
24,147,450✔
1064

1065
template <>
1066
bool QueryStateFindAll<IntegerColumn>::match(size_t index, Mixed) noexcept
1067
{
4,833,024✔
1068
    ++m_match_count;
4,833,024✔
1069
    m_keys.add(index);
4,833,024✔
1070

1071
    return (m_limit > m_match_count);
4,833,024✔
1072
}
4,833,024✔
1073

1074
template <>
1075
bool QueryStateFindAll<IntegerColumn>::match(size_t index) noexcept
1076
{
33,838,308✔
1077
    ++m_match_count;
33,838,308✔
1078
    m_keys.add(index);
33,838,308✔
1079

1080
    return (m_limit > m_match_count);
33,838,308✔
1081
}
33,838,308✔
1082

1083
// Default typed_write: attach a plain Array to `ref` and write it out
// deeply, without attempting compression.
ref_type ArrayPayload::typed_write(ref_type ref, _impl::ArrayWriterBase& out, Allocator& alloc)
{
    Array arr(alloc);
    arr.init_from_ref(ref);
    // By default we are not compressing.
    return arr.write(out, /* deep */ true, out.only_modified, /* compress */ false);
}
845,688✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc