
realm / realm-core, build jorgen.edelbo_333 (Evergreen)
Pull Request #7826: Merge Next major (jedelbo: Merge tag 'v14.10.2' into next-major)
01 Jul 2024 07:21AM UTC coverage: 90.865% (-0.08%) from 90.948%

102912 of 181138 branches covered (56.81%)
3131 of 3738 new or added lines in 54 files covered (83.76%)
80 existing lines in 14 files now uncovered
217498 of 239364 relevant lines covered (90.86%)
6655796.15 hits per line

Source file: /src/realm/array.cpp (79.38% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <realm/array_with_find.hpp>
#include <realm/utilities.hpp>
#include <realm/impl/destroy_guard.hpp>
#include <realm/column_integer.hpp>
#include <realm/bplustree.hpp>
#include <realm/query_conditions.hpp>
#include <realm/array_integer.hpp>
#include <realm/array_key.hpp>
#include <realm/impl/array_writer.hpp>

#include <array>
#include <cstring> // std::memcpy
#include <iomanip>
#include <limits>
#include <tuple>

#ifdef REALM_DEBUG
#include <iostream>
#include <sstream>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#pragma warning(disable : 4127) // Condition is constant warning
#endif

// Header format (8 bytes):
// ------------------------
//
// In mutable part / outside file:
//
// |--------|--------|--------|--------|--------|--------|--------|--------|
// |         capacity         |reserved|12344555|           size           |
//
//
// In immutable part / in file:
//
// |--------|--------|--------|--------|--------|--------|--------|--------|
// |             checksum              |12344555|           size           |
//
//
//  1: 'is_inner_bptree_node' (inner node of B+-tree).
//
//  2: 'has_refs' (elements whose first bit is zero are refs to subarrays).
//
//  3: 'context_flag' (meaning depends on context).
//
//  4: 'width_scheme' (2 bits)
//
//      value  |  meaning of 'width'  |  number of bytes used after header
//      -------|----------------------|------------------------------------
//        0    |  number of bits      |  ceil(width * size / 8)
//        1    |  number of bytes     |  width * size
//        2    |  ignored             |  size
//
//  5: 'width_ndx' (3 bits)
//
//      'width_ndx'       |  0 |  1 |  2 |  3 |  4 |  5 |  6 |  7 |
//      ------------------|----|----|----|----|----|----|----|----|
//      value of 'width'  |  0 |  1 |  2 |  4 |  8 | 16 | 32 | 64 |
//
//
// 'capacity' is the total number of bytes allocated for this array
// including the header.
//
// 'size' (aka length) is the number of elements in the array.
//
// 'checksum' (not yet implemented) is the checksum of the array
// including the header.
//
//
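// Worked example (illustrative; added for exposition, not part of the
// original file): a leaf holding 100 elements with 'width_ndx' = 3 has a
// 'width' of 4 bits per element. With 'width_scheme' = 0 ('width' counts
// bits), the payload after the 8-byte header occupies ceil(4 * 100 / 8)
// = 50 bytes, so in the mutable part 'capacity' must be at least 58 bytes.
//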
// Inner node of B+-tree:
// ----------------------
//
// An inner node of a B+-tree has one of two forms: The 'compact'
// form which uses a single array node, or the 'general' form which
// uses two. The compact form is used by default but is converted to
// the general form when the corresponding subtree is modified in
// certain ways. There are two kinds of modification that require
// conversion to the general form:
//
//  - Insertion of an element into the corresponding subtree, except
//    when insertion occurs after the last element in the subtree
//    (append).
//
//  - Removal of an element from the corresponding subtree, except
//    when the removed element is the last element in the subtree.
//
// Compact form:
//
//   --> | N_c | r_1 | r_2 | ... | r_N | N_t |
//
// General form:
//
//   --> |  .  | r_1 | r_2 | ... | r_N | N_t |  (main array node)
//          |
//           ------> | o_2 | ... | o_N |  (offsets array node)
//
// Here,
//   `r_i` is the i'th child ref,
//   `o_i` is the total number of elements preceding the i'th child,
//   `N`   is the number of children,
//   `M`   is one less than the number of children,
//   `N_c` is the fixed number of elements per child
//         (`elems_per_child`), and
//   `N_t` is the total number of elements in the subtree
//         (`total_elems_in_subtree`).
//
// `N_c` must always be a power of `REALM_MAX_BPNODE_SIZE`.
//
// It is expected that `N_t` will be removed in a future version of
// the file format. This will make it much more efficient to append
// elements to the B+-tree (or remove elements from the end).
//
// The last child of an inner node on the compact form may have fewer
// elements than `N_c`. All other children must have exactly `N_c`
// elements in them.
//
// When an inner node is on the general form, and has only one child,
// it has an empty `offsets` array.
//
//
// B+-tree invariants:
//
//  - Every inner node must have at least one child
//    (invar:bptree-nonempty-inner).
//
//  - A leaf node that is not also a root node must contain at least
//    one element (invar:bptree-nonempty-leaf).
//
//  - All leaf nodes must reside at the same depth in the tree
//    (invar:bptree-leaf-depth).
//
//  - If an inner node is on the general form, and has a parent, the
//    parent must also be on the general form
//    (invar:bptree-node-form).
//
// It follows from invar:bptree-nonempty-leaf that the root of an
// empty tree (zero elements) is a leaf.
//
// It follows from invar:bptree-nonempty-inner and
// invar:bptree-nonempty-leaf that in a tree with precisely one
// element, every inner node has precisely one child, there is
// precisely one leaf node, and that leaf node has precisely one
// element.
//
// It follows from invar:bptree-node-form that if the root is on the
// compact form, then so is every other inner node in the tree.
//
// In general, when the root node is an inner node, it will have at
// least two children, because otherwise it would be
// superfluous. However, to allow for exception safety during element
// insertion and removal, this shall not be guaranteed.
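//
// Worked example (illustrative; added for exposition, not part of the
// original file, and assuming the default REALM_MAX_BPNODE_SIZE of 1000):
// a two-level tree holding 2123 elements can use a compact root
// | N_c=1000 | r_1 | r_2 | r_3 | N_t=2123 |, where the first two children
// hold exactly 1000 elements each and the last holds 123. On the general
// form the root would instead refer to an offsets array
// | o_2=1000 | o_3=2000 |.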

// LIMITATION: The code below makes the non-portable assumption that
// negative numbers are represented using two's complement. This is not
// guaranteed by C++03, but holds for all known target platforms.
//
// LIMITATION: The code below makes the non-portable assumption that
// the types `int8_t`, `int16_t`, `int32_t`, and `int64_t`
// exist. This is not guaranteed by C++03, but holds for all
// known target platforms.
//
// LIMITATION: The code below makes the assumption that a reference into
// a realm file will never grow in size above what can be represented in
// a size_t, which is 2^31-1 on a 32-bit platform, and 2^63-1 on a 64-bit
// platform.

using namespace realm;
using namespace realm::util;

void QueryStateBase::dyncast() {}

uint8_t Array::bit_width(int64_t v)
{
    // FIXME: If there is a fast 64-bit CPU reverse-bitscan instruction,
    // this function could be implemented as a table lookup on the result
    // of the scan
    if ((uint64_t(v) >> 4) == 0) {
        static const int8_t bits[] = {0, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
        return bits[int8_t(v)];
    }
    if (v < 0)
        v = ~v;
    // Then check if bits 15-31 are used (32b), 7-31 used (16b), else (8b)
    return uint64_t(v) >> 31 ? 64 : uint64_t(v) >> 15 ? 32 : uint64_t(v) >> 7 ? 16 : 8;
}
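
// Illustrative examples (added for exposition, not part of the original
// file): bit_width(0) == 0, bit_width(1) == 1, bit_width(3) == 2,
// bit_width(15) == 4, bit_width(100) == 8, bit_width(1000) == 16. Negative
// values are folded with v = ~v before classification, so bit_width(-1) == 8;
// widths below 8 only ever describe the non-negative range 0..15.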

template <size_t width>
struct Array::VTableForWidth {
    struct PopulatedVTable : VTable {
        PopulatedVTable()
        {
            getter = &Array::get<width>;
            setter = &Array::set<width>;
            chunk_getter = &Array::get_chunk<width>;
            finder[cond_Equal] = &Array::find_vtable<Equal>;
            finder[cond_NotEqual] = &Array::find_vtable<NotEqual>;
            finder[cond_Greater] = &Array::find_vtable<Greater>;
            finder[cond_Less] = &Array::find_vtable<Less>;
        }
    };
    static const PopulatedVTable vtable;
};

template <size_t width>
const typename Array::VTableForWidth<width>::PopulatedVTable Array::VTableForWidth<width>::vtable;
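
// Note (added for exposition, not part of the original file): the vtable for
// the current width is selected at runtime by update_width_cache_from_header()
// via REALM_TEMPEX, which (roughly) expands to a switch over the eight legal
// widths, e.g. 'case 4: m_vtable = &VTableForWidth<4>::vtable; break;'. The
// getter/setter/finder entries are thus re-resolved only when the width
// changes, not on every element access.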

void Array::init_from_mem(MemRef mem) noexcept
{
    // 'header' is the kind of header that has been allocated. When we are
    // decompressing, the header is of kind A, which partly defeats the
    // purpose of these checks: we are about to fetch data from the just
    // initialised header, and fields that matter for type A arrays (such as
    // width and the lower/upper bounds used both for expanding the array and
    // for querying the data) are never reset.
    const auto header = mem.get_addr();
    const auto is_extended = m_integer_compressor.init(header);

    m_is_inner_bptree_node = get_is_inner_bptree_node_from_header(header);
    m_has_refs = get_hasrefs_from_header(header);
    m_context_flag = get_context_flag_from_header(header);

    if (is_extended) {
        m_ref = mem.get_ref();
        m_data = get_data_from_header(header);
        m_size = m_integer_compressor.size();
        m_width = m_integer_compressor.v_width();
        m_lbound = -m_integer_compressor.v_mask();
        m_ubound = m_integer_compressor.v_mask() - 1;
        m_integer_compressor.set_vtable(*this);
        m_getter = m_vtable->getter;
    }
    else {
        // Old init phase.
        Node::init_from_mem(mem);
        update_width_cache_from_header();
    }
}

MemRef Array::get_mem() const noexcept
{
    return MemRef(get_header_from_data(m_data), m_ref, m_alloc);
}

void Array::update_from_parent() noexcept
{
    ArrayParent* parent = get_parent();
    REALM_ASSERT_DEBUG(parent);
    ref_type new_ref = get_ref_from_parent();
    init_from_ref(new_ref);
}

void Array::set_type(Type type)
{
    REALM_ASSERT_DEBUG(is_attached());

    copy_on_write(); // Throws

    bool init_is_inner_bptree_node = false, init_has_refs = false;
    switch (type) {
        case type_Normal:
            break;
        case type_InnerBptreeNode:
            init_is_inner_bptree_node = true;
            init_has_refs = true;
            break;
        case type_HasRefs:
            init_has_refs = true;
            break;
    }
    m_is_inner_bptree_node = init_is_inner_bptree_node;
    m_has_refs = init_has_refs;

    char* header = get_header();
    set_is_inner_bptree_node_in_header(init_is_inner_bptree_node, header);
    set_hasrefs_in_header(init_has_refs, header);
}

void Array::destroy_children(size_t offset) noexcept
{
    for (size_t i = offset; i != m_size; ++i) {
        int64_t value = get(i);

        // Null-refs indicate empty sub-trees
        if (value == 0)
            continue;

        // A ref is always 8-byte aligned, so the lowest bit
        // cannot be set. If it is, it means that it should not be
        // interpreted as a ref.
        if ((value & 1) != 0)
            continue;

        ref_type ref = to_ref(value);
        destroy_deep(ref, m_alloc);
    }
}
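
// Illustrative note (added for exposition, not part of the original file):
// in a has_refs array the value 0x1000 is even and is therefore taken as a
// ref to a subarray, while 0x1001 has its lowest bit set and is a tagged
// 63-bit integer (payload 0x1001 >> 1 == 0x800); a stored 0 denotes an
// empty subtree.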

// size_t Array::get_byte_size() const noexcept
//{
//     const auto header = get_header();
//     auto num_bytes = get_byte_size_from_header(header);
//     auto read_only = m_alloc.is_read_only(m_ref) == true;
//     auto capacity = get_capacity_from_header(header);
//     auto bytes_ok = num_bytes <= capacity;
//     REALM_ASSERT(read_only || bytes_ok);
//     REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));
//     return num_bytes;
// }

ref_type Array::do_write_shallow(_impl::ArrayWriterBase& out) const
{
    // Here we might want to compress the array before writing it out.
    const char* header = get_header_from_data(m_data);
    size_t byte_size = get_byte_size();
    const auto compressed = is_compressed();
    uint32_t dummy_checksum = compressed ? 0x42424242UL : 0x41414141UL;
    uint32_t dummy_checksum_bytes = compressed ? 2 : 4; // AAAA / BB (only 2 bytes for extended arrays)
    ref_type new_ref = out.write_array(header, byte_size, dummy_checksum, dummy_checksum_bytes); // Throws
    REALM_ASSERT_3(new_ref % 8, ==, 0);                                                          // 8-byte alignment
    return new_ref;
}


ref_type Array::do_write_deep(_impl::ArrayWriterBase& out, bool only_if_modified, bool compress) const
{
    // Temp array for updated refs
    Array new_array(Allocator::get_default());
    Type type = m_is_inner_bptree_node ? type_InnerBptreeNode : type_HasRefs;
    new_array.create(type, m_context_flag); // Throws
    _impl::ShallowArrayDestroyGuard dg(&new_array);

    // First write out all sub-arrays
    size_t n = size();
    for (size_t i = 0; i < n; ++i) {
        int_fast64_t value = get(i);
        bool is_ref = (value != 0 && (value & 1) == 0);
        if (is_ref) {
            ref_type subref = to_ref(value);
            ref_type new_subref = write(subref, m_alloc, out, only_if_modified, compress); // Throws
            value = from_ref(new_subref);
        }
        new_array.add(value); // Throws
    }
    return new_array.do_write_shallow(out); // Throws
}


void Array::move(size_t begin, size_t end, size_t dest_begin)
{
    REALM_ASSERT_3(begin, <=, end);
    REALM_ASSERT_3(end, <=, m_size);
    REALM_ASSERT_3(dest_begin, <=, m_size);
    REALM_ASSERT_3(end - begin, <=, m_size - dest_begin);
    REALM_ASSERT(!(dest_begin >= begin && dest_begin < end)); // Required by std::copy

    // Check if we need to copy before modifying
    copy_on_write(); // Throws

    size_t bits_per_elem = m_width;
    const char* header = get_header_from_data(m_data);
    if (get_wtype_from_header(header) == wtype_Multiply) {
        bits_per_elem *= 8;
    }

    if (bits_per_elem < 8) {
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i) {
            int_fast64_t v = m_getter(*this, i);
            m_vtable->setter(*this, dest_begin++, v);
        }
        return;
    }

    size_t bytes_per_elem = bits_per_elem / 8;
    const char* begin_2 = m_data + begin * bytes_per_elem;
    const char* end_2 = m_data + end * bytes_per_elem;
    char* dest_begin_2 = m_data + dest_begin * bytes_per_elem;
    realm::safe_copy_n(begin_2, end_2 - begin_2, dest_begin_2);
}
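
// Illustrative note (added for exposition, not part of the original file):
// when m_width == 4 two elements share one byte, so a range cannot be moved
// with a plain memmove on byte boundaries; each 4-bit value has to be
// extracted and re-packed, which is why the bits_per_elem < 8 branch above
// copies element by element.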

void Array::move(Array& dst, size_t ndx)
{
    size_t dest_begin = dst.m_size;
    size_t nb_to_move = m_size - ndx;
    dst.copy_on_write();
    dst.ensure_minimum_width(this->m_ubound);
    dst.alloc(dst.m_size + nb_to_move, dst.m_width); // Make room for the new elements

    // cache variables used in tight loop
    auto getter = m_getter;
    auto setter = dst.m_vtable->setter;
    size_t sz = m_size;

    for (size_t i = ndx; i < sz; i++) {
        auto v = getter(*this, i);
        setter(dst, dest_begin++, v);
    }

    truncate(ndx);
}

void Array::set(size_t ndx, int64_t value)
{
    REALM_ASSERT_3(ndx, <, m_size);
    if (m_vtable->getter(*this, ndx) == value)
        return;

    // Check if we need to copy before modifying
    copy_on_write(); // Throws
    // Grow the array if needed to store this value
    ensure_minimum_width(value); // Throws
    // Set the value
    m_vtable->setter(*this, ndx, value);
}

void Array::set_as_ref(size_t ndx, ref_type ref)
{
    set(ndx, from_ref(ref));
}

/*
// Optimization for the common case of adding positive values to a local array
// (happens a lot when returning results to TableViews)
void Array::add_positive_local(int64_t value)
{
    REALM_ASSERT(value >= 0);
    REALM_ASSERT(&m_alloc == &Allocator::get_default());

    if (value <= m_ubound) {
        if (m_size < m_capacity) {
            (this->*(m_vtable->setter))(m_size, value);
            ++m_size;
            set_header_size(m_size);
            return;
        }
    }

    insert(m_size, value);
}
*/

size_t Array::blob_size() const noexcept
{
    if (get_context_flag()) {
        size_t total_size = 0;
        for (size_t i = 0; i < size(); ++i) {
            char* header = m_alloc.translate(Array::get_as_ref(i));
            total_size += Array::get_size_from_header(header);
        }
        return total_size;
    }
    else {
        return m_size;
    }
}

void Array::insert(size_t ndx, int_fast64_t value)
{
    REALM_ASSERT_DEBUG(ndx <= m_size);

    decompress_array(*this);
    const auto old_width = m_width;
    const auto old_size = m_size;
    const Getter old_getter = m_getter; // Save old getter before potential width expansion

    bool do_expand = value < m_lbound || value > m_ubound;
    if (do_expand) {
        size_t width = bit_width(value);
        REALM_ASSERT_DEBUG(width > m_width);
        alloc(m_size + 1, width); // Throws
    }
    else {
        alloc(m_size + 1, m_width); // Throws
    }

    // Move values above the insertion point (may expand)
    if (do_expand || old_width < 8) {
        size_t i = old_size;
        while (i > ndx) {
            --i;
            int64_t v = old_getter(*this, i);
            m_vtable->setter(*this, i + 1, v);
        }
    }
    else if (ndx != old_size) {
        // when byte sized and no expansion, use memmove
        // FIXME: Optimize by simply dividing by 8 (or shifting right by 3 bit positions)
        size_t w = (old_width == 64) ? 8 : (old_width == 32) ? 4 : (old_width == 16) ? 2 : 1;
        char* src_begin = m_data + ndx * w;
        char* src_end = m_data + old_size * w;
        char* dst_end = src_end + w;
        std::copy_backward(src_begin, src_end, dst_end);
    }

    // Insert the new value
    m_vtable->setter(*this, ndx, value);

    // Expand values below the insertion point
    if (do_expand) {
        size_t i = ndx;
        while (i != 0) {
            --i;
            int64_t v = old_getter(*this, i);
            m_vtable->setter(*this, i, v);
        }
    }
}
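
// Worked example (added for exposition, not part of the original file):
// inserting 300 at index 1 of the width-8 array {1, 2, 3} exceeds
// m_ubound == 127, so the node is reallocated with width 16
// (bit_width(300) == 16). The elements at indices >= 1 are first rewritten
// through the new setter, then 300 is stored at index 1, and finally the
// elements below the insertion point are re-widened via the saved
// old_getter, yielding {1, 300, 2, 3}.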

void Array::copy_on_write()
{
    if (is_read_only() && !decompress_array(*this))
        Node::copy_on_write();
}

void Array::copy_on_write(size_t min_size)
{
    if (is_read_only() && !decompress_array(*this))
        Node::copy_on_write(min_size);
}

void Array::truncate(size_t new_size)
{
    REALM_ASSERT(is_attached());
    REALM_ASSERT_3(new_size, <=, m_size);

    if (new_size == m_size)
        return;

    copy_on_write(); // Throws

    // Update size in accessor and in header. This leaves the capacity
    // unchanged.
    m_size = new_size;
    set_header_size(new_size);

    // If the array is completely cleared, we take the opportunity to
    // drop the width back to zero.
    if (new_size == 0) {
        set_width_in_header(0, get_header());
        update_width_cache_from_header();
    }
}

void Array::truncate_and_destroy_children(size_t new_size)
{
    REALM_ASSERT(is_attached());
    REALM_ASSERT_3(new_size, <=, m_size);

    if (new_size == m_size)
        return;

    copy_on_write(); // Throws

    if (m_has_refs) {
        size_t offset = new_size;
        destroy_children(offset);
    }

    // Update size in accessor and in header. This leaves the capacity
    // unchanged.
    m_size = new_size;
    set_header_size(new_size);

    // If the array is completely cleared, we take the opportunity to
    // drop the width back to zero.
    if (new_size == 0) {
        set_width_in_header(0, get_header());
        update_width_cache_from_header();
    }
}

void Array::do_ensure_minimum_width(int_fast64_t value)
{
    // Make room for the new value
    const size_t width = bit_width(value);

    REALM_ASSERT_3(width, >, m_width);

    Getter old_getter = m_getter; // Save old getter before width expansion
    alloc(m_size, width);         // Throws

    // Expand the old values
    size_t i = m_size;
    while (i != 0) {
        --i;
        int64_t v = old_getter(*this, i);
        m_vtable->setter(*this, i, v);
    }
}

bool Array::compress_array(Array& arr) const
{
    if (m_integer_compressor.get_encoding() == NodeHeader::Encoding::WTypBits) {
        return m_integer_compressor.compress(*this, arr);
    }
    return false;
}

bool Array::decompress_array(Array& arr) const
{
    return arr.is_compressed() ? m_integer_compressor.decompress(arr) : false;
}

bool Array::try_compress(Array& arr) const
{
    return compress_array(arr);
}

bool Array::try_decompress()
{
    return decompress_array(*this);
}

size_t Array::calc_aligned_byte_size(size_t size, int width)
{
    REALM_ASSERT(width != 0 && (width & (width - 1)) == 0); // Is a power of two
    size_t max = std::numeric_limits<size_t>::max();
    size_t max_2 = max & ~size_t(7); // Allow for upwards 8-byte alignment
    bool overflow;
    size_t byte_size;
    if (width < 8) {
        size_t elems_per_byte = 8 / width;
        size_t byte_size_0 = size / elems_per_byte;
        if (size % elems_per_byte != 0)
            ++byte_size_0;
        overflow = byte_size_0 > max_2 - header_size;
        byte_size = header_size + byte_size_0;
    }
    else {
        size_t bytes_per_elem = width / 8;
        overflow = size > (max_2 - header_size) / bytes_per_elem;
        byte_size = header_size + size * bytes_per_elem;
    }
    if (overflow)
        throw std::overflow_error("Byte size overflow");
    REALM_ASSERT_3(byte_size, >, 0);
    size_t aligned_byte_size = ((byte_size - 1) | 7) + 1; // 8-byte alignment
    return aligned_byte_size;
}
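
// Worked example (added for exposition, not part of the original file):
// calc_aligned_byte_size(100, 4), with the 8-byte header described at the
// top of this file, gives elems_per_byte = 2, byte_size_0 = 50 and
// byte_size = 58; the expression ((58 - 1) | 7) + 1 then rounds up to the
// next multiple of 8 and returns 64.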

MemRef Array::clone(MemRef mem, Allocator& alloc, Allocator& target_alloc)
{
    const char* header = mem.get_addr();
    if (!get_hasrefs_from_header(header)) {
        // This array has no subarrays, so we can make a byte-for-byte
        // copy, which is more efficient.

        // Calculate size of new array in bytes
        size_t size = get_byte_size_from_header(header);

        // Create the new array
        MemRef clone_mem = target_alloc.alloc(size); // Throws
        char* clone_header = clone_mem.get_addr();

        // Copy contents
        const char* src_begin = header;
        const char* src_end = header + size;
        char* dst_begin = clone_header;
        realm::safe_copy_n(src_begin, src_end - src_begin, dst_begin);

        // Update with correct capacity
        set_capacity_in_header(size, clone_header);

        return clone_mem;
    }

    // Refs are integers, and integer arrays use wtype_Bits.
    REALM_ASSERT_3(get_wtype_from_header(header), ==, wtype_Bits);

    Array array{alloc};
    array.init_from_mem(mem);

    // Create new empty array of refs
    Array new_array(target_alloc);
    _impl::DeepArrayDestroyGuard dg(&new_array);
    Type type = get_type_from_header(header);
    bool context_flag = get_context_flag_from_header(header);
    new_array.create(type, context_flag); // Throws

    _impl::DeepArrayRefDestroyGuard dg_2(target_alloc);
    size_t n = array.size();
    for (size_t i = 0; i != n; ++i) {
        int_fast64_t value = array.get(i);

        // Null-refs signify empty subtrees. Also, all refs are
        // 8-byte aligned, so the lowest bits cannot be set. If they
        // are, it means that it should not be interpreted as a ref.
        bool is_subarray = value != 0 && (value & 1) == 0;
        if (!is_subarray) {
            new_array.add(value); // Throws
            continue;
        }

        ref_type ref = to_ref(value);
        MemRef new_mem = clone(MemRef(ref, alloc), alloc, target_alloc); // Throws
        dg_2.reset(new_mem.get_ref());
        value = from_ref(new_mem.get_ref());
        new_array.add(value); // Throws
        dg_2.release();
    }

    dg.release();
    return new_array.get_mem();
}

MemRef Array::create(Type type, bool context_flag, WidthType width_type, size_t size, int_fast64_t value,
                     Allocator& alloc)
{
    REALM_ASSERT_DEBUG(value == 0 || width_type == wtype_Bits);
    REALM_ASSERT_DEBUG(size == 0 || width_type != wtype_Ignore);
    uint8_t width = 0;
    if (value != 0)
        width = bit_width(value);
    auto mem = Node::create_node(size, alloc, context_flag, type, width_type, width);
    if (value != 0) {
        const auto header = mem.get_addr();
        char* data = get_data_from_header(header);
        size_t begin = 0, end = size;
        REALM_TEMPEX(fill_direct, width, (data, begin, end, value));
    }
    return mem;
}
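
// Illustrative usage (added for exposition, not part of the original file):
// Array::create(type_Normal, false, wtype_Bits, 10, 85, Allocator::get_default())
// allocates a 10-element wtype_Bits leaf and fills every slot with 85; since
// bit_width(85) == 8, each element occupies one byte after the header.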

// This is the one installed into the m_vtable->finder slots.
template <class cond>
bool Array::find_vtable(const Array& arr, int64_t value, size_t start, size_t end, size_t baseindex,
                        QueryStateBase* state)
{
    REALM_TEMPEX2(return ArrayWithFind(arr).find_optimized, cond, arr.m_width, (value, start, end, baseindex, state));
}

void Array::update_width_cache_from_header() noexcept
{
    m_width = get_width_from_header(get_header());
    m_lbound = lbound_for_width(m_width);
    m_ubound = ubound_for_width(m_width);
    REALM_ASSERT_DEBUG(m_lbound <= m_ubound);
    REALM_ASSERT_DEBUG(m_width >= m_lbound);
    REALM_ASSERT_DEBUG(m_width <= m_ubound);
    REALM_TEMPEX(m_vtable = &VTableForWidth, m_width, ::vtable);
    m_getter = m_vtable->getter;
}

// This method reads 8 consecutive values into res[8], starting from index 'ndx'. It is allowed for the 8 values to
// exceed the array length; in this case, the remainder of res[8] will be set to 0.
template <size_t w>
void Array::get_chunk(const Array& arr, size_t ndx, int64_t res[8]) noexcept
{
    auto sz = arr.size();
    REALM_ASSERT_3(ndx, <, sz);
    size_t i = 0;

    // if constexpr to avoid producing spurious warnings resulting from
    // instantiating for too large w
    if constexpr (w > 0 && w <= 4) {
        // Calling get<w>() in a loop results in one load per call to get, but
        // for w < 8 we can do better than that
        constexpr size_t elements_per_byte = 8 / w;

        // Round m_size down to byte granularity as the trailing bits in the last
        // byte are uninitialized
        size_t bytes_available = sz / elements_per_byte;

        // Round start and end to be byte-aligned. Start is rounded down and
        // end is rounded up as we may read up to 7 unused bits at each end.
        size_t start = ndx / elements_per_byte;
        size_t end = std::min(bytes_available, (ndx + 8 + elements_per_byte - 1) / elements_per_byte);

        if (end > start) {
            // Loop in reverse order because data is stored in little endian order
            uint64_t c = 0;
            for (size_t i = end; i > start; --i) {
                c <<= 8;
                c += *reinterpret_cast<const uint8_t*>(arr.m_data + i - 1);
            }
            // Trim off leading bits which aren't part of the requested range
            c >>= (ndx - start * elements_per_byte) * w;

            uint64_t mask = (1ULL << w) - 1ULL;
            res[0] = (c >> 0 * w) & mask;
            res[1] = (c >> 1 * w) & mask;
            res[2] = (c >> 2 * w) & mask;
            res[3] = (c >> 3 * w) & mask;
            res[4] = (c >> 4 * w) & mask;
            res[5] = (c >> 5 * w) & mask;
            res[6] = (c >> 6 * w) & mask;
            res[7] = (c >> 7 * w) & mask;

            // Read the last few elements via get<w> if needed
            i = std::min<size_t>(8, end * elements_per_byte - ndx);
        }
    }

    for (; i + ndx < sz && i < 8; i++)
        res[i] = get<w>(arr, ndx + i);
    for (; i < 8; i++)
        res[i] = 0;

#ifdef REALM_DEBUG
    for (int j = 0; j + ndx < sz && j < 8; j++) {
        int64_t expected = Array::get_universal<w>(arr.m_data, ndx + j);
        REALM_ASSERT(res[j] == expected);
    }
#endif
}
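
// Worked example (added for exposition, not part of the original file):
// for w == 2, ndx == 5 and sz == 20, elements_per_byte == 4, so start == 1
// and end == min(20 / 4, (5 + 8 + 3) / 4) == 4. Bytes 1..3 are accumulated
// little-endian into c, c is shifted right by (5 - 1 * 4) * 2 == 2 bits, and
// all eight 2-bit fields are then masked out of c in one pass instead of
// performing eight separate loads via get<2>().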

template <>
void Array::get_chunk<0>(const Array& arr, size_t ndx, int64_t res[8]) noexcept
{
    REALM_ASSERT_3(ndx, <, arr.m_size);
    memset(res, 0, sizeof(int64_t) * 8);
}


template <size_t width>
void Array::set(Array& arr, size_t ndx, int64_t value)
{
    realm::set_direct<width>(arr.m_data, ndx, value);
}

void Array::_mem_usage(size_t& mem) const noexcept
{
    mem += get_byte_size();
    if (m_has_refs) {
        for (size_t i = 0; i < m_size; ++i) {
            int64_t val = get(i);
            if (val && !(val & 1)) {
                Array subarray(m_alloc);
                subarray.init_from_ref(to_ref(val));
                subarray._mem_usage(mem);
            }
        }
    }
}

#ifdef REALM_DEBUG
namespace {

class MemStatsHandler : public Array::MemUsageHandler {
public:
    MemStatsHandler(MemStats& stats) noexcept
        : m_stats(stats)
    {
    }
    void handle(ref_type, size_t allocated, size_t used) noexcept override
    {
        m_stats.allocated += allocated;
        m_stats.used += used;
        m_stats.array_count += 1;
    }

private:
    MemStats& m_stats;
};

} // anonymous namespace


void Array::stats(MemStats& stats_dest) const noexcept
{
    MemStatsHandler handler(stats_dest);
    report_memory_usage(handler);
}


void Array::report_memory_usage(MemUsageHandler& handler) const
{
    if (m_has_refs)
        report_memory_usage_2(handler); // Throws

    size_t used = get_byte_size();
    size_t allocated;
    if (m_alloc.is_read_only(m_ref)) {
        allocated = used;
    }
    else {
        char* header = get_header_from_data(m_data);
        allocated = get_capacity_from_header(header);
    }
    handler.handle(m_ref, allocated, used); // Throws
}


void Array::report_memory_usage_2(MemUsageHandler& handler) const
{
    Array subarray(m_alloc);
    for (size_t i = 0; i < m_size; ++i) {
        int_fast64_t value = get(i);
        // Skip null refs and values that are not refs. Values are not refs when
        // the least significant bit is set.
        if (value == 0 || (value & 1) == 1)
            continue;

        size_t used;
        ref_type ref = to_ref(value);
        char* header = m_alloc.translate(ref);
        bool array_has_refs = get_hasrefs_from_header(header);
        if (array_has_refs) {
            MemRef mem(header, ref, m_alloc);
            subarray.init_from_mem(mem);
            subarray.report_memory_usage_2(handler); // Throws
            used = subarray.get_byte_size();
        }
        else {
            used = get_byte_size_from_header(header);
        }

        size_t allocated;
        if (m_alloc.is_read_only(ref)) {
            allocated = used;
        }
        else {
            allocated = get_capacity_from_header(header);
        }
        handler.handle(ref, allocated, used); // Throws
    }
}
#endif

void Array::verify() const
{
#ifdef REALM_DEBUG

    REALM_ASSERT(is_attached());
    if (!wtype_is_extended(get_header())) {
        REALM_ASSERT(m_width == 0 || m_width == 1 || m_width == 2 || m_width == 4 || m_width == 8 || m_width == 16 ||
                     m_width == 32 || m_width == 64);
    }
    else {
        REALM_ASSERT(m_width <= 64);
    }

    if (!get_parent())
        return;

    // Check that parent is set correctly
    ref_type ref_in_parent = get_ref_from_parent();
    REALM_ASSERT_3(ref_in_parent, ==, m_ref);
#endif
}

size_t Array::lower_bound_int(int64_t value) const noexcept
{
    if (is_compressed())
        return lower_bound_int_compressed(value);
    REALM_TEMPEX(return lower_bound, m_width, (m_data, m_size, value));
}

size_t Array::upper_bound_int(int64_t value) const noexcept
{
    if (is_compressed())
        return upper_bound_int_compressed(value);
    REALM_TEMPEX(return upper_bound, m_width, (m_data, m_size, value));
}

size_t Array::lower_bound_int_compressed(int64_t value) const noexcept
{
    // The fetcher must be a per-call object: a function-local static here
    // would be shared between threads, and reassigning 'ptr' on each call
    // would be a data race.
    impl::CompressedDataFetcher<IntegerCompressor> encoder;
    encoder.ptr = &m_integer_compressor;
    return lower_bound(m_data, m_size, value, encoder);
}

size_t Array::upper_bound_int_compressed(int64_t value) const noexcept
{
    // See the note in lower_bound_int_compressed() above.
    impl::CompressedDataFetcher<IntegerCompressor> encoder;
    encoder.ptr = &m_integer_compressor;
    return upper_bound(m_data, m_size, value, encoder);
}
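
// Illustrative note (added for exposition, not part of the original file):
// these are binary searches, so they assume the array content is sorted.
// For the contents {1, 2, 2, 5}, lower_bound_int(2) returns 1 (first element
// not less than 2) and upper_bound_int(2) returns 3 (first element greater
// than 2); for an absent value such as 3, both return the same insertion
// point, 3.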

int_fast64_t Array::get(const char* header, size_t ndx) noexcept
{
    // This path is performance critical. Most of the time we end up here
    // while traversing the cluster tree, and the keys/refs in the cluster
    // are not compressed (there is almost no gain), so the intent is to
    // keep cluster traversal as cheap as possible. We check the header
    // wtype and only initialise the integer compressor if it is needed;
    // otherwise we just call get_direct. On average this costs one extra
    // access to the header while traversing the cluster tree.
    if (REALM_LIKELY(!NodeHeader::wtype_is_extended(header))) {
        const char* data = get_data_from_header(header);
        uint_least8_t width = get_width_from_header(header);
        return get_direct(data, width, ndx);
    }
    // Ideally, we would not construct a compressor every time we end up here,
    // but compressor initialization should be fast enough. Creating an array,
    // which owns a compressor internally, is the better approach if we intend
    // to access the same data over and over again, since the compressor caches
    // the most important information about the layout of the data.
    IntegerCompressor s_compressor;
    s_compressor.init(header);
    return s_compressor.get(ndx);
}

std::pair<int64_t, int64_t> Array::get_two(const char* header, size_t ndx) noexcept
{
    return std::make_pair(get(header, ndx), get(header, ndx + 1));
}

bool QueryStateCount::match(size_t, Mixed) noexcept
{
    ++m_match_count;
    return (m_limit > m_match_count);
}

bool QueryStateCount::match(size_t) noexcept
{
    ++m_match_count;
    return (m_limit > m_match_count);
}

bool QueryStateFindFirst::match(size_t index, Mixed) noexcept
{
    m_match_count++;
    m_state = index;
    return false;
}

bool QueryStateFindFirst::match(size_t index) noexcept
{
    ++m_match_count;
    m_state = index;
    return false;
}

template <>
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index, Mixed) noexcept
{
    ++m_match_count;

    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
    m_keys.push_back(ObjKey(key_value));

    return (m_limit > m_match_count);
}

template <>
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index) noexcept
{
    ++m_match_count;
    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
    m_keys.push_back(ObjKey(key_value));
    return (m_limit > m_match_count);
}

template <>
bool QueryStateFindAll<IntegerColumn>::match(size_t index, Mixed) noexcept
{
    ++m_match_count;
    m_keys.add(index);

    return (m_limit > m_match_count);
}

template <>
bool QueryStateFindAll<IntegerColumn>::match(size_t index) noexcept
{
    ++m_match_count;
    m_keys.add(index);

    return (m_limit > m_match_count);
}

void Array::typed_print(std::string prefix) const
{
    std::cout << "Generic Array " << header_to_string(get_header()) << " @ " << m_ref;
    if (!is_attached()) {
        std::cout << " Unattached";
        return;
    }
    if (size() == 0) {
        std::cout << " Empty" << std::endl;
        return;
    }
    std::cout << " size = " << size() << " {";
    if (has_refs()) {
        std::cout << std::endl;
        for (unsigned n = 0; n < size(); ++n) {
            auto pref = prefix + "  " + to_string(n) + ":\t";
            RefOrTagged rot = get_as_ref_or_tagged(n);
            if (rot.is_ref() && rot.get_as_ref()) {
                Array a(m_alloc);
                a.init_from_ref(rot.get_as_ref());
                std::cout << pref;
                a.typed_print(pref);
            }
            else if (rot.is_tagged()) {
                std::cout << pref << rot.get_as_int() << std::endl;
            }
        }
        std::cout << prefix << "}" << std::endl;
    }
    else {
        std::cout << " Leaf of unknown type }" << std::endl;
    }
}

ref_type ArrayPayload::typed_write(ref_type ref, _impl::ArrayWriterBase& out, Allocator& alloc)
{
    Array arr(alloc);
    arr.init_from_ref(ref);
    // By default we are not compressing
    constexpr bool compress = false;
    return arr.write(out, true, out.only_modified, compress);
}