realm / realm-core / jorgen.edelbo_337

03 Jul 2024 01:04PM UTC. Coverage: 90.864% (-0.1%) from 90.984%

Pull Request #7826: Merge Next major (CI: Evergreen)
nicola-cab: Merge branch 'master' of github.com:realm/realm-core into next-major

102968 of 181176 branches covered (56.83%)
3131 of 3738 new or added lines in 54 files covered (83.76%)
106 existing lines in 23 files now uncovered
217725 of 239616 relevant lines covered (90.86%)
6844960.2 hits per line

Source file: /src/realm/array.cpp (79.38% of lines covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <realm/array_with_find.hpp>
#include <realm/utilities.hpp>
#include <realm/impl/destroy_guard.hpp>
#include <realm/column_integer.hpp>
#include <realm/bplustree.hpp>
#include <realm/query_conditions.hpp>
#include <realm/array_integer.hpp>
#include <realm/array_key.hpp>
#include <realm/impl/array_writer.hpp>

#include <array>
#include <cstring> // std::memcpy
#include <iomanip>
#include <limits>
#include <tuple>

#ifdef REALM_DEBUG
#include <iostream>
#include <sstream>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#pragma warning(disable : 4127) // Condition is constant warning
#endif

// Header format (8 bytes):
// ------------------------
//
// In mutable part / outside file:
//
// |--------|--------|--------|--------|--------|--------|--------|--------|
// |         capacity         |reserved|12344555|           size           |
//
//
// In immutable part / in file:
//
// |--------|--------|--------|--------|--------|--------|--------|--------|
// |             checksum              |12344555|           size           |
//
//
//  1: 'is_inner_bptree_node' (inner node of B+-tree).
//
//  2: 'has_refs' (elements whose first bit is zero are refs to subarrays).
//
//  3: 'context_flag' (meaning depends on context)
//
//  4: 'width_scheme' (2 bits)
//
//      value  |  meaning of 'width'  |  number of bytes used after header
//      -------|----------------------|------------------------------------
//        0    |  number of bits      |  ceil(width * size / 8)
//        1    |  number of bytes     |  width * size
//        2    |  ignored             |  size
//
//  5: 'width_ndx' (3 bits)
//
//      'width_ndx'       |  0 |  1 |  2 |  3 |  4 |  5 |  6 |  7 |
//      ------------------|----|----|----|----|----|----|----|----|
//      value of 'width'  |  0 |  1 |  2 |  4 |  8 | 16 | 32 | 64 |
//
//
// 'capacity' is the total number of bytes allocated for this array
// including the header.
//
// 'size' (aka length) is the number of elements in the array.
//
// 'checksum' (not yet implemented) is the checksum of the array
// including the header.
//
//
// Inner node of B+-tree:
// ----------------------
//
// An inner node of a B+-tree has one of two forms: the 'compact'
// form, which uses a single array node, and the 'general' form, which
// uses two. The compact form is used by default but is converted to
// the general form when the corresponding subtree is modified in
// certain ways. There are two kinds of modification that require
// conversion to the general form:
//
//  - Insertion of an element into the corresponding subtree, except
//    when insertion occurs after the last element in the subtree
//    (append).
//
//  - Removal of an element from the corresponding subtree, except
//    when the removed element is the last element in the subtree.
//
// Compact form:
//
//   --> | N_c | r_1 | r_2 | ... | r_N | N_t |
//
// General form:
//
//   --> |  .  | r_1 | r_2 | ... | r_N | N_t |  (main array node)
//          |
//           ------> | o_2 | ... | o_N |  (offsets array node)
//
// Here,
//   `r_i` is the i'th child ref,
//   `o_i` is the total number of elements preceding the i'th child,
//   `N`   is the number of children,
//   `M`   is one less than the number of children,
//   `N_c` is the fixed number of elements per child
//         (`elems_per_child`), and
//   `N_t` is the total number of elements in the subtree
//         (`total_elems_in_subtree`).
//
// `N_c` must always be a power of `REALM_MAX_BPNODE_SIZE`.
//
// It is expected that `N_t` will be removed in a future version of
// the file format. This will make it much more efficient to append
// elements to the B+-tree (or remove elements from the end).
//
// The last child of an inner node on the compact form may have fewer
// elements than `N_c`. All other children must have exactly `N_c`
// elements in them.
//
// When an inner node is on the general form and has only one child,
// it has an empty `offsets` array.
//
//
// B+-tree invariants:
//
//  - Every inner node must have at least one child
//    (invar:bptree-nonempty-inner).
//
//  - A leaf node that is not also a root node must contain at least
//    one element (invar:bptree-nonempty-leaf).
//
//  - All leaf nodes must reside at the same depth in the tree
//    (invar:bptree-leaf-depth).
//
//  - If an inner node is on the general form and has a parent, the
//    parent must also be on the general form
//    (invar:bptree-node-form).
//
// It follows from invar:bptree-nonempty-leaf that the root of an
// empty tree (zero elements) is a leaf.
//
// It follows from invar:bptree-nonempty-inner and
// invar:bptree-nonempty-leaf that in a tree with precisely one
// element, every inner node has precisely one child, there is
// precisely one leaf node, and that leaf node has precisely one
// element.
//
// It follows from invar:bptree-node-form that if the root is on the
// compact form, then so is every other inner node in the tree.
//
// In general, when the root node is an inner node, it will have at
// least two children, because otherwise it would be
// superfluous. However, to allow for exception safety during element
// insertion and removal, this shall not be guaranteed.
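
// The two width tables above can be restated in executable form. A minimal
// illustrative sketch (hypothetical helpers, not part of Realm's API; the bit
// positions follow the "12344555" flag byte as drawn above):
namespace header_format_sketch {
inline int width_from_ndx(int flags) noexcept
{
    int width_ndx = flags & 0x07;                     // low 3 bits: 'width_ndx'
    return width_ndx == 0 ? 0 : 1 << (width_ndx - 1); // 0, 1, 2, 4, ..., 64
}
inline size_t bytes_after_header(int flags, size_t size) noexcept
{
    int width_scheme = (flags >> 3) & 0x03; // next 2 bits: 'width_scheme'
    size_t width = width_from_ndx(flags);
    switch (width_scheme) {
        case 0:
            return (width * size + 7) / 8; // 'width' is a number of bits
        case 1:
            return width * size; // 'width' is a number of bytes
        default:
            return size; // 'width' is ignored
    }
}
} // namespace header_format_sketch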

// LIMITATION: The code below makes the non-portable assumption that
// negative numbers are represented using two's complement. This is not
// guaranteed by C++03, but holds for all known target platforms.
//
// LIMITATION: The code below makes the non-portable assumption that
// the types `int8_t`, `int16_t`, `int32_t`, and `int64_t`
// exist. This is not guaranteed by C++03, but holds for all
// known target platforms.
//
// LIMITATION: The code below makes the assumption that a reference into
// a realm file will never grow in size above what can be represented in
// a size_t, which is 2^31-1 on a 32-bit platform, and 2^63-1 on a 64-bit
// platform.

using namespace realm;
using namespace realm::util;

void QueryStateBase::dyncast() {}

uint8_t Array::bit_width(int64_t v)
{
    // FIXME: Assuming there is a 64-bit CPU reverse bitscan
    // instruction and it is fast, then this function could be
    // implemented as a table lookup on the result of the scan
    if ((uint64_t(v) >> 4) == 0) {
        static const int8_t bits[] = {0, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
        return bits[int8_t(v)];
    }
    if (v < 0)
        v = ~v;
    // Then check if bits 15-31 used (32b), 7-31 used (16b), else (8b)
    return uint64_t(v) >> 31 ? 64 : uint64_t(v) >> 15 ? 32 : uint64_t(v) >> 7 ? 16 : 8;
}
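
// For reference, some values this yields: bit_width(0) == 0, bit_width(1) == 1,
// bit_width(3) == 2, bit_width(100) == 8, bit_width(40000) == 32 and
// bit_width(-1) == 8 (negative inputs are bit-flipped first, and the shift
// cascade's smallest result is a byte).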

template <size_t width>
struct Array::VTableForWidth {
    struct PopulatedVTable : VTable {
        PopulatedVTable()
        {
            getter = &Array::get<width>;
            setter = &Array::set<width>;
            chunk_getter = &Array::get_chunk<width>;
            finder[cond_Equal] = &Array::find_vtable<Equal>;
            finder[cond_NotEqual] = &Array::find_vtable<NotEqual>;
            finder[cond_Greater] = &Array::find_vtable<Greater>;
            finder[cond_Less] = &Array::find_vtable<Less>;
        }
    };
    static const PopulatedVTable vtable;
};

template <size_t width>
const typename Array::VTableForWidth<width>::PopulatedVTable Array::VTableForWidth<width>::vtable;

void Array::init_from_mem(MemRef mem) noexcept
{
    // 'header' is whatever kind of header was allocated. If we are
    // decompressing, the header is of kind A, which somewhat defeats the
    // purpose of these checks: we will fetch data from the just-initialised
    // header, yet fields that matter for type A arrays, such as width and the
    // lower/upper bounds, are never reset, even though they are used both for
    // expanding the array and for querying the data.
    const auto header = mem.get_addr();
    const auto is_extended = m_integer_compressor.init(header);

    m_is_inner_bptree_node = get_is_inner_bptree_node_from_header(header);
    m_has_refs = get_hasrefs_from_header(header);
    m_context_flag = get_context_flag_from_header(header);

    if (is_extended) {
        m_ref = mem.get_ref();
        m_data = get_data_from_header(header);
        m_size = m_integer_compressor.size();
        m_width = m_integer_compressor.v_width();
        m_lbound = -m_integer_compressor.v_mask();
        m_ubound = m_integer_compressor.v_mask() - 1;
        m_integer_compressor.set_vtable(*this);
        m_getter = m_vtable->getter;
    }
    else {
        // Old init phase.
        Node::init_from_mem(mem);
        update_width_cache_from_header();
    }
}

MemRef Array::get_mem() const noexcept
{
    return MemRef(get_header_from_data(m_data), m_ref, m_alloc);
}

void Array::update_from_parent() noexcept
{
    ArrayParent* parent = get_parent();
    REALM_ASSERT_DEBUG(parent);
    ref_type new_ref = get_ref_from_parent();
    init_from_ref(new_ref);
}

void Array::set_type(Type type)
{
    REALM_ASSERT_DEBUG(is_attached());

    copy_on_write(); // Throws

    bool init_is_inner_bptree_node = false, init_has_refs = false;
    switch (type) {
        case type_Normal:
            break;
        case type_InnerBptreeNode:
            init_is_inner_bptree_node = true;
            init_has_refs = true;
            break;
        case type_HasRefs:
            init_has_refs = true;
            break;
    }
    m_is_inner_bptree_node = init_is_inner_bptree_node;
    m_has_refs = init_has_refs;

    char* header = get_header();
    set_is_inner_bptree_node_in_header(init_is_inner_bptree_node, header);
    set_hasrefs_in_header(init_has_refs, header);
}

void Array::destroy_children(size_t offset) noexcept
{
    for (size_t i = offset; i != m_size; ++i) {
        int64_t value = get(i);

        // Null-refs indicate empty sub-trees
        if (value == 0)
            continue;

        // A ref is always 8-byte aligned, so the lowest bit
        // cannot be set. If it is, it means that it should not be
        // interpreted as a ref.
        if ((value & 1) != 0)
            continue;

        ref_type ref = to_ref(value);
        destroy_deep(ref, m_alloc);
    }
}
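
// The ref-vs-value test above appears throughout this file. A minimal sketch
// of the convention (hypothetical helper, not part of Realm's API): refs are
// 8-byte aligned, so zero encodes a null ref (an empty subtree) and a set
// least significant bit marks a tagged integer rather than a ref.
namespace {
[[maybe_unused]] inline bool sketch_value_is_ref(int64_t value) noexcept
{
    return value != 0 && (value & 1) == 0;
}
} // anonymous namespace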

// size_t Array::get_byte_size() const noexcept
//{
//     const auto header = get_header();
//     auto num_bytes = get_byte_size_from_header(header);
//     auto read_only = m_alloc.is_read_only(m_ref) == true;
//     auto capacity = get_capacity_from_header(header);
//     auto bytes_ok = num_bytes <= capacity;
//     REALM_ASSERT(read_only || bytes_ok);
//     REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));
//     return num_bytes;
// }

ref_type Array::do_write_shallow(_impl::ArrayWriterBase& out) const
{
    // Here we might want to compress the array before writing it down.
    const char* header = get_header_from_data(m_data);
    size_t byte_size = get_byte_size();
    const auto compressed = is_compressed();
    uint32_t dummy_checksum = compressed ? 0x42424242UL : 0x41414141UL;                          // 'BBBB' : 'AAAA'
    uint32_t dummy_checksum_bytes = compressed ? 2 : 4; // only 2 bytes for extended arrays
    ref_type new_ref = out.write_array(header, byte_size, dummy_checksum, dummy_checksum_bytes); // Throws
    REALM_ASSERT_3(new_ref % 8, ==, 0);                                                          // 8-byte alignment
    return new_ref;
}


ref_type Array::do_write_deep(_impl::ArrayWriterBase& out, bool only_if_modified, bool compress) const
{
    // Temp array for updated refs
    Array new_array(Allocator::get_default());
    Type type = m_is_inner_bptree_node ? type_InnerBptreeNode : type_HasRefs;
    new_array.create(type, m_context_flag); // Throws
    _impl::ShallowArrayDestroyGuard dg(&new_array);

    // First write out all sub-arrays
    size_t n = size();
    for (size_t i = 0; i < n; ++i) {
        int_fast64_t value = get(i);
        bool is_ref = (value != 0 && (value & 1) == 0);
        if (is_ref) {
            ref_type subref = to_ref(value);
            ref_type new_subref = write(subref, m_alloc, out, only_if_modified, compress); // Throws
            value = from_ref(new_subref);
        }
        new_array.add(value); // Throws
    }
    return new_array.do_write_shallow(out); // Throws
}

void Array::move(size_t begin, size_t end, size_t dest_begin)
{
    REALM_ASSERT_3(begin, <=, end);
    REALM_ASSERT_3(end, <=, m_size);
    REALM_ASSERT_3(dest_begin, <=, m_size);
    REALM_ASSERT_3(end - begin, <=, m_size - dest_begin);
    REALM_ASSERT(!(dest_begin >= begin && dest_begin < end)); // Required by std::copy

    // Check if we need to copy before modifying
    copy_on_write(); // Throws

    size_t bits_per_elem = m_width;
    const char* header = get_header_from_data(m_data);
    if (get_wtype_from_header(header) == wtype_Multiply) {
        bits_per_elem *= 8;
    }

    if (bits_per_elem < 8) {
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i) {
            int_fast64_t v = m_getter(*this, i);
            m_vtable->setter(*this, dest_begin++, v);
        }
        return;
    }

    size_t bytes_per_elem = bits_per_elem / 8;
    const char* begin_2 = m_data + begin * bytes_per_elem;
    const char* end_2 = m_data + end * bytes_per_elem;
    char* dest_begin_2 = m_data + dest_begin * bytes_per_elem;
    realm::safe_copy_n(begin_2, end_2 - begin_2, dest_begin_2);
}

void Array::move(Array& dst, size_t ndx)
{
    size_t dest_begin = dst.m_size;
    size_t nb_to_move = m_size - ndx;
    dst.copy_on_write();
    dst.ensure_minimum_width(this->m_ubound);
    dst.alloc(dst.m_size + nb_to_move, dst.m_width); // Make room for the new elements

    // Cache variables used in the tight loop
    auto getter = m_getter;
    auto setter = dst.m_vtable->setter;
    size_t sz = m_size;

    for (size_t i = ndx; i < sz; i++) {
        auto v = getter(*this, i);
        setter(dst, dest_begin++, v);
    }

    truncate(ndx);
}

void Array::set(size_t ndx, int64_t value)
{
    REALM_ASSERT_3(ndx, <, m_size);
    if (m_vtable->getter(*this, ndx) == value)
        return;

    // Check if we need to copy before modifying
    copy_on_write(); // Throws
    // Grow the array if needed to store this value
    ensure_minimum_width(value); // Throws
    // Set the value
    m_vtable->setter(*this, ndx, value);
}
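
// Usage sketch for set() (an assumed flow, following ensure_minimum_width()
// and bit_width() above): writing a value outside the current bounds widens
// the whole array before the value is stored.
//
//     Array a(Allocator::get_default());
//     a.create(Array::type_Normal); // starts at width 0
//     a.add(1);                     // width grows to 1 bit
//     a.set(0, 1000);               // bit_width(1000) == 16, so all elements are expanded
//     a.destroy();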

void Array::set_as_ref(size_t ndx, ref_type ref)
{
    set(ndx, from_ref(ref));
}

/*
// Optimization for the common case of adding positive values to a local array
// (happens a lot when returning results to TableViews)
void Array::add_positive_local(int64_t value)
{
    REALM_ASSERT(value >= 0);
    REALM_ASSERT(&m_alloc == &Allocator::get_default());

    if (value <= m_ubound) {
        if (m_size < m_capacity) {
            (this->*(m_vtable->setter))(m_size, value);
            ++m_size;
            set_header_size(m_size);
            return;
        }
    }

    insert(m_size, value);
}
*/

size_t Array::blob_size() const noexcept
{
    if (get_context_flag()) {
        size_t total_size = 0;
        for (size_t i = 0; i < size(); ++i) {
            char* header = m_alloc.translate(Array::get_as_ref(i));
            total_size += Array::get_size_from_header(header);
        }
        return total_size;
    }
    else {
        return m_size;
    }
}

void Array::insert(size_t ndx, int_fast64_t value)
{
    REALM_ASSERT_DEBUG(ndx <= m_size);

    decompress_array(*this);
    const auto old_width = m_width;
    const auto old_size = m_size;
    const Getter old_getter = m_getter; // Save old getter before potential width expansion

    bool do_expand = value < m_lbound || value > m_ubound;
    if (do_expand) {
        size_t width = bit_width(value);
        REALM_ASSERT_DEBUG(width > m_width);
        alloc(m_size + 1, width); // Throws
    }
    else {
        alloc(m_size + 1, m_width); // Throws
    }

    // Move values below insertion (may expand)
    if (do_expand || old_width < 8) {
        size_t i = old_size;
        while (i > ndx) {
            --i;
            int64_t v = old_getter(*this, i);
            m_vtable->setter(*this, i + 1, v);
        }
    }
    else if (ndx != old_size) {
        // when byte-sized and no expansion, use memmove
        // FIXME: Optimize by simply dividing by 8 (or shifting right by 3 bit positions)
        size_t w = (old_width == 64) ? 8 : (old_width == 32) ? 4 : (old_width == 16) ? 2 : 1;
        char* src_begin = m_data + ndx * w;
        char* src_end = m_data + old_size * w;
        char* dst_end = src_end + w;
        std::copy_backward(src_begin, src_end, dst_end);
    }

    // Insert the new value
    m_vtable->setter(*this, ndx, value);

    // Expand values above insertion
    if (do_expand) {
        size_t i = ndx;
        while (i != 0) {
            --i;
            int64_t v = old_getter(*this, i);
            m_vtable->setter(*this, i, v);
        }
    }
}

void Array::copy_on_write()
{
    if (is_read_only() && !decompress_array(*this))
        Node::copy_on_write();
}

void Array::copy_on_write(size_t min_size)
{
    if (is_read_only() && !decompress_array(*this))
        Node::copy_on_write(min_size);
}

void Array::truncate(size_t new_size)
{
    REALM_ASSERT(is_attached());
    REALM_ASSERT_3(new_size, <=, m_size);

    if (new_size == m_size)
        return;

    copy_on_write(); // Throws

    // Update size in accessor and in header. This leaves the capacity
    // unchanged.
    m_size = new_size;
    set_header_size(new_size);

    // If the array is completely cleared, we take the opportunity to
    // drop the width back to zero.
    if (new_size == 0) {
        set_width_in_header(0, get_header());
        update_width_cache_from_header();
    }
}

void Array::truncate_and_destroy_children(size_t new_size)
{
    REALM_ASSERT(is_attached());
    REALM_ASSERT_3(new_size, <=, m_size);

    if (new_size == m_size)
        return;

    copy_on_write(); // Throws

    if (m_has_refs) {
        size_t offset = new_size;
        destroy_children(offset);
    }

    // Update size in accessor and in header. This leaves the capacity
    // unchanged.
    m_size = new_size;
    set_header_size(new_size);

    // If the array is completely cleared, we take the opportunity to
    // drop the width back to zero.
    if (new_size == 0) {
        set_width_in_header(0, get_header());
        update_width_cache_from_header();
    }
}

void Array::do_ensure_minimum_width(int_fast64_t value)
{
    // Make room for the new value
    const size_t width = bit_width(value);

    REALM_ASSERT_3(width, >, m_width);

    Getter old_getter = m_getter; // Save old getter before width expansion
    alloc(m_size, width);         // Throws

    // Expand the old values
    size_t i = m_size;
    while (i != 0) {
        --i;
        int64_t v = old_getter(*this, i);
        m_vtable->setter(*this, i, v);
    }
}

bool Array::compress_array(Array& arr) const
{
    if (m_integer_compressor.get_encoding() == NodeHeader::Encoding::WTypBits) {
        return m_integer_compressor.compress(*this, arr);
    }
    return false;
}

bool Array::decompress_array(Array& arr) const
{
    return arr.is_compressed() ? m_integer_compressor.decompress(arr) : false;
}

bool Array::try_compress(Array& arr) const
{
    return compress_array(arr);
}

bool Array::try_decompress()
{
    return decompress_array(*this);
}

size_t Array::calc_aligned_byte_size(size_t size, int width)
{
    REALM_ASSERT(width != 0 && (width & (width - 1)) == 0); // Is a power of two
    size_t max = std::numeric_limits<size_t>::max();
    size_t max_2 = max & ~size_t(7); // Allow for upwards 8-byte alignment
    bool overflow;
    size_t byte_size;
    if (width < 8) {
        size_t elems_per_byte = 8 / width;
        size_t byte_size_0 = size / elems_per_byte;
        if (size % elems_per_byte != 0)
            ++byte_size_0;
        overflow = byte_size_0 > max_2 - header_size;
        byte_size = header_size + byte_size_0;
    }
    else {
        size_t bytes_per_elem = width / 8;
        overflow = size > (max_2 - header_size) / bytes_per_elem;
        byte_size = header_size + size * bytes_per_elem;
    }
    if (overflow)
        throw std::overflow_error("Byte size overflow");
    REALM_ASSERT_3(byte_size, >, 0);
    size_t aligned_byte_size = ((byte_size - 1) | 7) + 1; // 8-byte alignment
    return aligned_byte_size;
}
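
// Worked example for calc_aligned_byte_size(): with width = 2 bits and
// size = 100 elements, elems_per_byte = 4, byte_size_0 = ceil(100 / 4) = 25,
// byte_size = header_size (8) + 25 = 33, and the 8-byte alignment rounds this
// up to ((33 - 1) | 7) + 1 = 40 bytes.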

MemRef Array::clone(MemRef mem, Allocator& alloc, Allocator& target_alloc)
{
    const char* header = mem.get_addr();
    if (!get_hasrefs_from_header(header)) {
        // This array has no subarrays, so we can make a byte-for-byte
        // copy, which is more efficient.

        // Calculate size of new array in bytes
        size_t size = get_byte_size_from_header(header);

        // Create the new array
        MemRef clone_mem = target_alloc.alloc(size); // Throws
        char* clone_header = clone_mem.get_addr();

        // Copy contents
        const char* src_begin = header;
        const char* src_end = header + size;
        char* dst_begin = clone_header;
        realm::safe_copy_n(src_begin, src_end - src_begin, dst_begin);

        // Update with correct capacity
        set_capacity_in_header(size, clone_header);

        return clone_mem;
    }

    // Refs are integers, and integer arrays use wtype_Bits.
    REALM_ASSERT_3(get_wtype_from_header(header), ==, wtype_Bits);

    Array array{alloc};
    array.init_from_mem(mem);

    // Create new empty array of refs
    Array new_array(target_alloc);
    _impl::DeepArrayDestroyGuard dg(&new_array);
    Type type = get_type_from_header(header);
    bool context_flag = get_context_flag_from_header(header);
    new_array.create(type, context_flag); // Throws

    _impl::DeepArrayRefDestroyGuard dg_2(target_alloc);
    size_t n = array.size();
    for (size_t i = 0; i != n; ++i) {
        int_fast64_t value = array.get(i);

        // Null-refs signify empty subtrees. Also, all refs are
        // 8-byte aligned, so the lowest bits cannot be set. If they
        // are, it means that it should not be interpreted as a ref.
        bool is_subarray = value != 0 && (value & 1) == 0;
        if (!is_subarray) {
            new_array.add(value); // Throws
            continue;
        }

        ref_type ref = to_ref(value);
        MemRef new_mem = clone(MemRef(ref, alloc), alloc, target_alloc); // Throws
        dg_2.reset(new_mem.get_ref());
        value = from_ref(new_mem.get_ref());
        new_array.add(value); // Throws
        dg_2.release();
    }

    dg.release();
    return new_array.get_mem();
}

MemRef Array::create(Type type, bool context_flag, WidthType width_type, size_t size, int_fast64_t value,
                     Allocator& alloc)
{
    REALM_ASSERT_DEBUG(value == 0 || width_type == wtype_Bits);
    REALM_ASSERT_DEBUG(size == 0 || width_type != wtype_Ignore);
    uint8_t width = 0;
    if (value != 0)
        width = bit_width(value);
    auto mem = Node::create_node(size, alloc, context_flag, type, width_type, width);
    if (value != 0) {
        const auto header = mem.get_addr();
        char* data = get_data_from_header(header);
        size_t begin = 0, end = size;
        REALM_TEMPEX(fill_direct, width, (data, begin, end, value));
    }
    return mem;
}
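
// Usage sketch for the static create() above (a minimal sketch assuming the
// default allocator; the helper name is hypothetical): build a leaf of 16
// elements, all initialized to 3, then release it.
namespace {
[[maybe_unused]] void create_sketch()
{
    Allocator& alloc = Allocator::get_default();
    MemRef mem = Array::create(Array::type_Normal, false, Array::wtype_Bits, 16, 3, alloc); // Throws
    Array arr(alloc);
    arr.init_from_mem(mem);
    REALM_ASSERT(arr.size() == 16 && arr.get(0) == 3);
    arr.destroy();
}
} // anonymous namespace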

// This is the one installed into the m_vtable->finder slots.
template <class cond>
bool Array::find_vtable(const Array& arr, int64_t value, size_t start, size_t end, size_t baseindex,
                        QueryStateBase* state)
{
    REALM_TEMPEX2(return ArrayWithFind(arr).find_optimized, cond, arr.m_width, (value, start, end, baseindex, state));
}

void Array::update_width_cache_from_header() noexcept
{
    m_width = get_width_from_header(get_header());
    m_lbound = lbound_for_width(m_width);
    m_ubound = ubound_for_width(m_width);
    REALM_ASSERT_DEBUG(m_lbound <= m_ubound);
    REALM_ASSERT_DEBUG(m_width >= m_lbound);
    REALM_ASSERT_DEBUG(m_width <= m_ubound);
    REALM_TEMPEX(m_vtable = &VTableForWidth, m_width, ::vtable);
    m_getter = m_vtable->getter;
}
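
// REALM_TEMPEX expands, roughly, into a switch over m_width that selects the
// matching template instantiation. A simplified sketch of the idea:
//
//     switch (m_width) {
//         case 0: m_vtable = &VTableForWidth<0>::vtable; break;
//         case 1: m_vtable = &VTableForWidth<1>::vtable; break;
//         case 2: m_vtable = &VTableForWidth<2>::vtable; break;
//         case 4: m_vtable = &VTableForWidth<4>::vtable; break;
//         // ... and likewise for widths 8, 16, 32 and 64
//     }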

// This method reads 8 consecutive values into res[8], starting from index 'ndx'.
// The 8 values are allowed to exceed the array length; in that case, the
// remainder of res[8] will be set to 0.
template <size_t w>
void Array::get_chunk(const Array& arr, size_t ndx, int64_t res[8]) noexcept
{
    auto sz = arr.size();
    REALM_ASSERT_3(ndx, <, sz);
    size_t i = 0;

    // if constexpr to avoid producing spurious warnings resulting from
    // instantiating for too large w
    if constexpr (w > 0 && w <= 4) {
        // Calling get<w>() in a loop results in one load per call to get, but
        // for w < 8 we can do better than that
        constexpr size_t elements_per_byte = 8 / w;

        // Round m_size down to byte granularity as the trailing bits in the last
        // byte are uninitialized
        size_t bytes_available = sz / elements_per_byte;

        // Round start and end to be byte-aligned. Start is rounded down and
        // end is rounded up as we may read up to 7 unused bits at each end.
        size_t start = ndx / elements_per_byte;
        size_t end = std::min(bytes_available, (ndx + 8 + elements_per_byte - 1) / elements_per_byte);

        if (end > start) {
            // Loop in reverse order because data is stored in little endian order
            uint64_t c = 0;
            for (size_t i = end; i > start; --i) {
                c <<= 8;
                c += *reinterpret_cast<const uint8_t*>(arr.m_data + i - 1);
            }
            // Trim off leading bits which aren't part of the requested range
            c >>= (ndx - start * elements_per_byte) * w;

            uint64_t mask = (1ULL << w) - 1ULL;
            res[0] = (c >> 0 * w) & mask;
            res[1] = (c >> 1 * w) & mask;
            res[2] = (c >> 2 * w) & mask;
            res[3] = (c >> 3 * w) & mask;
            res[4] = (c >> 4 * w) & mask;
            res[5] = (c >> 5 * w) & mask;
            res[6] = (c >> 6 * w) & mask;
            res[7] = (c >> 7 * w) & mask;

            // Read the last few elements via get<w> if needed
            i = std::min<size_t>(8, end * elements_per_byte - ndx);
        }
    }

    for (; i + ndx < sz && i < 8; i++)
        res[i] = get<w>(arr, ndx + i);
    for (; i < 8; i++)
        res[i] = 0;

#ifdef REALM_DEBUG
    for (int j = 0; j + ndx < sz && j < 8; j++) {
        int64_t expected = Array::get_universal<w>(arr.m_data, ndx + j);
        REALM_ASSERT(res[j] == expected);
    }
#endif
}
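
// Callers are expected to reach get_chunk() through the vtable slot populated
// above rather than naming a width themselves; an assumed usage pattern:
//
//     int64_t res[8];
//     arr.m_vtable->chunk_getter(arr, ndx, res); // res[i] == arr.get(ndx + i), zero-padded past the end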

template <>
void Array::get_chunk<0>(const Array& arr, size_t ndx, int64_t res[8]) noexcept
{
    REALM_ASSERT_3(ndx, <, arr.m_size);
    memset(res, 0, sizeof(int64_t) * 8);
}


template <size_t width>
void Array::set(Array& arr, size_t ndx, int64_t value)
{
    realm::set_direct<width>(arr.m_data, ndx, value);
}

void Array::_mem_usage(size_t& mem) const noexcept
{
    mem += get_byte_size();
    if (m_has_refs) {
        for (size_t i = 0; i < m_size; ++i) {
            int64_t val = get(i);
            if (val && !(val & 1)) {
                Array subarray(m_alloc);
                subarray.init_from_ref(to_ref(val));
                subarray._mem_usage(mem);
            }
        }
    }
}

#ifdef REALM_DEBUG
namespace {

class MemStatsHandler : public Array::MemUsageHandler {
public:
    MemStatsHandler(MemStats& stats) noexcept
        : m_stats(stats)
    {
    }
    void handle(ref_type, size_t allocated, size_t used) noexcept override
    {
        m_stats.allocated += allocated;
        m_stats.used += used;
        m_stats.array_count += 1;
    }

private:
    MemStats& m_stats;
};

} // anonymous namespace


void Array::stats(MemStats& stats_dest) const noexcept
{
    MemStatsHandler handler(stats_dest);
    report_memory_usage(handler);
}


void Array::report_memory_usage(MemUsageHandler& handler) const
{
    if (m_has_refs)
        report_memory_usage_2(handler); // Throws

    size_t used = get_byte_size();
    size_t allocated;
    if (m_alloc.is_read_only(m_ref)) {
        allocated = used;
    }
    else {
        char* header = get_header_from_data(m_data);
        allocated = get_capacity_from_header(header);
    }
    handler.handle(m_ref, allocated, used); // Throws
}


void Array::report_memory_usage_2(MemUsageHandler& handler) const
{
    Array subarray(m_alloc);
    for (size_t i = 0; i < m_size; ++i) {
        int_fast64_t value = get(i);
        // Skip null refs and values that are not refs. Values are not refs when
        // the least significant bit is set.
        if (value == 0 || (value & 1) == 1)
            continue;

        size_t used;
        ref_type ref = to_ref(value);
        char* header = m_alloc.translate(ref);
        bool array_has_refs = get_hasrefs_from_header(header);
        if (array_has_refs) {
            MemRef mem(header, ref, m_alloc);
            subarray.init_from_mem(mem);
            subarray.report_memory_usage_2(handler); // Throws
            used = subarray.get_byte_size();
        }
        else {
            used = get_byte_size_from_header(header);
        }

        size_t allocated;
        if (m_alloc.is_read_only(ref)) {
            allocated = used;
        }
        else {
            allocated = get_capacity_from_header(header);
        }
        handler.handle(ref, allocated, used); // Throws
    }
}
#endif

void Array::verify() const
{
#ifdef REALM_DEBUG

    REALM_ASSERT(is_attached());
    if (!wtype_is_extended(get_header())) {
        REALM_ASSERT(m_width == 0 || m_width == 1 || m_width == 2 || m_width == 4 || m_width == 8 || m_width == 16 ||
                     m_width == 32 || m_width == 64);
    }
    else {
        REALM_ASSERT(m_width <= 64);
    }

    if (!get_parent())
        return;

    // Check that parent is set correctly
    ref_type ref_in_parent = get_ref_from_parent();
    REALM_ASSERT_3(ref_in_parent, ==, m_ref);
#endif
}

size_t Array::lower_bound_int(int64_t value) const noexcept
{
    if (is_compressed())
        return lower_bound_int_compressed(value);
    REALM_TEMPEX(return lower_bound, m_width, (m_data, m_size, value));
}

size_t Array::upper_bound_int(int64_t value) const noexcept
{
    if (is_compressed())
        return upper_bound_int_compressed(value);
    REALM_TEMPEX(return upper_bound, m_width, (m_data, m_size, value));
}

size_t Array::lower_bound_int_compressed(int64_t value) const noexcept
{
    static impl::CompressedDataFetcher<IntegerCompressor> encoder;
    encoder.ptr = &m_integer_compressor;
    return lower_bound(m_data, m_size, value, encoder);
}

size_t Array::upper_bound_int_compressed(int64_t value) const noexcept
{
    static impl::CompressedDataFetcher<IntegerCompressor> encoder;
    encoder.ptr = &m_integer_compressor;
    return upper_bound(m_data, m_size, value, encoder);
}
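
// Usage sketch (assumption: the array holds sorted integers, as any binary
// search requires; the helper is hypothetical): counting the elements equal
// to a given value.
namespace {
[[maybe_unused]] size_t count_equal_sorted_sketch(const Array& sorted, int64_t value)
{
    size_t first = sorted.lower_bound_int(value); // first index with element >= value
    size_t last = sorted.upper_bound_int(value);  // first index with element > value
    return last - first;
}
} // anonymous namespace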

int_fast64_t Array::get(const char* header, size_t ndx) noexcept
{
    // This is very important. Most of the time we end up here because we are
    // traversing the cluster. The keys/refs in the cluster are not compressed
    // (because there is almost no gain), so the intent is to pollute cluster
    // traversal as little as possible. We need to check the header wtype and
    // initialise the integer compressor only if needed; otherwise we should
    // just call get_direct. On average this adds one more access to the
    // header while traversing the cluster tree.
    if (REALM_LIKELY(!NodeHeader::wtype_is_extended(header))) {
        const char* data = get_data_from_header(header);
        uint_least8_t width = get_width_from_header(header);
        return get_direct(data, width, ndx);
    }
    // Ideally, we would not want to construct a compressor every time we end
    // up here. However, compressor initialization should be fast enough.
    // Creating an array, which owns a compressor internally, is the better
    // approach if we intend to access the same data over and over again: the
    // compressor basically caches the most important information about the
    // layout of the data itself.
    IntegerCompressor s_compressor;
    s_compressor.init(header);
    return s_compressor.get(ndx);
}

std::pair<int64_t, int64_t> Array::get_two(const char* header, size_t ndx) noexcept
{
    return std::make_pair(get(header, ndx), get(header, ndx + 1));
}

bool QueryStateCount::match(size_t, Mixed) noexcept
{
    ++m_match_count;
    return (m_limit > m_match_count);
}

bool QueryStateCount::match(size_t) noexcept
{
    ++m_match_count;
    return (m_limit > m_match_count);
}

bool QueryStateFindFirst::match(size_t index, Mixed) noexcept
{
    m_match_count++;
    m_state = index;
    return false;
}

bool QueryStateFindFirst::match(size_t index) noexcept
{
    ++m_match_count;
    m_state = index;
    return false;
}

template <>
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index, Mixed) noexcept
{
    ++m_match_count;

    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
    m_keys.push_back(ObjKey(key_value));

    return (m_limit > m_match_count);
}

template <>
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index) noexcept
{
    ++m_match_count;
    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
    m_keys.push_back(ObjKey(key_value));
    return (m_limit > m_match_count);
}

template <>
bool QueryStateFindAll<IntegerColumn>::match(size_t index, Mixed) noexcept
{
    ++m_match_count;
    m_keys.add(index);

    return (m_limit > m_match_count);
}

template <>
bool QueryStateFindAll<IntegerColumn>::match(size_t index) noexcept
{
    ++m_match_count;
    m_keys.add(index);

    return (m_limit > m_match_count);
}

void Array::typed_print(std::string prefix) const
{
    std::cout << "Generic Array " << header_to_string(get_header()) << " @ " << m_ref;
    if (!is_attached()) {
        std::cout << " Unattached";
        return;
    }
    if (size() == 0) {
        std::cout << " Empty" << std::endl;
        return;
    }
    std::cout << " size = " << size() << " {";
    if (has_refs()) {
        std::cout << std::endl;
        for (unsigned n = 0; n < size(); ++n) {
            auto pref = prefix + "  " + to_string(n) + ":\t";
            RefOrTagged rot = get_as_ref_or_tagged(n);
            if (rot.is_ref() && rot.get_as_ref()) {
                Array a(m_alloc);
                a.init_from_ref(rot.get_as_ref());
                std::cout << pref;
                a.typed_print(pref);
            }
            else if (rot.is_tagged()) {
                std::cout << pref << rot.get_as_int() << std::endl;
            }
        }
        std::cout << prefix << "}" << std::endl;
    }
    else {
        std::cout << " Leaf of unknown type }" << std::endl;
    }
}

ref_type ArrayPayload::typed_write(ref_type ref, _impl::ArrayWriterBase& out, Allocator& alloc)
{
    Array arr(alloc);
    arr.init_from_ref(ref);
    // By default we are not compressing
    constexpr bool compress = false;
    return arr.write(out, true, out.only_modified, compress);
}