• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / thomas.goyne_112

27 Oct 2023 10:49AM UTC coverage: 91.586% (+0.02%) from 91.571%
thomas.goyne_112

push

Evergreen

web-flow
Merge pull request #7085 from realm/release/13.23.2

Release/13.23.2

91754 of 168238 branches covered (0.0%)

230143 of 251285 relevant lines covered (91.59%)

7082763.83 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.13
/src/realm/array.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <realm/array_with_find.hpp>
20
#include <realm/utilities.hpp>
21
#include <realm/impl/destroy_guard.hpp>
22
#include <realm/column_integer.hpp>
23
#include <realm/bplustree.hpp>
24
#include <realm/query_conditions.hpp>
25
#include <realm/array_integer.hpp>
26
#include <realm/array_key.hpp>
27
#include <realm/impl/array_writer.hpp>
28

29
#include <array>
30
#include <cstring> // std::memcpy
31
#include <iomanip>
32
#include <limits>
33
#include <tuple>
34

35
#ifdef REALM_DEBUG
36
#include <iostream>
37
#include <sstream>
38
#endif
39

40
#ifdef _MSC_VER
41
#include <intrin.h>
42
#pragma warning(disable : 4127) // Condition is constant warning
43
#endif
44

45

46
// Header format (8 bytes):
47
// ------------------------
48
//
49
// In mutable part / outside file:
50
//
51
// |--------|--------|--------|--------|--------|--------|--------|--------|
52
// |         capacity         |reserved|12344555|           size           |
53
//
54
//
55
// In immutable part / in file:
56
//
57
// |--------|--------|--------|--------|--------|--------|--------|--------|
58
// |             checksum              |12344555|           size           |
59
//
60
//
61
//  1: 'is_inner_bptree_node' (inner node of B+-tree).
62
//
63
//  2: 'has_refs' (elements whose first bit is zero are refs to subarrays).
64
//
65
//  3: 'context_flag' (meaning depends on context)
66
//
67
//  4: 'width_scheme' (2 bits)
68
//
69
//      value  |  meaning of 'width'  |  number of bytes used after header
70
//      -------|----------------------|------------------------------------
71
//        0    |  number of bits      |  ceil(width * size / 8)
72
//        1    |  number of bytes     |  width * size
73
//        2    |  ignored             |  size
74
//
75
//  5: 'width_ndx' (3 bits)
76
//
77
//      'width_ndx'       |  0 |  1 |  2 |  3 |  4 |  5 |  6 |  7 |
78
//      ------------------|----|----|----|----|----|----|----|----|
79
//      value of 'width'  |  0 |  1 |  2 |  4 |  8 | 16 | 32 | 64 |
80
//
81
//
82
// 'capacity' is the total number of bytes allocated for this array
83
// including the header.
84
//
85
// 'size' (aka length) is the number of elements in the array.
86
//
87
// 'checksum' (not yet implemented) is the checksum of the array
88
// including the header.
89
//
90
//
91
// Inner node of B+-tree:
92
// ----------------------
93
//
94
// An inner node of a B+-tree has one of two forms: The 'compact'
95
// form which uses a single array node, or the 'general' form which
96
// uses two. The compact form is used by default but is converted to
97
// the general form when the corresponding subtree is modified in
98
// certain ways. There are two kinds of modification that require
99
// conversion to the general form:
100
//
101
//  - Insertion of an element into the corresponding subtree, except
102
//    when insertion occurs after the last element in the subtree
103
//    (append).
104
//
105
//  - Removal of an element from the corresponding subtree, except
106
//    when the removed element is the last element in the subtree.
107
//
108
// Compact form:
109
//
110
//   --> | N_c | r_1 | r_2 | ... | r_N | N_t |
111
//
112
// General form:
113
//
114
//   --> |  .  | r_1 | r_2 | ... | r_N | N_t |  (main array node)
115
//          |
116
//           ------> | o_2 | ... | o_N |  (offsets array node)
117
//
118
// Here,
119
//   `r_i` is the i'th child ref,
120
//   `o_i` is the total number of elements preceding the i'th child,
121
//   `N`   is the number of children,
122
//   'M'   is one less than the number of children,
123
//   `N_c` is the fixed number of elements per child
124
//         (`elems_per_child`), and
125
//   `N_t` is the total number of elements in the subtree
126
//         (`total_elems_in_subtree`).
127
//
128
// `N_c` must always be a power of `REALM_MAX_BPNODE_SIZE`.
129
//
130
// It is expected that `N_t` will be removed in a future version of
131
// the file format. This will make it much more efficient to append
132
// elements to the B+-tree (or remove elements from the end).
133
//
134
// The last child of an inner node on the compact form, may have fewer
135
// elements than `N_c`. All other children must have exactly `N_c`
136
// elements in them.
137
//
138
// When an inner node is on the general form, and has only one child,
139
// it has an empty `offsets` array.
140
//
141
//
142
// B+-tree invariants:
143
//
144
//  - Every inner node must have at least one child
145
//    (invar:bptree-nonempty-inner).
146
//
147
//  - A leaf node, that is not also a root node, must contain at least
148
//    one element (invar:bptree-nonempty-leaf).
149
//
150
//  - All leaf nodes must reside at the same depth in the tree
151
//    (invar:bptree-leaf-depth).
152
//
153
//  - If an inner node is on the general form, and has a parent, the
154
//    parent must also be on the general form
155
//    (invar:bptree-node-form).
156
//
157
// It follows from invar:bptree-nonempty-leaf that the root of an
158
// empty tree (zero elements) is a leaf.
159
//
160
// It follows from invar:bptree-nonempty-inner and
161
// invar:bptree-nonempty-leaf that in a tree with precisely one
162
// element, every inner node has precisely one child, there is
163
// precisely one leaf node, and that leaf node has precisely one
164
// element.
165
//
166
// It follows from invar:bptree-node-form that if the root is on the
167
// compact form, then so is every other inner node in the tree.
168
//
169
// In general, when the root node is an inner node, it will have at
170
// least two children, because otherwise it would be
171
// superflous. However, to allow for exception safety during element
172
// insertion and removal, this shall not be guaranteed.
173

174
// LIMITATION: The code below makes the non-portable assumption that
175
// negative numbers are represented using two's complement. This is not
176
// guaranteed by C++03, but holds for all known target platforms.
177
//
178
// LIMITATION: The code below makes the non-portable assumption that
179
// the types `int8_t`, `int16_t`, `int32_t`, and `int64_t`
180
// exist. This is not guaranteed by C++03, but holds for all
181
// known target platforms.
182
//
183
// LIMITATION: The code below makes the assumption that a reference into
184
// a realm file will never grow in size above what can be represented in
185
// a size_t, which is 2^31-1 on a 32-bit platform, and 2^63-1 on a 64 bit
186
// platform.
187

188
using namespace realm;
189
using namespace realm::util;
190

191
// Deliberately empty out-of-line virtual; presumably serves as the "key
// function" that anchors QueryStateBase's vtable/typeinfo in this
// translation unit — TODO confirm against the class declaration.
void QueryStateBase::dyncast() {}
192

25,789,209✔
193
// Return the smallest storage width (0, 1, 2, 4, 8, 16, 32 or 64 bits)
// capable of holding `value`.
size_t Array::bit_width(int64_t value)
{
    // FIXME: Assuming there is a 64-bit CPU reverse bitscan
    // instruction and it is fast, then this function could be
    // implemented as a table lookup on the result of the scan

    // Small non-negative values [0, 15] are resolved via a lookup table.
    if ((uint64_t(value) >> 4) == 0) {
        static const int8_t small_widths[] = {0, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
        return small_widths[int8_t(value)];
    }

    // A negative value needs exactly as many bits as its bitwise
    // complement (two's complement), so flip it; bit 63 is now clear.
    if (value < 0)
        value = ~value;

    // Pick the narrowest signed width whose range still covers the value.
    if (uint64_t(value) >> 31)
        return 64;
    if (uint64_t(value) >> 15)
        return 32;
    if (uint64_t(value) >> 7)
        return 16;
    return 8;
}
211

212

747,028,356✔
213
// Attach this accessor to the array located at `mem` and refresh every
// piece of state cached from the header.
void Array::init_from_mem(MemRef mem) noexcept
{
    // The base class attaches to the memory and hands back the header.
    char* const hdr = Node::init_from_mem(mem);

    // Cache the per-array flags stored in the header.
    m_context_flag = get_context_flag_from_header(hdr);
    m_has_refs = get_hasrefs_from_header(hdr);
    m_is_inner_bptree_node = get_is_inner_bptree_node_from_header(hdr);

    // Recompute the width-dependent cached state.
    update_width_cache_from_header();
}
222

9,123,504✔
223
void Array::update_from_parent() noexcept
9,123,504✔
224
{
9,123,504✔
225
    REALM_ASSERT_DEBUG(is_attached());
9,123,504✔
226
    ArrayParent* parent = get_parent();
9,123,504✔
227
    REALM_ASSERT_DEBUG(parent);
9,123,504✔
228
    ref_type new_ref = get_ref_from_parent();
9,123,504✔
229
    init_from_ref(new_ref);
230
}
231

2,958✔
232
// Change the type flags of this array, updating both the cached members
// and the header. Triggers copy-on-write before modifying anything.
void Array::set_type(Type type)
{
    REALM_ASSERT(is_attached());

    copy_on_write(); // Throws

    // Derive the two header flags from the requested type. Inner B+-tree
    // nodes always contain refs; plain arrays contain neither.
    const bool inner_bptree = (type == type_InnerBptreeNode);
    const bool has_refs = (type == type_InnerBptreeNode || type == type_HasRefs);

    m_is_inner_bptree_node = inner_bptree;
    m_has_refs = has_refs;

    char* hdr = get_header();
    set_is_inner_bptree_node_in_header(inner_bptree, hdr);
    set_hasrefs_in_header(has_refs, hdr);
}
257

258

203,496✔
259
void Array::destroy_children(size_t offset) noexcept
1,389,957✔
260
{
1,186,461✔
261
    for (size_t i = offset; i != m_size; ++i) {
568,839✔
262
        int64_t value = get(i);
568,839✔
263

1,186,461✔
264
        // Null-refs indicate empty sub-trees
175,476✔
265
        if (value == 0)
482,829✔
266
            continue;
482,829✔
267

482,829✔
268
        // A ref is always 8-byte aligned, so the lowest bit
482,829✔
269
        // cannot be set. If it is, it means that it should not be
1,010,985✔
270
        // interpreted as a ref.
182,787✔
271
        if ((value & 1) != 0)
392,970✔
272
            continue;
828,198✔
273

828,198✔
274
        ref_type ref = to_ref(value);
828,198✔
275
        destroy_deep(ref, m_alloc);
203,496✔
276
    }
277
}
278

279

23,196,453✔
280
// Write this array (header + payload, no children) through `out` and
// return the ref at which it was stored.
ref_type Array::do_write_shallow(_impl::ArrayWriterBase& out) const
{
    // The checksum is not implemented yet (see the header-format notes at
    // the top of this file); a fixed placeholder is written instead.
    const uint32_t placeholder_checksum = 0x41414141UL; // "AAAA" in ASCII
    const char* hdr = get_header_from_data(m_data);
    size_t bytes = get_byte_size();
    ref_type written_ref = out.write_array(hdr, bytes, placeholder_checksum); // Throws
    REALM_ASSERT_3(written_ref % 8, ==, 0); // refs are 8-byte aligned
    return written_ref;
}
290

291

8,141,046✔
292
// Write this array and, recursively, every array it references. Children
// are written first; their relocated refs are collected into a scratch
// copy of this array, which is then written shallowly.
ref_type Array::do_write_deep(_impl::ArrayWriterBase& out, bool only_if_modified) const
{
    // Scratch array that will hold the updated child refs.
    Array scratch(Allocator::get_default());
    Type scratch_type = m_is_inner_bptree_node ? type_InnerBptreeNode : type_HasRefs;
    scratch.create(scratch_type, m_context_flag); // Throws
    _impl::ShallowArrayDestroyGuard dg(&scratch);

    // Write out each subarray and record its new ref; non-ref entries
    // (zero or tagged values) are copied through unchanged.
    size_t count = size();
    for (size_t ndx = 0; ndx < count; ++ndx) {
        int_fast64_t entry = get(ndx);
        bool entry_is_ref = (entry != 0 && (entry & 1) == 0);
        if (entry_is_ref) {
            ref_type child = to_ref(entry);
            ref_type relocated = write(child, m_alloc, out, only_if_modified); // Throws
            entry = from_ref(relocated);
        }
        scratch.add(entry); // Throws
    }

    return scratch.do_write_shallow(out); // Throws
}
315

316

10,490,490✔
317
// Move the elements in [begin, end) to the positions starting at
// dest_begin within this same array. The destination must not lie inside
// the source range (std::copy requirement, asserted below).
void Array::move(size_t begin, size_t end, size_t dest_begin)
{
    REALM_ASSERT_3(begin, <=, end);
    REALM_ASSERT_3(end, <=, m_size);
    REALM_ASSERT_3(dest_begin, <=, m_size);
    REALM_ASSERT_3(end - begin, <=, m_size - dest_begin);
    REALM_ASSERT(!(dest_begin >= begin && dest_begin < end)); // Required by std::copy

    // Check if we need to copy before modifying
    copy_on_write(); // Throws

    // In the 'Multiply' width scheme, 'width' counts bytes rather than
    // bits (see the header-format notes at the top of this file), so
    // convert to bits before deciding how to copy.
    size_t bits_per_elem = m_width;
    const char* header = get_header_from_data(m_data);
    if (get_wtype_from_header(header) == wtype_Multiply) {
        bits_per_elem *= 8;
    }

    if (bits_per_elem < 8) {
        // FIXME: Should be optimized
        // Sub-byte elements cannot be moved by a byte-wise copy, so fall
        // back to moving one element at a time through getter/setter.
        for (size_t i = begin; i != end; ++i) {
            int_fast64_t v = (this->*m_getter)(i);
            (this->*(m_vtable->setter))(dest_begin++, v);
        }
        return;
    }

    // Whole-byte elements: move the payload with a single bulk copy.
    size_t bytes_per_elem = bits_per_elem / 8;
    const char* begin_2 = m_data + begin * bytes_per_elem;
    const char* end_2 = m_data + end * bytes_per_elem;
    char* dest_begin_2 = m_data + dest_begin * bytes_per_elem;
    realm::safe_copy_n(begin_2, end_2 - begin_2, dest_begin_2);
}
349

12,075✔
350
// Append the elements from index `ndx` onwards to the end of `dst`, then
// truncate them from this array (i.e. split/transfer the tail).
void Array::move(Array& dst, size_t ndx)
{
    size_t dest_begin = dst.m_size;
    size_t nb_to_move = m_size - ndx;
    dst.copy_on_write();
    // Widen dst up front to this array's upper bound, so no per-element
    // width expansion is needed inside the loop below.
    dst.ensure_minimum_width(this->m_ubound);
    dst.alloc(dst.m_size + nb_to_move, dst.m_width); // Make room for the new elements

    // cache variables used in tight loop
    auto getter = m_getter;
    auto setter = dst.m_vtable->setter;
    size_t sz = m_size;

    for (size_t i = ndx; i < sz; i++) {
        auto v = (this->*getter)(i);
        (dst.*setter)(dest_begin++, v);
    }

    // Remove the transferred tail from this array.
    truncate(ndx);
}
370

162,527,142✔
371
void Array::set(size_t ndx, int64_t value)
162,527,142✔
372
{
162,527,142✔
373
    REALM_ASSERT_3(ndx, <, m_size);
8,919,669✔
374
    if ((this->*(m_vtable->getter))(ndx) == value)
76,466,394✔
375
        return;
76,466,394✔
376

153,607,473✔
377
    // Check if we need to copy before modifying
76,466,394✔
378
    copy_on_write(); // Throws
76,466,394✔
379

153,607,473✔
380
    // Grow the array if needed to store this value
76,466,394✔
381
    ensure_minimum_width(value); // Throws
76,466,394✔
382

153,607,473✔
383
    // Set the value
153,607,473✔
384
    (this->*(m_vtable->setter))(ndx, value);
385
}
386

5,293,089✔
387
void Array::set_as_ref(size_t ndx, ref_type ref)
5,293,089✔
388
{
5,293,089✔
389
    set(ndx, from_ref(ref));
390
}
391

392
/*
393
// Optimization for the common case of adding positive values to a local array
394
// (happens a lot when returning results to TableViews)
395
void Array::add_positive_local(int64_t value)
396
{
397
    REALM_ASSERT(value >= 0);
398
    REALM_ASSERT(&m_alloc == &Allocator::get_default());
399

400
    if (value <= m_ubound) {
401
        if (m_size < m_capacity) {
402
            (this->*(m_vtable->setter))(m_size, value);
403
            ++m_size;
404
            set_header_size(m_size);
405
            return;
406
        }
407
    }
408

409
    insert(m_size, value);
410
}
411
*/
412

12,765,114✔
413
size_t Array::blob_size() const noexcept
12,765,114✔
414
{
96✔
415
    if (get_context_flag()) {
216✔
416
        size_t total_size = 0;
120✔
417
        for (size_t i = 0; i < size(); ++i) {
120✔
418
            char* header = m_alloc.translate(Array::get_as_ref(i));
120✔
419
            total_size += Array::get_size_from_header(header);
96✔
420
        }
96✔
421
        return total_size;
12,765,018✔
422
    }
12,765,018✔
423
    else {
12,765,018✔
424
        return m_size;
12,765,114✔
425
    }
426
}
427

954,950,931✔
428
// Insert `value` at index `ndx`, shifting later elements one slot up and
// widening the element representation in place if the value does not fit
// the current width.
void Array::insert(size_t ndx, int_fast64_t value)
{
    REALM_ASSERT_DEBUG(ndx <= m_size);

    const auto old_width = m_width;
    const auto old_size = m_size;
    const Getter old_getter = m_getter; // Save old getter before potential width expansion

    // Widen if the value falls outside the representable range of the
    // current width; alloc() also makes room for the extra element.
    bool do_expand = value < m_lbound || value > m_ubound;
    if (do_expand) {
        size_t width = bit_width(value);
        REALM_ASSERT_DEBUG(width > m_width);
        alloc(m_size + 1, width); // Throws
    }
    else {
        alloc(m_size + 1, m_width); // Throws
    }

    // Shift the elements at and above `ndx` one slot up. When the width
    // changed, or elements are sub-byte sized, this must go back-to-front
    // one element at a time: read with the old-width getter, write with
    // the new-width setter.
    if (do_expand || old_width < 8) {
        size_t i = old_size;
        while (i > ndx) {
            --i;
            int64_t v = (this->*old_getter)(i);
            (this->*(m_vtable->setter))(i + 1, v);
        }
    }
    else if (ndx != old_size) {
        // Whole-byte elements with no expansion: shift the tail with one
        // overlapping bulk copy (copy_backward handles overlap like memmove).
        // FIXME: Optimize by simply dividing by 8 (or shifting right by 3 bit positions)
        size_t w = (old_width == 64) ? 8 : (old_width == 32) ? 4 : (old_width == 16) ? 2 : 1;
        char* src_begin = m_data + ndx * w;
        char* src_end = m_data + old_size * w;
        char* dst_end = src_end + w;
        std::copy_backward(src_begin, src_end, dst_end);
    }

    // Insert the new value
    (this->*(m_vtable->setter))(ndx, value);

    // After a width expansion, the elements below the insertion point
    // must also be re-encoded at the new width (back-to-front, so wider
    // writes never clobber narrow elements not yet read).
    if (do_expand) {
        size_t i = ndx;
        while (i != 0) {
            --i;
            int64_t v = (this->*old_getter)(i);
            (this->*(m_vtable->setter))(i, v);
        }
    }
}
478

479

4,077,615✔
480
void Array::truncate(size_t new_size)
4,077,615✔
481
{
4,077,615✔
482
    REALM_ASSERT(is_attached());
2,064,408✔
483
    REALM_ASSERT_3(new_size, <=, m_size);
4,077,615✔
484

100,722✔
485
    if (new_size == m_size)
2,009,325✔
486
        return;
3,976,893✔
487

2,009,325✔
488
    copy_on_write(); // Throws
2,009,325✔
489

2,009,325✔
490
    // Update size in accessor and in header. This leaves the capacity
3,976,893✔
491
    // unchanged.
3,976,893✔
492
    m_size = new_size;
2,009,325✔
493
    set_header_size(new_size);
2,009,325✔
494

2,009,325✔
495
    // If the array is completely cleared, we take the opportunity to
3,976,893✔
496
    // drop the width back to zero.
3,879,150✔
497
    if (new_size == 0) {
3,879,150✔
498
        set_width_in_header(0, get_header());
3,879,150✔
499
        update_width_cache_from_header();
3,976,893✔
500
    }
501
}
502

503

3,093✔
504
void Array::truncate_and_destroy_children(size_t new_size)
3,093✔
505
{
3,093✔
506
    REALM_ASSERT(is_attached());
888✔
507
    REALM_ASSERT_3(new_size, <=, m_size);
3,093✔
508

132✔
509
    if (new_size == m_size)
831✔
510
        return;
2,961✔
511

831✔
512
    copy_on_write(); // Throws
2,961✔
513

2,961✔
514
    if (m_has_refs) {
2,961✔
515
        size_t offset = new_size;
2,961✔
516
        destroy_children(offset);
831✔
517
    }
831✔
518

831✔
519
    // Update size in accessor and in header. This leaves the capacity
2,961✔
520
    // unchanged.
2,961✔
521
    m_size = new_size;
831✔
522
    set_header_size(new_size);
831✔
523

831✔
524
    // If the array is completely cleared, we take the opportunity to
2,961✔
525
    // drop the width back to zero.
108✔
526
    if (new_size == 0) {
108✔
527
        set_width_in_header(0, get_header());
108✔
528
        update_width_cache_from_header();
2,961✔
529
    }
530
}
531

532

5,431,659✔
533
// Widen the array in place so that `value` can be stored. Precondition
// (asserted): the value does not fit the current width.
void Array::do_ensure_minimum_width(int_fast64_t value)
{

    // Make room for the new value
    const size_t width = bit_width(value);

    REALM_ASSERT_3(width, >, m_width);

    Getter old_getter = m_getter; // Save old getter before width expansion
    alloc(m_size, width);         // Throws

    // Re-encode the existing values at the new width, back-to-front so
    // that the wider writes never clobber narrow elements not yet read.
    size_t i = m_size;
    while (i != 0) {
        --i;
        int64_t v = (this->*old_getter)(i);
        (this->*(m_vtable->setter))(i, v);
    }
}
552

366✔
553
// Sum of the elements in [start, end). REALM_TEMPEX dispatches to the
// width-templated implementation below based on the current m_width.
int64_t Array::sum(size_t start, size_t end) const
{
    REALM_TEMPEX(return sum, m_width, (start, end));
}
557

558
// Width-specialized sum of the elements in [start, end). end == size_t(-1)
// means "to the end of the array". Strategy: sum scalars until the data
// pointer is 128-bit aligned, then use bit-parallel tricks (w <= 4) and/or
// SSE (w == 8/16/32) on whole chunks, and finish the tail with scalars.
template <size_t w>
int64_t Array::sum(size_t start, size_t end) const
{
    if (end == size_t(-1))
        end = m_size;
    REALM_ASSERT_EX(end <= m_size && start <= end, start, end, m_size);

    // Width 0 means every element is 0; empty range sums to 0.
    if (w == 0 || start == end)
        return 0;

    int64_t s = 0;

    // Sum manually until 128 bit aligned
    for (; (start < end) && (((size_t(m_data) & 0xf) * 8 + start * w) % 128 != 0); start++) {
        s += get<w>(start);
    }

    if (w == 1 || w == 2 || w == 4) {
        // Sum of bitwidths less than a byte (which are always positive)
        // uses a divide and conquer algorithm that is a variation of population count:
        // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel

        // static values needed for fast sums
        const uint64_t m2 = 0x3333333333333333ULL;
        const uint64_t m4 = 0x0f0f0f0f0f0f0f0fULL;
        const uint64_t h01 = 0x0101010101010101ULL;

        int64_t* data = reinterpret_cast<int64_t*>(m_data + start * w / 8);
        size_t chunks = (end - start) * w / 8 / sizeof(int64_t);

        for (size_t t = 0; t < chunks; t++) {
            if (w == 1) {
// Historical alternatives kept for reference; fast_popcount64 is used instead.
#if 0
#if defined(USE_SSE42) && defined(_MSC_VER) && defined(REALM_PTR_64)
                s += __popcnt64(data[t]);
#elif !defined(_MSC_VER) && defined(USE_SSE42) && defined(REALM_PTR_64)
                s += __builtin_popcountll(data[t]);
#else
                uint64_t a = data[t];
                const uint64_t m1  = 0x5555555555555555ULL;
                a -= (a >> 1) & m1;
                a = (a & m2) + ((a >> 2) & m2);
                a = (a + (a >> 4)) & m4;
                a = (a * h01) >> 56;
                s += a;
#endif
#endif
                // For 1-bit elements the sum is simply the popcount.
                s += fast_popcount64(data[t]);
            }
            else if (w == 2) {
                // Horizontal add of 32 2-bit lanes via SWAR reduction.
                uint64_t a = data[t];
                a = (a & m2) + ((a >> 2) & m2);
                a = (a + (a >> 4)) & m4;
                a = (a * h01) >> 56;

                s += a;
            }
            else if (w == 4) {
                // Horizontal add of 16 4-bit lanes via SWAR reduction.
                uint64_t a = data[t];
                a = (a & m4) + ((a >> 4) & m4);
                a = (a * h01) >> 56;
                s += a;
            }
        }
        start += sizeof(int64_t) * 8 / no0(w) * chunks;
    }

#ifdef REALM_COMPILER_SSE
    if (sseavx<42>()) {
        // 2000 items summed 500000 times, 8/16/32 bits, milliseconds:
        // Naive, templated get<>: 391 371 374
        // SSE:                     97 148 282

        if ((w == 8 || w == 16 || w == 32) && end - start > sizeof(__m128i) * 8 / no0(w)) {
            __m128i* data = reinterpret_cast<__m128i*>(m_data + start * w / 8);
            __m128i sum_result = {0};
            __m128i sum2;

            size_t chunks = (end - start) * w / 8 / sizeof(__m128i);

            for (size_t t = 0; t < chunks; t++) {
                if (w == 8) {
                    /*
                    // 469 ms AND disadvantage of handling max 64k elements before overflow
                    __m128i vl = _mm_cvtepi8_epi16(data[t]);
                    __m128i vh = data[t];
                    vh.m128i_i64[0] = vh.m128i_i64[1];
                    vh = _mm_cvtepi8_epi16(vh);
                    sum_result = _mm_add_epi16(sum_result, vl);
                    sum_result = _mm_add_epi16(sum_result, vh);
                    */

                    /*
                    // 424 ms
                    __m128i vl = _mm_unpacklo_epi8(data[t], _mm_set1_epi8(0));
                    __m128i vh = _mm_unpackhi_epi8(data[t], _mm_set1_epi8(0));
                    sum_result = _mm_add_epi32(sum_result, _mm_madd_epi16(vl, _mm_set1_epi16(1)));
                    sum_result = _mm_add_epi32(sum_result, _mm_madd_epi16(vh, _mm_set1_epi16(1)));
                    */

                    __m128i vl = _mm_cvtepi8_epi16(data[t]); // sign extend lower words 8->16
                    __m128i vh = data[t];
                    vh = _mm_srli_si128(vh, 8); // v >>= 64
                    vh = _mm_cvtepi8_epi16(vh); // sign extend lower words 8->16
                    __m128i sum1 = _mm_add_epi16(vl, vh);
                    __m128i sumH = _mm_cvtepi16_epi32(sum1);
                    __m128i sumL = _mm_srli_si128(sum1, 8); // v >>= 64
                    sumL = _mm_cvtepi16_epi32(sumL);
                    sum_result = _mm_add_epi32(sum_result, sumL);
                    sum_result = _mm_add_epi32(sum_result, sumH);
                }
                else if (w == 16) {
                    // todo, can overflow for array size > 2^32
                    __m128i vl = _mm_cvtepi16_epi32(data[t]); // sign extend lower words 16->32
                    __m128i vh = data[t];
                    vh = _mm_srli_si128(vh, 8);  // v >>= 64
                    vh = _mm_cvtepi16_epi32(vh); // sign extend lower words 16->32
                    sum_result = _mm_add_epi32(sum_result, vl);
                    sum_result = _mm_add_epi32(sum_result, vh);
                }
                else if (w == 32) {
                    __m128i v = data[t];
                    __m128i v0 = _mm_cvtepi32_epi64(v); // sign extend lower dwords 32->64
                    v = _mm_srli_si128(v, 8);           // v >>= 64
                    __m128i v1 = _mm_cvtepi32_epi64(v); // sign extend lower dwords 32->64
                    sum_result = _mm_add_epi64(sum_result, v0);
                    sum_result = _mm_add_epi64(sum_result, v1);

                    /*
                    __m128i m = _mm_set1_epi32(0xc000);             // test if overflow could happen (still need
                    underflow test).
                    __m128i mm = _mm_and_si128(data[t], m);
                    zz = _mm_or_si128(mm, zz);
                    sum_result = _mm_add_epi32(sum_result, data[t]);
                    */
                }
            }
            start += sizeof(__m128i) * 8 / no0(w) * chunks;

            // prevent taking address of 'state' to make the compiler keep it in SSE register in above loop
            // (vc2010/gcc4.6)
            sum2 = sum_result;

            // Avoid aliasing bug where sum2 might not yet be initialized when accessed by get_universal
            char sum3[sizeof sum2];
            memcpy(&sum3, &sum2, sizeof sum2);

            // Horizontally add the lanes of the vector accumulator
            // (32-bit lanes for w == 8/16, 64-bit lanes for w == 32).
            for (size_t t = 0; t < sizeof(__m128i) * 8 / ((w == 8 || w == 16) ? 32 : 64); ++t) {
                int64_t v = get_universal < (w == 8 || w == 16) ? 32 : 64 > (reinterpret_cast<char*>(&sum3), t);
                s += v;
            }
        }
    }
#endif

    // Sum remaining elements
    for (; start < end; ++start)
        s += get<w>(start);

    return s;
}
720

42✔
721
// Count the number of elements equal to `value`.
//
// For element widths of 1-16 bits this processes a whole 64-bit word per
// iteration: the word is XOR'ed with `value` replicated into every bit
// segment (matching segments become zero), non-zero segments are cascaded
// down to a single bit, the bits are inverted so that matches are 1, and
// the result is tallied with a SWAR population count (constants m1/m2/m4/h01).
// Widths 32 and 64 use a plain element-by-element comparison. Elements that
// do not fill a whole 64-bit word are handled by the generic tail loop.
size_t Array::count(int64_t value) const noexcept
{
    const uint64_t* next = reinterpret_cast<uint64_t*>(m_data);
    size_t value_count = 0;
    const size_t end = m_size;
    size_t i = 0;

    // static values needed for fast population count
    const uint64_t m1 = 0x5555555555555555ULL;
    const uint64_t m2 = 0x3333333333333333ULL;
    const uint64_t m4 = 0x0f0f0f0f0f0f0f0fULL;
    const uint64_t h01 = 0x0101010101010101ULL;

    if (m_width == 0) {
        // Width 0 means every element is implicitly zero
        if (value == 0)
            return m_size;
        return 0;
    }
    if (m_width == 1) {
        // 1-bit elements can only hold 0 or 1
        if (uint64_t(value) > 1)
            return 0;

        const size_t chunkvals = 64; // elements per 64-bit word
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            if (value == 0)
                a = ~a; // reverse

            // SWAR population count of the set bits
            a -= (a >> 1) & m1;
            a = (a & m2) + ((a >> 2) & m2);
            a = (a + (a >> 4)) & m4;
            a = (a * h01) >> 56;

            // Could use intrinsic instead:
            // a = __builtin_popcountll(a); // gcc intrinsic

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 2) {
        // 2-bit elements can only hold values 0-3
        if (uint64_t(value) > 3)
            return 0;

        // `value` replicated into every 2-bit segment
        const uint64_t v = ~0ULL / 0x3 * value;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0x3 * 0x1;

        const size_t chunkvals = 32; // elements per 64-bit word
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a &= m1;            // isolate single bit in each segment
            a ^= m1;            // reverse isolated bits
            // if (!a) continue;

            // Population count
            a = (a & m2) + ((a >> 2) & m2);
            a = (a + (a >> 4)) & m4;
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 4) {
        // 4-bit elements can only hold values 0-15
        if (uint64_t(value) > 15)
            return 0;

        // `value` replicated into every 4-bit segment
        const uint64_t v = ~0ULL / 0xF * value;
        const uint64_t m = ~0ULL / 0xF * 0x1;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0xF * 0x7;
        const uint64_t c2 = ~0ULL / 0xF * 0x3;

        const size_t chunkvals = 16; // elements per 64-bit word
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a |= (a >> 2) & c2;
            a &= m; // isolate single bit in each segment
            a ^= m; // reverse isolated bits

            // Population count
            a = (a + (a >> 4)) & m4;
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 8) {
        // Out-of-range values cannot occur in 8-bit (signed) elements
        if (value > 0x7FLL || value < -0x80LL)
            return 0; // by casting?

        // `value` replicated into every byte
        const uint64_t v = ~0ULL / 0xFF * value;
        const uint64_t m = ~0ULL / 0xFF * 0x1;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0xFF * 0x7F;
        const uint64_t c2 = ~0ULL / 0xFF * 0x3F;
        const uint64_t c3 = ~0ULL / 0xFF * 0x0F;

        const size_t chunkvals = 8; // elements per 64-bit word
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a |= (a >> 2) & c2;
            a |= (a >> 4) & c3;
            a &= m; // isolate single bit in each segment
            a ^= m; // reverse isolated bits

            // Population count
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 16) {
        // Out-of-range values cannot occur in 16-bit (signed) elements
        if (value > 0x7FFFLL || value < -0x8000LL)
            return 0; // by casting?

        // `value` replicated into every 16-bit segment
        const uint64_t v = ~0ULL / 0xFFFF * value;
        const uint64_t m = ~0ULL / 0xFFFF * 0x1;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0xFFFF * 0x7FFF;
        const uint64_t c2 = ~0ULL / 0xFFFF * 0x3FFF;
        const uint64_t c3 = ~0ULL / 0xFFFF * 0x0FFF;
        const uint64_t c4 = ~0ULL / 0xFFFF * 0x00FF;

        const size_t chunkvals = 4; // elements per 64-bit word
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a |= (a >> 2) & c2;
            a |= (a >> 4) & c3;
            a |= (a >> 8) & c4;
            a &= m; // isolate single bit in each segment
            a ^= m; // reverse isolated bits

            // Population count
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 32) {
        // Plain scan; one element per comparison
        int32_t v = int32_t(value);
        const int32_t* d = reinterpret_cast<int32_t*>(m_data);
        for (; i < end; ++i) {
            if (d[i] == v)
                ++value_count;
        }
        return value_count;
    }
    else if (m_width == 64) {
        // Plain scan; one element per comparison
        const int64_t* d = reinterpret_cast<int64_t*>(m_data);
        for (; i < end; ++i) {
            if (d[i] == value)
                ++value_count;
        }
        return value_count;
    }

    // Check remaining elements
    for (; i < end; ++i)
        if (value == get(i))
            ++value_count;

    return value_count;
}
896

170,064✔
897
size_t Array::calc_aligned_byte_size(size_t size, int width)
170,064✔
898
{
170,064✔
899
    REALM_ASSERT(width != 0 && (width & (width - 1)) == 0); // Is a power of two
170,064✔
900
    size_t max = std::numeric_limits<size_t>::max();
170,064✔
901
    size_t max_2 = max & ~size_t(7); // Allow for upwards 8-byte alignment
170,064✔
902
    bool overflow;
170,064✔
903
    size_t byte_size;
162,750✔
904
    if (width < 8) {
162,750✔
905
        size_t elems_per_byte = 8 / width;
162,750✔
906
        size_t byte_size_0 = size / elems_per_byte;
126✔
907
        if (size % elems_per_byte != 0)
162,750✔
908
            ++byte_size_0;
162,750✔
909
        overflow = byte_size_0 > max_2 - header_size;
162,750✔
910
        byte_size = header_size + byte_size_0;
7,314✔
911
    }
7,314✔
912
    else {
7,314✔
913
        size_t bytes_per_elem = width / 8;
7,314✔
914
        overflow = size > (max_2 - header_size) / bytes_per_elem;
7,314✔
915
        byte_size = header_size + size * bytes_per_elem;
170,064✔
916
    }
×
917
    if (overflow)
170,064✔
918
        throw std::overflow_error("Byte size overflow");
170,064✔
919
    REALM_ASSERT_3(byte_size, >, 0);
170,064✔
920
    size_t aligned_byte_size = ((byte_size - 1) | 7) + 1; // 8-byte alignment
170,064✔
921
    return aligned_byte_size;
922
}
923

×
924
// Make a deep copy of the array at `mem` (owned by `alloc`) into
// `target_alloc`, recursively cloning any subarrays reachable via refs.
// Returns the memory of the new top-level array. Exception safe: partially
// built subtrees are destroyed by the guards if an allocation throws.
MemRef Array::clone(MemRef mem, Allocator& alloc, Allocator& target_alloc)
{
    const char* header = mem.get_addr();
    if (!get_hasrefs_from_header(header)) {
        // This array has no subarrays, so we can make a byte-for-byte
        // copy, which is more efficient.

        // Calculate size of new array in bytes
        size_t size = get_byte_size_from_header(header);

        // Create the new array
        MemRef clone_mem = target_alloc.alloc(size); // Throws
        char* clone_header = clone_mem.get_addr();

        // Copy contents
        const char* src_begin = header;
        const char* src_end = header + size;
        char* dst_begin = clone_header;
        realm::safe_copy_n(src_begin, src_end - src_begin, dst_begin);

        // Update with correct capacity
        set_capacity_in_header(size, clone_header);

        return clone_mem;
    }

    // Refs are integers, and integers arrays use wtype_Bits.
    REALM_ASSERT_3(get_wtype_from_header(header), ==, wtype_Bits);

    Array array{alloc};
    array.init_from_mem(mem);

    // Create new empty array of refs
    Array new_array(target_alloc);
    _impl::DeepArrayDestroyGuard dg(&new_array);
    Type type = get_type_from_header(header);
    bool context_flag = get_context_flag_from_header(header);
    new_array.create(type, context_flag); // Throws

    // dg_2 protects each freshly cloned child until ownership is handed to
    // new_array via add()
    _impl::DeepArrayRefDestroyGuard dg_2(target_alloc);
    size_t n = array.size();
    for (size_t i = 0; i != n; ++i) {
        int_fast64_t value = array.get(i);

        // Null-refs signify empty subtrees. Also, all refs are
        // 8-byte aligned, so the lowest bits cannot be set. If they
        // are, it means that it should not be interpreted as a ref.
        bool is_subarray = value != 0 && (value & 1) == 0;
        if (!is_subarray) {
            new_array.add(value); // Throws
            continue;
        }

        // Recursively clone the subarray and store the new ref
        ref_type ref = to_ref(value);
        MemRef new_mem = clone(MemRef(ref, alloc), alloc, target_alloc); // Throws
        dg_2.reset(new_mem.get_ref());
        value = from_ref(new_mem.get_ref());
        new_array.add(value); // Throws
        dg_2.release();
    }

    // Success: the caller now owns the cloned tree
    dg.release();
    return new_array.get_mem();
}
988

989
// Allocate and initialize a new array in `alloc`.
//
// `type` selects the inner-B+-tree-node / has-refs header flags; `width_type`
// selects how the width field is interpreted. When `value` is non-zero the
// array is sized for the smallest width that can hold `value`, and all `size`
// slots are filled with it; otherwise the array starts with width 0.
// Returns the memory of the new array. Throws on allocation failure or byte
// size overflow.
MemRef Array::create(Type type, bool context_flag, WidthType width_type, size_t size, int_fast64_t value,
                     Allocator& alloc)
{
    REALM_ASSERT_7(value, ==, 0, ||, width_type, ==, wtype_Bits);
    REALM_ASSERT_7(size, ==, 0, ||, width_type, !=, wtype_Ignore);

    // Map the array type onto the two header flags
    bool is_inner_bptree_node = false, has_refs = false;
    switch (type) {
        case type_Normal:
            break;
        case type_InnerBptreeNode:
            is_inner_bptree_node = true;
            has_refs = true;
            break;
        case type_HasRefs:
            has_refs = true;
            break;
    }

    // Pick the narrowest width that can represent `value` and size the
    // allocation accordingly
    int width = 0;
    size_t byte_size_0 = header_size;
    if (value != 0) {
        width = int(bit_width(value));
        byte_size_0 = calc_aligned_byte_size(size, width); // Throws
    }
    // Adding zero to Array::initial_capacity to avoid taking the
    // address of that member
    size_t byte_size = std::max(byte_size_0, initial_capacity + 0);
    MemRef mem = alloc.alloc(byte_size); // Throws
    char* header = mem.get_addr();

    init_header(header, is_inner_bptree_node, has_refs, context_flag, width_type, width, size, byte_size);

    // Fill all slots with `value` using the width-specialized fast path
    if (value != 0) {
        char* data = get_data_from_header(header);
        size_t begin = 0, end = size;
        REALM_TEMPEX(fill_direct, width, (data, begin, end, value));
    }

    return mem;
}
1030

1031
// This is the one installed into the m_vtable->finder slots.
// Thin adapter that forwards a find request to the width/condition
// specialized implementation in ArrayWithFind.
template <class cond, size_t bitwidth>
bool Array::find_vtable(int64_t value, size_t start, size_t end, size_t baseindex, QueryStateBase* state) const
{
    return ArrayWithFind(*this).find_optimized<cond, bitwidth>(value, start, end, baseindex, state, nullptr);
}
1037

1038

1039
// Per-width vtable holding function pointers to the width-specialized
// getter/setter/chunk-getter and the four condition-specific finders.
// One statically constructed instance exists per instantiated width.
template <size_t width>
struct Array::VTableForWidth {
    struct PopulatedVTable : Array::VTable {
        PopulatedVTable()
        {
            getter = &Array::get<width>;
            setter = &Array::set<width>;
            chunk_getter = &Array::get_chunk<width>;
            finder[cond_Equal] = &Array::find_vtable<Equal, width>;
            finder[cond_NotEqual] = &Array::find_vtable<NotEqual, width>;
            finder[cond_Greater] = &Array::find_vtable<Greater, width>;
            finder[cond_Less] = &Array::find_vtable<Less, width>;
        }
    };
    static const PopulatedVTable vtable;
};

// Definition of the per-width static vtable instance
template <size_t width>
const typename Array::VTableForWidth<width>::PopulatedVTable Array::VTableForWidth<width>::vtable;
1058

1,624,764,771✔
1059
// Refresh the cached width, value bounds, vtable and getter from the header.
// Must be called whenever the header's width field may have changed.
void Array::update_width_cache_from_header() noexcept
{
    auto width = get_width_from_header(get_header());
    // Cache the representable value range for this width
    m_lbound = lbound_for_width(width);
    m_ubound = ubound_for_width(width);

    m_width = width;

    // Select the width-specialized vtable (expands to a switch over widths)
    REALM_TEMPEX(m_vtable = &VTableForWidth, width, ::vtable);
    m_getter = m_vtable->getter;
}
1070

1071
// This method reads 8 consecutive values into res[8], starting from index 'ndx'. It's allowed for the 8 values to
// exceed array length; in this case, the remainder of res[8] will be set to 0.
template <size_t w>
void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
{
    REALM_ASSERT_3(ndx, <, m_size);

    // Number of leading elements of res[] already filled by the fast path
    size_t i = 0;

    // if constexpr to avoid producing spurious warnings resulting from
    // instantiating for too large w
    if constexpr (w > 0 && w <= 4) {
        // Calling get<w>() in a loop results in one load per call to get, but
        // for w < 8 we can do better than that
        constexpr size_t elements_per_byte = 8 / w;

        // Round m_size down to byte granularity as the trailing bits in the last
        // byte are uninitialized
        size_t bytes_available = m_size / elements_per_byte;

        // Round start and end to be byte-aligned. Start is rounded down and
        // end is rounded up as we may read up to 7 unused bits at each end.
        size_t start = ndx / elements_per_byte;
        size_t end = std::min(bytes_available, (ndx + 8 + elements_per_byte - 1) / elements_per_byte);

        if (end > start) {
            // Loop in reverse order because data is stored in little endian order
            uint64_t c = 0;
            for (size_t i = end; i > start; --i) {
                c <<= 8;
                c += *reinterpret_cast<const uint8_t*>(m_data + i - 1);
            }
            // Trim off leading bits which aren't part of the requested range
            c >>= (ndx - start * elements_per_byte) * w;

            // Unpack the 8 w-bit fields into res[]
            uint64_t mask = (1ULL << w) - 1ULL;
            res[0] = (c >> 0 * w) & mask;
            res[1] = (c >> 1 * w) & mask;
            res[2] = (c >> 2 * w) & mask;
            res[3] = (c >> 3 * w) & mask;
            res[4] = (c >> 4 * w) & mask;
            res[5] = (c >> 5 * w) & mask;
            res[6] = (c >> 6 * w) & mask;
            res[7] = (c >> 7 * w) & mask;

            // Read the last few elements via get<w> if needed
            i = std::min<size_t>(8, end * elements_per_byte - ndx);
        }
    }

    // Fill any remaining in-range slots one element at a time, and zero-fill
    // slots past the end of the array
    for (; i + ndx < m_size && i < 8; i++)
        res[i] = get<w>(ndx + i);
    for (; i < 8; i++)
        res[i] = 0;

#ifdef REALM_DEBUG
    // Cross-check the fast path against the element-wise getter
    for (int j = 0; j + ndx < m_size && j < 8; j++) {
        int64_t expected = get<w>(ndx + j);
        REALM_ASSERT(res[j] == expected);
    }
#endif
}
1133

1134
template <>
184,281✔
1135
void Array::get_chunk<0>(size_t ndx, int64_t res[8]) const noexcept
184,281✔
1136
{
184,281✔
1137
    REALM_ASSERT_3(ndx, <, m_size);
184,281✔
1138
    memset(res, 0, sizeof(int64_t) * 8);
1139
}
1140

1141

1142
// Store `value` at index `ndx` using the width-specialized direct writer.
// The caller must ensure `value` fits in `width` bits and `ndx` is in range.
template <size_t width>
void Array::set(size_t ndx, int64_t value)
{
    set_direct<width>(m_data, ndx, value);
}
1147

1148
#ifdef REALM_DEBUG
1149
namespace {
1150

1151
// Accumulates per-array memory-usage callbacks into a MemStats total
// (allocated bytes, used bytes, and number of arrays visited).
class MemStatsHandler : public Array::MemUsageHandler {
public:
    MemStatsHandler(MemStats& stats) noexcept
        : m_stats(stats)
    {
    }
    void handle(ref_type, size_t allocated, size_t used) noexcept override
    {
        m_stats.allocated += allocated;
        m_stats.used += used;
        m_stats.array_count += 1;
    }

private:
    MemStats& m_stats; // accumulation target owned by the caller
};
1167

1168
} // anonymous namespace
1169

1170

×
1171
// Accumulate memory-usage statistics for this array (and its subarrays)
// into `stats_dest`.
void Array::stats(MemStats& stats_dest) const noexcept
{
    MemStatsHandler handler(stats_dest);
    report_memory_usage(handler);
}
1176

1177

1,307,370✔
1178
// Invoke `handler` once for this array and, when it holds refs, once for
// every reachable subarray (subarrays are reported first). May propagate
// exceptions thrown by the handler.
void Array::report_memory_usage(MemUsageHandler& handler) const
{
    if (m_has_refs)
        report_memory_usage_2(handler); // Throws

    const size_t used = get_byte_size();
    // Read-only arrays carry no slack capacity, so allocated == used there;
    // otherwise read the capacity recorded in the header.
    const size_t allocated =
        m_alloc.is_read_only(m_ref) ? used : get_capacity_from_header(get_header_from_data(m_data));
    handler.handle(m_ref, allocated, used); // Throws
}
1194

1195

24,800,658✔
1196
// Walk every ref stored in this array and report memory usage for each
// referenced subtree (depth-first, children before their parent). The same
// `subarray` accessor object is reused for every child to avoid repeated
// construction. May propagate exceptions thrown by the handler.
void Array::report_memory_usage_2(MemUsageHandler& handler) const
{
    Array subarray(m_alloc);
    for (size_t i = 0; i < m_size; ++i) {
        int_fast64_t value = get(i);
        // Skip null refs and values that are not refs. Values are not refs when
        // the least significant bit is set.
        if (value == 0 || (value & 1) == 1)
            continue;

        size_t used;
        ref_type ref = to_ref(value);
        char* header = m_alloc.translate(ref);
        bool array_has_refs = get_hasrefs_from_header(header);
        if (array_has_refs) {
            // Recurse into the child's own refs before reporting it
            MemRef mem(header, ref, m_alloc);
            subarray.init_from_mem(mem);
            subarray.report_memory_usage_2(handler); // Throws
            used = subarray.get_byte_size();
        }
        else {
            used = get_byte_size_from_header(header);
        }

        // Read-only arrays are reported with allocated == used
        size_t allocated;
        if (m_alloc.is_read_only(ref)) {
            allocated = used;
        }
        else {
            allocated = get_capacity_from_header(header);
        }
        handler.handle(ref, allocated, used); // Throws
    }
}
1230
#endif
1231

13,256,919✔
1232
// Debug-build sanity check: the array must be attached, have a legal width,
// and (when it has a parent) the parent's recorded ref must match m_ref.
// No-op in release builds.
void Array::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    REALM_ASSERT(m_width == 0 || m_width == 1 || m_width == 2 || m_width == 4 || m_width == 8 || m_width == 16 ||
                 m_width == 32 || m_width == 64);

    if (!get_parent())
        return;

    // Check that parent is set correctly
    ref_type ref_in_parent = get_ref_from_parent();
    REALM_ASSERT_3(ref_in_parent, ==, m_ref);
#endif
}
1248

10,729,860✔
1249
// Binary search: index of the first element >= `value`. The array must be
// sorted. Dispatches to the width-specialized lower_bound via REALM_TEMPEX.
size_t Array::lower_bound_int(int64_t value) const noexcept
{
    REALM_TEMPEX(return lower_bound, m_width, (m_data, m_size, value));
}
1253

4,469,874✔
1254
// Binary search: index of the first element > `value`. The array must be
// sorted. Dispatches to the width-specialized upper_bound via REALM_TEMPEX.
size_t Array::upper_bound_int(int64_t value) const noexcept
{
    REALM_TEMPEX(return upper_bound, m_width, (m_data, m_size, value));
}
1258

1259

7,312,437✔
1260
// Find the index of the first element equal to `value` in [start, end).
size_t Array::find_first(int64_t value, size_t start, size_t end) const
{
    return find_first<Equal>(value, start, end);
}
1264

1265

287,016,351✔
1266
int_fast64_t Array::get(const char* header, size_t ndx) noexcept
287,016,351✔
1267
{
287,016,351✔
1268
    const char* data = get_data_from_header(header);
287,016,351✔
1269
    uint_least8_t width = get_width_from_header(header);
287,016,351✔
1270
    return get_direct(data, width, ndx);
1271
}
1272

1273

602,904✔
1274
std::pair<int64_t, int64_t> Array::get_two(const char* header, size_t ndx) noexcept
602,904✔
1275
{
602,904✔
1276
    const char* data = get_data_from_header(header);
602,904✔
1277
    uint_least8_t width = get_width_from_header(header);
602,904✔
1278
    std::pair<int64_t, int64_t> p = ::get_two(data, width, ndx);
602,904✔
1279
    return std::make_pair(p.first, p.second);
1280
}
1281

21,926,196✔
1282
bool QueryStateCount::match(size_t, Mixed) noexcept
21,926,196✔
1283
{
21,926,196✔
1284
    ++m_match_count;
21,926,196✔
1285
    return (m_limit > m_match_count);
1286
}
1287

9,308,832✔
1288
bool QueryStateFindFirst::match(size_t index, Mixed) noexcept
9,308,832✔
1289
{
9,308,832✔
1290
    m_match_count++;
9,308,832✔
1291
    m_state = index;
9,308,832✔
1292
    return false;
1293
}
1294

1295
// Collect all matches as ObjKeys: translate the array index into a key
// value (via the key-value list when one is present, otherwise the index
// itself, both offset by m_key_offset) and append it. Continue while the
// match count is below the limit.
template <>
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index, Mixed) noexcept
{
    ++m_match_count;

    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
    m_keys.push_back(ObjKey(key_value));

    return (m_limit > m_match_count);
}
1305

1306
template <>
282✔
1307
bool QueryStateFindAll<IntegerColumn>::match(size_t index, Mixed) noexcept
282✔
1308
{
282✔
1309
    ++m_match_count;
141✔
1310
    m_keys.add(index);
282✔
1311

282✔
1312
    return (m_limit > m_match_count);
1313
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc