• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / github_pull_request_312964

19 Feb 2025 07:31PM UTC coverage: 90.814% (-0.3%) from 91.119%
github_pull_request_312964

Pull #8071

Evergreen

web-flow
Bump serialize-javascript and mocha

Bumps [serialize-javascript](https://github.com/yahoo/serialize-javascript) to 6.0.2 and updates ancestor dependency [mocha](https://github.com/mochajs/mocha). These dependencies need to be updated together.


Updates `serialize-javascript` from 6.0.0 to 6.0.2
- [Release notes](https://github.com/yahoo/serialize-javascript/releases)
- [Commits](https://github.com/yahoo/serialize-javascript/compare/v6.0.0...v6.0.2)

Updates `mocha` from 10.2.0 to 10.8.2
- [Release notes](https://github.com/mochajs/mocha/releases)
- [Changelog](https://github.com/mochajs/mocha/blob/main/CHANGELOG.md)
- [Commits](https://github.com/mochajs/mocha/compare/v10.2.0...v10.8.2)

---
updated-dependencies:
- dependency-name: serialize-javascript
  dependency-type: indirect
- dependency-name: mocha
  dependency-type: direct:development
...

Signed-off-by: dependabot[bot] <support@github.com>
Pull Request #8071: Bump serialize-javascript and mocha

96552 of 179126 branches covered (53.9%)

212672 of 234185 relevant lines covered (90.81%)

3115802.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

86.85
/src/realm/array.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <realm/array_with_find.hpp>
20
#include <realm/utilities.hpp>
21
#include <realm/impl/destroy_guard.hpp>
22
#include <realm/column_integer.hpp>
23
#include <realm/bplustree.hpp>
24
#include <realm/query_conditions.hpp>
25
#include <realm/array_integer.hpp>
26
#include <realm/array_key.hpp>
27
#include <realm/impl/array_writer.hpp>
28

29
#include <array>
30
#include <cstring> // std::memcpy
31
#include <iomanip>
32
#include <limits>
33
#include <tuple>
34

35
#ifdef REALM_DEBUG
36
#include <iostream>
37
#include <sstream>
38
#endif
39

40
#ifdef _MSC_VER
41
#include <intrin.h>
42
#pragma warning(disable : 4127) // Condition is constant warning
43
#endif
44

45

46
// Header format (8 bytes):
47
// ------------------------
48
//
49
// In mutable part / outside file:
50
//
51
// |--------|--------|--------|--------|--------|--------|--------|--------|
52
// |         capacity         |reserved|12344555|           size           |
53
//
54
//
55
// In immutable part / in file:
56
//
57
// |--------|--------|--------|--------|--------|--------|--------|--------|
58
// |             checksum              |12344555|           size           |
59
//
60
//
61
//  1: 'is_inner_bptree_node' (inner node of B+-tree).
62
//
63
//  2: 'has_refs' (elements whose first bit is zero are refs to subarrays).
64
//
65
//  3: 'context_flag' (meaning depends on context)
66
//
67
//  4: 'width_scheme' (2 bits)
68
//
69
//      value  |  meaning of 'width'  |  number of bytes used after header
70
//      -------|----------------------|------------------------------------
71
//        0    |  number of bits      |  ceil(width * size / 8)
72
//        1    |  number of bytes     |  width * size
73
//        2    |  ignored             |  size
74
//
75
//  5: 'width_ndx' (3 bits)
76
//
77
//      'width_ndx'       |  0 |  1 |  2 |  3 |  4 |  5 |  6 |  7 |
78
//      ------------------|----|----|----|----|----|----|----|----|
79
//      value of 'width'  |  0 |  1 |  2 |  4 |  8 | 16 | 32 | 64 |
80
//
81
//
82
// 'capacity' is the total number of bytes allocated for this array
83
// including the header.
84
//
85
// 'size' (aka length) is the number of elements in the array.
86
//
87
// 'checksum' (not yet implemented) is the checksum of the array
88
// including the header.
89
//
90
//
91
// Inner node of B+-tree:
92
// ----------------------
93
//
94
// An inner node of a B+-tree has one of two forms: The 'compact'
95
// form which uses a single array node, or the 'general' form which
96
// uses two. The compact form is used by default but is converted to
97
// the general form when the corresponding subtree is modified in
98
// certain ways. There are two kinds of modification that require
99
// conversion to the general form:
100
//
101
//  - Insertion of an element into the corresponding subtree, except
102
//    when insertion occurs after the last element in the subtree
103
//    (append).
104
//
105
//  - Removal of an element from the corresponding subtree, except
106
//    when the removed element is the last element in the subtree.
107
//
108
// Compact form:
109
//
110
//   --> | N_c | r_1 | r_2 | ... | r_N | N_t |
111
//
112
// General form:
113
//
114
//   --> |  .  | r_1 | r_2 | ... | r_N | N_t |  (main array node)
115
//          |
116
//           ------> | o_2 | ... | o_N |  (offsets array node)
117
//
118
// Here,
119
//   `r_i` is the i'th child ref,
120
//   `o_i` is the total number of elements preceding the i'th child,
121
//   `N`   is the number of children,
122
//   'M'   is one less than the number of children,
123
//   `N_c` is the fixed number of elements per child
124
//         (`elems_per_child`), and
125
//   `N_t` is the total number of elements in the subtree
126
//         (`total_elems_in_subtree`).
127
//
128
// `N_c` must always be a power of `REALM_MAX_BPNODE_SIZE`.
129
//
130
// It is expected that `N_t` will be removed in a future version of
131
// the file format. This will make it much more efficient to append
132
// elements to the B+-tree (or remove elements from the end).
133
//
134
// The last child of an inner node on the compact form, may have fewer
135
// elements than `N_c`. All other children must have exactly `N_c`
136
// elements in them.
137
//
138
// When an inner node is on the general form, and has only one child,
139
// it has an empty `offsets` array.
140
//
141
//
142
// B+-tree invariants:
143
//
144
//  - Every inner node must have at least one child
145
//    (invar:bptree-nonempty-inner).
146
//
147
//  - A leaf node, that is not also a root node, must contain at least
148
//    one element (invar:bptree-nonempty-leaf).
149
//
150
//  - All leaf nodes must reside at the same depth in the tree
151
//    (invar:bptree-leaf-depth).
152
//
153
//  - If an inner node is on the general form, and has a parent, the
154
//    parent must also be on the general form
155
//    (invar:bptree-node-form).
156
//
157
// It follows from invar:bptree-nonempty-leaf that the root of an
158
// empty tree (zero elements) is a leaf.
159
//
160
// It follows from invar:bptree-nonempty-inner and
161
// invar:bptree-nonempty-leaf that in a tree with precisely one
162
// element, every inner node has precisely one child, there is
163
// precisely one leaf node, and that leaf node has precisely one
164
// element.
165
//
166
// It follows from invar:bptree-node-form that if the root is on the
167
// compact form, then so is every other inner node in the tree.
168
//
169
// In general, when the root node is an inner node, it will have at
170
// least two children, because otherwise it would be
171
// superflous. However, to allow for exception safety during element
172
// insertion and removal, this shall not be guaranteed.
173

174
// LIMITATION: The code below makes the non-portable assumption that
175
// negative numbers are represented using two's complement. This is not
176
// guaranteed by C++03, but holds for all known target platforms.
177
//
178
// LIMITATION: The code below makes the non-portable assumption that
179
// the types `int8_t`, `int16_t`, `int32_t`, and `int64_t`
180
// exist. This is not guaranteed by C++03, but holds for all
181
// known target platforms.
182
//
183
// LIMITATION: The code below makes the assumption that a reference into
184
// a realm file will never grow in size above what can be represented in
185
// a size_t, which is 2^31-1 on a 32-bit platform, and 2^63-1 on a 64 bit
186
// platform.
187

188
using namespace realm;
189
using namespace realm::util;
190

191
void QueryStateBase::dyncast() {}
×
192

193
size_t Array::bit_width(int64_t v)
194
{
10,358,031✔
195
    // FIXME: Assuming there is a 64-bit CPU reverse bitscan
196
    // instruction and it is fast, then this function could be
197
    // implemented as a table lookup on the result of the scan
198

199
    if ((uint64_t(v) >> 4) == 0) {
10,358,031✔
200
        static const int8_t bits[] = {0, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
1,338,324✔
201
        return bits[int8_t(v)];
1,338,324✔
202
    }
1,338,324✔
203

204
    // First flip all bits if bit 63 is set (will now always be zero)
205
    if (v < 0)
9,019,707✔
206
        v = ~v;
107,820✔
207

208
    // Then check if bits 15-31 used (32b), 7-31 used (16b), else (8b)
209
    return uint64_t(v) >> 31 ? 64 : uint64_t(v) >> 15 ? 32 : uint64_t(v) >> 7 ? 16 : 8;
9,019,707✔
210
}
10,358,031✔
211

212

213
void Array::init_from_mem(MemRef mem) noexcept
214
{
352,481,097✔
215
    char* header = Node::init_from_mem(mem);
352,481,097✔
216
    // Parse header
217
    m_is_inner_bptree_node = get_is_inner_bptree_node_from_header(header);
352,481,097✔
218
    m_has_refs = get_hasrefs_from_header(header);
352,481,097✔
219
    m_context_flag = get_context_flag_from_header(header);
352,481,097✔
220
    update_width_cache_from_header();
352,481,097✔
221
}
352,481,097✔
222

223
void Array::set_type(Type type)
224
{
1,701✔
225
    REALM_ASSERT(is_attached());
1,701✔
226

227
    copy_on_write(); // Throws
1,701✔
228

229
    bool init_is_inner_bptree_node = false, init_has_refs = false;
1,701✔
230
    switch (type) {
1,701✔
231
        case type_Normal:
3✔
232
            break;
3✔
233
        case type_InnerBptreeNode:
3✔
234
            init_is_inner_bptree_node = true;
3✔
235
            init_has_refs = true;
3✔
236
            break;
3✔
237
        case type_HasRefs:
1,695✔
238
            init_has_refs = true;
1,695✔
239
            break;
1,695✔
240
    }
1,701✔
241
    m_is_inner_bptree_node = init_is_inner_bptree_node;
1,701✔
242
    m_has_refs = init_has_refs;
1,701✔
243

244
    char* header = get_header();
1,701✔
245
    set_is_inner_bptree_node_in_header(init_is_inner_bptree_node, header);
1,701✔
246
    set_hasrefs_in_header(init_has_refs, header);
1,701✔
247
}
1,701✔
248

249

250
void Array::destroy_children(size_t offset) noexcept
251
{
384,018✔
252
    for (size_t i = offset; i != m_size; ++i) {
2,116,248✔
253
        int64_t value = get(i);
1,732,230✔
254

255
        // Null-refs indicate empty sub-trees
256
        if (value == 0)
1,732,230✔
257
            continue;
600,363✔
258

259
        // A ref is always 8-byte aligned, so the lowest bit
260
        // cannot be set. If it is, it means that it should not be
261
        // interpreted as a ref.
262
        if ((value & 1) != 0)
1,131,867✔
263
            continue;
70,191✔
264

265
        ref_type ref = to_ref(value);
1,061,676✔
266
        destroy_deep(ref, m_alloc);
1,061,676✔
267
    }
1,061,676✔
268
}
384,018✔
269

270

271
ref_type Array::do_write_shallow(_impl::ArrayWriterBase& out) const
272
{
9,915,870✔
273
    // Write flat array
274
    const char* header = get_header_from_data(m_data);
9,915,870✔
275
    size_t byte_size = get_byte_size();
9,915,870✔
276
    uint32_t dummy_checksum = 0x41414141UL;                                // "AAAA" in ASCII
9,915,870✔
277
    ref_type new_ref = out.write_array(header, byte_size, dummy_checksum); // Throws
9,915,870✔
278
    REALM_ASSERT_3(new_ref % 8, ==, 0);                                    // 8-byte alignment
9,915,870✔
279
    return new_ref;
9,915,870✔
280
}
9,915,870✔
281

282

283
ref_type Array::do_write_deep(_impl::ArrayWriterBase& out, bool only_if_modified) const
284
{
3,039,207✔
285
    // Temp array for updated refs
286
    Array new_array(Allocator::get_default());
3,039,207✔
287
    Type type = m_is_inner_bptree_node ? type_InnerBptreeNode : type_HasRefs;
3,039,207✔
288
    new_array.create(type, m_context_flag); // Throws
3,039,207✔
289
    _impl::ShallowArrayDestroyGuard dg(&new_array);
3,039,207✔
290

291
    // First write out all sub-arrays
292
    size_t n = size();
3,039,207✔
293
    for (size_t i = 0; i < n; ++i) {
57,701,112✔
294
        int_fast64_t value = get(i);
54,661,905✔
295
        bool is_ref = (value != 0 && (value & 1) == 0);
54,661,905✔
296
        if (is_ref) {
54,661,905✔
297
            ref_type subref = to_ref(value);
40,617,687✔
298
            ref_type new_subref = write(subref, m_alloc, out, only_if_modified); // Throws
40,617,687✔
299
            value = from_ref(new_subref);
40,617,687✔
300
        }
40,617,687✔
301
        new_array.add(value); // Throws
54,661,905✔
302
    }
54,661,905✔
303

304
    return new_array.do_write_shallow(out); // Throws
3,039,207✔
305
}
3,039,207✔
306

307

308
void Array::move(size_t begin, size_t end, size_t dest_begin)
309
{
5,792,805✔
310
    REALM_ASSERT_3(begin, <=, end);
5,792,805✔
311
    REALM_ASSERT_3(end, <=, m_size);
5,792,805✔
312
    REALM_ASSERT_3(dest_begin, <=, m_size);
5,792,805✔
313
    REALM_ASSERT_3(end - begin, <=, m_size - dest_begin);
5,792,805✔
314
    REALM_ASSERT(!(dest_begin >= begin && dest_begin < end)); // Required by std::copy
5,792,805!
315

316
    // Check if we need to copy before modifying
317
    copy_on_write(); // Throws
5,792,805✔
318

319
    size_t bits_per_elem = m_width;
5,792,805✔
320
    const char* header = get_header_from_data(m_data);
5,792,805✔
321
    if (get_wtype_from_header(header) == wtype_Multiply) {
5,792,805✔
322
        bits_per_elem *= 8;
×
323
    }
×
324

325
    if (bits_per_elem < 8) {
5,792,805✔
326
        // FIXME: Should be optimized
327
        for (size_t i = begin; i != end; ++i) {
21,908,691✔
328
            int_fast64_t v = (this->*m_getter)(i);
21,216,117✔
329
            (this->*(m_vtable->setter))(dest_begin++, v);
21,216,117✔
330
        }
21,216,117✔
331
        return;
692,574✔
332
    }
692,574✔
333

334
    size_t bytes_per_elem = bits_per_elem / 8;
5,100,231✔
335
    const char* begin_2 = m_data + begin * bytes_per_elem;
5,100,231✔
336
    const char* end_2 = m_data + end * bytes_per_elem;
5,100,231✔
337
    char* dest_begin_2 = m_data + dest_begin * bytes_per_elem;
5,100,231✔
338
    realm::safe_copy_n(begin_2, end_2 - begin_2, dest_begin_2);
5,100,231✔
339
}
5,100,231✔
340

341
void Array::move(Array& dst, size_t ndx)
342
{
6,393✔
343
    size_t dest_begin = dst.m_size;
6,393✔
344
    size_t nb_to_move = m_size - ndx;
6,393✔
345
    dst.copy_on_write();
6,393✔
346
    dst.ensure_minimum_width(this->m_ubound);
6,393✔
347
    dst.alloc(dst.m_size + nb_to_move, dst.m_width); // Make room for the new elements
6,393✔
348

349
    // cache variables used in tight loop
350
    auto getter = m_getter;
6,393✔
351
    auto setter = dst.m_vtable->setter;
6,393✔
352
    size_t sz = m_size;
6,393✔
353

354
    for (size_t i = ndx; i < sz; i++) {
739,053✔
355
        auto v = (this->*getter)(i);
732,660✔
356
        (dst.*setter)(dest_begin++, v);
732,660✔
357
    }
732,660✔
358

359
    truncate(ndx);
6,393✔
360
}
6,393✔
361

362
void Array::set(size_t ndx, int64_t value)
363
{
77,717,250✔
364
    REALM_ASSERT_3(ndx, <, m_size);
77,717,250✔
365
    if ((this->*(m_vtable->getter))(ndx) == value)
77,717,250✔
366
        return;
3,932,076✔
367

368
    // Check if we need to copy before modifying
369
    copy_on_write(); // Throws
73,785,174✔
370

371
    // Grow the array if needed to store this value
372
    ensure_minimum_width(value); // Throws
73,785,174✔
373

374
    // Set the value
375
    (this->*(m_vtable->setter))(ndx, value);
73,785,174✔
376
}
73,785,174✔
377

378
void Array::set_as_ref(size_t ndx, ref_type ref)
379
{
2,709,648✔
380
    set(ndx, from_ref(ref));
2,709,648✔
381
}
2,709,648✔
382

383
/*
384
// Optimization for the common case of adding positive values to a local array
385
// (happens a lot when returning results to TableViews)
386
void Array::add_positive_local(int64_t value)
387
{
388
    REALM_ASSERT(value >= 0);
389
    REALM_ASSERT(&m_alloc == &Allocator::get_default());
390

391
    if (value <= m_ubound) {
392
        if (m_size < m_capacity) {
393
            (this->*(m_vtable->setter))(m_size, value);
394
            ++m_size;
395
            set_header_size(m_size);
396
            return;
397
        }
398
    }
399

400
    insert(m_size, value);
401
}
402
*/
403

404
size_t Array::blob_size() const noexcept
405
{
6,495,762✔
406
    if (get_context_flag()) {
6,495,762✔
407
        size_t total_size = 0;
48✔
408
        for (size_t i = 0; i < size(); ++i) {
108✔
409
            char* header = m_alloc.translate(Array::get_as_ref(i));
60✔
410
            total_size += Array::get_size_from_header(header);
60✔
411
        }
60✔
412
        return total_size;
48✔
413
    }
48✔
414
    else {
6,495,714✔
415
        return m_size;
6,495,714✔
416
    }
6,495,714✔
417
}
6,495,762✔
418

419
void Array::insert(size_t ndx, int_fast64_t value)
420
{
458,442,246✔
421
    REALM_ASSERT_DEBUG(ndx <= m_size);
458,442,246✔
422

423
    const auto old_width = m_width;
458,442,246✔
424
    const auto old_size = m_size;
458,442,246✔
425
    const Getter old_getter = m_getter; // Save old getter before potential width expansion
458,442,246✔
426

427
    bool do_expand = value < m_lbound || value > m_ubound;
459,364,209✔
428
    if (do_expand) {
458,442,246✔
429
        size_t width = bit_width(value);
7,605,777✔
430
        REALM_ASSERT_DEBUG(width > m_width);
7,605,777✔
431
        alloc(m_size + 1, width); // Throws
7,605,777✔
432
    }
7,605,777✔
433
    else {
450,836,469✔
434
        alloc(m_size + 1, m_width); // Throws
450,836,469✔
435
    }
450,836,469✔
436

437
    // Move values below insertion (may expand)
438
    if (do_expand || old_width < 8) {
458,442,246✔
439
        size_t i = old_size;
149,236,965✔
440
        while (i > ndx) {
150,582,915✔
441
            --i;
1,345,950✔
442
            int64_t v = (this->*old_getter)(i);
1,345,950✔
443
            (this->*(m_vtable->setter))(i + 1, v);
1,345,950✔
444
        }
1,345,950✔
445
    }
149,236,965✔
446
    else if (ndx != old_size) {
309,205,281✔
447
        // when byte sized and no expansion, use memmove
448
        // FIXME: Optimize by simply dividing by 8 (or shifting right by 3 bit positions)
449
        size_t w = (old_width == 64) ? 8 : (old_width == 32) ? 4 : (old_width == 16) ? 2 : 1;
2,024,547✔
450
        char* src_begin = m_data + ndx * w;
2,024,547✔
451
        char* src_end = m_data + old_size * w;
2,024,547✔
452
        char* dst_end = src_end + w;
2,024,547✔
453
        std::copy_backward(src_begin, src_end, dst_end);
2,024,547✔
454
    }
2,024,547✔
455

456
    // Insert the new value
457
    (this->*(m_vtable->setter))(ndx, value);
458,442,246✔
458

459
    // Expand values above insertion
460
    if (do_expand) {
458,442,246✔
461
        size_t i = ndx;
7,601,052✔
462
        while (i != 0) {
23,621,469✔
463
            --i;
16,020,417✔
464
            int64_t v = (this->*old_getter)(i);
16,020,417✔
465
            (this->*(m_vtable->setter))(i, v);
16,020,417✔
466
        }
16,020,417✔
467
    }
7,601,052✔
468
}
458,442,246✔
469

470

471
void Array::truncate(size_t new_size)
472
{
1,051,365✔
473
    REALM_ASSERT(is_attached());
1,051,365✔
474
    REALM_ASSERT_3(new_size, <=, m_size);
1,051,365✔
475

476
    if (new_size == m_size)
1,051,365✔
477
        return;
77,634✔
478

479
    copy_on_write(); // Throws
973,731✔
480

481
    // Update size in accessor and in header. This leaves the capacity
482
    // unchanged.
483
    m_size = new_size;
973,731✔
484
    set_header_size(new_size);
973,731✔
485

486
    // If the array is completely cleared, we take the opportunity to
487
    // drop the width back to zero.
488
    if (new_size == 0) {
973,731✔
489
        set_width_in_header(0, get_header());
957,942✔
490
        update_width_cache_from_header();
957,942✔
491
    }
957,942✔
492
}
973,731✔
493

494

495
void Array::truncate_and_destroy_children(size_t new_size)
496
{
11,418✔
497
    REALM_ASSERT(is_attached());
11,418✔
498
    REALM_ASSERT_3(new_size, <=, m_size);
11,418✔
499

500
    if (new_size == m_size)
11,418✔
501
        return;
1,041✔
502

503
    copy_on_write(); // Throws
10,377✔
504

505
    if (m_has_refs) {
10,377✔
506
        size_t offset = new_size;
10,377✔
507
        destroy_children(offset);
10,377✔
508
    }
10,377✔
509

510
    // Update size in accessor and in header. This leaves the capacity
511
    // unchanged.
512
    m_size = new_size;
10,377✔
513
    set_header_size(new_size);
10,377✔
514

515
    // If the array is completely cleared, we take the opportunity to
516
    // drop the width back to zero.
517
    if (new_size == 0) {
10,377✔
518
        set_width_in_header(0, get_header());
9,702✔
519
        update_width_cache_from_header();
9,702✔
520
    }
9,702✔
521
}
10,377✔
522

523

524
void Array::do_ensure_minimum_width(int_fast64_t value)
525
{
2,614,377✔
526

527
    // Make room for the new value
528
    const size_t width = bit_width(value);
2,614,377✔
529

530
    REALM_ASSERT_3(width, >, m_width);
2,614,377✔
531

532
    Getter old_getter = m_getter; // Save old getter before width expansion
2,614,377✔
533
    alloc(m_size, width);         // Throws
2,614,377✔
534

535
    // Expand the old values
536
    size_t i = m_size;
2,614,377✔
537
    while (i != 0) {
18,336,477✔
538
        --i;
15,722,100✔
539
        int64_t v = (this->*old_getter)(i);
15,722,100✔
540
        (this->*(m_vtable->setter))(i, v);
15,722,100✔
541
    }
15,722,100✔
542
}
2,614,377✔
543

544
int64_t Array::sum(size_t start, size_t end) const
545
{
12,348✔
546
    REALM_TEMPEX(return sum, m_width, (start, end));
12,348✔
547
}
×
548

549
template <size_t w>
550
int64_t Array::sum(size_t start, size_t end) const
551
{
12,348✔
552
    if (end == size_t(-1))
12,348✔
553
        end = m_size;
12,348✔
554
    REALM_ASSERT_EX(end <= m_size && start <= end, start, end, m_size);
12,348✔
555

556
    if (w == 0 || start == end)
12,348!
557
        return 0;
12✔
558

559
    int64_t s = 0;
12,336✔
560

561
    // Sum manually until 128 bit aligned
562
    for (; (start < end) && (((size_t(m_data) & 0xf) * 8 + start * w) % 128 != 0); start++) {
25,122!
563
        s += get<w>(start);
12,786✔
564
    }
12,786✔
565

566
    if (w == 1 || w == 2 || w == 4) {
12,336✔
567
        // Sum of bitwidths less than a byte (which are always positive)
568
        // uses a divide and conquer algorithm that is a variation of popolation count:
569
        // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
570

571
        // static values needed for fast sums
572
        const uint64_t m2 = 0x3333333333333333ULL;
9✔
573
        const uint64_t m4 = 0x0f0f0f0f0f0f0f0fULL;
9✔
574
        const uint64_t h01 = 0x0101010101010101ULL;
9✔
575

576
        int64_t* data = reinterpret_cast<int64_t*>(m_data + start * w / 8);
9✔
577
        size_t chunks = (end - start) * w / 8 / sizeof(int64_t);
9✔
578

579
        for (size_t t = 0; t < chunks; t++) {
589,821!
580
            if (w == 1) {
589,812✔
581
#if 0
582
#if defined(USE_SSE42) && defined(_MSC_VER) && defined(REALM_PTR_64)
583
                s += __popcnt64(data[t]);
584
#elif !defined(_MSC_VER) && defined(USE_SSE42) && defined(REALM_PTR_64)
585
                s += __builtin_popcountll(data[t]);
586
#else
587
                uint64_t a = data[t];
588
                const uint64_t m1  = 0x5555555555555555ULL;
589
                a -= (a >> 1) & m1;
590
                a = (a & m2) + ((a >> 2) & m2);
591
                a = (a + (a >> 4)) & m4;
592
                a = (a * h01) >> 56;
593
                s += a;
594
#endif
595
#endif
596
                s += fast_popcount64(data[t]);
196,602✔
597
            }
196,602✔
598
            else if (w == 2) {
393,210✔
599
                uint64_t a = data[t];
393,210✔
600
                a = (a & m2) + ((a >> 2) & m2);
393,210✔
601
                a = (a + (a >> 4)) & m4;
393,210✔
602
                a = (a * h01) >> 56;
393,210✔
603

604
                s += a;
393,210✔
605
            }
393,210✔
606
            else if (w == 4) {
×
607
                uint64_t a = data[t];
×
608
                a = (a & m4) + ((a >> 4) & m4);
×
609
                a = (a * h01) >> 56;
×
610
                s += a;
×
611
            }
×
612
        }
589,812✔
613
        start += sizeof(int64_t) * 8 / no0(w) * chunks;
9✔
614
    }
9✔
615

616
#ifdef REALM_COMPILER_SSE
12,336✔
617
    if (sseavx<42>()) {
12,336!
618
        // 2000 items summed 500000 times, 8/16/32 bits, miliseconds:
619
        // Naive, templated get<>: 391 371 374
620
        // SSE:                     97 148 282
621

622
        if ((w == 8 || w == 16 || w == 32) && end - start > sizeof(__m128i) * 8 / no0(w)) {
12,336!
623
            __m128i* data = reinterpret_cast<__m128i*>(m_data + start * w / 8);
99✔
624
            __m128i sum_result = {0};
99✔
625
            __m128i sum2;
99✔
626

627
            size_t chunks = (end - start) * w / 8 / sizeof(__m128i);
99✔
628

629
            for (size_t t = 0; t < chunks; t++) {
8,798,472!
630
                if (w == 8) {
8,798,373✔
631
                    /*
632
                    // 469 ms AND disadvantage of handling max 64k elements before overflow
633
                    __m128i vl = _mm_cvtepi8_epi16(data[t]);
634
                    __m128i vh = data[t];
635
                    vh.m128i_i64[0] = vh.m128i_i64[1];
636
                    vh = _mm_cvtepi8_epi16(vh);
637
                    sum_result = _mm_add_epi16(sum_result, vl);
638
                    sum_result = _mm_add_epi16(sum_result, vh);
639
                    */
640

641
                    /*
642
                    // 424 ms
643
                    __m128i vl = _mm_unpacklo_epi8(data[t], _mm_set1_epi8(0));
644
                    __m128i vh = _mm_unpackhi_epi8(data[t], _mm_set1_epi8(0));
645
                    sum_result = _mm_add_epi32(sum_result, _mm_madd_epi16(vl, _mm_set1_epi16(1)));
646
                    sum_result = _mm_add_epi32(sum_result, _mm_madd_epi16(vh, _mm_set1_epi16(1)));
647
                    */
648

649
                    __m128i vl = _mm_cvtepi8_epi16(data[t]); // sign extend lower words 8->16
786,429✔
650
                    __m128i vh = data[t];
786,429✔
651
                    vh = _mm_srli_si128(vh, 8); // v >>= 64
786,429✔
652
                    vh = _mm_cvtepi8_epi16(vh); // sign extend lower words 8->16
786,429✔
653
                    __m128i sum1 = _mm_add_epi16(vl, vh);
786,429✔
654
                    __m128i sumH = _mm_cvtepi16_epi32(sum1);
786,429✔
655
                    __m128i sumL = _mm_srli_si128(sum1, 8); // v >>= 64
786,429✔
656
                    sumL = _mm_cvtepi16_epi32(sumL);
786,429✔
657
                    sum_result = _mm_add_epi32(sum_result, sumL);
786,429✔
658
                    sum_result = _mm_add_epi32(sum_result, sumH);
786,429✔
659
                }
786,429✔
660
                else if (w == 16) {
8,011,944✔
661
                    // todo, can overflow for array size > 2^32
662
                    __m128i vl = _mm_cvtepi16_epi32(data[t]); // sign extend lower words 16->32
1,572,960✔
663
                    __m128i vh = data[t];
1,572,960✔
664
                    vh = _mm_srli_si128(vh, 8);  // v >>= 64
1,572,960✔
665
                    vh = _mm_cvtepi16_epi32(vh); // sign extend lower words 16->32
1,572,960✔
666
                    sum_result = _mm_add_epi32(sum_result, vl);
1,572,960✔
667
                    sum_result = _mm_add_epi32(sum_result, vh);
1,572,960✔
668
                }
1,572,960✔
669
                else if (w == 32) {
6,438,984✔
670
                    __m128i v = data[t];
6,438,984✔
671
                    __m128i v0 = _mm_cvtepi32_epi64(v); // sign extend lower dwords 32->64
6,438,984✔
672
                    v = _mm_srli_si128(v, 8);           // v >>= 64
6,438,984✔
673
                    __m128i v1 = _mm_cvtepi32_epi64(v); // sign extend lower dwords 32->64
6,438,984✔
674
                    sum_result = _mm_add_epi64(sum_result, v0);
6,438,984✔
675
                    sum_result = _mm_add_epi64(sum_result, v1);
6,438,984✔
676

677
                    /*
678
                    __m128i m = _mm_set1_epi32(0xc000);             // test if overflow could happen (still need
679
                    underflow test).
680
                    __m128i mm = _mm_and_si128(data[t], m);
681
                    zz = _mm_or_si128(mm, zz);
682
                    sum_result = _mm_add_epi32(sum_result, data[t]);
683
                    */
684
                }
6,438,984✔
685
            }
8,798,373✔
686
            start += sizeof(__m128i) * 8 / no0(w) * chunks;
99✔
687

688
            // prevent taking address of 'state' to make the compiler keep it in SSE register in above loop
689
            // (vc2010/gcc4.6)
690
            sum2 = sum_result;
99✔
691

692
            // Avoid aliasing bug where sum2 might not yet be initialized when accessed by get_universal
693
            char sum3[sizeof sum2];
99✔
694
            memcpy(&sum3, &sum2, sizeof sum2);
99✔
695

696
            // Sum elements of sum
697
            for (size_t t = 0; t < sizeof(__m128i) * 8 / ((w == 8 || w == 16) ? 32 : 64); ++t) {
459!
698
                int64_t v = get_universal < (w == 8 || w == 16) ? 32 : 64 > (reinterpret_cast<char*>(&sum3), t);
360✔
699
                s += v;
360✔
700
            }
360✔
701
        }
99✔
702
    }
12,336✔
703
#endif
12,336✔
704

705
    // Sum remaining elements
706
    for (; start < end; ++start)
12,596,613!
707
        s += get<w>(start);
12,584,277✔
708

709
    return s;
12,336✔
710
}
12,348✔
711

712
// Counts the number of elements equal to `value`.
//
// For widths <= 16, elements are scanned 64 bits at a time: the chunk is
// XOR'ed against `value` replicated into every bit segment (matching
// segments become zero), each non-zero segment is collapsed to a single set
// bit, the bits are inverted so that matches are 1, and a SWAR population
// count tallies the matches. Widths 32/64 fall back to a plain element-wise
// loop. Elements left over when m_size is not a multiple of the chunk size
// are handled by the generic tail loop at the end.
size_t Array::count(int64_t value) const noexcept
{
    const uint64_t* next = reinterpret_cast<uint64_t*>(m_data);
    size_t value_count = 0;
    const size_t end = m_size;
    size_t i = 0;

    // static values needed for fast population count
    const uint64_t m1 = 0x5555555555555555ULL;
    const uint64_t m2 = 0x3333333333333333ULL;
    const uint64_t m4 = 0x0f0f0f0f0f0f0f0fULL;
    const uint64_t h01 = 0x0101010101010101ULL;

    if (m_width == 0) {
        // A width-0 array can only store zeros.
        if (value == 0)
            return m_size;
        return 0;
    }
    if (m_width == 1) {
        if (uint64_t(value) > 1)
            return 0;

        const size_t chunkvals = 64; // elements per 64-bit chunk
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            if (value == 0)
                a = ~a; // reverse

            // Classic SWAR population count of the set bits in `a`.
            a -= (a >> 1) & m1;
            a = (a & m2) + ((a >> 2) & m2);
            a = (a + (a >> 4)) & m4;
            a = (a * h01) >> 56;

            // Could use intrinsic instead:
            // a = __builtin_popcountll(a); // gcc intrinsic

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 2) {
        if (uint64_t(value) > 3)
            return 0;

        // `value` replicated into every 2-bit segment.
        const uint64_t v = ~0ULL / 0x3 * value;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0x3 * 0x1;

        const size_t chunkvals = 32;
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a &= m1;            // isolate single bit in each segment
            a ^= m1;            // reverse isolated bits
            // if (!a) continue;

            // Population count
            a = (a & m2) + ((a >> 2) & m2);
            a = (a + (a >> 4)) & m4;
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 4) {
        if (uint64_t(value) > 15)
            return 0;

        // `value` replicated into every 4-bit segment.
        const uint64_t v = ~0ULL / 0xF * value;
        const uint64_t m = ~0ULL / 0xF * 0x1;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0xF * 0x7;
        const uint64_t c2 = ~0ULL / 0xF * 0x3;

        const size_t chunkvals = 16;
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a |= (a >> 2) & c2;
            a &= m; // isolate single bit in each segment
            a ^= m; // reverse isolated bits

            // Population count
            a = (a + (a >> 4)) & m4;
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 8) {
        if (value > 0x7FLL || value < -0x80LL)
            return 0; // by casting?

        // `value` replicated into every byte.
        const uint64_t v = ~0ULL / 0xFF * value;
        const uint64_t m = ~0ULL / 0xFF * 0x1;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0xFF * 0x7F;
        const uint64_t c2 = ~0ULL / 0xFF * 0x3F;
        const uint64_t c3 = ~0ULL / 0xFF * 0x0F;

        const size_t chunkvals = 8;
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a |= (a >> 2) & c2;
            a |= (a >> 4) & c3;
            a &= m; // isolate single bit in each segment
            a ^= m; // reverse isolated bits

            // Population count
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 16) {
        if (value > 0x7FFFLL || value < -0x8000LL)
            return 0; // by casting?

        // `value` replicated into every 16-bit segment.
        const uint64_t v = ~0ULL / 0xFFFF * value;
        const uint64_t m = ~0ULL / 0xFFFF * 0x1;

        // Masks to avoid spillover between segments in cascades
        const uint64_t c1 = ~0ULL / 0xFFFF * 0x7FFF;
        const uint64_t c2 = ~0ULL / 0xFFFF * 0x3FFF;
        const uint64_t c3 = ~0ULL / 0xFFFF * 0x0FFF;
        const uint64_t c4 = ~0ULL / 0xFFFF * 0x00FF;

        const size_t chunkvals = 4;
        for (; i + chunkvals <= end; i += chunkvals) {
            uint64_t a = next[i / chunkvals];
            a ^= v;             // zero matching bit segments
            a |= (a >> 1) & c1; // cascade ones in non-zeroed segments
            a |= (a >> 2) & c2;
            a |= (a >> 4) & c3;
            a |= (a >> 8) & c4;
            a &= m; // isolate single bit in each segment
            a ^= m; // reverse isolated bits

            // Population count
            a = (a * h01) >> 56;

            value_count += to_size_t(a);
        }
    }
    else if (m_width == 32) {
        // Plain element-wise scan; compiler auto-vectorization applies.
        int32_t v = int32_t(value);
        const int32_t* d = reinterpret_cast<int32_t*>(m_data);
        for (; i < end; ++i) {
            if (d[i] == v)
                ++value_count;
        }
        return value_count;
    }
    else if (m_width == 64) {
        const int64_t* d = reinterpret_cast<int64_t*>(m_data);
        for (; i < end; ++i) {
            if (d[i] == value)
                ++value_count;
        }
        return value_count;
    }

    // Check remaining elements
    for (; i < end; ++i)
        if (value == get(i))
            ++value_count;

    return value_count;
}
887

888
size_t Array::calc_aligned_byte_size(size_t size, int width)
889
{
88,014✔
890
    REALM_ASSERT(width != 0 && (width & (width - 1)) == 0); // Is a power of two
88,014✔
891
    size_t max = std::numeric_limits<size_t>::max();
88,014✔
892
    size_t max_2 = max & ~size_t(7); // Allow for upwards 8-byte alignment
88,014✔
893
    bool overflow;
88,014✔
894
    size_t byte_size;
88,014✔
895
    if (width < 8) {
88,014✔
896
        size_t elems_per_byte = 8 / width;
81,381✔
897
        size_t byte_size_0 = size / elems_per_byte;
81,381✔
898
        if (size % elems_per_byte != 0)
81,381✔
899
            ++byte_size_0;
63✔
900
        overflow = byte_size_0 > max_2 - header_size;
81,381✔
901
        byte_size = header_size + byte_size_0;
81,381✔
902
    }
81,381✔
903
    else {
6,633✔
904
        size_t bytes_per_elem = width / 8;
6,633✔
905
        overflow = size > (max_2 - header_size) / bytes_per_elem;
6,633✔
906
        byte_size = header_size + size * bytes_per_elem;
6,633✔
907
    }
6,633✔
908
    if (overflow)
88,014✔
909
        throw std::overflow_error("Byte size overflow");
×
910
    REALM_ASSERT_3(byte_size, >, 0);
88,014✔
911
    size_t aligned_byte_size = ((byte_size - 1) | 7) + 1; // 8-byte alignment
88,014✔
912
    return aligned_byte_size;
88,014✔
913
}
88,014✔
914

915
// Recursively deep-copies the array at `mem` (owned by `alloc`) into
// `target_alloc`, returning the root of the copy.
//
// Arrays without subarrays are copied byte-for-byte; arrays with refs are
// rebuilt element by element so that every reachable subarray is cloned
// into the target allocator as well. Throws on allocation failure; the
// guard objects destroy any partially built copy in that case.
MemRef Array::clone(MemRef mem, Allocator& alloc, Allocator& target_alloc)
{
    const char* header = mem.get_addr();
    if (!get_hasrefs_from_header(header)) {
        // This array has no subarrays, so we can make a byte-for-byte
        // copy, which is more efficient.

        // Calculate size of new array in bytes
        size_t size = get_byte_size_from_header(header);

        // Create the new array
        MemRef clone_mem = target_alloc.alloc(size); // Throws
        char* clone_header = clone_mem.get_addr();

        // Copy contents
        const char* src_begin = header;
        const char* src_end = header + size;
        char* dst_begin = clone_header;
        realm::safe_copy_n(src_begin, src_end - src_begin, dst_begin);

        // Update with correct capacity
        set_capacity_in_header(size, clone_header);

        return clone_mem;
    }

    // Refs are integers, and integers arrays use wtype_Bits.
    REALM_ASSERT_3(get_wtype_from_header(header), ==, wtype_Bits);

    Array array{alloc};
    array.init_from_mem(mem);

    // Create new empty array of refs
    Array new_array(target_alloc);
    _impl::DeepArrayDestroyGuard dg(&new_array);
    Type type = get_type_from_header(header);
    bool context_flag = get_context_flag_from_header(header);
    new_array.create(type, context_flag); // Throws

    _impl::DeepArrayRefDestroyGuard dg_2(target_alloc);
    size_t n = array.size();
    for (size_t i = 0; i != n; ++i) {
        int_fast64_t value = array.get(i);

        // Null-refs signify empty subtrees. Also, all refs are
        // 8-byte aligned, so the lowest bits cannot be set. If they
        // are, it means that it should not be interpreted as a ref.
        bool is_subarray = value != 0 && (value & 1) == 0;
        if (!is_subarray) {
            new_array.add(value); // Throws
            continue;
        }

        // Recursively clone the subtree into the target allocator; the
        // guard owns the new ref until it is safely stored in new_array.
        ref_type ref = to_ref(value);
        MemRef new_mem = clone(MemRef(ref, alloc), alloc, target_alloc); // Throws
        dg_2.reset(new_mem.get_ref());
        value = from_ref(new_mem.get_ref());
        new_array.add(value); // Throws
        dg_2.release();
    }

    dg.release();
    return new_array.get_mem();
}
979

980
// Allocates and initializes a new array of `size` elements in `alloc`,
// with every element set to `value` (width chosen from `value`'s bit
// width; 0 when value == 0). Returns the memory of the new array.
// Throws on allocation failure or byte-size overflow.
MemRef Array::create(Type type, bool context_flag, WidthType width_type, size_t size, int_fast64_t value,
                     Allocator& alloc)
{
    // Non-zero fill values only make sense for bit-packed integer arrays.
    REALM_ASSERT_7(value, ==, 0, ||, width_type, ==, wtype_Bits);
    REALM_ASSERT_7(size, ==, 0, ||, width_type, !=, wtype_Ignore);

    bool is_inner_bptree_node = false, has_refs = false;
    switch (type) {
        case type_Normal:
            break;
        case type_InnerBptreeNode:
            is_inner_bptree_node = true;
            has_refs = true;
            break;
        case type_HasRefs:
            has_refs = true;
            break;
    }

    int width = 0;
    size_t byte_size_0 = header_size;
    if (value != 0) {
        width = int(bit_width(value));
        byte_size_0 = calc_aligned_byte_size(size, width); // Throws
    }
    // Adding zero to Array::initial_capacity to avoid taking the
    // address of that member
    size_t byte_size = std::max(byte_size_0, initial_capacity + 0);
    MemRef mem = alloc.alloc(byte_size); // Throws
    char* header = mem.get_addr();

    init_header(header, is_inner_bptree_node, has_refs, context_flag, width_type, width, size, byte_size);

    if (value != 0) {
        // Fill the payload with `value`, dispatching on the bit width.
        char* data = get_data_from_header(header);
        size_t begin = 0, end = size;
        REALM_TEMPEX(fill_direct, width, (data, begin, end, value));
    }

    return mem;
}
1021

1022
// This is the one installed into the m_vtable->finder slots.
1023
// Forwards a find request to ArrayWithFind's optimized search, fixing the
// comparison condition and element bit width at compile time. Installed
// into the m_vtable->finder slots by VTableForWidth below.
template <class cond, size_t bitwidth>
bool Array::find_vtable(int64_t value, size_t start, size_t end, size_t baseindex, QueryStateBase* state) const
{
    return ArrayWithFind(*this).find_optimized<cond, bitwidth>(value, start, end, baseindex, state);
}
1028

1029

1030
// Per-width vtable holding the getter/setter/finder function pointers for a
// fixed element bit width. One static instance exists per instantiated
// width; update_width_cache_from_header() selects the right one at runtime.
template <size_t width>
struct Array::VTableForWidth {
    struct PopulatedVTable : Array::VTable {
        PopulatedVTable()
        {
            getter = &Array::get<width>;
            setter = &Array::set<width>;
            chunk_getter = &Array::get_chunk<width>;
            // One finder per supported query condition.
            finder[cond_Equal] = &Array::find_vtable<Equal, width>;
            finder[cond_NotEqual] = &Array::find_vtable<NotEqual, width>;
            finder[cond_Greater] = &Array::find_vtable<Greater, width>;
            finder[cond_Less] = &Array::find_vtable<Less, width>;
        }
    };
    static const PopulatedVTable vtable;
};

// Definition of the per-width static vtable instance.
template <size_t width>
const typename Array::VTableForWidth<width>::PopulatedVTable Array::VTableForWidth<width>::vtable;
1049

1050
// Re-reads the element width from the array header and refreshes the cached
// width, value bounds, vtable, and getter accordingly. Must be called
// whenever the header's width may have changed.
void Array::update_width_cache_from_header() noexcept
{
    auto width = get_width_from_header(get_header());
    m_lbound = lbound_for_width(width);
    m_ubound = ubound_for_width(width);

    m_width = width;

    // Select the statically populated vtable matching the new width.
    REALM_TEMPEX(m_vtable = &VTableForWidth, width, ::vtable);
    m_getter = m_vtable->getter;
}
1061

1062
// This method reads 8 concecutive values into res[8], starting from index 'ndx'. It's allowed for the 8 values to
1063
// exceed array length; in this case, remainder of res[8] will be be set to 0.
1064
template <size_t w>
void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
{
    REALM_ASSERT_3(ndx, <, m_size);

    // Number of result slots already filled by the fast path below.
    size_t i = 0;

    // if constexpr to avoid producing spurious warnings resulting from
    // instantiating for too large w
    if constexpr (w > 0 && w <= 4) {
        // Calling get<w>() in a loop results in one load per call to get, but
        // for w < 8 we can do better than that
        constexpr size_t elements_per_byte = 8 / w;

        // Round m_size down to byte granularity as the trailing bits in the last
        // byte are uninitialized
        size_t bytes_available = m_size / elements_per_byte;

        // Round start and end to be byte-aligned. Start is rounded down and
        // end is rounded up as we may read up to 7 unused bits at each end.
        size_t start = ndx / elements_per_byte;
        size_t end = std::min(bytes_available, (ndx + 8 + elements_per_byte - 1) / elements_per_byte);

        if (end > start) {
            // Loop in reverse order because data is stored in little endian order
            // (NOTE: this loop's `i` intentionally shadows the outer counter,
            // which is assigned after the chunk has been unpacked.)
            uint64_t c = 0;
            for (size_t i = end; i > start; --i) {
                c <<= 8;
                c += *reinterpret_cast<const uint8_t*>(m_data + i - 1);
            }
            // Trim off leading bits which aren't part of the requested range
            c >>= (ndx - start * elements_per_byte) * w;

            // Unpack eight consecutive w-bit values from the 64-bit word.
            uint64_t mask = (1ULL << w) - 1ULL;
            res[0] = (c >> 0 * w) & mask;
            res[1] = (c >> 1 * w) & mask;
            res[2] = (c >> 2 * w) & mask;
            res[3] = (c >> 3 * w) & mask;
            res[4] = (c >> 4 * w) & mask;
            res[5] = (c >> 5 * w) & mask;
            res[6] = (c >> 6 * w) & mask;
            res[7] = (c >> 7 * w) & mask;

            // Read the last few elements via get<w> if needed
            i = std::min<size_t>(8, end * elements_per_byte - ndx);
        }
    }

    // Slow path / tail: fetch remaining in-range elements one at a time,
    // then zero-fill any slots past the end of the array.
    for (; i + ndx < m_size && i < 8; i++)
        res[i] = get<w>(ndx + i);
    for (; i < 8; i++)
        res[i] = 0;

#ifdef REALM_DEBUG
    // Cross-check the fast path against element-wise reads.
    for (int j = 0; j + ndx < m_size && j < 8; j++) {
        int64_t expected = get<w>(ndx + j);
        REALM_ASSERT(res[j] == expected);
    }
#endif
}
1124

1125
template <>
1126
void Array::get_chunk<0>(size_t ndx, int64_t res[8]) const noexcept
1127
{
78,456✔
1128
    REALM_ASSERT_3(ndx, <, m_size);
78,456✔
1129
    memset(res, 0, sizeof(int64_t) * 8);
78,456✔
1130
}
78,456✔
1131

1132

1133
// Writes `value` at position `ndx`, with the element bit width fixed at
// compile time. The caller must ensure `value` fits in `width` bits.
template <size_t width>
void Array::set(size_t ndx, int64_t value)
{
    set_direct<width>(m_data, ndx, value);
}
1138

1139
// Adds this array's byte size, plus (recursively) that of every subarray
// reachable through refs, to `mem`.
void Array::_mem_usage(size_t& mem) const noexcept
{
    mem += get_byte_size();
    if (m_has_refs) {
        for (size_t i = 0; i < m_size; ++i) {
            int64_t val = get(i);
            // Non-zero values with the lowest bit clear are refs to
            // subarrays (refs are 8-byte aligned).
            if (val && !(val & 1)) {
                Array subarray(m_alloc);
                subarray.init_from_ref(to_ref(val));
                subarray._mem_usage(mem);
            }
        }
    }
}
1153

1154
#ifdef REALM_DEBUG
1155
namespace {
1156

1157
// Accumulates per-array memory-usage reports into a single MemStats total.
class MemStatsHandler : public Array::MemUsageHandler {
public:
    MemStatsHandler(MemStats& stats) noexcept
        : m_stats(stats)
    {
    }
    // Called once per visited array node; folds its footprint into the totals.
    void handle(ref_type, size_t allocated, size_t used) noexcept override
    {
        m_stats.allocated += allocated;
        m_stats.used += used;
        m_stats.array_count += 1;
    }

private:
    MemStats& m_stats;
};
1173

1174
} // anonymous namespace
1175

1176

1177
// Collects total allocated/used bytes and array count for this array and
// all subarrays into `stats_dest`.
void Array::stats(MemStats& stats_dest) const noexcept
{
    MemStatsHandler handler(stats_dest);
    report_memory_usage(handler);
}
1182

1183

1184
// Reports the memory footprint of this array (and, via
// report_memory_usage_2, of all subarrays) to `handler`, one call per node.
void Array::report_memory_usage(MemUsageHandler& handler) const
{
    // Visit children first so the handler sees leaves before the root.
    if (m_has_refs)
        report_memory_usage_2(handler); // Throws

    size_t used = get_byte_size();
    size_t allocated;
    if (m_alloc.is_read_only(m_ref)) {
        // Read-only (file-mapped) arrays have no slack capacity.
        allocated = used;
    }
    else {
        char* header = get_header_from_data(m_data);
        allocated = get_capacity_from_header(header);
    }
    handler.handle(m_ref, allocated, used); // Throws
}
1200

1201

1202
// Walks this array's elements, reporting the memory footprint of every
// subarray (recursively) to `handler`. Assumes m_has_refs.
void Array::report_memory_usage_2(MemUsageHandler& handler) const
{
    // Reused for every subarray visited to avoid repeated construction.
    Array subarray(m_alloc);
    for (size_t i = 0; i < m_size; ++i) {
        int_fast64_t value = get(i);
        // Skip null refs and values that are not refs. Values are not refs when
        // the least significant bit is set.
        if (value == 0 || (value & 1) == 1)
            continue;

        size_t used;
        ref_type ref = to_ref(value);
        char* header = m_alloc.translate(ref);
        bool array_has_refs = get_hasrefs_from_header(header);
        if (array_has_refs) {
            // Recurse into subarrays that themselves contain refs.
            MemRef mem(header, ref, m_alloc);
            subarray.init_from_mem(mem);
            subarray.report_memory_usage_2(handler); // Throws
            used = subarray.get_byte_size();
        }
        else {
            used = get_byte_size_from_header(header);
        }

        size_t allocated;
        if (m_alloc.is_read_only(ref)) {
            // Read-only (file-mapped) arrays have no slack capacity.
            allocated = used;
        }
        else {
            allocated = get_capacity_from_header(header);
        }
        handler.handle(ref, allocated, used); // Throws
    }
}
1236
#endif
1237

1238
// Debug-build sanity check: the array must be attached, have a legal
// element width, and agree with its parent about its ref. No-op in
// release builds.
void Array::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    REALM_ASSERT(m_width == 0 || m_width == 1 || m_width == 2 || m_width == 4 || m_width == 8 || m_width == 16 ||
                 m_width == 32 || m_width == 64);

    if (!get_parent())
        return;

    // Check that parent is set correctly
    ref_type ref_in_parent = get_ref_from_parent();
    REALM_ASSERT_3(ref_in_parent, ==, m_ref);
#endif
}
1254

1255
// Binary search: index of the first element not less than `value`.
// The array must be sorted in ascending order.
size_t Array::lower_bound_int(int64_t value) const noexcept
{
    REALM_TEMPEX(return lower_bound, m_width, (m_data, m_size, value));
}
1259

1260
// Binary search: index of the first element greater than `value`.
// The array must be sorted in ascending order.
size_t Array::upper_bound_int(int64_t value) const noexcept
{
    REALM_TEMPEX(return upper_bound, m_width, (m_data, m_size, value));
}
1264

1265

1266
// Returns the index of the first element in [start, end) equal to `value`.
size_t Array::find_first(int64_t value, size_t start, size_t end) const
{
    return find_first<Equal>(value, start, end);
}
1270

1271

1272
// Static accessor: reads element `ndx` directly from a raw array header,
// without requiring an attached Array instance.
int_fast64_t Array::get(const char* header, size_t ndx) noexcept
{
    const char* data = get_data_from_header(header);
    uint_least8_t width = get_width_from_header(header);
    return get_direct(data, width, ndx);
}
1278

1279

1280
std::pair<int64_t, int64_t> Array::get_two(const char* header, size_t ndx) noexcept
1281
{
313,428✔
1282
    const char* data = get_data_from_header(header);
313,428✔
1283
    uint_least8_t width = get_width_from_header(header);
313,428✔
1284
    std::pair<int64_t, int64_t> p = ::get_two(data, width, ndx);
313,428✔
1285
    return std::make_pair(p.first, p.second);
313,428✔
1286
}
313,428✔
1287

1288
bool QueryStateCount::match(size_t, Mixed) noexcept
1289
{
5,616✔
1290
    ++m_match_count;
5,616✔
1291
    return (m_limit > m_match_count);
5,616✔
1292
}
5,616✔
1293

1294
bool QueryStateCount::match(size_t) noexcept
1295
{
10,996,824✔
1296
    ++m_match_count;
10,996,824✔
1297
    return (m_limit > m_match_count);
10,996,824✔
1298
}
10,996,824✔
1299

1300
bool QueryStateFindFirst::match(size_t index, Mixed) noexcept
1301
{
48,075✔
1302
    m_match_count++;
48,075✔
1303
    m_state = index;
48,075✔
1304
    return false;
48,075✔
1305
}
48,075✔
1306

1307
bool QueryStateFindFirst::match(size_t index) noexcept
1308
{
5,391,492✔
1309
    ++m_match_count;
5,391,492✔
1310
    m_state = index;
5,391,492✔
1311
    return false;
5,391,492✔
1312
}
5,391,492✔
1313

1314
template <>
1315
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index, Mixed) noexcept
1316
{
13,629,372✔
1317
    ++m_match_count;
13,629,372✔
1318

1319
    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
13,629,372✔
1320
    m_keys.push_back(ObjKey(key_value));
13,629,372✔
1321

1322
    return (m_limit > m_match_count);
13,629,372✔
1323
}
13,629,372✔
1324

1325
template <>
1326
bool QueryStateFindAll<std::vector<ObjKey>>::match(size_t index) noexcept
1327
{
14,228,898✔
1328
    ++m_match_count;
14,228,898✔
1329
    int64_t key_value = (m_key_values ? m_key_values->get(index) : index) + m_key_offset;
14,228,898✔
1330
    m_keys.push_back(ObjKey(key_value));
14,228,898✔
1331

1332
    return (m_limit > m_match_count);
14,228,898✔
1333
}
14,228,898✔
1334

1335
template <>
1336
bool QueryStateFindAll<IntegerColumn>::match(size_t index, Mixed) noexcept
1337
{
×
1338
    ++m_match_count;
×
1339
    m_keys.add(index);
×
1340

1341
    return (m_limit > m_match_count);
×
1342
}
×
1343

1344
template <>
1345
bool QueryStateFindAll<IntegerColumn>::match(size_t index) noexcept
1346
{
141✔
1347
    ++m_match_count;
141✔
1348
    m_keys.add(index);
141✔
1349

1350
    return (m_limit > m_match_count);
141✔
1351
}
141✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc