realm / realm-core / build nicola.cabiddu_1042 (Evergreen)

27 Sep 2023 06:04PM UTC coverage: 91.085% (-1.8%) from 92.915%

Pull Request #6766 (nicola-cab): Client Reset for collections in mixed / nested collections
Latest commit: Fix logic for dictionaries

97276 of 178892 branches covered (54.38%)
1994 of 2029 new or added lines in 7 files covered (98.28%)
4556 existing lines in 112 files now uncovered
237059 of 260260 relevant lines covered (91.09%)
6321099.55 hits per line

Source File: /src/realm/array.hpp (95.29% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#ifndef REALM_ARRAY_HPP
#define REALM_ARRAY_HPP

#include <realm/node.hpp>
#include <realm/query_state.hpp>
#include <realm/column_fwd.hpp>
#include <realm/array_direct.hpp>

namespace realm {

// Forward declarations
class GroupWriter;
namespace _impl {
class ArrayWriterBase;
}

struct MemStats {
    size_t allocated = 0;
    size_t used = 0;
    size_t array_count = 0;
};

// Stores a value obtained from Array::get(). It is a ref if the least
// significant bit is clear, otherwise it is a tagged integer. A tagged integer
// is obtained from a logical integer value by left shifting by one bit position
// (multiplying by two), and then setting the least significant bit to
// one. Clearly, this means that the maximum value that can be stored as a
// tagged integer is 2**63 - 1.
class RefOrTagged {
public:
    bool is_ref() const noexcept;
    bool is_tagged() const noexcept;
    ref_type get_as_ref() const noexcept;
    uint_fast64_t get_as_int() const noexcept;

    static RefOrTagged make_ref(ref_type) noexcept;
    static RefOrTagged make_tagged(uint_fast64_t) noexcept;

private:
    int_fast64_t m_value;
    RefOrTagged(int_fast64_t) noexcept;
    friend class Array;
};
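
// Illustrative sketch (not part of the original header): round-tripping a
// tagged integer. Array nodes are 8-byte aligned, so a ref always has its
// least significant bit clear, which is what leaves bit 0 free as the tag:
//
//     RefOrTagged rot = RefOrTagged::make_tagged(42); // stores (42 << 1) | 1
//     REALM_ASSERT(rot.is_tagged());
//     REALM_ASSERT(rot.get_as_int() == 42);           // shifts the tag back out
//     RefOrTagged r = RefOrTagged::make_ref(ref);     // 'ref' assumed valid; bit 0 stays clear
//     REALM_ASSERT(r.is_ref());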

template <class T>
class QueryStateFindAll : public QueryStateBase {
public:
    explicit QueryStateFindAll(T& keys, size_t limit = -1)
        : QueryStateBase(limit)
        , m_keys(keys)
    {
    }
    bool match(size_t index, Mixed) noexcept final;

private:
    T& m_keys;
};

class QueryStateFindFirst : public QueryStateBase {
public:
    size_t m_state = realm::not_found;
    QueryStateFindFirst()
        : QueryStateBase(1)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
};

class Array : public Node, public ArrayParent {
public:
    /// Create an array accessor in the unattached state.
    explicit Array(Allocator& allocator) noexcept
        : Node(allocator)
    {
    }

    ~Array() noexcept override {}

    /// Create a new integer array of the specified type and size, filled
    /// with the specified value, and attach this accessor to it. This does not
    /// modify the parent reference information of this accessor.
    ///
    /// Note that the caller assumes ownership of the allocated underlying
    /// node. It is not owned by the accessor.
    void create(Type, bool context_flag = false, size_t size = 0, int_fast64_t value = 0);
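
    // Illustrative sketch (not part of the original header): because the
    // accessor does not own the node it creates, the caller must eventually
    // free it, e.g. via destroy() from the Node base class (assumed here).
    // `alloc` is assumed to be a valid Allocator:
    //
    //     Array arr(alloc);
    //     arr.create(Array::type_Normal); // caller now owns the underlying node
    //     arr.add(7);                     // use the array...
    //     arr.destroy();                  // ...then free the node explicitly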

    /// Reinitialize this array accessor to point to the specified new
    /// underlying memory. This does not modify the parent reference information
    /// of this accessor.
    void init_from_ref(ref_type ref) noexcept
    {
        REALM_ASSERT_DEBUG(ref);
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    /// Same as init_from_ref(ref_type), but avoids the mapping of 'ref' to a
    /// memory pointer.
    void init_from_mem(MemRef) noexcept;

    /// Same as `init_from_ref(get_ref_from_parent())`.
    void init_from_parent() noexcept
    {
        ref_type ref = get_ref_from_parent();
        init_from_ref(ref);
    }

    /// Called in the context of Group::commit() to ensure that attached
    /// accessors stay valid across a commit. Please note that this works only
    /// for non-transactional commits. Accessors obtained during a transaction
    /// are always detached when the transaction ends.
    void update_from_parent() noexcept;

    /// Change the type of an already attached array node.
    ///
    /// The effect of calling this function on an unattached accessor is
    /// undefined.
    void set_type(Type);

    /// Construct an empty integer array of the specified type, and return just
    /// the reference to the underlying memory.
    static MemRef create_empty_array(Type, bool context_flag, Allocator&);

    /// Construct an integer array of the specified type and size, and return
    /// just the reference to the underlying memory. All elements will be
    /// initialized to the specified value.
    static MemRef create_array(Type, bool context_flag, size_t size, int_fast64_t value, Allocator&);

    Type get_type() const noexcept;

    /// The meaning of 'width' depends on the context in which this
    /// array is used.
    size_t get_width() const noexcept
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        return m_width;
    }

    void insert(size_t ndx, int_fast64_t value);
    void add(int_fast64_t value);

    // Used from ArrayBlob
    size_t blob_size() const noexcept;
    ref_type blob_replace(size_t begin, size_t end, const char* data, size_t data_size, bool add_zero_term);

    /// This function is guaranteed to not throw if the current width is
    /// sufficient for the specified value (e.g. if you have called
    /// ensure_minimum_width(value)) and get_alloc().is_read_only(get_ref())
    /// returns false (noexcept:array-set). Note that for a value of zero, the
    /// first criterion is trivially satisfied.
    void set(size_t ndx, int64_t value);

    void set_as_ref(size_t ndx, ref_type ref);

    template <size_t w>
    void set(size_t ndx, int64_t value);

    int64_t get(size_t ndx) const noexcept;

    template <size_t w>
    int64_t get(size_t ndx) const noexcept;

    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    template <size_t w>
    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    ref_type get_as_ref(size_t ndx) const noexcept;

    RefOrTagged get_as_ref_or_tagged(size_t ndx) const noexcept;
    void set(size_t ndx, RefOrTagged);
    void add(RefOrTagged);
    void ensure_minimum_width(RefOrTagged);

    int64_t front() const noexcept;
    int64_t back() const noexcept;

    void alloc(size_t init_size, size_t new_width)
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        REALM_ASSERT_3(m_size, ==, get_size_from_header(get_header()));
        Node::alloc(init_size, new_width);
        update_width_cache_from_header();
    }

    /// Remove the element at the specified index, and move elements at higher
    /// indexes to the next lower index.
    ///
    /// This function does **not** destroy removed subarrays. That is, if the
    /// erased element is a 'ref' pointing to a subarray, then that subarray
    /// will not be destroyed automatically.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the
    /// call. This is automatically guaranteed if the array is used in a
    /// non-transactional context, or if the array has already been successfully
    /// modified within the current write transaction.
    void erase(size_t ndx);
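
    // Illustrative sketch (not part of the original header): erase() leaves
    // subarrays alive, so a caller that wants a 'ref' element gone for good
    // must destroy it first. `arr` and `ndx` are assumed:
    //
    //     RefOrTagged rot = arr.get_as_ref_or_tagged(ndx);
    //     if (rot.is_ref() && rot.get_as_ref() != 0)
    //         Array::destroy_deep(rot.get_as_ref(), arr.get_alloc());
    //     arr.erase(ndx);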

    /// Same as erase(size_t), but removes all elements in the specified
    /// range.
    ///
    /// Please note that this function does **not** destroy removed subarrays.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void erase(size_t begin, size_t end);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. This is just a shorthand for
    /// calling the ranged erase() function with appropriate arguments.
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void truncate(size_t new_size);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref, alloc)`.
    ///
    /// This function is guaranteed not to throw if
    /// get_alloc().is_read_only(get_ref()) returns false.
    void truncate_and_destroy_children(size_t new_size);

    /// Remove every element from this array. This is just a shorthand for
    /// calling truncate(0).
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear();

    /// Remove every element in this array. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref,
    /// alloc)`. This is just a shorthand for calling
    /// truncate_and_destroy_children(0).
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear_and_destroy_children();

    /// If necessary, expand the representation so that it can store the
    /// specified value.
    void ensure_minimum_width(int_fast64_t value);

    /// Add \a diff to the element at the specified index.
    void adjust(size_t ndx, int_fast64_t diff);

    /// Add \a diff to all the elements in the specified index range.
    void adjust(size_t begin, size_t end, int_fast64_t diff);

    //@{
    /// This is similar in spirit to std::move() from `<algorithm>`.
    /// \a dest_begin must not be in the range [`begin`,`end`)
    ///
    /// This function is guaranteed to not throw if
    /// `get_alloc().is_read_only(get_ref())` returns false.
    void move(size_t begin, size_t end, size_t dest_begin);
    //@}

    // Move elements from ndx and above to another array
    void move(Array& dst, size_t ndx);

    //@{
    /// Find the lower/upper bound of the specified value in a sequence of
    /// integers which must already be sorted ascendingly.
    ///
    /// For an integer value '`v`', lower_bound_int(v) returns the index '`l`'
    /// of the first element such that `get(l) >= v`, and upper_bound_int(v)
    /// returns the index '`u`' of the first element such that `get(u) >
    /// v`. In both cases, if no such element is found, the returned value is
    /// the number of elements in the array.
    ///
    ///     3 3 3 4 4 4 5 6 7 9 9 9
    ///     ^     ^     ^     ^     ^
    ///     |     |     |     |     |
    ///     |     |     |     |      -- Lower and upper bound of 15
    ///     |     |     |     |
    ///     |     |     |      -- Lower and upper bound of 8
    ///     |     |     |
    ///     |     |      -- Upper bound of 4
    ///     |     |
    ///     |      -- Lower bound of 4
    ///     |
    ///      -- Lower and upper bound of 1
    ///
    /// These functions are similar to std::lower_bound() and
    /// std::upper_bound().
    ///
    /// We currently use binary search. See for example
    /// http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary.
    ///
    /// FIXME: It may be worth considering if overall efficiency can be improved
    /// by doing a linear search for short sequences.
    size_t lower_bound_int(int64_t value) const noexcept;
    size_t upper_bound_int(int64_t value) const noexcept;
    //@}
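
    // Illustrative sketch (not part of the original header): on the sorted
    // sequence from the diagram above, `arr` assumed to hold those values:
    //
    //     // contents: 3 3 3 4 4 4 5 6 7 9 9 9
    //     size_t l = arr.lower_bound_int(4);  // 3: first index with element >= 4
    //     size_t u = arr.upper_bound_int(4);  // 6: first index with element > 4
    //     size_t n = arr.lower_bound_int(15); // 12 == arr.size(): no such element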

    int64_t get_sum(size_t start = 0, size_t end = size_t(-1)) const
    {
        return sum(start, end);
    }

    /// This information is guaranteed to be cached in the array accessor.
    bool is_inner_bptree_node() const noexcept;

    /// Returns true if type is either type_HasRefs or type_InnerColumnNode.
    ///
    /// This information is guaranteed to be cached in the array accessor.
    bool has_refs() const noexcept;
    void set_has_refs(bool) noexcept;

    /// This information is guaranteed to be cached in the array accessor.
    ///
    /// Columns and indexes can use the context bit to differentiate leaf types.
    bool get_context_flag() const noexcept;
    void set_context_flag(bool) noexcept;

    /// Recursively destroy children (as if calling
    /// clear_and_destroy_children()), then put this accessor into the detached
    /// state (as if calling detach()), then free the allocated memory. If this
    /// accessor is already in the detached state, this function has no effect
    /// (idempotency).
    void destroy_deep() noexcept;

    /// Shorthand for `destroy_deep(MemRef(ref, alloc), alloc)`.
    static void destroy_deep(ref_type ref, Allocator& alloc) noexcept;

    /// Destroy the specified array node and all of its children, recursively.
    ///
    /// This is done by freeing the specified array node after calling
    /// destroy_deep() for every contained 'ref' element.
    static void destroy_deep(MemRef, Allocator&) noexcept;

    // Clone deep
    static MemRef clone(MemRef, Allocator& from_alloc, Allocator& target_alloc);

    // Serialization

    /// Returns the ref (position in the target stream) of the written copy of
    /// this array, or the ref of the original array if \a only_if_modified is
    /// true, and this array is unmodified (Alloc::is_read_only()).
    ///
    /// The number of bytes that will be written by a non-recursive invocation
    /// of this function is exactly the number returned by get_byte_size().
    ///
    /// \param out The destination stream (writer).
    ///
    /// \param deep If true, recursively write out subarrays, but still subject
    /// to \a only_if_modified.
    ///
    /// \param only_if_modified Set to `false` to always write, or to `true` to
    /// only write the array if it has been modified.
    ref_type write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const;

    /// Same as the non-static write() with `deep` set to true. This is for the
    /// cases where you do not already have an array accessor available.
    static ref_type write(ref_type, Allocator&, _impl::ArrayWriterBase&, bool only_if_modified);

    size_t find_first(int64_t value, size_t begin = 0, size_t end = size_t(-1)) const;

    // Wrappers for backwards compatibility, and for simple use without
    // setting up state initialization etc.
    template <class cond>
    size_t find_first(int64_t value, size_t start = 0, size_t end = size_t(-1)) const
    {
        REALM_ASSERT(start <= m_size && (end <= m_size || end == size_t(-1)) && start <= end);
        // todo, would be nice to avoid this in order to speed up find_first loops
        QueryStateFindFirst state;
        Finder finder = m_vtable->finder[cond::condition];
        (this->*finder)(value, start, end, 0, &state);

        return static_cast<size_t>(state.m_state);
    }
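
    // Illustrative sketch (not part of the original header): `Equal` below is
    // assumed to be one of the condition types usable as `cond`; it supplies
    // the `condition` index into the finder vtable:
    //
    //     size_t ndx = arr.find_first<Equal>(42);
    //     if (ndx != realm::not_found) {
    //         // arr.get(ndx) == 42
    //     }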

    /// Get the specified element without the cost of constructing an
    /// array instance. If an array instance is already available, or
    /// you need to get multiple values, then this method will be
    /// slower.
    static int_fast64_t get(const char* header, size_t ndx) noexcept;

    /// Like get(const char*, size_t) but gets two consecutive
    /// elements.
    static std::pair<int64_t, int64_t> get_two(const char* header, size_t ndx) noexcept;

    static RefOrTagged get_as_ref_or_tagged(const char* header, size_t ndx) noexcept
    {
        return get(header, ndx);
    }

    /// Get the number of bytes currently in use by this array. This
    /// includes the array header, but it does not include allocated
    /// bytes corresponding to excess capacity. The result is
    /// guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
    ///
    /// This number is exactly the number of bytes that will be
    /// written by a non-recursive invocation of write().
    size_t get_byte_size() const noexcept;

    /// Get the maximum number of bytes that can be written by a
    /// non-recursive invocation of write() on an array with the
    /// specified number of elements, that is, the maximum value that
    /// can be returned by get_byte_size().
    static size_t get_max_byte_size(size_t num_elems) noexcept;

    /// FIXME: Belongs in IntegerArray
    static size_t calc_aligned_byte_size(size_t size, int width);

#ifdef REALM_DEBUG
    class MemUsageHandler {
    public:
        virtual void handle(ref_type ref, size_t allocated, size_t used) = 0;
    };

    void report_memory_usage(MemUsageHandler&) const;

    void stats(MemStats& stats_dest) const noexcept;
#endif

    void verify() const;

    Array& operator=(const Array&) = delete; // not allowed
    Array(const Array&) = delete;            // not allowed

protected:
    // This returns the minimum value ("lower bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t lbound_for_width(size_t width) noexcept;

    // This returns the maximum value ("inclusive upper bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t ubound_for_width(size_t width) noexcept;

    // This will have to be used eventually; exposing it here for testing.
    size_t count(int64_t value) const noexcept;

private:
    void update_width_cache_from_header() noexcept;

    void do_ensure_minimum_width(int_fast64_t);

    int64_t sum(size_t start, size_t end) const;

    template <size_t w>
    int64_t sum(size_t start, size_t end) const;

protected:
    /// It is an error to specify a non-zero value unless the width
    /// type is wtype_Bits. It is also an error to specify a non-zero
    /// size if the width type is wtype_Ignore.
    static MemRef create(Type, bool context_flag, WidthType, size_t size, int_fast64_t value, Allocator&);

    // Overriding method in ArrayParent
    void update_child_ref(size_t, ref_type) override;

    // Overriding method in ArrayParent
    ref_type get_child_ref(size_t) const noexcept override;

    void destroy_children(size_t offset = 0) noexcept;

protected:
    // Getters and Setters for adaptive-packed arrays
    typedef int64_t (Array::*Getter)(size_t) const; // Note: getters must not throw
    typedef void (Array::*Setter)(size_t, int64_t);
    typedef bool (Array::*Finder)(int64_t, size_t, size_t, size_t, QueryStateBase*) const;
    typedef void (Array::*ChunkGetter)(size_t, int64_t res[8]) const; // Note: getters must not throw

    struct VTable {
        Getter getter;
        ChunkGetter chunk_getter;
        Setter setter;
        Finder finder[cond_VTABLE_FINDER_COUNT]; // one for each active function pointer
    };
    template <size_t w>
    struct VTableForWidth;

    // This is the one installed into the m_vtable->finder slots.
    template <class cond, size_t bitwidth>
    bool find_vtable(int64_t value, size_t start, size_t end, size_t baseindex, QueryStateBase* state) const;

    template <size_t w>
    int64_t get_universal(const char* const data, const size_t ndx) const;

protected:
    /// Takes a 64-bit value and returns the minimum number of bits needed
    /// to fit the value. For alignment, this is rounded up to the nearest
    /// supported width. Possible results: {0, 1, 2, 4, 8, 16, 32, 64}
    static size_t bit_width(int64_t value);
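
    // Illustrative values (not part of the original header). Since widths
    // below 8 encode non-negative values only (see lbound_for_width() below),
    // any negative input needs at least 8 bits:
    //
    //     bit_width(0)  == 0
    //     bit_width(1)  == 1
    //     bit_width(3)  == 2
    //     bit_width(15) == 4
    //     bit_width(16) == 8   // 5 bits, rounded up to the next supported width
    //     bit_width(-1) == 8   // negative values require width >= 8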

protected:
    Getter m_getter = nullptr; // cached to avoid indirection
    const VTable* m_vtable = nullptr;

    uint_least8_t m_width = 0; // Size of an element (meaning depends on the type of array).
    int64_t m_lbound;          // min number that can be stored with current m_width
    int64_t m_ubound;          // max number that can be stored with current m_width

    bool m_is_inner_bptree_node; // This array is an inner node of B+-tree.
    bool m_has_refs;             // Elements whose first bit is zero are refs to subarrays.
    bool m_context_flag;         // Meaning depends on context.

private:
    ref_type do_write_shallow(_impl::ArrayWriterBase&) const;
    ref_type do_write_deep(_impl::ArrayWriterBase&, bool only_if_modified) const;

#ifdef REALM_DEBUG
    void report_memory_usage_2(MemUsageHandler&) const;
#endif

    friend class Allocator;
    friend class SlabAlloc;
    friend class GroupWriter;
    friend class ArrayWithFind;
};

// Implementation:


constexpr inline int_fast64_t Array::lbound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return -0x80000000LL;
    }
    else if (width == 16) {
        return -0x8000LL;
    }
    else if (width < 8) {
        return 0;
    }
    else if (width == 8) {
        return -0x80LL;
    }
    else if (width == 64) {
        return -0x8000000000000000LL;
    }
    else {
        REALM_UNREACHABLE();
    }
}

constexpr inline int_fast64_t Array::ubound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return 0x7FFFFFFFLL;
    }
    else if (width == 16) {
        return 0x7FFFLL;
    }
    else if (width == 0) {
        return 0;
    }
    else if (width == 1) {
        return 1;
    }
    else if (width == 2) {
        return 3;
    }
    else if (width == 4) {
        return 15;
    }
    else if (width == 8) {
        return 0x7FLL;
    }
    else if (width == 64) {
        return 0x7FFFFFFFFFFFFFFFLL;
    }
    else {
        REALM_UNREACHABLE();
    }
}
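
// Illustrative summary (not part of the original header) of the representable
// range per width, as implied by the two functions above:
//
//     width:   0   1   2    4      8        16           32         64
//     lbound:  0   0   0    0   -0x80   -0x8000   -0x80000000   INT64_MIN
//     ubound:  0   1   3   15    0x7F    0x7FFF    0x7FFFFFFF   INT64_MAX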

inline bool RefOrTagged::is_ref() const noexcept
{
    return (m_value & 1) == 0;
}

inline bool RefOrTagged::is_tagged() const noexcept
{
    return !is_ref();
}

inline ref_type RefOrTagged::get_as_ref() const noexcept
{
    // to_ref() is defined in <alloc.hpp>
    return to_ref(m_value);
}

inline uint_fast64_t RefOrTagged::get_as_int() const noexcept
{
    // The bitwise AND is there in case uint_fast64_t is wider than 64 bits.
    return (uint_fast64_t(m_value) & 0xFFFFFFFFFFFFFFFFULL) >> 1;
}

inline RefOrTagged RefOrTagged::make_ref(ref_type ref) noexcept
{
    // from_ref() is defined in <alloc.hpp>
    int_fast64_t value = from_ref(ref);
    return RefOrTagged(value);
}

inline RefOrTagged RefOrTagged::make_tagged(uint_fast64_t i) noexcept
{
    REALM_ASSERT(i < (1ULL << 63));
    return RefOrTagged((i << 1) | 1);
}

inline RefOrTagged::RefOrTagged(int_fast64_t value) noexcept
    : m_value(value)
{
}

inline void Array::create(Type type, bool context_flag, size_t length, int_fast64_t value)
{
    MemRef mem = create_array(type, context_flag, length, value, m_alloc); // Throws
    init_from_mem(mem);
}


inline Array::Type Array::get_type() const noexcept
{
    if (m_is_inner_bptree_node) {
        REALM_ASSERT_DEBUG(m_has_refs);
        return type_InnerBptreeNode;
    }
    if (m_has_refs)
        return type_HasRefs;
    return type_Normal;
}


inline void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
{
    REALM_ASSERT_DEBUG(ndx < m_size);
    (this->*(m_vtable->chunk_getter))(ndx, res);
}

template <size_t w>
int64_t Array::get_universal(const char* data, size_t ndx) const
{
    if (w == 0) {
        return 0;
    }
    else if (w == 1) {
        size_t offset = ndx >> 3;
        return (data[offset] >> (ndx & 7)) & 0x01;
    }
    else if (w == 2) {
        size_t offset = ndx >> 2;
        return (data[offset] >> ((ndx & 3) << 1)) & 0x03;
    }
    else if (w == 4) {
        size_t offset = ndx >> 1;
        return (data[offset] >> ((ndx & 1) << 2)) & 0x0F;
    }
    else if (w == 8) {
        return *reinterpret_cast<const signed char*>(data + ndx);
    }
    else if (w == 16) {
        size_t offset = ndx * 2;
        return *reinterpret_cast<const int16_t*>(data + offset);
    }
    else if (w == 32) {
        size_t offset = ndx * 4;
        return *reinterpret_cast<const int32_t*>(data + offset);
    }
    else if (w == 64) {
        size_t offset = ndx * 8;
        return *reinterpret_cast<const int64_t*>(data + offset);
    }
    else {
        REALM_ASSERT_DEBUG(false);
        return int64_t(-1);
    }
}

template <size_t w>
int64_t Array::get(size_t ndx) const noexcept
{
    return get_universal<w>(m_data, ndx);
}

inline int64_t Array::get(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(ndx < m_size, ndx, m_size);
    return (this->*m_getter)(ndx);

    // Two ideas that are not efficient but may be worth looking into again:
    /*
        // Assume the correct width is found early in REALM_TEMPEX, which is the case for B tree offsets that
        // are probably either 2^16 long. Turns out to be 25% faster if found immediately, but 50-300% slower
        // if found later
        REALM_TEMPEX(return get, (ndx));
    */
    /*
        // Slightly slower in both of the if-cases. Also needs a match-count m_size check too, to avoid
        // reading beyond the array.
        if (m_width >= 8 && m_size > ndx + 7)
            return get<64>(ndx >> m_shift) & m_widthmask;
        else
            return (this->*(m_vtable->getter))(ndx);
    */
}

inline int64_t Array::front() const noexcept
{
    return get(0);
}

inline int64_t Array::back() const noexcept
{
    return get(m_size - 1);
}

inline ref_type Array::get_as_ref(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(m_has_refs, m_ref, ndx, m_size);
    int64_t v = get(ndx);
    return to_ref(v);
}

inline RefOrTagged Array::get_as_ref_or_tagged(size_t ndx) const noexcept
{
    REALM_ASSERT(has_refs());
    return RefOrTagged(get(ndx));
}

inline void Array::set(size_t ndx, RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    set(ndx, ref_or_tagged.m_value); // Throws
}

inline void Array::add(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    add(ref_or_tagged.m_value); // Throws
}

inline void Array::ensure_minimum_width(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    ensure_minimum_width(ref_or_tagged.m_value); // Throws
}

inline bool Array::is_inner_bptree_node() const noexcept
{
    return m_is_inner_bptree_node;
}

inline bool Array::has_refs() const noexcept
{
    return m_has_refs;
}

inline void Array::set_has_refs(bool value) noexcept
{
    if (m_has_refs != value) {
        REALM_ASSERT(!is_read_only());
        m_has_refs = value;
        set_hasrefs_in_header(value, get_header());
    }
}

inline bool Array::get_context_flag() const noexcept
{
    return m_context_flag;
}

inline void Array::set_context_flag(bool value) noexcept
{
    if (m_context_flag != value) {
        copy_on_write();
        m_context_flag = value;
        set_context_flag_in_header(value, get_header());
    }
}

inline void Array::destroy_deep() noexcept
{
    if (!is_attached())
        return;

    if (m_has_refs)
        destroy_children();

    char* header = get_header_from_data(m_data);
    m_alloc.free_(m_ref, header);
    m_data = nullptr;
}

inline ref_type Array::write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const
{
    REALM_ASSERT(is_attached());

    if (only_if_modified && m_alloc.is_read_only(m_ref))
        return m_ref;

    if (!deep || !m_has_refs)
        return do_write_shallow(out); // Throws

    return do_write_deep(out, only_if_modified); // Throws
}

inline ref_type Array::write(ref_type ref, Allocator& alloc, _impl::ArrayWriterBase& out, bool only_if_modified)
{
    if (only_if_modified && alloc.is_read_only(ref))
        return ref;

    Array array(alloc);
    array.init_from_ref(ref);

    if (!array.m_has_refs)
        return array.do_write_shallow(out); // Throws

    return array.do_write_deep(out, only_if_modified); // Throws
}

inline void Array::add(int_fast64_t value)
{
    insert(m_size, value);
}

inline void Array::erase(size_t ndx)
{
    // This can throw, but only if the array is currently in read-only
    // memory.
    move(ndx + 1, size(), ndx);

    // Update size (also in header)
    --m_size;
    set_header_size(m_size);
}


inline void Array::erase(size_t begin, size_t end)
{
    if (begin != end) {
        // This can throw, but only if the array is currently in read-only memory.
        move(end, size(), begin); // Throws

        // Update size (also in header)
        m_size -= end - begin;
        set_header_size(m_size);
    }
}

inline void Array::clear()
{
    truncate(0); // Throws
}
inline void Array::clear_and_destroy_children()
{
    truncate_and_destroy_children(0);
}

inline void Array::destroy_deep(ref_type ref, Allocator& alloc) noexcept
{
    destroy_deep(MemRef(ref, alloc), alloc);
}

inline void Array::destroy_deep(MemRef mem, Allocator& alloc) noexcept
{
    if (!get_hasrefs_from_header(mem.get_addr())) {
        alloc.free_(mem);
        return;
    }
    Array array(alloc);
    array.init_from_mem(mem);
    array.destroy_deep();
}


inline void Array::adjust(size_t ndx, int_fast64_t diff)
{
    REALM_ASSERT_3(ndx, <=, m_size);
    if (diff != 0) {
        // FIXME: Should be optimized
        int_fast64_t v = get(ndx);
        set(ndx, int64_t(v + diff)); // Throws
    }
}

inline void Array::adjust(size_t begin, size_t end, int_fast64_t diff)
{
    if (diff != 0) {
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i)
            adjust(i, diff); // Throws
    }
}


//-------------------------------------------------


inline size_t Array::get_byte_size() const noexcept
{
    const char* header = get_header_from_data(m_data);
    WidthType wtype = Node::get_wtype_from_header(header);
    size_t num_bytes = NodeHeader::calc_byte_size(wtype, m_size, m_width);

    REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));

    return num_bytes;
}


//-------------------------------------------------

inline MemRef Array::create_empty_array(Type type, bool context_flag, Allocator& alloc)
{
    size_t size = 0;
    int_fast64_t value = 0;
    return create_array(type, context_flag, size, value, alloc); // Throws
}

inline MemRef Array::create_array(Type type, bool context_flag, size_t size, int_fast64_t value, Allocator& alloc)
{
    return create(type, context_flag, wtype_Bits, size, value, alloc); // Throws
}

inline size_t Array::get_max_byte_size(size_t num_elems) noexcept
{
    int max_bytes_per_elem = 8;
    return header_size + num_elems * max_bytes_per_elem;
}


inline void Array::update_child_ref(size_t child_ndx, ref_type new_ref)
{
    set(child_ndx, new_ref);
}

inline ref_type Array::get_child_ref(size_t child_ndx) const noexcept
{
    return get_as_ref(child_ndx);
}

inline void Array::ensure_minimum_width(int_fast64_t value)
{
    if (value >= m_lbound && value <= m_ubound)
        return;
    do_ensure_minimum_width(value);
}
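
// Illustrative sketch (not part of the original header): widening happens
// lazily, only when a value falls outside the current [m_lbound, m_ubound]
// range. For an array currently at width 4 (values 0..15):
//
//     arr.ensure_minimum_width(9);   // no-op: 9 fits in 4 bits
//     arr.ensure_minimum_width(100); // widens, e.g. to width 8 (-0x80..0x7F)
//     arr.set(ndx, 100);             // now guaranteed not to throw by the
//                                    // noexcept:array-set contract on set()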

} // namespace realm

#endif // REALM_ARRAY_HPP