
realm / realm-core / nicola.cabiddu_1040

26 Sep 2023 05:08PM UTC coverage: 91.056% (-1.9%) from 92.915%

Pull Request #6766: Client Reset for collections in mixed / nested collections
Author: nicola-cab
Commit message: "several fixes and final client reset algo for collection in mixed"
CI: Evergreen

97128 of 178458 branches covered (54.43%)
1524 of 1603 new or added lines in 5 files covered (95.07%)
4511 existing lines in 109 files now uncovered
236619 of 259862 relevant lines covered (91.06%)
7169640.31 hits per line

Source File: /src/realm/array.hpp (95.29% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#ifndef REALM_ARRAY_HPP
#define REALM_ARRAY_HPP

#include <realm/node.hpp>
#include <realm/query_state.hpp>
#include <realm/column_fwd.hpp>
#include <realm/array_direct.hpp>

namespace realm {

// Forward declarations
class GroupWriter;
namespace _impl {
class ArrayWriterBase;
}

struct MemStats {
    size_t allocated = 0;
    size_t used = 0;
    size_t array_count = 0;
};

// Stores a value obtained from Array::get(). It is a ref if the least
// significant bit is clear, otherwise it is a tagged integer. A tagged integer
// is obtained from a logical integer value by left shifting by one bit position
// (multiplying by two), and then setting the least significant bit to
// one. Clearly, this means that the maximum value that can be stored as a
// tagged integer is 2**63 - 1.
class RefOrTagged {
public:
    bool is_ref() const noexcept;
    bool is_tagged() const noexcept;
    ref_type get_as_ref() const noexcept;
    uint_fast64_t get_as_int() const noexcept;

    static RefOrTagged make_ref(ref_type) noexcept;
    static RefOrTagged make_tagged(uint_fast64_t) noexcept;

private:
    int_fast64_t m_value;
    RefOrTagged(int_fast64_t) noexcept;
    friend class Array;
};
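
// Usage sketch (illustrative, not part of the original header): round-tripping
// a value through the tagged-integer encoding described above.
//
//     RefOrTagged rt = RefOrTagged::make_tagged(42); // stores (42 << 1) | 1
//     REALM_ASSERT(rt.is_tagged() && !rt.is_ref());
//     uint_fast64_t v = rt.get_as_int();             // recovers 42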

template <class T>
class QueryStateFindAll : public QueryStateBase {
public:
    explicit QueryStateFindAll(T& keys, size_t limit = -1)
        : QueryStateBase(limit)
        , m_keys(keys)
    {
    }
    bool match(size_t index, Mixed) noexcept final;

private:
    T& m_keys;
};

class QueryStateFindFirst : public QueryStateBase {
public:
    size_t m_state = realm::not_found;
    QueryStateFindFirst()
        : QueryStateBase(1)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
};

class Array : public Node, public ArrayParent {
public:
    /// Create an array accessor in the unattached state.
    explicit Array(Allocator& allocator) noexcept
        : Node(allocator)
    {
    }

    ~Array() noexcept override {}

    /// Create a new integer array of the specified type and size, and filled
    /// with the specified value, and attach this accessor to it. This does not
    /// modify the parent reference information of this accessor.
    ///
    /// Note that the caller assumes ownership of the allocated underlying
    /// node. It is not owned by the accessor.
    void create(Type, bool context_flag = false, size_t size = 0, int_fast64_t value = 0);

    /// Reinitialize this array accessor to point to the specified new
    /// underlying memory. This does not modify the parent reference information
    /// of this accessor.
    void init_from_ref(ref_type ref) noexcept
    {
        REALM_ASSERT_DEBUG(ref);
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    /// Same as init_from_ref(ref_type) but avoid the mapping of 'ref' to memory
    /// pointer.
    void init_from_mem(MemRef) noexcept;

    /// Same as `init_from_ref(get_ref_from_parent())`.
    void init_from_parent() noexcept
    {
        ref_type ref = get_ref_from_parent();
        init_from_ref(ref);
    }

    /// Called in the context of Group::commit() to ensure that attached
    /// accessors stay valid across a commit. Please note that this works only
    /// for non-transactional commits. Accessors obtained during a transaction
    /// are always detached when the transaction ends.
    void update_from_parent() noexcept;

    /// Change the type of an already attached array node.
    ///
    /// The effect of calling this function on an unattached accessor is
    /// undefined.
    void set_type(Type);

    /// Construct an empty integer array of the specified type, and return just
    /// the reference to the underlying memory.
    static MemRef create_empty_array(Type, bool context_flag, Allocator&);

    /// Construct an integer array of the specified type and size, and return
    /// just the reference to the underlying memory. All elements will be
    /// initialized to the specified value.
    static MemRef create_array(Type, bool context_flag, size_t size, int_fast64_t value, Allocator&);

    Type get_type() const noexcept;

    /// The meaning of 'width' depends on the context in which this
    /// array is used.
    size_t get_width() const noexcept
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        return m_width;
    }

    void insert(size_t ndx, int_fast64_t value);
    void add(int_fast64_t value);

    // Used from ArrayBlob
    size_t blob_size() const noexcept;
    ref_type blob_replace(size_t begin, size_t end, const char* data, size_t data_size, bool add_zero_term);

    /// This function is guaranteed to not throw if the current width is
    /// sufficient for the specified value (e.g. if you have called
    /// ensure_minimum_width(value)) and get_alloc().is_read_only(get_ref())
    /// returns false (noexcept:array-set). Note that for a value of zero, the
    /// first criterion is trivially satisfied.
    void set(size_t ndx, int64_t value);

    void set_as_ref(size_t ndx, ref_type ref);

    template <size_t w>
    void set(size_t ndx, int64_t value);

    int64_t get(size_t ndx) const noexcept;

    template <size_t w>
    int64_t get(size_t ndx) const noexcept;

    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    template <size_t w>
    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    ref_type get_as_ref(size_t ndx) const noexcept;

    RefOrTagged get_as_ref_or_tagged(size_t ndx) const noexcept;
    void set(size_t ndx, RefOrTagged);
    void add(RefOrTagged);
    void ensure_minimum_width(RefOrTagged);

    int64_t front() const noexcept;
    int64_t back() const noexcept;

    void alloc(size_t init_size, size_t new_width)
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        REALM_ASSERT_3(m_size, ==, get_size_from_header(get_header()));
        Node::alloc(init_size, new_width);
        update_width_cache_from_header();
    }

    /// Remove the element at the specified index, and move elements at higher
    /// indexes to the next lower index.
    ///
    /// This function does **not** destroy removed subarrays. That is, if the
    /// erased element is a 'ref' pointing to a subarray, then that subarray
    /// will not be destroyed automatically.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the
    /// call. This is automatically guaranteed if the array is used in a
    /// non-transactional context, or if the array has already been successfully
    /// modified within the current write transaction.
    void erase(size_t ndx);

    /// Same as erase(size_t), but remove all elements in the specified
    /// range.
    ///
    /// Please note that this function does **not** destroy removed subarrays.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void erase(size_t begin, size_t end);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. This is just a shorthand for
    /// calling the ranged erase() function with appropriate arguments.
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void truncate(size_t new_size);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref, alloc)`.
    ///
    /// This function is guaranteed not to throw if
    /// get_alloc().is_read_only(get_ref()) returns false.
    void truncate_and_destroy_children(size_t new_size);

    /// Remove every element from this array. This is just a shorthand for
    /// calling truncate(0).
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear();

    /// Remove every element in this array. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref,
    /// alloc)`. This is just a shorthand for calling
    /// truncate_and_destroy_children(0).
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear_and_destroy_children();

    /// If necessary, expand the representation so that it can store the
    /// specified value.
    void ensure_minimum_width(int_fast64_t value);

    /// Add \a diff to the element at the specified index.
    void adjust(size_t ndx, int_fast64_t diff);

    /// Add \a diff to all the elements in the specified index range.
    void adjust(size_t begin, size_t end, int_fast64_t diff);

    //@{
    /// This is similar in spirit to std::move() from `<algorithm>`.
    /// \a dest_begin must not be in the range [`begin`,`end`)
    ///
    /// This function is guaranteed to not throw if
    /// `get_alloc().is_read_only(get_ref())` returns false.
    void move(size_t begin, size_t end, size_t dest_begin);
    //@}

    // Move elements from ndx and above to another array
    void move(Array& dst, size_t ndx);

    //@{
    /// Find the lower/upper bound of the specified value in a sequence of
    /// integers which must already be sorted in ascending order.
    ///
    /// For an integer value '`v`', lower_bound_int(v) returns the index '`l`'
    /// of the first element such that `get(l) &ge; v`, and upper_bound_int(v)
    /// returns the index '`u`' of the first element such that `get(u) &gt;
    /// v`. In both cases, if no such element is found, the returned value is
    /// the number of elements in the array.
    ///
    ///     3 3 3 4 4 4 5 6 7 9 9 9
    ///     ^     ^     ^     ^     ^
    ///     |     |     |     |     |
    ///     |     |     |     |      -- Lower and upper bound of 15
    ///     |     |     |     |
    ///     |     |     |      -- Lower and upper bound of 8
    ///     |     |     |
    ///     |     |      -- Upper bound of 4
    ///     |     |
    ///     |      -- Lower bound of 4
    ///     |
    ///      -- Lower and upper bound of 1
    ///
    /// These functions are similar to std::lower_bound() and
    /// std::upper_bound().
    ///
    /// We currently use binary search. See for example
    /// http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary.
    ///
    /// FIXME: It may be worth considering if overall efficiency can be improved
    /// by doing a linear search for short sequences.
    size_t lower_bound_int(int64_t value) const noexcept;
    size_t upper_bound_int(int64_t value) const noexcept;
    //@}
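
    // Illustrative sketch (not part of the original header): with the sorted
    // sequence from the diagram above, 3 3 3 4 4 4 5 6 7 9 9 9:
    //
    //     arr.lower_bound_int(4);  // == 3, first index i with get(i) >= 4
    //     arr.upper_bound_int(4);  // == 6, first index i with get(i) > 4
    //     arr.lower_bound_int(8);  // == 9, as is upper_bound_int(8)
    //     arr.lower_bound_int(15); // == 12 (== size()); no element is >= 15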

    int64_t get_sum(size_t start = 0, size_t end = size_t(-1)) const
    {
        return sum(start, end);
    }

    /// This information is guaranteed to be cached in the array accessor.
    bool is_inner_bptree_node() const noexcept;

    /// Returns true if type is either type_HasRefs or type_InnerColumnNode.
    ///
    /// This information is guaranteed to be cached in the array accessor.
    bool has_refs() const noexcept;
    void set_has_refs(bool) noexcept;

    /// This information is guaranteed to be cached in the array accessor.
    ///
    /// Columns and indexes can use the context bit to differentiate leaf types.
    bool get_context_flag() const noexcept;
    void set_context_flag(bool) noexcept;

    /// Recursively destroy children (as if calling
    /// clear_and_destroy_children()), then put this accessor into the detached
    /// state (as if calling detach()), then free the allocated memory. If this
    /// accessor is already in the detached state, this function has no effect
    /// (idempotency).
    void destroy_deep() noexcept;

    /// Shorthand for `destroy_deep(MemRef(ref, alloc), alloc)`.
    static void destroy_deep(ref_type ref, Allocator& alloc) noexcept;

    /// Destroy the specified array node and all of its children, recursively.
    ///
    /// This is done by freeing the specified array node after calling
    /// destroy_deep() for every contained 'ref' element.
    static void destroy_deep(MemRef, Allocator&) noexcept;

    // Clone deep
    static MemRef clone(MemRef, Allocator& from_alloc, Allocator& target_alloc);

    // Serialization

    /// Returns the ref (position in the target stream) of the written copy of
    /// this array, or the ref of the original array if \a only_if_modified is
    /// true, and this array is unmodified (Alloc::is_read_only()).
    ///
    /// The number of bytes that will be written by a non-recursive invocation
    /// of this function is exactly the number returned by get_byte_size().
    ///
    /// \param out The destination stream (writer).
    ///
    /// \param deep If true, recursively write out subarrays, but still subject
    /// to \a only_if_modified.
    ///
    /// \param only_if_modified Set to `false` to always write, or to `true` to
    /// only write the array if it has been modified.
    ref_type write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const;

    /// Same as non-static write() with `deep` set to true. This is for the
    /// cases where you do not already have an array accessor available.
    static ref_type write(ref_type, Allocator&, _impl::ArrayWriterBase&, bool only_if_modified);

    size_t find_first(int64_t value, size_t begin = 0, size_t end = size_t(-1)) const;

    // Wrappers for backwards compatibility, and for simple use without
    // setting up state initialization, etc.
    template <class cond>
    size_t find_first(int64_t value, size_t start = 0, size_t end = size_t(-1)) const
    {
        REALM_ASSERT(start <= m_size && (end <= m_size || end == size_t(-1)) && start <= end);
        // todo, would be nice to avoid this in order to speed up find_first loops
        QueryStateFindFirst state;
        Finder finder = m_vtable->finder[cond::condition];
        (this->*finder)(value, start, end, 0, &state);

        return static_cast<size_t>(state.m_state);
    }
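
    // Usage sketch (illustrative, not part of the original header), assuming
    // the condition classes (e.g. Equal) from <realm/query_conditions.hpp>:
    //
    //     size_t ndx = arr.find_first<Equal>(42); // index of first 42
    //     if (ndx != realm::not_found) {
    //         // ...
    //     }
    //
    // The wrapper builds a one-shot QueryStateFindFirst and dispatches through
    // the width-specialized finder stored in m_vtable.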

    /// Get the specified element without the cost of constructing an
    /// array instance. If an array instance is already available, or
    /// you need to get multiple values, then this method will be
    /// slower.
    static int_fast64_t get(const char* header, size_t ndx) noexcept;

    /// Like get(const char*, size_t) but gets two consecutive
    /// elements.
    static std::pair<int64_t, int64_t> get_two(const char* header, size_t ndx) noexcept;

    static RefOrTagged get_as_ref_or_tagged(const char* header, size_t ndx) noexcept
    {
        return get(header, ndx);
    }

    /// Get the number of bytes currently in use by this array. This
    /// includes the array header, but it does not include allocated
    /// bytes corresponding to excess capacity. The result is
    /// guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
    ///
    /// This number is exactly the number of bytes that will be
    /// written by a non-recursive invocation of write().
    size_t get_byte_size() const noexcept;

    /// Get the maximum number of bytes that can be written by a
    /// non-recursive invocation of write() on an array with the
    /// specified number of elements, that is, the maximum value that
    /// can be returned by get_byte_size().
    static size_t get_max_byte_size(size_t num_elems) noexcept;

    /// FIXME: Belongs in IntegerArray
    static size_t calc_aligned_byte_size(size_t size, int width);

#ifdef REALM_DEBUG
    class MemUsageHandler {
    public:
        virtual void handle(ref_type ref, size_t allocated, size_t used) = 0;
    };

    void report_memory_usage(MemUsageHandler&) const;

    void stats(MemStats& stats_dest) const noexcept;
#endif

    void verify() const;

    Array& operator=(const Array&) = delete; // not allowed
    Array(const Array&) = delete;            // not allowed

protected:
    // This returns the minimum value ("lower bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t lbound_for_width(size_t width) noexcept;

    // This returns the maximum value ("inclusive upper bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t ubound_for_width(size_t width) noexcept;

    // This will eventually have to be used; it is exposed here for testing.
    size_t count(int64_t value) const noexcept;

private:
    void update_width_cache_from_header() noexcept;

    void do_ensure_minimum_width(int_fast64_t);

    int64_t sum(size_t start, size_t end) const;

    template <size_t w>
    int64_t sum(size_t start, size_t end) const;

protected:
    /// It is an error to specify a non-zero value unless the width
    /// type is wtype_Bits. It is also an error to specify a non-zero
    /// size if the width type is wtype_Ignore.
    static MemRef create(Type, bool context_flag, WidthType, size_t size, int_fast64_t value, Allocator&);

    // Overriding method in ArrayParent
    void update_child_ref(size_t, ref_type) override;

    // Overriding method in ArrayParent
    ref_type get_child_ref(size_t) const noexcept override;

    void destroy_children(size_t offset = 0) noexcept;

protected:
    // Getters and Setters for adaptive-packed arrays
    typedef int64_t (Array::*Getter)(size_t) const; // Note: getters must not throw
    typedef void (Array::*Setter)(size_t, int64_t);
    typedef bool (Array::*Finder)(int64_t, size_t, size_t, size_t, QueryStateBase*) const;
    typedef void (Array::*ChunkGetter)(size_t, int64_t res[8]) const; // Note: getters must not throw

    struct VTable {
        Getter getter;
        ChunkGetter chunk_getter;
        Setter setter;
        Finder finder[cond_VTABLE_FINDER_COUNT]; // one for each active function pointer
    };
    template <size_t w>
    struct VTableForWidth;

    // This is the one installed into the m_vtable->finder slots.
    template <class cond, size_t bitwidth>
    bool find_vtable(int64_t value, size_t start, size_t end, size_t baseindex, QueryStateBase* state) const;

    template <size_t w>
    int64_t get_universal(const char* const data, const size_t ndx) const;

protected:
    /// Takes a 64-bit value and returns the minimum number of bits needed
    /// to fit the value. For alignment this is rounded up to the nearest
    /// valid width. Possible results: {0, 1, 2, 4, 8, 16, 32, 64}
    static size_t bit_width(int64_t value);
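
    // Expected mapping, for illustration (an assumption about the
    // implementation, not taken from this header): bit_width(0) == 0,
    // bit_width(1) == 1, bit_width(3) == 2, bit_width(5) == 4 (3 bits,
    // rounded up to a valid width), bit_width(100) == 8. Negative values
    // need a sign bit, so they require a width of at least 8 (narrower
    // widths are unsigned; see lbound_for_width() below).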

protected:
    Getter m_getter = nullptr; // cached to avoid indirection
    const VTable* m_vtable = nullptr;

    uint_least8_t m_width = 0; // Size of an element (meaning depends on type of array).
    int64_t m_lbound;          // min number that can be stored with current m_width
    int64_t m_ubound;          // max number that can be stored with current m_width

    bool m_is_inner_bptree_node; // This array is an inner node of B+-tree.
    bool m_has_refs;             // Elements whose first bit is zero are refs to subarrays.
    bool m_context_flag;         // Meaning depends on context.

private:
    ref_type do_write_shallow(_impl::ArrayWriterBase&) const;
    ref_type do_write_deep(_impl::ArrayWriterBase&, bool only_if_modified) const;

#ifdef REALM_DEBUG
    void report_memory_usage_2(MemUsageHandler&) const;
#endif

    friend class Allocator;
    friend class SlabAlloc;
    friend class GroupWriter;
    friend class ArrayWithFind;
};

// Implementation:


constexpr inline int_fast64_t Array::lbound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return -0x80000000LL;
    }
    else if (width == 16) {
        return -0x8000LL;
    }
    else if (width < 8) {
        return 0;
    }
    else if (width == 8) {
        return -0x80LL;
    }
    else if (width == 64) {
        return -0x8000000000000000LL;
    }
    else {
        REALM_UNREACHABLE();
    }
}

constexpr inline int_fast64_t Array::ubound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return 0x7FFFFFFFLL;
    }
    else if (width == 16) {
        return 0x7FFFLL;
    }
    else if (width == 0) {
        return 0;
    }
    else if (width == 1) {
        return 1;
    }
    else if (width == 2) {
        return 3;
    }
    else if (width == 4) {
        return 15;
    }
    else if (width == 8) {
        return 0x7FLL;
    }
    else if (width == 64) {
        return 0x7FFFFFFFFFFFFFFFLL;
    }
    else {
        REALM_UNREACHABLE();
    }
}
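
// Summary of the two functions above (illustrative, not part of the original
// header): widths below 8 hold unsigned values, wider widths hold
// two's-complement signed values.
//
//     width:   0   1   2   4     8        16       32        64
//     lbound:  0   0   0   0    -0x80    -0x8000  -2^31     -2^63
//     ubound:  0   1   3   15    0x7F     0x7FFF   2^31-1    2^63-1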

inline bool RefOrTagged::is_ref() const noexcept
{
    return (m_value & 1) == 0;
}

inline bool RefOrTagged::is_tagged() const noexcept
{
    return !is_ref();
}

inline ref_type RefOrTagged::get_as_ref() const noexcept
{
    // to_ref() is defined in <alloc.hpp>
    return to_ref(m_value);
}

inline uint_fast64_t RefOrTagged::get_as_int() const noexcept
{
    // The bitwise AND is there in case uint_fast64_t is wider than 64 bits.
    return (uint_fast64_t(m_value) & 0xFFFFFFFFFFFFFFFFULL) >> 1;
}

inline RefOrTagged RefOrTagged::make_ref(ref_type ref) noexcept
{
    // from_ref() is defined in <alloc.hpp>
    int_fast64_t value = from_ref(ref);
    return RefOrTagged(value);
}

inline RefOrTagged RefOrTagged::make_tagged(uint_fast64_t i) noexcept
{
    REALM_ASSERT(i < (1ULL << 63));
    return RefOrTagged((i << 1) | 1);
}

inline RefOrTagged::RefOrTagged(int_fast64_t value) noexcept
    : m_value(value)
{
}

inline void Array::create(Type type, bool context_flag, size_t length, int_fast64_t value)
{
    MemRef mem = create_array(type, context_flag, length, value, m_alloc); // Throws
    init_from_mem(mem);
}


inline Array::Type Array::get_type() const noexcept
{
    if (m_is_inner_bptree_node) {
        REALM_ASSERT_DEBUG(m_has_refs);
        return type_InnerBptreeNode;
    }
    if (m_has_refs)
        return type_HasRefs;
    return type_Normal;
}


inline void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
{
    REALM_ASSERT_DEBUG(ndx < m_size);
    (this->*(m_vtable->chunk_getter))(ndx, res);
}

template <size_t w>
int64_t Array::get_universal(const char* data, size_t ndx) const
{
    if (w == 0) {
        return 0;
    }
    else if (w == 1) {
        size_t offset = ndx >> 3;
        return (data[offset] >> (ndx & 7)) & 0x01;
    }
    else if (w == 2) {
        size_t offset = ndx >> 2;
        return (data[offset] >> ((ndx & 3) << 1)) & 0x03;
    }
    else if (w == 4) {
        size_t offset = ndx >> 1;
        return (data[offset] >> ((ndx & 1) << 2)) & 0x0F;
    }
    else if (w == 8) {
        return *reinterpret_cast<const signed char*>(data + ndx);
    }
    else if (w == 16) {
        size_t offset = ndx * 2;
        return *reinterpret_cast<const int16_t*>(data + offset);
    }
    else if (w == 32) {
        size_t offset = ndx * 4;
        return *reinterpret_cast<const int32_t*>(data + offset);
    }
    else if (w == 64) {
        size_t offset = ndx * 8;
        return *reinterpret_cast<const int64_t*>(data + offset);
    }
    else {
        REALM_ASSERT_DEBUG(false);
        return int64_t(-1);
    }
}
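
// Worked example (illustrative, not part of the original header): for w == 2,
// four elements are packed per byte, least significant bits first, so the
// byte 0b11100100 (0xE4) decodes as:
//
//     get_universal<2>(data, 0) == 0 // bits 0-1: 0b00
//     get_universal<2>(data, 1) == 1 // bits 2-3: 0b01
//     get_universal<2>(data, 2) == 2 // bits 4-5: 0b10
//     get_universal<2>(data, 3) == 3 // bits 6-7: 0b11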

template <size_t w>
int64_t Array::get(size_t ndx) const noexcept
{
    return get_universal<w>(m_data, ndx);
}

inline int64_t Array::get(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(ndx < m_size, ndx, m_size);
    return (this->*m_getter)(ndx);

    // Two ideas that are not efficient but may be worth looking into again:
    /*
        // Assume correct width is found early in REALM_TEMPEX, which is the case for B tree offsets that
        // are probably either 2^16 long. Turns out to be 25% faster if found immediately, but 50-300% slower
        // if found later
        REALM_TEMPEX(return get, (ndx));
    */
    /*
        // Slightly slower in both of the if-cases. Also needs a matchcount m_size check to avoid
        // reading beyond the array.
        if (m_width >= 8 && m_size > ndx + 7)
            return get<64>(ndx >> m_shift) & m_widthmask;
        else
            return (this->*(m_vtable->getter))(ndx);
    */
}

inline int64_t Array::front() const noexcept
{
    return get(0);
}

inline int64_t Array::back() const noexcept
{
    return get(m_size - 1);
}

inline ref_type Array::get_as_ref(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(m_has_refs, m_ref, ndx, m_size);
    int64_t v = get(ndx);
    return to_ref(v);
}

inline RefOrTagged Array::get_as_ref_or_tagged(size_t ndx) const noexcept
{
    REALM_ASSERT(has_refs());
    return RefOrTagged(get(ndx));
}

inline void Array::set(size_t ndx, RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    set(ndx, ref_or_tagged.m_value); // Throws
}

inline void Array::add(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    add(ref_or_tagged.m_value); // Throws
}

inline void Array::ensure_minimum_width(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    ensure_minimum_width(ref_or_tagged.m_value); // Throws
}

inline bool Array::is_inner_bptree_node() const noexcept
{
    return m_is_inner_bptree_node;
}

inline bool Array::has_refs() const noexcept
{
    return m_has_refs;
}

inline void Array::set_has_refs(bool value) noexcept
{
    if (m_has_refs != value) {
        REALM_ASSERT(!is_read_only());
        m_has_refs = value;
        set_hasrefs_in_header(value, get_header());
    }
}

inline bool Array::get_context_flag() const noexcept
{
    return m_context_flag;
}

inline void Array::set_context_flag(bool value) noexcept
{
    if (m_context_flag != value) {
        copy_on_write();
        m_context_flag = value;
        set_context_flag_in_header(value, get_header());
    }
}

inline void Array::destroy_deep() noexcept
{
    if (!is_attached())
        return;

    if (m_has_refs)
        destroy_children();

    char* header = get_header_from_data(m_data);
    m_alloc.free_(m_ref, header);
    m_data = nullptr;
}

inline ref_type Array::write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const
{
    REALM_ASSERT(is_attached());

    if (only_if_modified && m_alloc.is_read_only(m_ref))
        return m_ref;

    if (!deep || !m_has_refs)
        return do_write_shallow(out); // Throws

    return do_write_deep(out, only_if_modified); // Throws
}

inline ref_type Array::write(ref_type ref, Allocator& alloc, _impl::ArrayWriterBase& out, bool only_if_modified)
{
    if (only_if_modified && alloc.is_read_only(ref))
        return ref;

    Array array(alloc);
    array.init_from_ref(ref);

    if (!array.m_has_refs)
        return array.do_write_shallow(out); // Throws

    return array.do_write_deep(out, only_if_modified); // Throws
}

inline void Array::add(int_fast64_t value)
{
    insert(m_size, value);
}

inline void Array::erase(size_t ndx)
{
    // This can throw, but only if array is currently in read-only
    // memory.
    move(ndx + 1, size(), ndx);

    // Update size (also in header)
    --m_size;
    set_header_size(m_size);
}
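
// Illustrative walk-through (not part of the original header): erasing index 1
// of [10, 20, 30] first shifts the tail down with move(2, 3, 1), giving
// [10, 30, 30], and then shrinks the size to 2, leaving [10, 30].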


inline void Array::erase(size_t begin, size_t end)
{
    if (begin != end) {
        // This can throw, but only if array is currently in read-only memory.
        move(end, size(), begin); // Throws

        // Update size (also in header)
        m_size -= end - begin;
        set_header_size(m_size);
    }
}

inline void Array::clear()
{
    truncate(0); // Throws
}

inline void Array::clear_and_destroy_children()
{
    truncate_and_destroy_children(0);
}

inline void Array::destroy_deep(ref_type ref, Allocator& alloc) noexcept
{
    destroy_deep(MemRef(ref, alloc), alloc);
}

inline void Array::destroy_deep(MemRef mem, Allocator& alloc) noexcept
{
    if (!get_hasrefs_from_header(mem.get_addr())) {
        alloc.free_(mem);
        return;
    }
    Array array(alloc);
    array.init_from_mem(mem);
    array.destroy_deep();
}


inline void Array::adjust(size_t ndx, int_fast64_t diff)
{
    REALM_ASSERT_3(ndx, <=, m_size);
    if (diff != 0) {
        // FIXME: Should be optimized
        int_fast64_t v = get(ndx);
        set(ndx, int64_t(v + diff)); // Throws
    }
}

inline void Array::adjust(size_t begin, size_t end, int_fast64_t diff)
{
    if (diff != 0) {
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i)
            adjust(i, diff); // Throws
    }
}


//-------------------------------------------------


inline size_t Array::get_byte_size() const noexcept
{
    const char* header = get_header_from_data(m_data);
    WidthType wtype = Node::get_wtype_from_header(header);
    size_t num_bytes = NodeHeader::calc_byte_size(wtype, m_size, m_width);

    REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));

    return num_bytes;
}


//-------------------------------------------------

inline MemRef Array::create_empty_array(Type type, bool context_flag, Allocator& alloc)
{
    size_t size = 0;
    int_fast64_t value = 0;
    return create_array(type, context_flag, size, value, alloc); // Throws
}

inline MemRef Array::create_array(Type type, bool context_flag, size_t size, int_fast64_t value, Allocator& alloc)
{
    return create(type, context_flag, wtype_Bits, size, value, alloc); // Throws
}

inline size_t Array::get_max_byte_size(size_t num_elems) noexcept
{
    int max_bytes_per_elem = 8;
    return header_size + num_elems * max_bytes_per_elem;
}


inline void Array::update_child_ref(size_t child_ndx, ref_type new_ref)
{
    set(child_ndx, new_ref);
}

inline ref_type Array::get_child_ref(size_t child_ndx) const noexcept
{
    return get_as_ref(child_ndx);
}

inline void Array::ensure_minimum_width(int_fast64_t value)
{
    if (value >= m_lbound && value <= m_ubound)
        return;
    do_ensure_minimum_width(value);
}
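
// Illustrative sketch (not part of the original header): for an array
// currently at width 8 (m_lbound == -0x80, m_ubound == 0x7F),
// ensure_minimum_width(300) finds 300 outside [m_lbound, m_ubound] and
// delegates to do_ensure_minimum_width(), which has to widen the
// representation; width 16 is the first valid width whose bounds contain 300.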

} // namespace realm

#endif // REALM_ARRAY_HPP