• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / github_pull_request_281750

30 Oct 2023 03:37PM UTC coverage: 90.528% (-1.0%) from 91.571%
github_pull_request_281750

Pull #6073

Evergreen

jedelbo
Log free space and history sizes when opening file
Pull Request #6073: Merge next-major

95488 of 175952 branches covered (0.0%)

8973 of 12277 new or added lines in 149 files covered. (73.09%)

622 existing lines in 51 files now uncovered.

233503 of 257934 relevant lines covered (90.53%)

6533720.56 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

95.36
/src/realm/array.hpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#ifndef REALM_ARRAY_HPP
20
#define REALM_ARRAY_HPP
21

22
#include <realm/node.hpp>
23
#include <realm/query_state.hpp>
24
#include <realm/column_fwd.hpp>
25
#include <realm/array_direct.hpp>
26

27
namespace realm {
28

29
// Pre-definitions
30
class GroupWriter;
31
namespace _impl {
32
class ArrayWriterBase;
33
}
34

35
// Aggregated memory statistics for an array subtree. Filled in by
// Array::stats() (available in REALM_DEBUG builds).
struct MemStats {
    size_t allocated = 0;   // bytes allocated, including excess capacity
    size_t used = 0;        // bytes actually in use
    size_t array_count = 0; // number of array nodes accounted for
};
40

41
// Stores a value obtained from Array::get(). It is a ref if the least
42
// significant bit is clear, otherwise it is a tagged integer. A tagged integer
43
// is obtained from a logical integer value by left shifting by one bit position
44
// (multiplying by two), and then setting the least significant bit to
45
// one. Clearly, this means that the maximum value that can be stored as a
46
// tagged integer is 2**63 - 1.
47
class RefOrTagged {
public:
    // True when the least significant bit is clear, i.e. the value is a ref.
    bool is_ref() const noexcept;
    // True when the value is a tagged integer (LSB set).
    bool is_tagged() const noexcept;
    // Precondition: is_ref(). Returns the stored ref.
    ref_type get_as_ref() const noexcept;
    // Precondition: is_tagged(). Returns the logical integer value
    // (the stored value shifted right by one to drop the tag bit).
    uint_fast64_t get_as_int() const noexcept;

    // Factory functions — the only way to construct instances from outside.
    static RefOrTagged make_ref(ref_type) noexcept;
    static RefOrTagged make_tagged(uint_fast64_t) noexcept;

private:
    int_fast64_t m_value; // encoded value; see class comment above
    RefOrTagged(int_fast64_t) noexcept;
    friend class Array;
};
62

63

64
// Query state that accumulates matching indexes into the referenced
// container `keys` (presumably up to `limit` matches — the match() body is
// defined elsewhere; confirm there).
template <class T>
class QueryStateFindAll : public QueryStateBase {
public:
    explicit QueryStateFindAll(T& keys, size_t limit = -1)
        : QueryStateBase(limit)
        , m_keys(keys)
    {
    }
    bool match(size_t index, Mixed) noexcept final;

private:
    T& m_keys; // caller-owned destination for matches; must outlive this state
};
77

78
// Query state that stops after the first match (limit of 1). After the
// search, m_state holds the index of the match, or realm::not_found —
// see Array::find_first<cond>(), which returns it as the result.
class QueryStateFindFirst : public QueryStateBase {
public:
    size_t m_state = realm::not_found;
    QueryStateFindFirst()
        : QueryStateBase(1)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
};
87

88
/// Accessor for an integer array node. Combines the low-level Node
/// representation with parent linkage (ArrayParent) and a width-specialized
/// vtable for get/set/find dispatch.
class Array : public Node, public ArrayParent {
public:
    /// Create an array accessor in the unattached state.
    explicit Array(Allocator& allocator) noexcept
        : Node(allocator)
    {
    }

    ~Array() noexcept override {}

    /// Create a new integer array of the specified type and size, and filled
    /// with the specified value, and attach this accessor to it. This does not
    /// modify the parent reference information of this accessor.
    ///
    /// Note that the caller assumes ownership of the allocated underlying
    /// node. It is not owned by the accessor.
    void create(Type, bool context_flag = false, size_t size = 0, int_fast64_t value = 0);

    /// Reinitialize this array accessor to point to the specified new
    /// underlying memory. This does not modify the parent reference information
    /// of this accessor.
    void init_from_ref(ref_type ref) noexcept
    {
        REALM_ASSERT_DEBUG(ref);
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    /// Same as init_from_ref(ref_type) but avoid the mapping of 'ref' to memory
    /// pointer.
    void init_from_mem(MemRef) noexcept;

    /// Same as `init_from_ref(get_ref_from_parent())`.
    void init_from_parent() noexcept
    {
        ref_type ref = get_ref_from_parent();
        init_from_ref(ref);
    }

    /// Called in the context of Group::commit() to ensure that attached
    /// accessors stay valid across a commit. Please note that this works only
    /// for non-transactional commits. Accessors obtained during a transaction
    /// are always detached when the transaction ends.
    void update_from_parent() noexcept;

    /// Change the type of an already attached array node.
    ///
    /// The effect of calling this function on an unattached accessor is
    /// undefined.
    void set_type(Type);

    /// Construct an empty integer array of the specified type, and return just
    /// the reference to the underlying memory.
    static MemRef create_empty_array(Type, bool context_flag, Allocator&);

    /// Construct an integer array of the specified type and size, and return
    /// just the reference to the underlying memory. All elements will be
    /// initialized to the specified value.
    static MemRef create_array(Type, bool context_flag, size_t size, int_fast64_t value, Allocator&);

    Type get_type() const noexcept;

    /// The meaning of 'width' depends on the context in which this
    /// array is used.
    size_t get_width() const noexcept
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        return m_width;
    }

    void insert(size_t ndx, int_fast64_t value);
    void add(int_fast64_t value);

    // Used from ArrayBlob
    size_t blob_size() const noexcept;
    ref_type blob_replace(size_t begin, size_t end, const char* data, size_t data_size, bool add_zero_term);

    /// This function is guaranteed to not throw if the current width is
    /// sufficient for the specified value (e.g. if you have called
    /// ensure_minimum_width(value)) and get_alloc().is_read_only(get_ref())
    /// returns false (noexcept:array-set). Note that for a value of zero, the
    /// first criterion is trivially satisfied.
    void set(size_t ndx, int64_t value);

    void set_as_ref(size_t ndx, ref_type ref);

    template <size_t w>
    void set(size_t ndx, int64_t value);

    int64_t get(size_t ndx) const noexcept;

    template <size_t w>
    int64_t get(size_t ndx) const noexcept;

    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    template <size_t w>
    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    ref_type get_as_ref(size_t ndx) const noexcept;

    RefOrTagged get_as_ref_or_tagged(size_t ndx) const noexcept;
    void set(size_t ndx, RefOrTagged);
    void add(RefOrTagged);
    void ensure_minimum_width(RefOrTagged);

    int64_t front() const noexcept;
    int64_t back() const noexcept;

    // Allocate (or grow) the underlying node and refresh the cached
    // width-dependent state afterwards.
    void alloc(size_t init_size, size_t new_width)
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        REALM_ASSERT_3(m_size, ==, get_size_from_header(get_header()));
        Node::alloc(init_size, new_width);
        update_width_cache_from_header();
    }

    /// Remove the element at the specified index, and move elements at higher
    /// indexes to the next lower index.
    ///
    /// This function does **not** destroy removed subarrays. That is, if the
    /// erased element is a 'ref' pointing to a subarray, then that subarray
    /// will not be destroyed automatically.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the
    /// call. This is automatically guaranteed if the array is used in a
    /// non-transactional context, or if the array has already been successfully
    /// modified within the current write transaction.
    void erase(size_t ndx);

    /// Same as erase(size_t), but remove all elements in the specified
    /// range.
    ///
    /// Please note that this function does **not** destroy removed subarrays.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void erase(size_t begin, size_t end);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. This is just a shorthand for
    /// calling the ranged erase() function with appropriate arguments.
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void truncate(size_t new_size);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref, alloc)`.
    ///
    /// This function is guaranteed not to throw if
    /// get_alloc().is_read_only(get_ref()) returns false.
    void truncate_and_destroy_children(size_t new_size);

    /// Remove every element from this array. This is just a shorthand for
    /// calling truncate(0).
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear();

    /// Remove every element in this array. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref,
    /// alloc)`. This is just a shorthand for calling
    /// truncate_and_destroy_children(0).
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear_and_destroy_children();

    /// If necessary, expand the representation so that it can store the
    /// specified value.
    void ensure_minimum_width(int_fast64_t value);

    /// Add \a diff to the element at the specified index.
    void adjust(size_t ndx, int_fast64_t diff);

    /// Add \a diff to all the elements in the specified index range.
    void adjust(size_t begin, size_t end, int_fast64_t diff);

    //@{
    /// This is similar in spirit to std::move() from `<algorithm>`.
    /// \a dest_begin must not be in the range [`begin`,`end`)
    ///
    /// This function is guaranteed to not throw if
    /// `get_alloc().is_read_only(get_ref())` returns false.
    void move(size_t begin, size_t end, size_t dest_begin);
    //@}

    // Move elements from ndx and above to another array
    void move(Array& dst, size_t ndx);

    //@{
    /// Find the lower/upper bound of the specified value in a sequence of
    /// integers which must already be sorted ascendingly.
    ///
    /// For an integer value '`v`', lower_bound_int(v) returns the index '`l`'
    /// of the first element such that `get(l) &ge; v`, and upper_bound_int(v)
    /// returns the index '`u`' of the first element such that `get(u) &gt;
    /// v`. In both cases, if no such element is found, the returned value is
    /// the number of elements in the array.
    ///
    ///     3 3 3 4 4 4 5 6 7 9 9 9
    ///     ^     ^     ^     ^     ^
    ///     |     |     |     |     |
    ///     |     |     |     |      -- Lower and upper bound of 15
    ///     |     |     |     |
    ///     |     |     |      -- Lower and upper bound of 8
    ///     |     |     |
    ///     |     |      -- Upper bound of 4
    ///     |     |
    ///     |      -- Lower bound of 4
    ///     |
    ///      -- Lower and upper bound of 1
    ///
    /// These functions are similar to std::lower_bound() and
    /// std::upper_bound().
    ///
    /// We currently use binary search. See for example
    /// http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary.
    ///
    /// FIXME: It may be worth considering if overall efficiency can be improved
    /// by doing a linear search for short sequences.
    size_t lower_bound_int(int64_t value) const noexcept;
    size_t upper_bound_int(int64_t value) const noexcept;
    //@}

    int64_t get_sum(size_t start = 0, size_t end = size_t(-1)) const
    {
        return sum(start, end);
    }

    /// This information is guaranteed to be cached in the array accessor.
    bool is_inner_bptree_node() const noexcept;

    /// Returns true if type is either type_HasRefs or type_InnerColumnNode.
    ///
    /// This information is guaranteed to be cached in the array accessor.
    bool has_refs() const noexcept;
    void set_has_refs(bool) noexcept;

    /// This information is guaranteed to be cached in the array accessor.
    ///
    /// Columns and indexes can use the context bit to differentiate leaf types.
    bool get_context_flag() const noexcept;
    void set_context_flag(bool) noexcept;

    /// Recursively destroy children (as if calling
    /// clear_and_destroy_children()), then put this accessor into the detached
    /// state (as if calling detach()), then free the allocated memory. If this
    /// accessor is already in the detached state, this function has no effect
    /// (idempotency).
    void destroy_deep() noexcept;

    /// Shorthand for `destroy_deep(MemRef(ref, alloc), alloc)`.
    static void destroy_deep(ref_type ref, Allocator& alloc) noexcept;

    /// Destroy the specified array node and all of its children, recursively.
    ///
    /// This is done by freeing the specified array node after calling
    /// destroy_deep() for every contained 'ref' element.
    static void destroy_deep(MemRef, Allocator&) noexcept;

    // Clone deep
    static MemRef clone(MemRef, Allocator& from_alloc, Allocator& target_alloc);

    // Serialization

    /// Returns the ref (position in the target stream) of the written copy of
    /// this array, or the ref of the original array if \a only_if_modified is
    /// true, and this array is unmodified (Alloc::is_read_only()).
    ///
    /// The number of bytes that will be written by a non-recursive invocation
    /// of this function is exactly the number returned by get_byte_size().
    ///
    /// \param out The destination stream (writer).
    ///
    /// \param deep If true, recursively write out subarrays, but still subject
    /// to \a only_if_modified.
    ///
    /// \param only_if_modified Set to `false` to always write, or to `true` to
    /// only write the array if it has been modified.
    ref_type write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const;

    /// Same as non-static write() with `deep` set to true. This is for the
    /// cases where you do not already have an array accessor available.
    static ref_type write(ref_type, Allocator&, _impl::ArrayWriterBase&, bool only_if_modified);

    size_t find_first(int64_t value, size_t begin = 0, size_t end = size_t(-1)) const;

    // Wrappers for backwards compatibility and for simple use without
    // setting up state initialization etc
    template <class cond>
    size_t find_first(int64_t value, size_t start = 0, size_t end = size_t(-1)) const
    {
        REALM_ASSERT(start <= m_size && (end <= m_size || end == size_t(-1)) && start <= end);
        // todo, would be nice to avoid this in order to speed up find_first loops
        QueryStateFindFirst state;
        Finder finder = m_vtable->finder[cond::condition];
        (this->*finder)(value, start, end, 0, &state);

        return static_cast<size_t>(state.m_state);
    }

    /// Get the specified element without the cost of constructing an
    /// array instance. If an array instance is already available, or
    /// you need to get multiple values, then this method will be
    /// slower.
    static int_fast64_t get(const char* header, size_t ndx) noexcept;

    /// Like get(const char*, size_t) but gets two consecutive
    /// elements.
    static std::pair<int64_t, int64_t> get_two(const char* header, size_t ndx) noexcept;

    static RefOrTagged get_as_ref_or_tagged(const char* header, size_t ndx) noexcept
    {
        return get(header, ndx);
    }

    /// Get the number of bytes currently in use by this array. This
    /// includes the array header, but it does not include allocated
    /// bytes corresponding to excess capacity. The result is
    /// guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
    ///
    /// This number is exactly the number of bytes that will be
    /// written by a non-recursive invocation of write().
    size_t get_byte_size() const noexcept;

    // Get the number of bytes used by this array and its sub-arrays
    size_t get_byte_size_deep() const noexcept
    {
        size_t mem = 0;
        _mem_usage(mem);
        return mem;
    }


    /// Get the maximum number of bytes that can be written by a
    /// non-recursive invocation of write() on an array with the
    /// specified number of elements, that is, the maximum value that
    /// can be returned by get_byte_size().
    static size_t get_max_byte_size(size_t num_elems) noexcept;

    /// FIXME: Belongs in IntegerArray
    static size_t calc_aligned_byte_size(size_t size, int width);

#ifdef REALM_DEBUG
    class MemUsageHandler {
    public:
        virtual void handle(ref_type ref, size_t allocated, size_t used) = 0;
    };

    void report_memory_usage(MemUsageHandler&) const;

    void stats(MemStats& stats_dest) const noexcept;
#endif

    void verify() const;

    Array& operator=(const Array&) = delete; // not allowed
    Array(const Array&) = delete;            // not allowed

protected:
    // This returns the minimum value ("lower bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t lbound_for_width(size_t width) noexcept;

    // This returns the maximum value ("inclusive upper bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t ubound_for_width(size_t width) noexcept;

    // This will have to be eventually used, exposing this here for testing.
    size_t count(int64_t value) const noexcept;

private:
    void update_width_cache_from_header() noexcept;

    void do_ensure_minimum_width(int_fast64_t);

    int64_t sum(size_t start, size_t end) const;

    template <size_t w>
    int64_t sum(size_t start, size_t end) const;

protected:
    /// It is an error to specify a non-zero value unless the width
    /// type is wtype_Bits. It is also an error to specify a non-zero
    /// size if the width type is wtype_Ignore.
    static MemRef create(Type, bool context_flag, WidthType, size_t size, int_fast64_t value, Allocator&);

    // Overriding method in ArrayParent
    void update_child_ref(size_t, ref_type) override;

    // Overriding method in ArrayParent
    ref_type get_child_ref(size_t) const noexcept override;

    void destroy_children(size_t offset = 0) noexcept;

protected:
    // Getters and Setters for adaptive-packed arrays
    typedef int64_t (Array::*Getter)(size_t) const; // Note: getters must not throw
    typedef void (Array::*Setter)(size_t, int64_t);
    typedef bool (Array::*Finder)(int64_t, size_t, size_t, size_t, QueryStateBase*) const;
    typedef void (Array::*ChunkGetter)(size_t, int64_t res[8]) const; // Note: getters must not throw

    struct VTable {
        Getter getter;
        ChunkGetter chunk_getter;
        Setter setter;
        Finder finder[cond_VTABLE_FINDER_COUNT]; // one for each active function pointer
    };
    template <size_t w>
    struct VTableForWidth;

    // This is the one installed into the m_vtable->finder slots.
    template <class cond, size_t bitwidth>
    bool find_vtable(int64_t value, size_t start, size_t end, size_t baseindex, QueryStateBase* state) const;

    template <size_t w>
    int64_t get_universal(const char* const data, const size_t ndx) const;

protected:
    /// Takes a 64-bit value and returns the minimum number of bits needed
    /// to fit the value. For alignment this is rounded up to nearest
    /// log2. Possible results {0, 1, 2, 4, 8, 16, 32, 64}
    static size_t bit_width(int64_t value);

protected:
    Getter m_getter = nullptr; // cached to avoid indirection
    const VTable* m_vtable = nullptr;

    uint_least8_t m_width = 0; // Size of an element (meaning depends on type of array).
    int64_t m_lbound;          // min number that can be stored with current m_width
    int64_t m_ubound;          // max number that can be stored with current m_width

    bool m_is_inner_bptree_node; // This array is an inner node of B+-tree.
    bool m_has_refs;             // Elements whose first bit is zero are refs to subarrays.
    bool m_context_flag;         // Meaning depends on context.

private:
    ref_type do_write_shallow(_impl::ArrayWriterBase&) const;
    ref_type do_write_deep(_impl::ArrayWriterBase&, bool only_if_modified) const;

    void _mem_usage(size_t& mem) const noexcept;

#ifdef REALM_DEBUG
    void report_memory_usage_2(MemUsageHandler&) const;
#endif

    friend class Allocator;
    friend class SlabAlloc;
    friend class GroupWriter;
    friend class ArrayWithFind;
};
552

553
// Implementation:
554

555

556
constexpr inline int_fast64_t Array::lbound_for_width(size_t width) noexcept
557
{
1,571,909,634✔
558
    if (width == 32) {
1,571,909,634✔
559
        return -0x80000000LL;
638,967,423✔
560
    }
638,967,423✔
561
    else if (width == 16) {
932,942,211✔
562
        return -0x8000LL;
321,774,378✔
563
    }
321,774,378✔
564
    else if (width < 8) {
611,167,833✔
565
        return 0;
419,836,278✔
566
    }
419,836,278✔
567
    else if (width == 8) {
191,331,555✔
568
        return -0x80LL;
162,679,155✔
569
    }
162,679,155✔
570
    else if (width == 64) {
141,189,930✔
571
        return -0x8000000000000000LL;
141,189,930✔
572
    }
141,189,930✔
573
    else {
4,294,967,294✔
574
        REALM_UNREACHABLE();
4,294,967,294✔
575
    }
4,294,967,294✔
576
}
1,571,909,634✔
577

578
constexpr inline int_fast64_t Array::ubound_for_width(size_t width) noexcept
579
{
1,580,254,014✔
580
    if (width == 32) {
1,580,254,014✔
581
        return 0x7FFFFFFFLL;
639,023,349✔
582
    }
639,023,349✔
583
    else if (width == 16) {
941,230,665✔
584
        return 0x7FFFLL;
325,236,054✔
585
    }
325,236,054✔
586
    else if (width == 0) {
615,994,611✔
587
        return 0;
79,300,311✔
588
    }
79,300,311✔
589
    else if (width == 1) {
536,694,300✔
590
        return 1;
169,444,389✔
591
    }
169,444,389✔
592
    else if (width == 2) {
367,249,911✔
593
        return 3;
152,371,773✔
594
    }
152,371,773✔
595
    else if (width == 4) {
214,878,138✔
596
        return 15;
24,090,000✔
597
    }
24,090,000✔
598
    else if (width == 8) {
190,788,138✔
599
        return 0x7FLL;
162,701,835✔
600
    }
162,701,835✔
601
    else if (width == 64) {
141,338,835✔
602
        return 0x7FFFFFFFFFFFFFFFLL;
141,338,835✔
603
    }
141,338,835✔
604
    else {
4,294,967,294✔
605
        REALM_UNREACHABLE();
4,294,967,294✔
606
    }
4,294,967,294✔
607
}
1,580,254,014✔
608

609
inline bool RefOrTagged::is_ref() const noexcept
610
{
348,066,543✔
611
    return (m_value & 1) == 0;
348,066,543✔
612
}
348,066,543✔
613

614
inline bool RefOrTagged::is_tagged() const noexcept
615
{
279,276,825✔
616
    return !is_ref();
279,276,825✔
617
}
279,276,825✔
618

619
// Interpret the stored value as a ref. Precondition: is_ref().
inline ref_type RefOrTagged::get_as_ref() const noexcept
{
    // to_ref() is defined in <alloc.hpp>
    return to_ref(m_value);
}
624

625
// Recover the logical integer from a tagged value by dropping the tag bit
// (a right shift by one). Precondition: is_tagged().
inline uint_fast64_t RefOrTagged::get_as_int() const noexcept
{
    // The bitwise AND is there in case uint_fast64_t is wider than 64 bits.
    return (uint_fast64_t(m_value) & 0xFFFFFFFFFFFFFFFFULL) >> 1;
}
630

631
// Wrap a ref; refs are even, so the tag bit is naturally clear.
inline RefOrTagged RefOrTagged::make_ref(ref_type ref) noexcept
{
    // from_ref() is defined in <alloc.hpp>
    return RefOrTagged(from_ref(ref));
}
637

638
// Encode a logical integer as a tagged value: shift left one bit and set
// the tag bit. Only 63-bit values are representable, hence the assert.
inline RefOrTagged RefOrTagged::make_tagged(uint_fast64_t i) noexcept
{
    REALM_ASSERT(i < (1ULL << 63));
    int_fast64_t encoded = int_fast64_t((i << 1) | 1);
    return RefOrTagged(encoded);
}
643

644
// Private constructor: instances are built via make_ref()/make_tagged(),
// which guarantee a correctly encoded value.
inline RefOrTagged::RefOrTagged(int_fast64_t value) noexcept
    : m_value(value)
{
}
648

649
// Allocate a fresh underlying node and attach this accessor to it. Parent
// reference information is left untouched (see the declaration's comment).
inline void Array::create(Type type, bool context_flag, size_t length, int_fast64_t value)
{
    MemRef new_mem = create_array(type, context_flag, length, value, m_alloc); // Throws
    init_from_mem(new_mem);
}
654

655

656
// Derive the array type from the cached header flags. An inner B+-tree node
// must also carry the has-refs flag (its elements are child refs).
inline Array::Type Array::get_type() const noexcept
{
    if (m_is_inner_bptree_node) {
        REALM_ASSERT_DEBUG(m_has_refs);
        return type_InnerBptreeNode;
    }
    return m_has_refs ? type_HasRefs : type_Normal;
}
666

667

668
inline void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
669
{
1,366,134✔
670
    REALM_ASSERT_DEBUG(ndx < m_size);
1,366,134✔
671
    (this->*(m_vtable->chunk_getter))(ndx, res);
1,366,134✔
672
}
1,366,134✔
673

674
template <size_t w>
675
int64_t Array::get_universal(const char* data, size_t ndx) const
676
{
3,458,975,860✔
677
    if (w == 0) {
3,458,975,860✔
678
        return 0;
67,727,580✔
679
    }
67,727,580✔
680
    else if (w == 1) {
3,423,837,076✔
681
        size_t offset = ndx >> 3;
68,988,252✔
682
        return (data[offset] >> (ndx & 7)) & 0x01;
68,988,252✔
683
    }
68,988,252✔
684
    else if (w == 2) {
3,377,104,279✔
685
        size_t offset = ndx >> 2;
48,405,822✔
686
        return (data[offset] >> ((ndx & 3) << 1)) & 0x03;
48,405,822✔
687
    }
48,405,822✔
688
    else if (w == 4) {
3,354,162,403✔
689
        size_t offset = ndx >> 1;
23,947,332✔
690
        return (data[offset] >> ((ndx & 1) << 2)) & 0x0F;
23,947,332✔
691
    }
23,947,332✔
692
    else if (w == 8) {
3,342,552,445✔
693
        return *reinterpret_cast<const signed char*>(data + ndx);
65,830,629✔
694
    }
65,830,629✔
695
    else if (w == 16) {
3,311,186,341✔
696
        size_t offset = ndx * 2;
627,573,402✔
697
        return *reinterpret_cast<const int16_t*>(data + offset);
627,573,402✔
698
    }
627,573,402✔
699
    else if (w == 32) {
3,001,058,179✔
700
        size_t offset = ndx * 4;
2,792,823,526✔
701
        return *reinterpret_cast<const int32_t*>(data + offset);
2,792,823,526✔
702
    }
2,792,823,526✔
703
    else if (w == 64) {
437,004,468✔
704
        size_t offset = ndx * 8;
437,004,468✔
705
        return *reinterpret_cast<const int64_t*>(data + offset);
437,004,468✔
706
    }
437,004,468✔
707
    else {
4,294,967,294✔
708
        REALM_ASSERT_DEBUG(false);
4,294,967,294✔
709
        return int64_t(-1);
4,294,967,294✔
710
    }
4,294,967,294✔
711
}
3,458,975,860✔
712

713
// Width-specialized element access into this array's own data buffer.
template <size_t w>
int64_t Array::get(size_t ndx) const noexcept
{
    return get_universal<w>(m_data, ndx);
}
718

719
// Return the element at `ndx`. The array must be attached and `ndx` must be
// in range (checked in debug builds only). Dispatch goes through m_getter,
// the member-function pointer selected for the current element width.
inline int64_t Array::get(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(ndx < m_size, ndx, m_size);
    return (this->*m_getter)(ndx);
}
741

742
// First element. Precondition (checked by get() in debug builds): non-empty.
inline int64_t Array::front() const noexcept
{
    return get(0);
}
746

747
// Last element. Note: if the array is empty, m_size - 1 wraps around and the
// debug range assertion in get() fires; callers must ensure non-emptiness.
inline int64_t Array::back() const noexcept
{
    return get(m_size - 1);
}
751

752
// Return the element at `ndx` interpreted as a ref. Only meaningful for
// arrays whose has-refs flag is set (asserted in debug builds).
inline ref_type Array::get_as_ref(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(m_has_refs, m_ref, ndx, m_size);
    return to_ref(get(ndx));
}
759

760
// Return the element at `ndx` wrapped as a RefOrTagged value (ref if the low
// bit is clear, tagged integer otherwise — see RefOrTagged).
inline RefOrTagged Array::get_as_ref_or_tagged(size_t ndx) const noexcept
{
    REALM_ASSERT(has_refs());
    return RefOrTagged(get(ndx));
}
765

766
// Store a RefOrTagged value at `ndx`; forwards the raw encoded value.
inline void Array::set(size_t ndx, RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    set(ndx, ref_or_tagged.m_value); // Throws
}
771

772
// Append a RefOrTagged value; forwards the raw encoded value.
inline void Array::add(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    add(ref_or_tagged.m_value); // Throws
}
777

778
// Widen the element representation, if needed, so the encoded RefOrTagged
// value can be stored without truncation.
inline void Array::ensure_minimum_width(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    ensure_minimum_width(ref_or_tagged.m_value); // Throws
}
783

784
// True when this array is an inner node of a B+-tree (cached header flag).
inline bool Array::is_inner_bptree_node() const noexcept
{
    return m_is_inner_bptree_node;
}
788

789
// True when elements may be refs to subarrays (cached header flag).
inline bool Array::has_refs() const noexcept
{
    return m_has_refs;
}
793

794
inline void Array::set_has_refs(bool value) noexcept
795
{
×
796
    if (m_has_refs != value) {
×
797
        REALM_ASSERT(!is_read_only());
×
798
        m_has_refs = value;
×
799
        set_hasrefs_in_header(value, get_header());
×
800
    }
×
801
}
×
802

803
// Cached value of the context flag stored in the array header.
inline bool Array::get_context_flag() const noexcept
{
    return m_context_flag;
}
807

808
inline void Array::set_context_flag(bool value) noexcept
809
{
182,310✔
810
    if (m_context_flag != value) {
182,310✔
811
        copy_on_write();
182,298✔
812
        m_context_flag = value;
182,298✔
813
        set_context_flag_in_header(value, get_header());
182,298✔
814
    }
182,298✔
815
}
182,310✔
816

817
// Recursively free this array and (when has-refs is set) every subarray it
// refers to, then detach (m_data = nullptr). Safe to call when unattached.
inline void Array::destroy_deep() noexcept
{
    if (!is_attached())
        return;

    if (m_has_refs)
        destroy_children();

    char* header = get_header_from_data(m_data);
    m_alloc.free_(m_ref, header);
    m_data = nullptr;
}
829

830
// Serialize this array through `out` and return the ref of the written copy.
// With `only_if_modified`, an array residing in read-only memory is presumed
// unmodified and its existing ref is returned as-is. With `deep`, subarrays
// reachable through refs are written too.
inline ref_type Array::write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const
{
    REALM_ASSERT(is_attached());

    if (only_if_modified && m_alloc.is_read_only(m_ref))
        return m_ref;

    // No recursion needed when a shallow write was requested or there are
    // no child refs to follow.
    if (!deep || !m_has_refs)
        return do_write_shallow(out); // Throws

    return do_write_deep(out, only_if_modified); // Throws
}
842

843
// Static variant: serialize the array at `ref` (always deep) through `out`.
// The read-only short-circuit is checked before the array is even
// instantiated, avoiding the init cost for unmodified arrays.
inline ref_type Array::write(ref_type ref, Allocator& alloc, _impl::ArrayWriterBase& out, bool only_if_modified)
{
    if (only_if_modified && alloc.is_read_only(ref))
        return ref;

    Array array(alloc);
    array.init_from_ref(ref);

    if (!array.m_has_refs)
        return array.do_write_shallow(out); // Throws

    return array.do_write_deep(out, only_if_modified); // Throws
}
856

857
// Append `value` at the end (insert at index m_size).
inline void Array::add(int_fast64_t value)
{
    insert(m_size, value);
}
861

862
// Remove the element at `ndx`, shifting the tail down by one.
inline void Array::erase(size_t ndx)
{
    // This can throw, but only if array is currently in read-only
    // memory (the move triggers copy-on-write).
    move(ndx + 1, size(), ndx);

    // Update size (also in header)
    --m_size;
    set_header_size(m_size);
}
872

873

874
inline void Array::erase(size_t begin, size_t end)
UNCOV
875
{
×
UNCOV
876
    if (begin != end) {
×
877
        // This can throw, but only if array is currently in read-only memory.
UNCOV
878
        move(end, size(), begin); // Throws
×
879

880
        // Update size (also in header)
UNCOV
881
        m_size -= end - begin;
×
UNCOV
882
        set_header_size(m_size);
×
UNCOV
883
    }
×
UNCOV
884
}
×
885

886
// Remove all elements (does not free subarrays — see
// clear_and_destroy_children for that).
inline void Array::clear()
{
    truncate(0); // Throws
}
890

891
// Remove all elements and free any subarrays they referred to.
inline void Array::clear_and_destroy_children()
{
    truncate_and_destroy_children(0);
}
895

896
// Static convenience: recursively free the array at `ref`.
inline void Array::destroy_deep(ref_type ref, Allocator& alloc) noexcept
{
    destroy_deep(MemRef(ref, alloc), alloc);
}
900

901
// Static variant: recursively free the array at `mem`. The has-refs header
// bit is inspected directly so that a leaf without child refs can be freed
// in one step, without constructing an Array instance.
inline void Array::destroy_deep(MemRef mem, Allocator& alloc) noexcept
{
    if (get_hasrefs_from_header(mem.get_addr())) {
        // Children present: instantiate and recurse.
        Array array(alloc);
        array.init_from_mem(mem);
        array.destroy_deep();
        return;
    }
    alloc.free_(mem);
}
911

912

913
// Add `diff` to the element at `ndx`. No-op when diff is zero.
inline void Array::adjust(size_t ndx, int_fast64_t diff)
{
    REALM_ASSERT_3(ndx, <=, m_size);
    if (diff != 0) {
        // FIXME: Should be optimized
        int_fast64_t v = get(ndx);
        set(ndx, int64_t(v + diff)); // Throws
    }
}
922

923
// Add `diff` to every element in [begin, end). No-op when diff is zero.
inline void Array::adjust(size_t begin, size_t end, int_fast64_t diff)
{
    if (diff == 0)
        return;
    // FIXME: Should be optimized (one read-modify-write per element)
    for (size_t i = begin; i != end; ++i)
        adjust(i, diff); // Throws
}
931

932

933
//-------------------------------------------------
934

935

936
// Number of bytes currently used by this array (header + payload), computed
// from the width type recorded in the header.
inline size_t Array::get_byte_size() const noexcept
{
    const char* header = get_header_from_data(m_data);
    WidthType wtype = Node::get_wtype_from_header(header);
    size_t num_bytes = NodeHeader::calc_byte_size(wtype, m_size, m_width);

    // A writable array must fit within its allocated capacity; read-only
    // arrays carry no capacity field to check against.
    REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));

    return num_bytes;
}
946

947

948
//-------------------------------------------------
949

950
// Allocate a new array with zero elements. The fill value is irrelevant for
// an empty array, so zero is passed.
inline MemRef Array::create_empty_array(Type type, bool context_flag, Allocator& alloc)
{
    return create_array(type, context_flag, 0 /* size */, 0 /* value */, alloc); // Throws
}
956

957
// Allocate a new array of `size` elements, each initialized to `value`,
// using the bit-packed width type.
inline MemRef Array::create_array(Type type, bool context_flag, size_t size, int_fast64_t value, Allocator& alloc)
{
    return create(type, context_flag, wtype_Bits, size, value, alloc); // Throws
}
961

962
// Upper bound on the byte size of an array holding `num_elems` elements:
// header plus every element stored at the widest width (64 bits).
inline size_t Array::get_max_byte_size(size_t num_elems) noexcept
{
    constexpr size_t max_bytes_per_elem = 8;
    return header_size + num_elems * max_bytes_per_elem;
}
967

968

969
// Parent-interface hook: store a new ref for the child at `child_ndx`.
inline void Array::update_child_ref(size_t child_ndx, ref_type new_ref)
{
    set(child_ndx, new_ref);
}
973

974
// Parent-interface hook: read the ref of the child at `child_ndx`.
inline ref_type Array::get_child_ref(size_t child_ndx) const noexcept
{
    return get_as_ref(child_ndx);
}
978

979
// Widen the element representation, if needed, so `value` fits. The common
// case — value already within the cached representable range — costs only
// two comparisons; the slow path delegates to do_ensure_minimum_width.
inline void Array::ensure_minimum_width(int_fast64_t value)
{
    if (value < m_lbound || value > m_ubound)
        do_ensure_minimum_width(value);
}
985

986

987
} // namespace realm
988

989
#endif // REALM_ARRAY_HPP
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc