realm / realm-core, build jorgen.edelbo_337 (Evergreen)
Pull Request #7826: Merge Next major
Commit (nicola-cab): Merge branch 'master' of github.com:realm/realm-core into next-major
03 Jul 2024 01:04PM UTC, coverage: 90.864% (-0.1%) from 90.984%

217725 of 239616 relevant lines covered (90.86%)
102968 of 181176 branches covered (56.83%)
3131 of 3738 new or added lines in 54 files covered (83.76%)
106 existing lines in 23 files now uncovered
6844960.2 hits per line

Source file: /src/realm/array.hpp (95.81% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#ifndef REALM_ARRAY_HPP
#define REALM_ARRAY_HPP

#include <realm/node.hpp>
#include <realm/query_state.hpp>
#include <realm/query_conditions.hpp>
#include <realm/column_fwd.hpp>
#include <realm/array_direct.hpp>
#include <realm/integer_compressor.hpp>

namespace realm {

// Forward declarations
class GroupWriter;
namespace _impl {
class ArrayWriterBase;
}

struct MemStats {
    size_t allocated = 0;
    size_t used = 0;
    size_t array_count = 0;
};

// Stores a value obtained from Array::get(). It is a ref if the least
// significant bit is clear, otherwise it is a tagged integer. A tagged integer
// is obtained from a logical integer value by left shifting by one bit position
// (multiplying by two), and then setting the least significant bit to
// one. This means that the maximum value that can be stored as a
// tagged integer is 2**63 - 1.
class RefOrTagged {
public:
    bool is_ref() const noexcept;
    bool is_tagged() const noexcept;
    ref_type get_as_ref() const noexcept;
    uint_fast64_t get_as_int() const noexcept;

    static RefOrTagged make_ref(ref_type) noexcept;
    static RefOrTagged make_tagged(uint_fast64_t) noexcept;

private:
    int_fast64_t m_value;
    RefOrTagged(int_fast64_t) noexcept;
    friend class Array;
};
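
// Example (illustrative, not part of the original header): round-tripping a
// tagged integer. make_tagged(v) stores (v << 1) | 1, so the value survives
// the encoding, and is_ref() is false because the least significant bit is set:
//
//     RefOrTagged rot = RefOrTagged::make_tagged(42);
//     REALM_ASSERT(rot.is_tagged() && !rot.is_ref());
//     REALM_ASSERT(rot.get_as_int() == 42);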

template <class T>
class QueryStateFindAll : public QueryStateBase {
public:
    explicit QueryStateFindAll(T& keys, size_t limit = -1)
        : QueryStateBase(limit)
        , m_keys(keys)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
    bool match(size_t index) noexcept final;

private:
    T& m_keys;
};

class QueryStateFindFirst : public QueryStateBase {
public:
    size_t m_state = realm::not_found;
    QueryStateFindFirst()
        : QueryStateBase(1)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
    bool match(size_t index) noexcept final;
};

class Array : public Node, public ArrayParent {
public:
    /// Create an array accessor in the unattached state.
    explicit Array(Allocator& allocator) noexcept;
    virtual ~Array() noexcept = default;

    /// Create a new integer array of the specified type and size, filled
    /// with the specified value, and attach this accessor to it. This does not
    /// modify the parent reference information of this accessor.
    ///
    /// Note that the caller assumes ownership of the allocated underlying
    /// node. It is not owned by the accessor.
    void create(Type, bool context_flag = false, size_t size = 0, int_fast64_t value = 0);

    /// Reinitialize this array accessor to point to the specified new
    /// underlying memory. This does not modify the parent reference information
    /// of this accessor.
    void init_from_ref(ref_type ref) noexcept
    {
        REALM_ASSERT_DEBUG(ref);
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    /// Same as init_from_ref(ref_type) but avoids the mapping of 'ref' to a
    /// memory pointer.
    void init_from_mem(MemRef) noexcept;

    /// Same as `init_from_ref(get_ref_from_parent())`.
    void init_from_parent() noexcept
    {
        ref_type ref = get_ref_from_parent();
        init_from_ref(ref);
    }

    MemRef get_mem() const noexcept;

    /// Called in the context of Group::commit() to ensure that attached
    /// accessors stay valid across a commit. Please note that this works only
    /// for non-transactional commits. Accessors obtained during a transaction
    /// are always detached when the transaction ends.
    void update_from_parent() noexcept;

    /// Change the type of an already attached array node.
    ///
    /// The effect of calling this function on an unattached accessor is
    /// undefined.
    void set_type(Type);

    /// Construct an empty integer array of the specified type, and return just
    /// the reference to the underlying memory.
    static MemRef create_empty_array(Type, bool context_flag, Allocator&);

    /// Construct an integer array of the specified type and size, and return
    /// just the reference to the underlying memory. All elements will be
    /// initialized to the specified value.
    static MemRef create_array(Type, bool context_flag, size_t size, int_fast64_t value, Allocator&);

    Type get_type() const noexcept;

    /// The meaning of 'width' depends on the context in which this
    /// array is used.
    size_t get_width() const noexcept
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        return m_width;
    }

    void insert(size_t ndx, int_fast64_t value);
    void add(int_fast64_t value);

    // Used from ArrayBlob
    size_t blob_size() const noexcept;
    ref_type blob_replace(size_t begin, size_t end, const char* data, size_t data_size, bool add_zero_term);

    /// This function is guaranteed to not throw if the current width is
    /// sufficient for the specified value (e.g. if you have called
    /// ensure_minimum_width(value)) and get_alloc().is_read_only(get_ref())
    /// returns false (noexcept:array-set). Note that for a value of zero, the
    /// first criterion is trivially satisfied.
    void set(size_t ndx, int64_t value);

    void set_as_ref(size_t ndx, ref_type ref);

    template <size_t w>
    static void set(Array&, size_t ndx, int64_t value);

    int64_t get(size_t ndx) const noexcept;

    std::vector<int64_t> get_all(size_t b, size_t e) const;

    template <size_t w>
    static int64_t get(const Array& arr, size_t ndx) noexcept;

    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    template <size_t w>
    static void get_chunk(const Array&, size_t ndx, int64_t res[8]) noexcept;

    ref_type get_as_ref(size_t ndx) const noexcept;
    RefOrTagged get_as_ref_or_tagged(size_t ndx) const noexcept;

    void set(size_t ndx, RefOrTagged);
    void add(RefOrTagged);
    void ensure_minimum_width(RefOrTagged);

    int64_t front() const noexcept;
    int64_t back() const noexcept;

    void alloc(size_t init_size, size_t new_width)
    {
        // Node::alloc is the one that triggers copy-on-write. If we call alloc
        // for a compressed ("B" format) array, we have a bug in our machinery;
        // the array should have been decompressed well before calling alloc.
        const auto header = get_header();
        REALM_ASSERT_3(m_width, ==, get_width_from_header(header));
        REALM_ASSERT_3(m_size, ==, get_size_from_header(header));
        Node::alloc(init_size, new_width);
        update_width_cache_from_header();
    }

    bool is_empty() const noexcept
    {
        return size() == 0;
    }

    /// Remove the element at the specified index, and move elements at higher
    /// indexes to the next lower index.
    ///
    /// This function does **not** destroy removed subarrays. That is, if the
    /// erased element is a 'ref' pointing to a subarray, then that subarray
    /// will not be destroyed automatically.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the
    /// call. This is automatically guaranteed if the array is used in a
    /// non-transactional context, or if the array has already been successfully
    /// modified within the current write transaction.
    void erase(size_t ndx);

    /// Same as erase(size_t), but remove all elements in the specified
    /// range.
    ///
    /// Please note that this function does **not** destroy removed subarrays.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void erase(size_t begin, size_t end);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. This is just a shorthand for
    /// calling the ranged erase() function with appropriate arguments.
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void truncate(size_t new_size);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref, alloc)`.
    ///
    /// This function is guaranteed not to throw if
    /// get_alloc().is_read_only(get_ref()) returns false.
    void truncate_and_destroy_children(size_t new_size);

    /// Remove every element from this array. This is just a shorthand for
    /// calling truncate(0).
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear();

    /// Remove every element in this array. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref,
    /// alloc)`. This is just a shorthand for calling
    /// truncate_and_destroy_children(0).
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear_and_destroy_children();

    /// If necessary, expand the representation so that it can store the
    /// specified value.
    void ensure_minimum_width(int_fast64_t value);

    /// Add \a diff to the element at the specified index.
    void adjust(size_t ndx, int_fast64_t diff);

    /// Add \a diff to all the elements in the specified index range.
    void adjust(size_t begin, size_t end, int_fast64_t diff);

    //@{
    /// This is similar in spirit to std::move() from `<algorithm>`.
    /// \a dest_begin must not be in the range [`begin`,`end`)
    ///
    /// This function is guaranteed to not throw if
    /// `get_alloc().is_read_only(get_ref())` returns false.
    void move(size_t begin, size_t end, size_t dest_begin);
    //@}

    // Move elements from ndx and above to another array
    void move(Array& dst, size_t ndx);

    //@{
    /// Find the lower/upper bound of the specified value in a sequence of
    /// integers which must already be sorted ascendingly.
    ///
    /// For an integer value '`v`', lower_bound_int(v) returns the index '`l`'
    /// of the first element such that `get(l) &ge; v`, and upper_bound_int(v)
    /// returns the index '`u`' of the first element such that `get(u) &gt;
    /// v`. In both cases, if no such element is found, the returned value is
    /// the number of elements in the array.
    ///
    ///     3 3 3 4 4 4 5 6 7 9 9 9
    ///     ^     ^     ^     ^     ^
    ///     |     |     |     |     |
    ///     |     |     |     |      -- Lower and upper bound of 15
    ///     |     |     |     |
    ///     |     |     |      -- Lower and upper bound of 8
    ///     |     |     |
    ///     |     |      -- Upper bound of 4
    ///     |     |
    ///     |      -- Lower bound of 4
    ///     |
    ///      -- Lower and upper bound of 1
    ///
    /// These functions are similar to std::lower_bound() and
    /// std::upper_bound().
    ///
    /// We currently use binary search. See for example
    /// http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary.
    ///
    /// FIXME: It may be worth considering if overall efficiency can be improved
    /// by doing a linear search for short sequences.
    size_t lower_bound_int(int64_t value) const noexcept;
    size_t upper_bound_int(int64_t value) const noexcept;
    size_t lower_bound_int_compressed(int64_t value) const noexcept;
    size_t upper_bound_int_compressed(int64_t value) const noexcept;
    //@}
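
    // Example (illustrative, not part of the original header): on the sorted
    // sequence shown above (3 3 3 4 4 4 5 6 7 9 9 9):
    //
    //     arr.lower_bound_int(4);  // -> 3  (index of the first element >= 4)
    //     arr.upper_bound_int(4);  // -> 6  (index of the first element > 4)
    //     arr.lower_bound_int(15); // -> 12 (not found: returns the size)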

    int64_t get_sum(size_t start = 0, size_t end = size_t(-1)) const
    {
        return sum(start, end);
    }

    /// This information is guaranteed to be cached in the array accessor.
    bool is_inner_bptree_node() const noexcept;

    /// Returns true if type is either type_HasRefs or type_InnerBptreeNode.
    ///
    /// This information is guaranteed to be cached in the array accessor.
    bool has_refs() const noexcept;
    void set_has_refs(bool) noexcept;

    /// This information is guaranteed to be cached in the array accessor.
    ///
    /// Columns and indexes can use the context bit to differentiate leaf types.
    bool get_context_flag() const noexcept;
    void set_context_flag(bool) noexcept;

    /// Recursively destroy children (as if calling
    /// clear_and_destroy_children()), then put this accessor into the detached
    /// state (as if calling detach()), then free the allocated memory. If this
    /// accessor is already in the detached state, this function has no effect
    /// (idempotency).
    void destroy_deep() noexcept;

    /// Check whether the array is compressed (stored in 'B' format).
    inline bool is_compressed() const;

    inline const IntegerCompressor& integer_compressor() const;

    /// Used only for testing: compress the array passed as argument.
    bool try_compress(Array&) const;

    /// Used only for testing: decompress the array on which this method is
    /// invoked. If the array is not compressed, this is a no-op.
    bool try_decompress();

    /// Shorthand for `destroy_deep(MemRef(ref, alloc), alloc)`.
    static void destroy_deep(ref_type ref, Allocator& alloc) noexcept;

    /// Destroy the specified array node and all of its children, recursively.
    ///
    /// This is done by freeing the specified array node after calling
    /// destroy_deep() for every contained 'ref' element.
    static void destroy_deep(MemRef, Allocator&) noexcept;

    // Clone deep
    static MemRef clone(MemRef, Allocator& from_alloc, Allocator& target_alloc);

    // Serialization

    /// Returns the ref (position in the target stream) of the written copy of
    /// this array, or the ref of the original array if \a only_if_modified is
    /// true and this array is unmodified (Alloc::is_read_only()).
    ///
    /// The number of bytes that will be written by a non-recursive invocation
    /// of this function is exactly the number returned by get_byte_size().
    ///
    /// \param out The destination stream (writer).
    ///
    /// \param deep If true, recursively write out subarrays, but still subject
    /// to \a only_if_modified.
    ///
    /// \param only_if_modified Set to `false` to always write, or to `true` to
    /// only write the array if it has been modified.
    ref_type write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified, bool compress_in_flight) const;

    /// Same as non-static write() with `deep` set to true. This is for the
    /// cases where you do not already have an array accessor available.
    /// Compression may be attempted if `compress_in_flight` is true.
    /// This should be avoided if you rely on the size of the array being unchanged.
    static ref_type write(ref_type, Allocator&, _impl::ArrayWriterBase&, bool only_if_modified,
                          bool compress_in_flight);

    inline size_t find_first(int64_t value, size_t begin = 0, size_t end = size_t(-1)) const
    {
        return find_first<Equal>(value, begin, end);
    }

    // Wrappers for backwards compatibility and for simple use without
    // setting up state initialization etc.
    template <class cond>
    size_t find_first(int64_t value, size_t start = 0, size_t end = size_t(-1)) const
    {
        static cond c;
        REALM_ASSERT(start <= m_size && (end <= m_size || end == size_t(-1)) && start <= end);
        if (end - start == 1) {
            return c(get(start), value) ? start : realm::not_found;
        }
        // TODO: it would be nice to avoid this in order to speed up find_first loops.
        QueryStateFindFirst state;
        Finder finder = m_vtable->finder[cond::condition];
        finder(*this, value, start, end, 0, &state);
        return state.m_state;
    }
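
    // Example (illustrative, not part of the original header): a minimal
    // lookup through the backwards-compatibility wrapper, assuming a small
    // attached integer array:
    //
    //     Array arr(Allocator::get_default());
    //     arr.create(Array::type_Normal);
    //     arr.add(1);
    //     arr.add(5);
    //     size_t ndx = arr.find_first<Equal>(5); // ndx == 1
    //     arr.destroy();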

    template <class cond>
    bool find(int64_t value, size_t start, size_t end, size_t baseIndex, QueryStateBase* state) const
    {
        Finder finder = m_vtable->finder[cond::condition];
        return finder(*this, value, start, end, baseIndex, state);
    }


    /// Get the specified element without the cost of constructing an
    /// array instance. If an array instance is already available, or
    /// you need to get multiple values, then this method will be
    /// slower.
    static int_fast64_t get(const char* header, size_t ndx) noexcept;

    /// Like get(const char*, size_t) but gets two consecutive
    /// elements.
    static std::pair<int64_t, int64_t> get_two(const char* header, size_t ndx) noexcept;

    static RefOrTagged get_as_ref_or_tagged(const char* header, size_t ndx) noexcept
    {
        return get(header, ndx);
    }

    /// Get the number of bytes currently in use by this array. This
    /// includes the array header, but it does not include allocated
    /// bytes corresponding to excess capacity. The result is
    /// guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
    ///
    /// This number is exactly the number of bytes that will be
    /// written by a non-recursive invocation of write().
    size_t get_byte_size() const noexcept;

    // Get the number of bytes used by this array and its sub-arrays
    size_t get_byte_size_deep() const noexcept
    {
        size_t mem = 0;
        _mem_usage(mem);
        return mem;
    }


    /// Get the maximum number of bytes that can be written by a
    /// non-recursive invocation of write() on an array with the
    /// specified number of elements, that is, the maximum value that
    /// can be returned by get_byte_size().
    static size_t get_max_byte_size(size_t num_elems) noexcept;

    /// FIXME: Belongs in IntegerArray
    static size_t calc_aligned_byte_size(size_t size, int width);

#ifdef REALM_DEBUG
    class MemUsageHandler {
    public:
        virtual void handle(ref_type ref, size_t allocated, size_t used) = 0;
    };

    void report_memory_usage(MemUsageHandler&) const;

    void stats(MemStats& stats_dest) const noexcept;
#endif

    void verify() const;

    Array& operator=(const Array&) = delete; // not allowed
    Array(const Array&) = delete;            // not allowed

    /// Takes a 64-bit value and returns the minimum number of bits needed
    /// to fit the value. For alignment, this is rounded up to the nearest
    /// power of two. Possible results: {0, 1, 2, 4, 8, 16, 32, 64}.
    static uint8_t bit_width(int64_t value);
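
    // For example (illustrative, derived from the lbound_for_width() /
    // ubound_for_width() tables below):
    //
    //     bit_width(0)  == 0   // width 0 stores only zeros
    //     bit_width(3)  == 2   // 0..3 fits in 2 bits
    //     bit_width(4)  == 4   // 4..15 needs 4 bits
    //     bit_width(-1) == 8   // negative values need at least 8 (signed) bits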

    void typed_print(std::string prefix) const;

protected:
    friend class NodeTree;
    void copy_on_write();
    void copy_on_write(size_t min_size);

    // This returns the minimum value ("lower bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t lbound_for_width(size_t width) noexcept;

    // This returns the maximum value ("inclusive upper bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t ubound_for_width(size_t width) noexcept;

    // This will eventually be used; it is exposed here for testing.
    size_t count(int64_t value) const noexcept;

private:
    void update_width_cache_from_header() noexcept;

    void do_ensure_minimum_width(int_fast64_t);

    int64_t sum(size_t start, size_t end) const;

    template <size_t w>
    int64_t sum(size_t start, size_t end) const;

protected:
    /// It is an error to specify a non-zero value unless the width
    /// type is wtype_Bits. It is also an error to specify a non-zero
    /// size if the width type is wtype_Ignore.
    static MemRef create(Type, bool, WidthType, size_t, int_fast64_t, Allocator&);

    // Overriding method in ArrayParent
    void update_child_ref(size_t, ref_type) override;

    // Overriding method in ArrayParent
    ref_type get_child_ref(size_t) const noexcept override;

    void destroy_children(size_t offset = 0) noexcept;

protected:
    // Getters and setters for adaptive-packed arrays
    typedef int64_t (*Getter)(const Array&, size_t); // Note: getters must not throw
    typedef void (*Setter)(Array&, size_t, int64_t);
    typedef bool (*Finder)(const Array&, int64_t, size_t, size_t, size_t, QueryStateBase*);
    typedef void (*ChunkGetter)(const Array&, size_t, int64_t res[8]); // Note: getters must not throw

    typedef std::vector<int64_t> (*GetterAll)(const Array&, size_t, size_t); // Note: getters must not throw

    struct VTable {
        Getter getter;
        ChunkGetter chunk_getter;
        GetterAll getter_all;
        Setter setter;
        Finder finder[cond_VTABLE_FINDER_COUNT]; // one for each active function pointer
    };
    template <size_t w>
    struct VTableForWidth;

    // This is the one installed into the m_vtable->finder slots.
    template <class cond>
    static bool find_vtable(const Array&, int64_t value, size_t start, size_t end, size_t baseindex,
                            QueryStateBase* state);

    template <size_t w>
    static int64_t get_universal(const char* const data, const size_t ndx);

protected:
    Getter m_getter = nullptr; // cached to avoid indirection
    const VTable* m_vtable = nullptr;

    uint_least8_t m_width = 0; // Size of an element (meaning depends on the type of array).
    int64_t m_lbound;          // min number that can be stored with current m_width
    int64_t m_ubound;          // max number that can be stored with current m_width

    bool m_is_inner_bptree_node; // This array is an inner node of B+-tree.
    bool m_has_refs;             // Elements whose first bit is zero are refs to subarrays.
    bool m_context_flag;         // Meaning depends on context.

    IntegerCompressor m_integer_compressor;
    // compress/decompress this array
    bool compress_array(Array&) const;
    bool decompress_array(Array& arr) const;

private:
    ref_type do_write_shallow(_impl::ArrayWriterBase&) const;
    ref_type do_write_deep(_impl::ArrayWriterBase&, bool only_if_modified, bool compress) const;

    void _mem_usage(size_t& mem) const noexcept;

#ifdef REALM_DEBUG
    void report_memory_usage_2(MemUsageHandler&) const;
#endif


private:
    friend class Allocator;
    friend class SlabAlloc;
    friend class GroupWriter;
    friend class ArrayWithFind;
    friend class IntegerCompressor;
    friend class PackedCompressor;
    friend class FlexCompressor;
};

class TempArray : public Array {
public:
    TempArray(size_t sz, Type type = Type::type_HasRefs, bool cf = false)
        : Array(Allocator::get_default())
    {
        create(type, cf, sz);
    }
    ~TempArray()
    {
        destroy();
    }
    ref_type write(_impl::ArrayWriterBase& out)
    {
        return Array::write(out, false, false, false);
    }
};

// Implementation:

inline Array::Array(Allocator& allocator) noexcept
    : Node(allocator)
{
}

inline bool Array::is_compressed() const
{
    const auto enc = m_integer_compressor.get_encoding();
    return enc == NodeHeader::Encoding::Flex || enc == NodeHeader::Encoding::Packed;
}

inline const IntegerCompressor& Array::integer_compressor() const
{
    return m_integer_compressor;
}

inline int64_t Array::get(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(ndx < m_size, ndx, m_size);
    return m_getter(*this, ndx);

    // Two ideas that are not efficient but may be worth looking into again:
    /*
        // Assume the correct width is found early in REALM_TEMPEX, which is the case
        // for B-tree offsets that are probably 2^16 long. Turns out to be 25% faster
        // if found immediately, but 50-300% slower if found later.
        REALM_TEMPEX(return get, (ndx));
    */
    /*
        // Slightly slower in both of the if-cases. Also needs a match-count check
        // against m_size, to avoid reading beyond the array.
        if (m_width >= 8 && m_size > ndx + 7)
            return get<64>(ndx >> m_shift) & m_widthmask;
        else
            return (this->*(m_vtable->getter))(ndx);
    */
}

inline std::vector<int64_t> Array::get_all(size_t b, size_t e) const
{
    REALM_ASSERT_DEBUG(is_compressed());
    return m_vtable->getter_all(*this, b, e);
}

template <size_t w>
inline int64_t Array::get(const Array& arr, size_t ndx) noexcept
{
    REALM_ASSERT_DEBUG(arr.is_attached());
    return get_universal<w>(arr.m_data, ndx);
}

constexpr inline int_fast64_t Array::lbound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return -0x80000000LL;
    }
    else if (width == 16) {
        return -0x8000LL;
    }
    else if (width < 8) {
        return 0;
    }
    else if (width == 8) {
        return -0x80LL;
    }
    else if (width == 64) {
        return -0x8000000000000000LL;
    }
    else {
        REALM_UNREACHABLE();
    }
}

constexpr inline int_fast64_t Array::ubound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return 0x7FFFFFFFLL;
    }
    else if (width == 16) {
        return 0x7FFFLL;
    }
    else if (width == 0) {
        return 0;
    }
    else if (width == 1) {
        return 1;
    }
    else if (width == 2) {
        return 3;
    }
    else if (width == 4) {
        return 15;
    }
    else if (width == 8) {
        return 0x7FLL;
    }
    else if (width == 64) {
        return 0x7FFFFFFFFFFFFFFFLL;
    }
    else {
        REALM_UNREACHABLE();
    }
}

inline bool RefOrTagged::is_ref() const noexcept
{
    return (m_value & 1) == 0;
}

inline bool RefOrTagged::is_tagged() const noexcept
{
    return !is_ref();
}

inline ref_type RefOrTagged::get_as_ref() const noexcept
{
    // to_ref() is defined in <alloc.hpp>
    return to_ref(m_value);
}

inline uint_fast64_t RefOrTagged::get_as_int() const noexcept
{
    // The bitwise AND is there in case uint_fast64_t is wider than 64 bits.
    return (uint_fast64_t(m_value) & 0xFFFFFFFFFFFFFFFFULL) >> 1;
}

inline RefOrTagged RefOrTagged::make_ref(ref_type ref) noexcept
{
    // from_ref() is defined in <alloc.hpp>
    int_fast64_t value = from_ref(ref);
    return RefOrTagged(value);
}

inline RefOrTagged RefOrTagged::make_tagged(uint_fast64_t i) noexcept
{
    REALM_ASSERT(i < (1ULL << 63));
    return RefOrTagged((i << 1) | 1);
}

inline RefOrTagged::RefOrTagged(int_fast64_t value) noexcept
    : m_value(value)
{
}

inline void Array::create(Type type, bool context_flag, size_t length, int_fast64_t value)
{
    MemRef mem = create_array(type, context_flag, length, value, m_alloc); // Throws
    init_from_mem(mem);
}

inline Array::Type Array::get_type() const noexcept
{
    if (m_is_inner_bptree_node) {
        REALM_ASSERT_DEBUG(m_has_refs);
        return type_InnerBptreeNode;
    }
    if (m_has_refs)
        return type_HasRefs;
    return type_Normal;
}


inline void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
{
    REALM_ASSERT_DEBUG(ndx < m_size);
    m_vtable->chunk_getter(*this, ndx, res);
}

template <size_t w>
inline int64_t Array::get_universal(const char* data, size_t ndx)
{
    if (w == 64) {
        size_t offset = ndx << 3;
        return *reinterpret_cast<const int64_t*>(data + offset);
    }
    else if (w == 32) {
        size_t offset = ndx << 2;
        return *reinterpret_cast<const int32_t*>(data + offset);
    }
    else if (w == 16) {
        size_t offset = ndx << 1;
        return *reinterpret_cast<const int16_t*>(data + offset);
    }
    else if (w == 8) {
        return *reinterpret_cast<const signed char*>(data + ndx);
    }
    else if (w == 4) {
        size_t offset = ndx >> 1;
        auto d = data[offset];
        return (d >> ((ndx & 1) << 2)) & 0x0F;
    }
    else if (w == 2) {
        size_t offset = ndx >> 2;
        auto d = data[offset];
        return (d >> ((ndx & 3) << 1)) & 0x03;
    }
    else if (w == 1) {
        size_t offset = ndx >> 3;
        auto d = data[offset];
        return (d >> (ndx & 7)) & 0x01;
    }
    else if (w == 0) {
        return 0;
    }
    else {
        REALM_ASSERT_DEBUG(false);
        return int64_t(-1);
    }
}
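
// Example (illustrative, not part of the original header): for w == 4, two
// elements are packed per byte, low nibble first. Given data[0] == 0x3A:
//
//     get_universal<4>(data, 0) == 0x0A  // (0x3A >> 0) & 0x0F
//     get_universal<4>(data, 1) == 0x03  // (0x3A >> 4) & 0x0F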

inline int64_t Array::front() const noexcept
{
    return get(0);
}

inline int64_t Array::back() const noexcept
{
    return get(m_size - 1);
}

inline ref_type Array::get_as_ref(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(m_has_refs, m_ref, ndx, m_size);
    int64_t v = get(ndx);
    return to_ref(v);
}

inline RefOrTagged Array::get_as_ref_or_tagged(size_t ndx) const noexcept
{
    REALM_ASSERT(has_refs());
    return RefOrTagged(get(ndx));
}

inline void Array::set(size_t ndx, RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    set(ndx, ref_or_tagged.m_value); // Throws
}

inline void Array::add(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    add(ref_or_tagged.m_value); // Throws
}

inline void Array::ensure_minimum_width(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    ensure_minimum_width(ref_or_tagged.m_value); // Throws
}

inline bool Array::is_inner_bptree_node() const noexcept
{
    return m_is_inner_bptree_node;
}

inline bool Array::has_refs() const noexcept
{
    return m_has_refs;
}

inline void Array::set_has_refs(bool value) noexcept
{
    if (m_has_refs != value) {
        REALM_ASSERT(!is_read_only());
        m_has_refs = value;
        set_hasrefs_in_header(value, get_header());
    }
}

inline bool Array::get_context_flag() const noexcept
{
    return m_context_flag;
}

inline void Array::set_context_flag(bool value) noexcept
{
    if (m_context_flag != value) {
        copy_on_write();
        m_context_flag = value;
        set_context_flag_in_header(value, get_header());
    }
}

inline void Array::destroy_deep() noexcept
{
    if (!is_attached())
        return;

    if (m_has_refs)
        destroy_children();

    char* header = get_header_from_data(m_data);
    m_alloc.free_(m_ref, header);
    m_data = nullptr;
}

inline void Array::add(int_fast64_t value)
{
    insert(m_size, value);
}

inline void Array::erase(size_t ndx)
{
    // This can throw, but only if array is currently in read-only
    // memory.
    move(ndx + 1, size(), ndx);

    // Update size (also in header)
    --m_size;
    set_header_size(m_size);
}


inline void Array::erase(size_t begin, size_t end)
{
    if (begin != end) {
        // This can throw, but only if array is currently in read-only memory.
        move(end, size(), begin); // Throws

        // Update size (also in header)
        m_size -= end - begin;
        set_header_size(m_size);
    }
}

inline void Array::clear()
{
    truncate(0); // Throws
}

inline void Array::clear_and_destroy_children()
{
    truncate_and_destroy_children(0);
}

inline void Array::destroy_deep(ref_type ref, Allocator& alloc) noexcept
{
    destroy_deep(MemRef(ref, alloc), alloc);
}

inline void Array::destroy_deep(MemRef mem, Allocator& alloc) noexcept
{
    if (!get_hasrefs_from_header(mem.get_addr())) {
        alloc.free_(mem);
        return;
    }
    Array array(alloc);
    array.init_from_mem(mem);
    array.destroy_deep();
}


inline void Array::adjust(size_t ndx, int_fast64_t diff)
{
    REALM_ASSERT_3(ndx, <=, m_size);
    if (diff != 0) {
        // FIXME: Should be optimized
        int_fast64_t v = get(ndx);
        set(ndx, int64_t(v + diff)); // Throws
    }
}

inline void Array::adjust(size_t begin, size_t end, int_fast64_t diff)
{
    if (diff != 0) {
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i)
            adjust(i, diff); // Throws
    }
}


//-------------------------------------------------


inline size_t Array::get_byte_size() const noexcept
{
    const char* header = get_header_from_data(m_data);
    size_t num_bytes = NodeHeader::get_byte_size_from_header(header);

    REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));

    return num_bytes;
}


//-------------------------------------------------

inline MemRef Array::create_empty_array(Type type, bool context_flag, Allocator& alloc)
{
    size_t size = 0;
    int_fast64_t value = 0;
    return create_array(type, context_flag, size, value, alloc); // Throws
}

inline MemRef Array::create_array(Type type, bool context_flag, size_t size, int_fast64_t value, Allocator& alloc)
{
    return create(type, context_flag, wtype_Bits, size, value, alloc); // Throws
}

inline size_t Array::get_max_byte_size(size_t num_elems) noexcept
{
    int max_bytes_per_elem = 8;
    return header_size + num_elems * max_bytes_per_elem;
}

inline void Array::update_child_ref(size_t child_ndx, ref_type new_ref)
{
    set(child_ndx, new_ref);
}

inline ref_type Array::get_child_ref(size_t child_ndx) const noexcept
{
    return get_as_ref(child_ndx);
}

inline void Array::ensure_minimum_width(int_fast64_t value)
{
    if (value >= m_lbound && value <= m_ubound)
        return;
    do_ensure_minimum_width(value);
}
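
// Example (illustrative, not part of the original header): the element width
// grows on demand, following the bounds tabulated in lbound_for_width() /
// ubound_for_width(). Assuming a freshly created array starts at width 0
// (storing only zeros):
//
//     Array a(Allocator::get_default());
//     a.create(Array::type_Normal);   // initial width 0
//     a.ensure_minimum_width(2);      // expands to width 2 (ubound 3)
//     a.ensure_minimum_width(100);    // expands to width 8 (ubound 127)
//     a.destroy();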

inline ref_type Array::write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified,
                             bool compress_in_flight) const
{
    REALM_ASSERT_DEBUG(is_attached());
    // The default allocator cannot be trusted wrt is_read_only():
    REALM_ASSERT_DEBUG(!only_if_modified || &m_alloc != &Allocator::get_default());
    if (only_if_modified && m_alloc.is_read_only(m_ref))
        return m_ref;

    if (!deep || !m_has_refs) {
        // however - creating an array using ANYTHING BUT the default allocator during commit is also wrong....
        // it only works by accident, because the whole slab area is reinitialized after commit.
        // We should have: Array encoded_array{Allocator::get_default()};
        Array compressed_array{Allocator::get_default()};
        if (compress_in_flight && compress_array(compressed_array)) {
#ifdef REALM_DEBUG
            const auto encoding = compressed_array.m_integer_compressor.get_encoding();
            REALM_ASSERT_DEBUG(encoding == Encoding::Flex || encoding == Encoding::Packed);
            REALM_ASSERT_DEBUG(size() == compressed_array.size());
            for (size_t i = 0; i < compressed_array.size(); ++i) {
                REALM_ASSERT_DEBUG(get(i) == compressed_array.get(i));
            }
#endif
            auto ref = compressed_array.do_write_shallow(out);
            compressed_array.destroy();
            return ref;
        }
        return do_write_shallow(out); // Throws
    }

    return do_write_deep(out, only_if_modified, compress_in_flight); // Throws
}

inline ref_type Array::write(ref_type ref, Allocator& alloc, _impl::ArrayWriterBase& out, bool only_if_modified,
                             bool compress_in_flight)
{
    // The default allocator cannot be trusted wrt is_read_only():
    REALM_ASSERT_DEBUG(!only_if_modified || &alloc != &Allocator::get_default());
    if (only_if_modified && alloc.is_read_only(ref))
        return ref;

    Array array(alloc);
    array.init_from_ref(ref);
    REALM_ASSERT_DEBUG(array.is_attached());

    if (!array.m_has_refs) {
        Array compressed_array{Allocator::get_default()};
        if (compress_in_flight && array.compress_array(compressed_array)) {
#ifdef REALM_DEBUG
            const auto encoding = compressed_array.m_integer_compressor.get_encoding();
            REALM_ASSERT_DEBUG(encoding == Encoding::Flex || encoding == Encoding::Packed);
            REALM_ASSERT_DEBUG(array.size() == compressed_array.size());
            for (size_t i = 0; i < compressed_array.size(); ++i) {
                REALM_ASSERT_DEBUG(array.get(i) == compressed_array.get(i));
            }
#endif
            auto ref = compressed_array.do_write_shallow(out);
            compressed_array.destroy();
            return ref;
        }
        else {
            return array.do_write_shallow(out); // Throws
        }
    }
    return array.do_write_deep(out, only_if_modified, compress_in_flight); // Throws
}


} // namespace realm

#endif // REALM_ARRAY_HPP