• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / nicola.cabiddu_1040

26 Sep 2023 05:08PM UTC coverage: 91.056% (-1.9%) from 92.915%
nicola.cabiddu_1040

Pull #6766

Evergreen

nicola-cab
several fixes and final client reset algo for collection in mixed
Pull Request #6766: Client Reset for collections in mixed / nested collections

97128 of 178458 branches covered (0.0%)

1524 of 1603 new or added lines in 5 files covered. (95.07%)

4511 existing lines in 109 files now uncovered.

236619 of 259862 relevant lines covered (91.06%)

7169640.31 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

74.35
/src/realm/bplustree.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2018 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <realm/bplustree.hpp>
20
#include <realm/impl/destroy_guard.hpp>
21
#include <realm/array_unsigned.hpp>
22
#include <realm/array_integer.hpp>
23

24
using namespace realm;
25

26
namespace realm {
27

28
bool BPlusTreeNode::get_context_flag() const noexcept
29
{
6,613,761✔
30
    auto ref = get_ref();
6,613,761✔
31
    MemRef mem(ref, m_tree->get_alloc());
6,613,761✔
32
    return Array::get_context_flag_from_header(mem.get_addr());
6,613,761✔
33
}
6,613,761✔
34

35
void BPlusTreeNode::set_context_flag(bool cf) noexcept
36
{
23,823✔
37
    auto ref = get_ref();
23,823✔
38
    MemRef mem(ref, m_tree->get_alloc());
23,823✔
39
    if (Array::get_context_flag_from_header(mem.get_addr()) != cf) {
23,823✔
40
        Array arr(m_tree->get_alloc());
522✔
41
        arr.init_from_mem(mem);
522✔
42
        arr.set_context_flag(cf);
522✔
43
        if (auto new_ref = arr.get_ref(); new_ref != ref) {
522✔
44
            init_from_ref(new_ref);
6✔
45
            update_parent();
6✔
46
        }
6✔
47
    }
522✔
48
}
23,823✔
49

50

51
/*****************************************************************************/
52
/* BPlusTreeInner                                                            */
53
/* All interior nodes is of this class                                       */
54
/*****************************************************************************/
55
// Interior (non-leaf) node of a B+-tree. Backed by an Array with the layout:
//   slot 0:            either a tagged "elements per child" value (odd =>
//                      compact form) or a ref to the m_offsets array (even =>
//                      general form)
//   slots 1..N:        refs to the N children
//   last slot:         tagged total element count of the subtree
class BPlusTreeInner : public BPlusTreeNode, private Array {
public:
    using Array::destroy_deep;

    BPlusTreeInner(BPlusTreeBase* tree);
    ~BPlusTreeInner() override;

    void init_from_mem(MemRef mem);
    void create(size_t elems_per_child);

    /******** Implementation of abstract interface *******/
    bool is_leaf() const override
    {
        return false;
    }
    bool is_compact() const override
    {
        // Slot 0 is tagged (odd) in compact form, a ref (even) otherwise.
        return (Array::get(0) & 1) != 0;
    }
    ref_type get_ref() const override
    {
        return Array::get_ref();
    }

    void init_from_ref(ref_type ref) noexcept override
    {
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    void bp_set_parent(ArrayParent* p, size_t n) override
    {
        Array::set_parent(p, n);
    }
    void update_parent() override
    {
        Array::update_parent();
    }

    size_t get_node_size() const override
    {
        // Total slots minus slot 0 (offsets/tag) and the trailing tree size.
        return Array::size() - 2;
    }
    size_t get_tree_size() const override
    {
        // Tree size is stored tagged in the last slot.
        return size_t(back()) >> 1;
    }

    ref_type bptree_insert(size_t n, State& state, InsertFunc) override;
    void bptree_access(size_t n, AccessFunc) override;
    size_t bptree_erase(size_t n, EraseFunc) override;
    bool bptree_traverse(TraverseFunc) override;
    void verify() const override;

    // Other modifiers

    // Append the (tagged) subtree element count as the last slot.
    void append_tree_size(size_t sz)
    {
        Array::add((sz << 1) + 1);
    }
    // Append a child ref; in general form also record its end offset.
    void add_bp_node_ref(ref_type ref, int64_t offset = 0)
    {
        Array::add(from_ref(ref));
        REALM_ASSERT_DEBUG(offset >= 0);
        if (offset && m_offsets.is_attached()) {
            m_offsets.add(offset);
        }
    }
    // Reset first (and only!) child ref and return the previous value
    ref_type clear_first_bp_node_ref()
    {
        REALM_ASSERT(get_node_size() == 1);
        ref_type old_ref = Array::get_as_ref(1);
        Array::set(1, 0);

        return old_ref;
    }
    void ensure_offsets();

    std::unique_ptr<BPlusTreeInner> split_root();

private:
    // Accumulated element counts of preceding children (general form only;
    // detached while the node is in compact form).
    ArrayUnsigned m_offsets;
    // Element offset of this node within the whole tree.
    size_t m_my_offset;

    void move(BPlusTreeNode* new_node, size_t ndx, int64_t offset_adj) override;
    void set_offset(size_t offs)
    {
        m_my_offset = offs;
    }
    void set_tree_size(size_t sz)
    {
        // Overwrite the tagged tree size stored in the last slot.
        Array::set(m_size - 1, (sz << 1) + 1);
    }
    size_t get_elems_per_child() const
    {
        // Only relevant when in compact form
        REALM_ASSERT_DEBUG(!m_offsets.is_attached());
        return size_t(Array::get(0)) >> 1;
    }
    // Should not be mixed up with Array::get_child_ref
    ref_type get_bp_node_ref(size_t ndx) const noexcept
    {
        // Children start at slot 1 (slot 0 holds offsets/tag).
        return Array::get_as_ref(ndx + 1);
    }
    void insert_bp_node_ref(size_t ndx, ref_type ref)
    {
        Array::insert(ndx + 1, from_ref(ref));
    }
    BPlusTreeLeaf* cache_leaf(MemRef mem, size_t ndx, size_t offset);
    void erase_and_destroy_bp_node(size_t ndx);
    ref_type insert_bp_node(size_t child_ndx, ref_type new_sibling_ref, State& state);
    // Element offset of child number 'child_ndx' (general form only).
    size_t get_bp_node_offset(size_t child_ndx) const
    {
        return (child_ndx) > 0 ? size_t(m_offsets.get(child_ndx - 1)) : 0;
    }
};
172
}
173

174
/****************************** BPlusTreeNode ********************************/
175

176
// Out-of-line definition; there is nothing to release here.
BPlusTreeNode::~BPlusTreeNode()
{
}
179

180
/****************************** BPlusTreeLeaf ********************************/
181

182
// Insert an element into this leaf via 'func'. Returns 0 when the element
// fits, or the ref of a freshly created sibling leaf when this leaf was full
// and had to be split; 'state' then describes the split point so the parent
// can stitch the new sibling in.
ref_type BPlusTreeLeaf::bptree_insert(size_t ndx, State& state, InsertFunc func)
{
    size_t leaf_size = get_node_size();
    REALM_ASSERT_DEBUG(leaf_size <= REALM_MAX_BPNODE_SIZE);
    if (ndx > leaf_size)
        ndx = leaf_size; // clamp (e.g. npos) to "append"
    if (REALM_LIKELY(leaf_size < REALM_MAX_BPNODE_SIZE)) {
        func(this, ndx);
        m_tree->adjust_leaf_bounds(1);
        return 0; // Leaf was not split
    }

    // Split leaf node
    auto new_leaf = m_tree->create_leaf_node();
    if (ndx == leaf_size) {
        // Appending: new element becomes the sole entry of the new leaf.
        func(new_leaf.get(), 0);
        state.split_offset = ndx;
    }
    else {
        // Move the tail [ndx..end) into the new leaf, then insert here.
        move(new_leaf.get(), ndx, 0);
        func(this, ndx);
        state.split_offset = ndx + 1;
        // Invalidate cached leaf
        m_tree->invalidate_leaf_cache();
    }
    state.split_size = leaf_size + 1;

    return new_leaf->get_ref();
}
211

212
// Leaf endpoint of the recursive access: just apply 'func' at the local index.
void BPlusTreeLeaf::bptree_access(size_t ndx, AccessFunc func)
{
    func(this, ndx);
}
216

217
// Leaf endpoint of the recursive erase; returns the leaf size reported by
// 'func' after the erase.
size_t BPlusTreeLeaf::bptree_erase(size_t ndx, EraseFunc func)
{
    return func(this, ndx);
}
221

222
// Leaf endpoint of the recursive traversal; true means "stop traversing".
bool BPlusTreeLeaf::bptree_traverse(TraverseFunc func)
{
    return func(this, 0) == IteratorControl::Stop;
}
226

227
// Specialization for int64_t trees: when the root leaf overflows, distribute
// its elements over REALM_MAX_BPNODE_SIZE-sized leaves under a new inner root.
// A non-leaf root is delegated to BPlusTreeInner::split_root().
template <>
void BPlusTree<int64_t>::split_root()
{
    if (m_root->is_leaf()) {
        auto sz = m_root->get_node_size();

        LeafNode* leaf = static_cast<LeafNode*>(m_root.get());
        auto new_root = std::make_unique<BPlusTreeInner>(this);

        // Compact form: every child holds REALM_MAX_BPNODE_SIZE elements.
        new_root->create(REALM_MAX_BPNODE_SIZE);

        size_t ndx = 0;
        while (ndx < sz) {
            LeafNode new_leaf(this);
            new_leaf.create();
            size_t to_move = std::min(size_t(REALM_MAX_BPNODE_SIZE), sz - ndx);
            for (size_t i = 0; i < to_move; i++, ndx++) {
                new_leaf.insert(i, leaf->get(ndx));
            }
            new_root->add_bp_node_ref(new_leaf.get_ref()); // Throws
        }
        new_root->append_tree_size(sz);
        leaf->destroy();
        replace_root(std::move(new_root));
    }
    else {
        BPlusTreeInner* inner = static_cast<BPlusTreeInner*>(m_root.get());
        replace_root(inner->split_root());
    }
}
257

258
/****************************** BPlusTreeInner *******************************/
259

260
// Construct an unattached inner-node accessor. The offsets array (general
// form) is parented at slot 0 of this node's array.
BPlusTreeInner::BPlusTreeInner(BPlusTreeBase* tree)
    : BPlusTreeNode(tree)
    , Array(tree->get_alloc())
    , m_offsets(tree->get_alloc())
    , m_my_offset(0)
{
    m_offsets.set_parent(this, 0);
}
268

269
// Materialize this node in compact form: a single slot holding the tagged
// number of elements each child will carry.
void BPlusTreeInner::create(size_t elems_per_child)
{
    const uint64_t tagged_elems = (elems_per_child << 1) + 1;
    Array::create(Array::type_InnerBptreeNode, false, 1, tagged_elems);
}
275

276
// Accessor destruction only; does not free the underlying array memory.
BPlusTreeInner::~BPlusTreeInner()
{
}
279

280
// Attach this accessor to an existing inner node. Slot 0 decides the form:
// an even value is a ref to the offsets array (general form), an odd value is
// the tagged elems-per-child count (compact form, no offsets array).
void BPlusTreeInner::init_from_mem(MemRef mem)
{
    Array::init_from_mem(mem);
    const auto first_slot = Array::get(0);
    const bool general_form = (first_slot & 1) == 0;
    if (general_form)
        m_offsets.init_from_ref(to_ref(first_slot));
    else
        m_offsets.detach();
}
292

293
// Locate element 'n' below this node and apply 'func' to the leaf holding it.
// Descends recursively, translating 'n' into the child's coordinate space.
void BPlusTreeInner::bptree_access(size_t n, AccessFunc func)
{
    size_t child_ndx;
    size_t child_offset;
    if (m_offsets.is_attached()) {
        // General form: binary-search the accumulated offsets.
        child_ndx = m_offsets.upper_bound(n);
        child_offset = get_bp_node_offset(child_ndx);
        REALM_ASSERT_3(child_ndx, <, get_node_size());
    }
    else {
        // Compact form: all children (except possibly the last) are full.
        auto elems_per_child = get_elems_per_child();
        child_ndx = n / elems_per_child;
        child_offset = child_ndx * elems_per_child;
    }

    ref_type child_ref = get_bp_node_ref(child_ndx);
    char* child_header = m_alloc.translate(child_ref);
    MemRef mem(child_header, child_ref, m_alloc);
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
    if (child_is_leaf) {
        auto leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
        func(leaf, n - child_offset);
    }
    else {
        // Recurse into the inner child with a stack-local accessor.
        BPlusTreeInner node(m_tree);
        node.set_parent(this, child_ndx + 1);
        node.init_from_mem(mem);
        node.set_offset(child_offset + m_my_offset);
        node.bptree_access(n - child_offset, func);
    }
}
324

325
// Insert an element at tree position 'ndx' (npos = append). Returns 0 on
// success, or the ref of a new sibling if this node itself had to split;
// 'state' carries the split bookkeeping upwards.
ref_type BPlusTreeInner::bptree_insert(size_t ndx, State& state, InsertFunc func)
{
    size_t child_ndx;
    size_t child_offset;
    if (ndx != npos) {
        // Mid-tree insert requires the general (offsets) form.
        ensure_offsets();
        child_ndx = m_offsets.upper_bound(ndx);
        child_offset = get_bp_node_offset(child_ndx);
        ndx -= child_offset;
        REALM_ASSERT_3(child_ndx, <, get_node_size());
    }
    else {
        // Append: always goes into the last child.
        child_ndx = get_node_size() - 1;
        if (m_offsets.is_attached()) {
            child_offset = get_bp_node_offset(child_ndx);
            REALM_ASSERT_3(child_ndx, <, get_node_size());
        }
        else {
            auto elems_per_child = get_elems_per_child();
            child_offset = child_ndx * elems_per_child;
        }
    }

    ref_type child_ref = get_bp_node_ref(child_ndx);
    char* child_header = m_alloc.translate(child_ref);
    MemRef mem(child_header, child_ref, m_alloc);
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
    ref_type new_sibling_ref;
    if (child_is_leaf) {
        auto leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
        new_sibling_ref = leaf->bptree_insert(ndx, state, func);
    }
    else {
        BPlusTreeInner node(m_tree);
        node.set_parent(this, child_ndx + 1);
        node.init_from_mem(mem);
        node.set_offset(child_offset + m_my_offset);
        new_sibling_ref = node.bptree_insert(ndx, state, func);
    }

    if (!new_sibling_ref) {
        // No split below: just bump the tagged tree size (+2 because tagged)
        // and shift subsequent offsets by one element.
        adjust(size() - 1, +2); // Throws
        if (m_offsets.is_attached()) {
            m_offsets.adjust(child_ndx, m_offsets.size(), 1);
        }
        return 0;
    }

    // Child was split: insert the new sibling here (may split this node too).
    return insert_bp_node(child_ndx, new_sibling_ref, state);
}
375

376
// Erase element 'n' from the subtree. Returns the resulting number of
// children of this node (0 signals to the caller that this node became empty
// and should be removed as well). Underfull children may be merged with their
// next sibling on the way back up.
size_t BPlusTreeInner::bptree_erase(size_t n, EraseFunc func)
{
    // Erase always operates on the general (offsets) form.
    ensure_offsets();

    size_t child_ndx = m_offsets.upper_bound(n);
    size_t child_offset = get_bp_node_offset(child_ndx);
    REALM_ASSERT_3(child_ndx, <, get_node_size());

    // Call bptree_erase recursively and ultimately call 'func' on leaf
    size_t erase_node_size;
    ref_type child_ref = get_bp_node_ref(child_ndx);
    char* child_header = m_alloc.translate(child_ref);
    MemRef mem(child_header, child_ref, m_alloc);
    BPlusTreeLeaf* leaf = nullptr;
    BPlusTreeInner node(m_tree);
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
    if (child_is_leaf) {
        leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
        erase_node_size = func(leaf, n - child_offset);
        if (erase_node_size == 0) {
            // Leaf became empty - cached bounds are no longer valid.
            m_tree->invalidate_leaf_cache();
        }
        else {
            m_tree->adjust_leaf_bounds(-1);
        }
    }
    else {
        node.set_parent(this, child_ndx + 1);
        node.init_from_mem(mem);
        node.set_offset(child_offset + m_my_offset);
        erase_node_size = node.bptree_erase(n - child_offset, func);
    }

    // One element fewer: decrement tagged tree size and following offsets.
    adjust(size() - 1, -2);
    m_offsets.adjust(child_ndx, m_offsets.size(), -1);

    // Check if some nodes could be merged
    size_t num_children = get_node_size();
    if (erase_node_size == 0) {
        if (num_children == 1) {
            // Only child empty - delete this one too
            return 0;
        }
        // Child leaf is empty - destroy it!
        erase_and_destroy_bp_node(child_ndx);
        return num_children - 1;
    }
    else if (erase_node_size < REALM_MAX_BPNODE_SIZE / 2 && child_ndx < (num_children - 1)) {
        // Candidate for merge. First calculate if the combined size of current and
        // next sibling is small enough.
        size_t sibling_ndx = child_ndx + 1;
        ref_type sibling_ref = get_bp_node_ref(sibling_ndx);
        std::unique_ptr<BPlusTreeLeaf> sibling_leaf;
        BPlusTreeInner node2(m_tree);
        BPlusTreeNode* sibling_node;
        if (child_is_leaf) {
            sibling_leaf = m_tree->init_leaf_node(sibling_ref);
            sibling_node = sibling_leaf.get();
        }
        else {
            node2.init_from_ref(sibling_ref);
            sibling_node = &node2;
        }
        sibling_node->bp_set_parent(this, sibling_ndx + 1);

        size_t combined_size = sibling_node->get_node_size() + erase_node_size;

        if (combined_size < REALM_MAX_BPNODE_SIZE * 3 / 4) {
            // Combined size is small enough for the nodes to be merged
            // Move all content from the next sibling into current node
            int64_t offs_adj = 0;
            if (child_is_leaf) {
                REALM_ASSERT(leaf);
                if (sibling_ndx < m_offsets.size())
                    m_offsets.set(sibling_ndx - 1, m_offsets.get(sibling_ndx));
                sibling_node->move(leaf, 0, 0);
                // Invalidate cached leaf
                m_tree->invalidate_leaf_cache();
            }
            else {
                node.ensure_offsets();
                node2.ensure_offsets();
                // Offset adjustment is negative as the sibling has bigger offsets
                auto sibling_offs = m_offsets.get(sibling_ndx - 1);
                auto erase_node_offs = get_bp_node_offset(child_ndx);
                offs_adj = erase_node_offs - sibling_offs;
                if (sibling_ndx < m_offsets.size())
                    m_offsets.set(sibling_ndx - 1, m_offsets.get(sibling_ndx));

                size_t orig_size = node.get_tree_size();
                size_t size_of_moved = node2.get_tree_size();
                // Remove size field from node as the move operation will just add
                // the moved refs at the end; re-append the combined size after.
                node.erase(node.size() - 1);
                node2.move(&node, 0, offs_adj);
                node.append_tree_size(orig_size + size_of_moved);
            }

            // Destroy sibling
            erase_and_destroy_bp_node(sibling_ndx);
            num_children--;
        }
    }

    return num_children;
}
481

482
// Visit every leaf below this node in order. Returns true if 'func' asked to
// stop the traversal early.
bool BPlusTreeInner::bptree_traverse(TraverseFunc func)
{
    size_t sz = get_node_size();
    for (size_t i = 0; i < sz; i++) {
        size_t child_offset;
        if (m_offsets.is_attached()) {
            child_offset = get_bp_node_offset(i);
        }
        else {
            // Compact form: children are evenly sized.
            auto elems_per_child = get_elems_per_child();
            child_offset = i * elems_per_child;
        }

        bool done;
        ref_type child_ref = get_bp_node_ref(i);
        char* child_header = m_alloc.translate(child_ref);
        MemRef mem(child_header, child_ref, m_alloc);
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
        if (child_is_leaf) {
            auto leaf = cache_leaf(mem, i, child_offset + m_my_offset);
            done = (func(leaf, child_offset + m_my_offset) == IteratorControl::Stop);
        }
        else {
            BPlusTreeInner node(m_tree);
            node.set_parent(this, i + 1);
            node.init_from_mem(mem);
            node.set_offset(child_offset + m_my_offset);
            done = node.bptree_traverse(func);
        }
        if (done) {
            return true;
        }
    }
    return false;
}
518

519
// Move child refs [ndx..end) into 'new_node', adjusting their recorded
// offsets by -adj, then truncate this node accordingly. Note: truncation
// strips the trailing tree-size slot; the caller re-appends it.
void BPlusTreeInner::move(BPlusTreeNode* new_node, size_t ndx, int64_t adj)
{
    BPlusTreeInner* dst(static_cast<BPlusTreeInner*>(new_node));
    size_t sz = get_node_size();

    // Copy refs
    for (size_t i = ndx; i < sz; i++) {
        size_t offs = get_bp_node_offset(i);
        dst->add_bp_node_ref(get_bp_node_ref(i), offs - adj);
    }
    truncate(ndx + 1);
    if (ndx > 0)
        m_offsets.truncate(ndx - 1);
}
533

534
void BPlusTreeInner::ensure_offsets()
535
{
195,204✔
536
    if (!m_offsets.is_attached()) {
195,204✔
537
        size_t elems_per_child = get_elems_per_child();
216✔
538
        size_t sz = size();
216✔
539
        size_t num_offsets = (sz > 2) ? sz - 3 : 0;
171✔
540
        size_t offs = 0;
216✔
541

108✔
542
        m_offsets.create(num_offsets, num_offsets * elems_per_child);
216✔
543
        for (size_t i = 0; i != num_offsets; ++i) {
420✔
544
            offs += elems_per_child;
204✔
545
            m_offsets.set(i, offs);
204✔
546
        }
204✔
547
        Array::set_as_ref(0, m_offsets.get_ref());
216✔
548
    }
216✔
549
}
195,204✔
550

551
// Split an overflowing inner root: group its children into new inner nodes of
// at most REALM_MAX_BPNODE_SIZE each under a fresh root. Destroys this node's
// array and returns the new root accessor.
std::unique_ptr<BPlusTreeInner> BPlusTreeInner::split_root()
{
    auto new_root = std::make_unique<BPlusTreeInner>(m_tree);
    auto sz = get_node_size();
    size_t elems_per_child = get_elems_per_child();
    new_root->create(REALM_MAX_BPNODE_SIZE * elems_per_child);
    size_t ndx = 0;
    size_t tree_size = get_tree_size();
    size_t accumulated_size = 0;
    while (ndx < sz) {
        BPlusTreeInner new_inner(m_tree);
        size_t to_move = std::min(size_t(REALM_MAX_BPNODE_SIZE), sz - ndx);
        new_inner.create(elems_per_child);
        for (size_t i = 0; i < to_move; i++, ndx++) {
            new_inner.add_bp_node_ref(get_bp_node_ref(ndx));
        }
        size_t this_size = to_move * elems_per_child;
        if (accumulated_size + this_size > tree_size) {
            // Last group: the final child may not be completely full.
            this_size = tree_size - accumulated_size;
        }
        accumulated_size += this_size;
        new_inner.append_tree_size(this_size);
        new_root->add_bp_node_ref(new_inner.get_ref()); // Throws
    }
    REALM_ASSERT(accumulated_size == tree_size);
    new_root->append_tree_size(tree_size);
    destroy();
    return new_root;
}
580

581
// Attach the tree's cached leaf accessor to the leaf at 'mem', parent it on
// this node, and publish the leaf's element range [offset, offset + size).
inline BPlusTreeLeaf* BPlusTreeInner::cache_leaf(MemRef mem, size_t ndx, size_t offset)
{
    BPlusTreeLeaf* cached = m_tree->cache_leaf(mem);
    cached->bp_set_parent(this, ndx + 1);
    const size_t leaf_size = cached->get_node_size();
    m_tree->set_leaf_bounds(offset, offset + leaf_size);
    return cached;
}
590

591
// Remove child 'ndx' from this node and free its whole subtree, keeping the
// offsets array consistent with the remaining children.
void BPlusTreeInner::erase_and_destroy_bp_node(size_t ndx)
{
    const ref_type child_ref = get_bp_node_ref(ndx);
    erase(ndx + 1); // children start at slot 1
    Array::destroy_deep(child_ref, m_tree->get_alloc());

    REALM_ASSERT_DEBUG(m_offsets.is_attached());
    const size_t num_offsets = m_offsets.size();
    if (num_offsets) {
        // in this case there will always be an offset to erase: either the
        // one belonging to the removed child, or the last one.
        const size_t victim = (ndx < num_offsets) ? ndx : num_offsets - 1;
        m_offsets.erase(victim);
    }
    REALM_ASSERT_DEBUG(m_offsets.size() == get_node_size() - 1);
}
609

610
// Place the ref of a newly split-off child next to 'child_ndx'. Returns 0
// when it fits, or the ref of a new sibling of *this* node when this node is
// full and must split as well; 'state' is rewritten for the next level up.
ref_type BPlusTreeInner::insert_bp_node(size_t child_ndx, ref_type new_sibling_ref, State& state)
{
    size_t new_ref_ndx = child_ndx + 1;

    size_t sz = get_node_size();
    if (sz < REALM_MAX_BPNODE_SIZE) {
        // Room in current node for the new child
        adjust(size() - 1, +2); // Throws
        if (m_offsets.is_attached()) {
            size_t elem_ndx_offset = get_bp_node_offset(child_ndx);
            m_offsets.insert(child_ndx, elem_ndx_offset + state.split_offset); // Throws
            m_offsets.adjust(child_ndx + 1, m_offsets.size(), +1);             // Throws
        }
        insert_bp_node_ref(new_ref_ndx, new_sibling_ref);
        return ref_type(0);
    }

    // This node has to be split
    BPlusTreeInner new_sibling(m_tree);

    size_t elem_ndx_offset = 0;
    if (m_offsets.is_attached()) {
        // General form: sibling also gets an (empty) offsets array.
        new_sibling.create(0);
        new_sibling.ensure_offsets();
        elem_ndx_offset = get_bp_node_offset(child_ndx);
    }
    else {
        size_t elems_per_child = get_elems_per_child();
        elem_ndx_offset = child_ndx * elems_per_child;
        new_sibling.create(elems_per_child);
    }

    size_t new_split_offset;
    size_t new_split_size;
    if (new_ref_ndx == sz) {
        // Case 1/2: The split child was the last child of the parent
        // to be split. In this case the parent may or may not be on
        // the compact form.
        new_split_offset = size_t(elem_ndx_offset + state.split_offset);
        new_split_size = elem_ndx_offset + state.split_size;
        new_sibling.add_bp_node_ref(new_sibling_ref); // Throws
        set_tree_size(new_split_offset);             // Throws
    }
    else {
        // Case 2/2: The split child was not the last child of the
        // parent to be split. Since this is not possible during
        // 'append', we can safely assume that the parent node is on
        // the general form.
        new_split_offset = size_t(elem_ndx_offset + state.split_size);
        new_split_size = get_tree_size() + 1;

        move(&new_sibling, new_ref_ndx, (new_split_offset - 1));               // Strips off tree size
        add_bp_node_ref(new_sibling_ref, elem_ndx_offset + state.split_offset); // Throws
        append_tree_size(new_split_offset);                                    // Throws
    }

    new_sibling.append_tree_size(new_split_size - new_split_offset); // Throws

    // Propagate split bookkeeping to the level above.
    state.split_offset = new_split_offset;
    state.split_size = new_split_size;

    return new_sibling.get_ref();
}
673

674
// Debug-only consistency check: validates node type, child counts, offset
// bookkeeping and the stored subtree size against the actual children.
void BPlusTreeInner::verify() const
{
#ifdef REALM_DEBUG
    Array::verify();

    // This node must not be a leaf
    REALM_ASSERT_3(Array::get_type(), ==, Array::type_InnerBptreeNode);

    REALM_ASSERT_3(Array::size(), >=, 2);
    size_t num_children = get_node_size();

    // Verify invar:bptree-nonempty-inner
    REALM_ASSERT_3(num_children, >=, 1);

    size_t elems_per_child = 0;
    if (m_offsets.is_attached()) {
        // General form: exactly one offset per child boundary.
        REALM_ASSERT_DEBUG(m_offsets.size() == num_children - 1);
    }
    else {
        elems_per_child = get_elems_per_child();
    }

    size_t num_elems = 0;
    for (size_t i = 0; i < num_children; i++) {
        ref_type child_ref = get_bp_node_ref(i);
        char* child_header = m_alloc.translate(child_ref);
        MemRef mem(child_header, child_ref, m_alloc);
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
        size_t elems_in_child;
        if (child_is_leaf) {
            auto leaf = const_cast<BPlusTreeInner*>(this)->cache_leaf(mem, i, 0);
            elems_in_child = leaf->get_node_size();
            leaf->verify();
        }
        else {
            BPlusTreeInner node(m_tree);
            node.set_parent(const_cast<BPlusTreeInner*>(this), i + 1);
            node.init_from_mem(mem);
            node.verify();
            elems_in_child = node.get_tree_size();
        }
        num_elems += elems_in_child;
        if (m_offsets.is_attached()) {
            if (i < num_children - 1) {
                REALM_ASSERT_DEBUG(num_elems == m_offsets.get(i));
            }
        }
        else {
            // Compact form: all children full except possibly the last.
            if (i < num_children - 1) {
                REALM_ASSERT_DEBUG(elems_in_child == elems_per_child);
            }
            else {
                REALM_ASSERT_DEBUG(elems_in_child <= elems_per_child);
            }
        }
    }
    REALM_ASSERT_DEBUG(get_tree_size() == num_elems);
    // cache_leaf() above clobbered the leaf bounds; reset the cache.
    m_tree->invalidate_leaf_cache();
#endif
}
734

735
/****************************** BPlusTreeBase ********************************/
736

737
// Out-of-line definition; the root accessor is released via its own destructor.
BPlusTreeBase::~BPlusTreeBase()
{
}
740

741
void BPlusTreeBase::create()
742
{
2,219,670✔
743
    if (!m_root) {                   // Idempotent
2,246,046✔
744
        m_root = create_leaf_node(); // Throws
2,246,046✔
745
        if (m_parent) {
2,246,046✔
746
            ref_type ref = get_ref();
575,463✔
747
            _impl::DeepArrayRefDestroyGuard destroy_guard{ref, get_alloc()};
575,463✔
748
            m_parent->update_child_ref(m_ndx_in_parent, ref); // Throws
575,463✔
749
            destroy_guard.release();
575,463✔
750
        }
575,463✔
751
        m_root->bp_set_parent(m_parent, m_ndx_in_parent);
2,246,046✔
752
    }
2,246,046✔
753
}
2,219,670✔
754

755
void BPlusTreeBase::destroy()
756
{
2,064,156✔
757
    if (is_attached()) {
2,064,156✔
758
        ref_type ref = m_root->get_ref();
1,665,972✔
759
        Array::destroy_deep(ref, m_alloc);
1,665,972✔
760
        m_root = nullptr;
1,665,972✔
761
    }
1,665,972✔
762
    invalidate_leaf_cache();
2,064,156✔
763
}
2,064,156✔
764

765
// Install a new root node, wiring it into this tree's position in the
// enclosing parent before taking ownership. Note the ordering: the parent
// link is established (and pushed to the parent, which may throw) before
// m_root is reassigned, so on exception the old root remains in place.
void BPlusTreeBase::replace_root(std::unique_ptr<BPlusTreeNode> new_root)
{
    // Maintain parent.
    new_root->bp_set_parent(m_parent, m_ndx_in_parent);
    new_root->update_parent(); // Throws

    m_root = std::move(new_root);
}
773

774
// Insert an element at position 'n' (npos or the current size means append),
// delegating the actual value insertion to 'func'. If the insertion causes
// the root to split, the tree grows one level: a fresh inner node becomes
// the new root with the old root and its new sibling as children.
void BPlusTreeBase::bptree_insert(size_t n, BPlusTreeNode::InsertFunc func)
{
    // Appending is signalled down the tree as npos.
    if (n == m_root->get_tree_size())
        n = npos;

    BPlusTreeNode::State state;
    ref_type sibling_ref = m_root->bptree_insert(n, state, func);
    if (REALM_LIKELY(!sibling_ref))
        return; // No split - root is unchanged

    // The root was split. Build a replacement root holding both halves.
    auto new_root = std::make_unique<BPlusTreeInner>(this);
    if (n == npos && m_root->is_compact()) {
        // Pure appends keep children uniformly filled, so the compact
        // (offset-free) representation can be kept.
        new_root->create(size_t(state.split_offset));
    }
    else {
        new_root->create(0);
        new_root->ensure_offsets();
    }

    new_root->add_bp_node_ref(m_root->get_ref());                       // Throws
    new_root->add_bp_node_ref(sibling_ref, state.split_offset);         // Throws
    new_root->append_tree_size(state.split_size);
    replace_root(std::move(new_root));
}
799

800
// Erase the element at position 'n', delegating the leaf-level removal to
// 'func'. Afterwards, an inner root left with a single child is redundant;
// repeatedly promote that only child so the tree shrinks back down.
void BPlusTreeBase::bptree_erase(size_t n, BPlusTreeNode::EraseFunc func)
{
    size_t root_children = m_root->bptree_erase(n, func);

    while (!m_root->is_leaf() && root_children == 1) {
        auto inner = static_cast<BPlusTreeInner*>(m_root.get());

        // Detach the sole child from the old root before destroying it,
        // then make that child the new root.
        ref_type child_ref = inner->clear_first_bp_node_ref();
        inner->destroy_deep();

        replace_root(create_root_from_ref(child_ref));
        root_children = m_root->get_node_size();
    }
}
815

816
// Build (or reuse) a root accessor for the node referenced by 'ref'.
// If the current root accessor already has the right dynamic type
// (leaf vs. inner) it is re-initialized and handed back, avoiding an
// allocation; otherwise a fresh accessor of the correct type is created.
std::unique_ptr<BPlusTreeNode> BPlusTreeBase::create_root_from_ref(ref_type ref)
{
    char* header = m_alloc.translate(ref);
    const bool root_is_leaf = !Array::get_is_inner_bptree_node_from_header(header);

    // Fast path: recycle the existing accessor when its type matches.
    if (m_root && m_root->is_leaf() == root_is_leaf) {
        m_root->init_from_ref(ref);
        return std::move(m_root);
    }

    if (root_is_leaf)
        return init_leaf_node(ref);

    std::unique_ptr<BPlusTreeNode> inner = std::make_unique<BPlusTreeInner>(this);
    inner->init_from_ref(ref);
    return inner;
}
836

837
// Compute the number of elements in the tree whose root node has the given
// header, without instantiating any accessors.
size_t BPlusTreeBase::size_from_header(const char* header)
{
    size_t node_size = Array::get_size_from_header(header);

    // For a leaf root, the array size IS the element count.
    if (!Array::get_is_inner_bptree_node_from_header(header))
        return node_size;

    // For an inner root, the total element count is stored as a tagged
    // integer in the node's last slot; shift away the tag bit.
    auto data = Array::get_data_from_header(header);
    auto width = Array::get_width_from_header(header);
    return size_t(get_direct(data, width, node_size - 1)) >> 1;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc