• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / jorgen.edelbo_337

03 Jul 2024 01:04PM UTC coverage: 90.864% (-0.1%) from 90.984%
jorgen.edelbo_337

Pull #7826

Evergreen

nicola-cab
Merge branch 'master' of github.com:realm/realm-core into next-major
Pull Request #7826: Merge Next major

102968 of 181176 branches covered (56.83%)

3131 of 3738 new or added lines in 54 files covered. (83.76%)

106 existing lines in 23 files now uncovered.

217725 of 239616 relevant lines covered (90.86%)

6844960.2 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

70.81
/src/realm/bplustree.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2018 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <realm/bplustree.hpp>
20
#include <realm/impl/destroy_guard.hpp>
21
#include <realm/array_unsigned.hpp>
22
#include <realm/array_integer.hpp>
23
#include <realm/impl/array_writer.hpp>
24

25
using namespace realm;
26

27
namespace realm {
28

29
bool BPlusTreeNode::get_context_flag() const noexcept
30
{
6,616,089✔
31
    auto ref = get_ref();
6,616,089✔
32
    MemRef mem(ref, m_tree->get_alloc());
6,616,089✔
33
    return Array::get_context_flag_from_header(mem.get_addr());
6,616,089✔
34
}
6,616,089✔
35

36
void BPlusTreeNode::set_context_flag(bool cf) noexcept
37
{
44,208✔
38
    auto ref = get_ref();
44,208✔
39
    MemRef mem(ref, m_tree->get_alloc());
44,208✔
40
    if (Array::get_context_flag_from_header(mem.get_addr()) != cf) {
44,208✔
41
        Array arr(m_tree->get_alloc());
12,666✔
42
        arr.init_from_mem(mem);
12,666✔
43
        arr.set_context_flag(cf);
12,666✔
44
        if (auto new_ref = arr.get_ref(); new_ref != ref) {
12,666✔
45
            init_from_ref(new_ref);
6✔
46
            update_parent();
6✔
47
        }
6✔
48
    }
12,666✔
49
}
44,208✔
50

51

52
/*****************************************************************************/
53
/* BPlusTreeInner                                                            */
54
/* All interior nodes is of this class                                       */
55
/*****************************************************************************/
56
class BPlusTreeInner : public BPlusTreeNode, private Array {
57
public:
58
    using Array::destroy_deep;
59

60
    BPlusTreeInner(BPlusTreeBase* tree);
61
    ~BPlusTreeInner() override;
62

63
    void init_from_mem(MemRef mem);
64
    void create(size_t elems_per_child);
65

66
    /******** Implementation of abstract interface *******/
67
    bool is_leaf() const override
68
    {
460,002✔
69
        return false;
460,002✔
70
    }
460,002✔
71
    bool is_compact() const override
72
    {
×
73
        return (Array::get(0) & 1) != 0;
×
74
    }
×
75
    ref_type get_ref() const override
76
    {
69,630✔
77
        return Array::get_ref();
69,630✔
78
    }
69,630✔
79

80
    void init_from_ref(ref_type ref) noexcept override
81
    {
7,608,414✔
82
        char* header = m_alloc.translate(ref);
7,608,414✔
83
        init_from_mem(MemRef(header, ref, m_alloc));
7,608,414✔
84
    }
7,608,414✔
85

86
    void bp_set_parent(ArrayParent* p, size_t n) override
87
    {
8,800,224✔
88
        Array::set_parent(p, n);
8,800,224✔
89
    }
8,800,224✔
90
    void update_parent() override
91
    {
13,407✔
92
        Array::update_parent();
13,407✔
93
    }
13,407✔
94

95
    size_t get_node_size() const override
96
    {
46,819,620✔
97
        return Array::size() - 2;
46,819,620✔
98
    }
46,819,620✔
99
    size_t get_tree_size() const override
100
    {
47,579,244✔
101
        return size_t(back()) >> 1;
47,579,244✔
102
    }
47,579,244✔
103

104
    ref_type bptree_insert(size_t n, State& state, InsertFunc) override;
105
    void bptree_access(size_t n, AccessFunc) override;
106
    size_t bptree_erase(size_t n, EraseFunc) override;
107
    bool bptree_traverse(TraverseFunc) override;
108
    void verify() const override;
109

110
    // Other modifiers
111

112
    void append_tree_size(size_t sz)
113
    {
13,407✔
114
        Array::add((sz << 1) + 1);
13,407✔
115
    }
13,407✔
116
    void add_bp_node_ref(ref_type ref, int64_t offset = 0)
117
    {
26,862✔
118
        Array::add(from_ref(ref));
26,862✔
119
        REALM_ASSERT_DEBUG(offset >= 0);
26,862✔
120
        if (offset && m_offsets.is_attached()) {
26,862✔
121
            m_offsets.add(offset);
126✔
122
        }
126✔
123
    }
26,862✔
124
    // Reset first (and only!) child ref and return the previous value
125
    ref_type clear_first_bp_node_ref()
126
    {
186✔
127
        REALM_ASSERT(get_node_size() == 1);
186✔
128
        ref_type old_ref = Array::get_as_ref(1);
186✔
129
        Array::set(1, 0);
186✔
130

131
        return old_ref;
186✔
132
    }
186✔
133
    void ensure_offsets();
134

135
    std::unique_ptr<BPlusTreeInner> split_root();
136

137
private:
138
    ArrayUnsigned m_offsets;
139
    size_t m_my_offset;
140

141
    void move(BPlusTreeNode* new_node, size_t ndx, int64_t offset_adj) override;
142
    void set_offset(size_t offs)
143
    {
×
144
        m_my_offset = offs;
×
145
    }
×
146
    void set_tree_size(size_t sz)
147
    {
×
148
        Array::set(m_size - 1, (sz << 1) + 1);
×
149
    }
×
150
    size_t get_elems_per_child() const
151
    {
50,508,210✔
152
        // Only relevant when in compact form
153
        REALM_ASSERT_DEBUG(!m_offsets.is_attached());
50,508,210✔
154
        return size_t(Array::get(0)) >> 1;
50,508,210✔
155
    }
50,508,210✔
156
    // Should not be mixed up with Array::get_child_ref
157
    ref_type get_bp_node_ref(size_t ndx) const noexcept
158
    {
51,128,373✔
159
        return Array::get_as_ref(ndx + 1);
51,128,373✔
160
    }
51,128,373✔
161
    void insert_bp_node_ref(size_t ndx, ref_type ref)
162
    {
39,780✔
163
        Array::insert(ndx + 1, from_ref(ref));
39,780✔
164
    }
39,780✔
165
    BPlusTreeLeaf* cache_leaf(MemRef mem, size_t ndx, size_t offset);
166
    void erase_and_destroy_bp_node(size_t ndx);
167
    ref_type insert_bp_node(size_t child_ndx, ref_type new_sibling_ref, State& state);
168
    size_t get_bp_node_offset(size_t child_ndx) const
169
    {
706,335✔
170
        return (child_ndx) > 0 ? size_t(m_offsets.get(child_ndx - 1)) : 0;
706,335✔
171
    }
706,335✔
172
};
173
} // namespace realm
174

175
/****************************** BPlusTreeNode ********************************/
176

177
BPlusTreeNode::~BPlusTreeNode() {}
64,205,925✔
178

179
/****************************** BPlusTreeLeaf ********************************/
180

181
ref_type BPlusTreeLeaf::bptree_insert(size_t ndx, State& state, InsertFunc func)
182
{
58,886,439✔
183
    size_t leaf_size = get_node_size();
58,886,439✔
184
    REALM_ASSERT_DEBUG(leaf_size <= REALM_MAX_BPNODE_SIZE);
58,886,439✔
185
    if (ndx > leaf_size)
58,886,439✔
186
        ndx = leaf_size;
58,731,252✔
187
    if (REALM_LIKELY(leaf_size < REALM_MAX_BPNODE_SIZE)) {
58,886,439✔
188
        func(this, ndx);
58,811,757✔
189
        m_tree->adjust_leaf_bounds(1);
58,811,757✔
190
        return 0; // Leaf was not split
58,811,757✔
191
    }
58,811,757✔
192

193
    // Split leaf node
194
    auto new_leaf = m_tree->create_leaf_node();
74,682✔
195
    if (ndx == leaf_size) {
74,682✔
196
        func(new_leaf.get(), 0);
53,031✔
197
        state.split_offset = ndx;
53,031✔
198
    }
53,031✔
199
    else {
21,651✔
200
        move(new_leaf.get(), ndx, 0);
21,651✔
201
        func(this, ndx);
21,651✔
202
        state.split_offset = ndx + 1;
21,651✔
203
        // Invalidate cached leaf
204
        m_tree->invalidate_leaf_cache();
21,651✔
205
    }
21,651✔
206
    state.split_size = leaf_size + 1;
74,682✔
207

208
    return new_leaf->get_ref();
74,682✔
209
}
58,886,439✔
210

211
void BPlusTreeLeaf::bptree_access(size_t ndx, AccessFunc func)
212
{
49,798,026✔
213
    func(this, ndx);
49,798,026✔
214
}
49,798,026✔
215

216
size_t BPlusTreeLeaf::bptree_erase(size_t ndx, EraseFunc func)
217
{
1,592,367✔
218
    return func(this, ndx);
1,592,367✔
219
}
1,592,367✔
220

221
bool BPlusTreeLeaf::bptree_traverse(TraverseFunc func)
222
{
2,426,922✔
223
    return func(this, 0) == IteratorControl::Stop;
2,426,922✔
224
}
2,426,922✔
225

226
template <>
227
void BPlusTree<int64_t>::split_root()
228
{
6✔
229
    if (m_root->is_leaf()) {
6✔
230
        auto sz = m_root->get_node_size();
6✔
231

232
        LeafNode* leaf = static_cast<LeafNode*>(m_root.get());
6✔
233
        ref_type orig_root_ref = leaf->get_ref();
6✔
234
        auto new_root = std::make_unique<BPlusTreeInner>(this);
6✔
235

236
        new_root->create(REALM_MAX_BPNODE_SIZE);
6✔
237

238
        size_t ndx = 0;
6✔
239
        while (ndx < sz) {
66✔
240
            LeafNode new_leaf(this);
60✔
241
            new_leaf.create();
60✔
242
            size_t to_move = std::min(size_t(REALM_MAX_BPNODE_SIZE), sz - ndx);
60✔
243
            for (size_t i = 0; i < to_move; i++, ndx++) {
60,060✔
244
                new_leaf.insert(i, leaf->get(ndx));
60,000✔
245
            }
60,000✔
246
            new_root->add_bp_node_ref(new_leaf.get_ref()); // Throws
60✔
247
        }
60✔
248
        new_root->append_tree_size(sz);
6✔
249
        replace_root(std::move(new_root));
6✔
250
        // destroy after replace_root in case we need a valid context flag lookup
251
        Array::destroy(orig_root_ref, m_alloc);
6✔
252
    }
6✔
253
    else {
×
254
        BPlusTreeInner* inner = static_cast<BPlusTreeInner*>(m_root.get());
×
255
        replace_root(inner->split_root());
×
256
    }
×
257
}
6✔
258

259
/****************************** BPlusTreeInner *******************************/
260

261
BPlusTreeInner::BPlusTreeInner(BPlusTreeBase* tree)
262
    : BPlusTreeNode(tree)
3,772,749✔
263
    , Array(tree->get_alloc())
3,772,749✔
264
    , m_offsets(tree->get_alloc())
3,772,749✔
265
    , m_my_offset(0)
3,772,749✔
266
{
7,547,157✔
267
    m_offsets.set_parent(this, 0);
7,547,157✔
268
}
7,547,157✔
269

270
void BPlusTreeInner::create(size_t elems_per_child)
271
{
13,407✔
272
    // Born only with room for number of elements per child
273
    uint64_t tagged = (elems_per_child << 1) + 1;
13,407✔
274
    Array::create(Array::type_InnerBptreeNode, false, 1, tagged);
13,407✔
275
}
13,407✔
276

277
BPlusTreeInner::~BPlusTreeInner() {}
7,547,418✔
278

279
void BPlusTreeInner::init_from_mem(MemRef mem)
280
{
7,608,114✔
281
    Array::init_from_mem(mem);
7,608,114✔
282
    auto rot = Array::get(0);
7,608,114✔
283
    if ((rot & 1) == 0) {
7,608,114✔
284
        // rot is a ref
285
        m_offsets.init_from_ref(to_ref(rot));
117,108✔
286
    }
117,108✔
287
    else {
7,491,006✔
288
        m_offsets.detach();
7,491,006✔
289
    }
7,491,006✔
290
}
7,608,114✔
291

292
void BPlusTreeInner::bptree_access(size_t n, AccessFunc func)
293
{
4,519,593✔
294
    size_t child_ndx;
4,519,593✔
295
    size_t child_offset;
4,519,593✔
296
    if (m_offsets.is_attached()) {
4,519,593✔
297
        child_ndx = m_offsets.upper_bound(n);
157,248✔
298
        child_offset = get_bp_node_offset(child_ndx);
157,248✔
299
        REALM_ASSERT_3(child_ndx, <, get_node_size());
157,248✔
300
    }
157,248✔
301
    else {
4,362,345✔
302
        auto elems_per_child = get_elems_per_child();
4,362,345✔
303
        child_ndx = n / elems_per_child;
4,362,345✔
304
        child_offset = child_ndx * elems_per_child;
4,362,345✔
305
    }
4,362,345✔
306

307
    ref_type child_ref = get_bp_node_ref(child_ndx);
4,519,593✔
308
    char* child_header = m_alloc.translate(child_ref);
4,519,593✔
309
    MemRef mem(child_header, child_ref, m_alloc);
4,519,593✔
310
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
4,519,593✔
311
    if (child_is_leaf) {
4,519,593✔
312
        auto leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
4,516,971✔
313
        func(leaf, n - child_offset);
4,516,971✔
314
    }
4,516,971✔
315
    else {
2,622✔
316
        BPlusTreeInner node(m_tree);
2,622✔
317
        node.set_parent(this, child_ndx + 1);
2,622✔
318
        node.init_from_mem(mem);
2,622✔
319
        node.set_offset(child_offset + m_my_offset);
2,622✔
320
        node.bptree_access(n - child_offset, func);
2,622✔
321
    }
2,622✔
322
}
4,519,593✔
323

324
ref_type BPlusTreeInner::bptree_insert(size_t ndx, State& state, InsertFunc func)
325
{
39,965,265✔
326
    size_t child_ndx;
39,965,265✔
327
    size_t child_offset;
39,965,265✔
328
    if (ndx != npos) {
39,965,265✔
329
        ensure_offsets();
17,898✔
330
        child_ndx = m_offsets.upper_bound(ndx);
17,898✔
331
        child_offset = get_bp_node_offset(child_ndx);
17,898✔
332
        ndx -= child_offset;
17,898✔
333
        REALM_ASSERT_3(child_ndx, <, get_node_size());
17,898✔
334
    }
17,898✔
335
    else {
39,947,367✔
336
        child_ndx = get_node_size() - 1;
39,947,367✔
337
        if (m_offsets.is_attached()) {
39,947,367✔
338
            child_offset = get_bp_node_offset(child_ndx);
×
339
            REALM_ASSERT_3(child_ndx, <, get_node_size());
×
340
        }
×
341
        else {
39,947,367✔
342
            auto elems_per_child = get_elems_per_child();
39,947,367✔
343
            child_offset = child_ndx * elems_per_child;
39,947,367✔
344
        }
39,947,367✔
345
    }
39,947,367✔
346

347
    ref_type child_ref = get_bp_node_ref(child_ndx);
39,965,265✔
348
    char* child_header = m_alloc.translate(child_ref);
39,965,265✔
349
    MemRef mem(child_header, child_ref, m_alloc);
39,965,265✔
350
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
39,965,265✔
351
    ref_type new_sibling_ref;
39,965,265✔
352
    if (child_is_leaf) {
39,965,265✔
353
        auto leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
39,847,215✔
354
        new_sibling_ref = leaf->bptree_insert(ndx, state, func);
39,847,215✔
355
    }
39,847,215✔
356
    else {
118,050✔
357
        BPlusTreeInner node(m_tree);
118,050✔
358
        node.set_parent(this, child_ndx + 1);
118,050✔
359
        node.init_from_mem(mem);
118,050✔
360
        node.set_offset(child_offset + m_my_offset);
118,050✔
361
        new_sibling_ref = node.bptree_insert(ndx, state, func);
118,050✔
362
    }
118,050✔
363

364
    if (!new_sibling_ref) {
39,965,265✔
365
        adjust(size() - 1, +2); // Throws
39,832,275✔
366
        if (m_offsets.is_attached()) {
39,832,275✔
367
            m_offsets.adjust(child_ndx, m_offsets.size(), 1);
17,874✔
368
        }
17,874✔
369
        return 0;
39,832,275✔
370
    }
39,832,275✔
371

372
    return insert_bp_node(child_ndx, new_sibling_ref, state);
132,990✔
373
}
39,965,265✔
374

375
size_t BPlusTreeInner::bptree_erase(size_t n, EraseFunc func)
376
{
177,183✔
377
    ensure_offsets();
177,183✔
378

379
    size_t child_ndx = m_offsets.upper_bound(n);
177,183✔
380
    size_t child_offset = get_bp_node_offset(child_ndx);
177,183✔
381
    REALM_ASSERT_3(child_ndx, <, get_node_size());
177,183✔
382

383
    // Call bptree_erase recursively and ultimately call 'func' on leaf
384
    size_t erase_node_size;
177,183✔
385
    ref_type child_ref = get_bp_node_ref(child_ndx);
177,183✔
386
    char* child_header = m_alloc.translate(child_ref);
177,183✔
387
    MemRef mem(child_header, child_ref, m_alloc);
177,183✔
388
    BPlusTreeLeaf* leaf = nullptr;
177,183✔
389
    BPlusTreeInner node(m_tree);
177,183✔
390
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
177,183✔
391
    if (child_is_leaf) {
177,183✔
392
        leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
177,174✔
393
        erase_node_size = func(leaf, n - child_offset);
177,174✔
394
        if (erase_node_size == 0) {
177,174✔
395
            m_tree->invalidate_leaf_cache();
168✔
396
        }
168✔
397
        else {
177,006✔
398
            m_tree->adjust_leaf_bounds(-1);
177,006✔
399
        }
177,006✔
400
    }
177,174✔
401
    else {
9✔
402
        node.set_parent(this, child_ndx + 1);
9✔
403
        node.init_from_mem(mem);
9✔
404
        node.set_offset(child_offset + m_my_offset);
9✔
405
        erase_node_size = node.bptree_erase(n - child_offset, func);
9✔
406
    }
9✔
407

408
    adjust(size() - 1, -2);
177,183✔
409
    m_offsets.adjust(child_ndx, m_offsets.size(), -1);
177,183✔
410

411
    // Check if some nodes could be merged
412
    size_t num_children = get_node_size();
177,183✔
413
    if (erase_node_size == 0) {
177,183✔
414
        if (num_children == 1) {
168✔
415
            // Only child empty - delete this one too
416
            return 0;
×
417
        }
×
418
        // Child leaf is empty - destroy it!
419
        erase_and_destroy_bp_node(child_ndx);
168✔
420
        return num_children - 1;
168✔
421
    }
168✔
422
    else if (erase_node_size < REALM_MAX_BPNODE_SIZE / 2 && child_ndx < (num_children - 1)) {
177,015✔
423
        // Candidate for merge. First calculate if the combined size of current and
424
        // next sibling is small enough.
425
        size_t sibling_ndx = child_ndx + 1;
30,972✔
426
        ref_type sibling_ref = get_bp_node_ref(sibling_ndx);
30,972✔
427
        std::unique_ptr<BPlusTreeLeaf> sibling_leaf;
30,972✔
428
        BPlusTreeInner node2(m_tree);
30,972✔
429
        BPlusTreeNode* sibling_node;
30,972✔
430
        if (child_is_leaf) {
30,972✔
431
            sibling_leaf = m_tree->init_leaf_node(sibling_ref);
30,972✔
432
            sibling_node = sibling_leaf.get();
30,972✔
433
        }
30,972✔
434
        else {
×
435
            node2.init_from_ref(sibling_ref);
×
436
            sibling_node = &node2;
×
437
        }
×
438
        sibling_node->bp_set_parent(this, sibling_ndx + 1);
30,972✔
439

440
        size_t combined_size = sibling_node->get_node_size() + erase_node_size;
30,972✔
441

442
        if (combined_size < REALM_MAX_BPNODE_SIZE * 3 / 4) {
30,972✔
443
            // Combined size is small enough for the nodes to be merged
444
            // Move all content from the next sibling into current node
445
            int64_t offs_adj = 0;
132✔
446
            if (child_is_leaf) {
132✔
447
                REALM_ASSERT(leaf);
132✔
448
                if (sibling_ndx < m_offsets.size())
132✔
449
                    m_offsets.set(sibling_ndx - 1, m_offsets.get(sibling_ndx));
×
450
                sibling_node->move(leaf, 0, 0);
132✔
451
                // Invalidate cached leaf
452
                m_tree->invalidate_leaf_cache();
132✔
453
            }
132✔
454
            else {
×
455
                node.ensure_offsets();
×
456
                node2.ensure_offsets();
×
457
                // Offset adjustment is negative as the sibling has bigger offsets
458
                auto sibling_offs = m_offsets.get(sibling_ndx - 1);
×
459
                auto erase_node_offs = get_bp_node_offset(child_ndx);
×
460
                offs_adj = erase_node_offs - sibling_offs;
×
461
                if (sibling_ndx < m_offsets.size())
×
462
                    m_offsets.set(sibling_ndx - 1, m_offsets.get(sibling_ndx));
×
463

464
                size_t orig_size = node.get_tree_size();
×
465
                size_t size_of_moved = node2.get_tree_size();
×
466
                // Remove size field from node as the move operation will just add
467
                node.erase(node.size() - 1);
×
468
                node2.move(&node, 0, offs_adj);
×
469
                node.append_tree_size(orig_size + size_of_moved);
×
470
            }
×
471

472
            // Destroy sibling
473
            erase_and_destroy_bp_node(sibling_ndx);
132✔
474
            num_children--;
132✔
475
        }
132✔
476
    }
30,972✔
477

478
    return num_children;
177,015✔
479
}
177,183✔
480

481
bool BPlusTreeInner::bptree_traverse(TraverseFunc func)
482
{
6,213,306✔
483
    size_t sz = get_node_size();
6,213,306✔
484
    for (size_t i = 0; i < sz; i++) {
6,753,999✔
485
        size_t child_offset;
6,744,804✔
486
        if (m_offsets.is_attached()) {
6,744,804✔
487
            child_offset = get_bp_node_offset(i);
354,024✔
488
        }
354,024✔
489
        else {
6,390,780✔
490
            auto elems_per_child = get_elems_per_child();
6,390,780✔
491
            child_offset = i * elems_per_child;
6,390,780✔
492
        }
6,390,780✔
493

494
        bool done;
6,744,804✔
495
        ref_type child_ref = get_bp_node_ref(i);
6,744,804✔
496
        char* child_header = m_alloc.translate(child_ref);
6,744,804✔
497
        MemRef mem(child_header, child_ref, m_alloc);
6,744,804✔
498
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
6,744,804✔
499
        if (child_is_leaf) {
6,744,804✔
500
            auto leaf = cache_leaf(mem, i, child_offset + m_my_offset);
6,744,753✔
501
            done = (func(leaf, child_offset + m_my_offset) == IteratorControl::Stop);
6,744,753✔
502
        }
6,744,753✔
503
        else {
51✔
504
            BPlusTreeInner node(m_tree);
51✔
505
            node.set_parent(this, i + 1);
51✔
506
            node.init_from_mem(mem);
51✔
507
            node.set_offset(child_offset + m_my_offset);
51✔
508
            // std::cout << "BPlusTreeInner offset: " << child_offset << std::endl;
509
            done = node.bptree_traverse(func);
51✔
510
        }
51✔
511
        if (done) {
6,744,804✔
512
            return true;
6,204,111✔
513
        }
6,204,111✔
514
    }
6,744,804✔
515
    return false;
9,195✔
516
}
6,213,306✔
517

518
void BPlusTreeInner::move(BPlusTreeNode* new_node, size_t ndx, int64_t adj)
519
{
×
520
    BPlusTreeInner* dst(static_cast<BPlusTreeInner*>(new_node));
×
521
    size_t sz = get_node_size();
×
522

523
    // Copy refs
524
    for (size_t i = ndx; i < sz; i++) {
×
525
        size_t offs = get_bp_node_offset(i);
×
526
        dst->add_bp_node_ref(get_bp_node_ref(i), offs - adj);
×
527
    }
×
528
    truncate(ndx + 1);
×
529
    if (ndx > 0)
×
530
        m_offsets.truncate(ndx - 1);
×
531
}
×
532

533
void BPlusTreeInner::ensure_offsets()
534
{
195,204✔
535
    if (!m_offsets.is_attached()) {
195,204✔
536
        size_t elems_per_child = get_elems_per_child();
216✔
537
        size_t sz = size();
216✔
538
        size_t num_offsets = (sz > 2) ? sz - 3 : 0;
216✔
539
        size_t offs = 0;
216✔
540

541
        m_offsets.create(num_offsets, num_offsets * elems_per_child);
216✔
542
        for (size_t i = 0; i != num_offsets; ++i) {
420✔
543
            offs += elems_per_child;
204✔
544
            m_offsets.set(i, offs);
204✔
545
        }
204✔
546
        Array::set_as_ref(0, m_offsets.get_ref());
216✔
547
    }
216✔
548
}
195,204✔
549

550
std::unique_ptr<BPlusTreeInner> BPlusTreeInner::split_root()
551
{
×
552
    auto new_root = std::make_unique<BPlusTreeInner>(m_tree);
×
553
    auto sz = get_node_size();
×
554
    size_t elems_per_child = get_elems_per_child();
×
555
    new_root->create(REALM_MAX_BPNODE_SIZE * elems_per_child);
×
556
    new_root->Array::set_context_flag(this->Array::get_context_flag());
×
557
    size_t ndx = 0;
×
558
    size_t tree_size = get_tree_size();
×
559
    size_t accumulated_size = 0;
×
560
    while (ndx < sz) {
×
561
        BPlusTreeInner new_inner(m_tree);
×
562
        size_t to_move = std::min(size_t(REALM_MAX_BPNODE_SIZE), sz - ndx);
×
563
        new_inner.create(elems_per_child);
×
564
        for (size_t i = 0; i < to_move; i++, ndx++) {
×
565
            new_inner.add_bp_node_ref(get_bp_node_ref(ndx));
×
566
        }
×
567
        size_t this_size = to_move * elems_per_child;
×
568
        if (accumulated_size + this_size > tree_size) {
×
569
            this_size = tree_size - accumulated_size;
×
570
        }
×
571
        accumulated_size += this_size;
×
572
        new_inner.append_tree_size(this_size);
×
573
        new_root->add_bp_node_ref(new_inner.get_ref()); // Throws
×
574
    }
×
575
    REALM_ASSERT(accumulated_size == tree_size);
×
576
    new_root->append_tree_size(tree_size);
×
577
    destroy();
×
578
    return new_root;
×
579
}
×
580

581
inline BPlusTreeLeaf* BPlusTreeInner::cache_leaf(MemRef mem, size_t ndx, size_t offset)
582
{
51,035,166✔
583
    BPlusTreeLeaf* leaf = m_tree->cache_leaf(mem);
51,035,166✔
584
    leaf->bp_set_parent(this, ndx + 1);
51,035,166✔
585
    size_t sz = leaf->get_node_size();
51,035,166✔
586
    m_tree->set_leaf_bounds(offset, offset + sz);
51,035,166✔
587

588
    return leaf;
51,035,166✔
589
}
51,035,166✔
590

591
void BPlusTreeInner::erase_and_destroy_bp_node(size_t ndx)
592
{
300✔
593
    ref_type ref = get_bp_node_ref(ndx);
300✔
594
    erase(ndx + 1);
300✔
595
    Array::destroy_deep(ref, m_tree->get_alloc());
300✔
596
    REALM_ASSERT_DEBUG(m_offsets.is_attached());
300✔
597
    size_t sz = m_offsets.size();
300✔
598
    if (sz) {
300✔
599
        // in this case there will always be an offset to erase
600
        if (ndx < sz) {
300✔
601
            m_offsets.erase(ndx);
×
602
        }
×
603
        else {
300✔
604
            m_offsets.erase(sz - 1);
300✔
605
        }
300✔
606
    }
300✔
607
    REALM_ASSERT_DEBUG(m_offsets.size() == get_node_size() - 1);
300✔
608
}
300✔
609

610
ref_type BPlusTreeInner::insert_bp_node(size_t child_ndx, ref_type new_sibling_ref, State& state)
611
{
39,780✔
612
    size_t new_ref_ndx = child_ndx + 1;
39,780✔
613

614
    size_t sz = get_node_size();
39,780✔
615
    if (sz < REALM_MAX_BPNODE_SIZE) {
39,780✔
616
        // Room in current node for the new child
617
        adjust(size() - 1, +2); // Throws
39,780✔
618
        if (m_offsets.is_attached()) {
39,780✔
619
            size_t elem_ndx_offset = get_bp_node_offset(child_ndx);
24✔
620
            m_offsets.insert(child_ndx, elem_ndx_offset + state.split_offset); // Throws
24✔
621
            m_offsets.adjust(child_ndx + 1, m_offsets.size(), +1);             // Throws
24✔
622
        }
24✔
623
        insert_bp_node_ref(new_ref_ndx, new_sibling_ref);
39,780✔
624
        return ref_type(0);
39,780✔
625
    }
39,780✔
626

627
    // This node has to be split
628
    BPlusTreeInner new_sibling(m_tree);
×
629

630
    size_t elem_ndx_offset = 0;
×
631
    if (m_offsets.is_attached()) {
×
632
        new_sibling.create(0);
×
633
        new_sibling.ensure_offsets();
×
634
        elem_ndx_offset = get_bp_node_offset(child_ndx);
×
635
    }
×
636
    else {
×
637
        size_t elems_per_child = get_elems_per_child();
×
638
        elem_ndx_offset = child_ndx * elems_per_child;
×
639
        new_sibling.create(elems_per_child);
×
640
    }
×
641

642
    size_t new_split_offset;
×
643
    size_t new_split_size;
×
644
    if (new_ref_ndx == sz) {
×
645
        // Case 1/2: The split child was the last child of the parent
646
        // to be split. In this case the parent may or may not be on
647
        // the compact form.
648
        new_split_offset = size_t(elem_ndx_offset + state.split_offset);
×
649
        new_split_size = elem_ndx_offset + state.split_size;
×
650
        new_sibling.add_bp_node_ref(new_sibling_ref); // Throws
×
651
        set_tree_size(new_split_offset);              // Throws
×
652
    }
×
653
    else {
×
654
        // Case 2/2: The split child was not the last child of the
655
        // parent to be split. Since this is not possible during
656
        // 'append', we can safely assume that the parent node is on
657
        // the general form.
658
        new_split_offset = size_t(elem_ndx_offset + state.split_size);
×
659
        new_split_size = get_tree_size() + 1;
×
660

661
        move(&new_sibling, new_ref_ndx, (new_split_offset - 1));                // Strips off tree size
×
662
        add_bp_node_ref(new_sibling_ref, elem_ndx_offset + state.split_offset); // Throws
×
663
        append_tree_size(new_split_offset);                                     // Throws
×
664
    }
×
665

666
    new_sibling.append_tree_size(new_split_size - new_split_offset); // Throws
×
667

668
    state.split_offset = new_split_offset;
×
669
    state.split_size = new_split_size;
×
670

671
    return new_sibling.get_ref();
×
672
}
39,780✔
673

674
void BPlusTreeInner::verify() const
675
{
×
676
#ifdef REALM_DEBUG
×
677
    Array::verify();
×
678

679
    // This node must not be a leaf
680
    REALM_ASSERT_3(Array::get_type(), ==, Array::type_InnerBptreeNode);
×
681

682
    REALM_ASSERT_3(Array::size(), >=, 2);
×
683
    size_t num_children = get_node_size();
×
684

685
    // Verify invar:bptree-nonempty-inner
686
    REALM_ASSERT_3(num_children, >=, 1);
×
687

688
    size_t elems_per_child = 0;
×
689
    if (m_offsets.is_attached()) {
×
690
        REALM_ASSERT_DEBUG(m_offsets.size() == num_children - 1);
×
691
    }
×
692
    else {
×
693
        elems_per_child = get_elems_per_child();
×
694
    }
×
695

696
    size_t num_elems = 0;
×
697
    for (size_t i = 0; i < num_children; i++) {
×
698
        ref_type child_ref = get_bp_node_ref(i);
×
699
        char* child_header = m_alloc.translate(child_ref);
×
700
        MemRef mem(child_header, child_ref, m_alloc);
×
701
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
×
702
        size_t elems_in_child;
×
703
        if (child_is_leaf) {
×
704
            auto leaf = const_cast<BPlusTreeInner*>(this)->cache_leaf(mem, i, 0);
×
705
            elems_in_child = leaf->get_node_size();
×
706
            leaf->verify();
×
707
        }
×
708
        else {
×
709
            BPlusTreeInner node(m_tree);
×
710
            node.set_parent(const_cast<BPlusTreeInner*>(this), i + 1);
×
711
            node.init_from_mem(mem);
×
712
            node.verify();
×
713
            elems_in_child = node.get_tree_size();
×
714
        }
×
715
        num_elems += elems_in_child;
×
716
        if (m_offsets.is_attached()) {
×
717
            if (i < num_children - 1) {
×
718
                REALM_ASSERT_DEBUG(num_elems == m_offsets.get(i));
×
719
            }
×
720
        }
×
721
        else {
×
722
            if (i < num_children - 1) {
×
723
                REALM_ASSERT_DEBUG(elems_in_child == elems_per_child);
×
724
            }
×
725
            else {
×
726
                REALM_ASSERT_DEBUG(elems_in_child <= elems_per_child);
×
727
            }
×
728
        }
×
729
    }
×
730
    REALM_ASSERT_DEBUG(get_tree_size() == num_elems);
×
731
    m_tree->invalidate_leaf_cache();
×
732
#endif
×
733
}
×
734

735
/****************************** BPlusTreeBase ********************************/
736

737
BPlusTreeBase::~BPlusTreeBase() {}
32,032,596✔
738

739
void BPlusTreeBase::create()
740
{
1,175,502✔
741
    if (!m_root) {                   // Idempotent
1,175,502✔
742
        m_root = create_leaf_node(); // Throws
1,175,502✔
743
        if (m_parent) {
1,175,502✔
744
            ref_type ref = get_ref();
1,111,713✔
745
            _impl::DeepArrayRefDestroyGuard destroy_guard{ref, get_alloc()};
1,111,713✔
746
            m_parent->update_child_ref(m_ndx_in_parent, ref); // Throws
1,111,713✔
747
            destroy_guard.release();
1,111,713✔
748
        }
1,111,713✔
749
        m_root->bp_set_parent(m_parent, m_ndx_in_parent);
1,175,502✔
750
    }
1,175,502✔
751
}
1,175,502✔
752

753
void BPlusTreeBase::destroy()
754
{
48,213✔
755
    if (is_attached()) {
48,213✔
756
        ref_type ref = m_root->get_ref();
48,213✔
757
        Array::destroy_deep(ref, m_alloc);
48,213✔
758
        m_root = nullptr;
48,213✔
759
    }
48,213✔
760
    invalidate_leaf_cache();
48,213✔
761
}
48,213✔
762

763
void BPlusTreeBase::replace_root(std::unique_ptr<BPlusTreeNode> new_root)
764
{
13,593✔
765
    // Maintain parent.
766
    new_root->bp_set_parent(m_parent, m_ndx_in_parent);
13,593✔
767
    new_root->update_parent(); // Throws
13,593✔
768

769
    m_root = std::move(new_root);
13,593✔
770
}
13,593✔
771

772
void BPlusTreeBase::bptree_insert(size_t n, BPlusTreeNode::InsertFunc func)
773
{
59,143,992✔
774
    size_t bptree_size = m_root->get_tree_size();
59,143,992✔
775
    if (n == bptree_size) {
59,143,992✔
776
        n = npos;
11,416,413✔
777
    }
11,416,413✔
778
    BPlusTreeNode::State state;
59,143,992✔
779
    ref_type new_sibling_ref = m_root->bptree_insert(n, state, func);
59,143,992✔
780
    if (REALM_UNLIKELY(new_sibling_ref)) {
59,143,992✔
781
        bool compact_form = (n == npos) && m_root->is_compact();
13,401✔
782
        auto new_root = std::make_unique<BPlusTreeInner>(this);
13,401✔
783
        if (!compact_form) {
13,401✔
784
            new_root->create(0);
126✔
785
            new_root->ensure_offsets();
126✔
786
        }
126✔
787
        else {
13,275✔
788
            new_root->create(size_t(state.split_offset));
13,275✔
789
        }
13,275✔
790

791
        new_root->add_bp_node_ref(m_root->get_ref());                   // Throws
13,401✔
792
        new_root->add_bp_node_ref(new_sibling_ref, state.split_offset); // Throws
13,401✔
793
        new_root->append_tree_size(state.split_size);
13,401✔
794
        replace_root(std::move(new_root));
13,401✔
795
    }
13,401✔
796
}
59,143,992✔
797

798
void BPlusTreeBase::bptree_erase(size_t n, BPlusTreeNode::EraseFunc func)
799
{
1,769,559✔
800
    size_t root_size = m_root->bptree_erase(n, func);
1,769,559✔
801
    while (!m_root->is_leaf() && root_size == 1) {
1,769,745✔
802
        BPlusTreeInner* node = static_cast<BPlusTreeInner*>(m_root.get());
186✔
803
        ref_type orig_root_ref = node->get_ref();
186✔
804
        ref_type new_root_ref = node->clear_first_bp_node_ref();
186✔
805
        auto new_root = create_root_from_ref(new_root_ref);
186✔
806

807
        replace_root(std::move(new_root));
186✔
808
        root_size = m_root->get_node_size();
186✔
809
        // destroy after replace_root for valid context flag lookup
810
        Array::destroy_deep(orig_root_ref, m_alloc);
186✔
811
    }
186✔
812
}
1,769,559✔
813

814
std::unique_ptr<BPlusTreeNode> BPlusTreeBase::create_root_from_ref(ref_type ref)
815
{
34,853,322✔
816
    char* header = m_alloc.translate(ref);
34,853,322✔
817
    bool is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
34,853,322✔
818
    bool reuse_root = m_root && m_root->is_leaf() == is_leaf;
34,853,322✔
819

820
    if (reuse_root) {
34,853,322✔
821
        m_root->init_from_ref(ref);
4,094,304✔
822
        return std::move(m_root);
4,094,304✔
823
    }
4,094,304✔
824

825
    if (is_leaf) {
30,759,018✔
826
        return init_leaf_node(ref);
23,413,428✔
827
    }
23,413,428✔
828
    else {
7,345,590✔
829
        std::unique_ptr<BPlusTreeNode> new_root = std::make_unique<BPlusTreeInner>(this);
7,345,590✔
830
        new_root->init_from_ref(ref);
7,345,590✔
831
        return new_root;
7,345,590✔
832
    }
7,345,590✔
833
}
30,759,018✔
834

835
// this should only be called for a column_type which we can safely compress.
836
ref_type BPlusTreeBase::typed_write(ref_type ref, _impl::ArrayWriterBase& out, Allocator& alloc,
837
                                    TypedWriteFunc leaf_write_func)
838
{
2,656,362✔
839
    if (out.only_modified && alloc.is_read_only(ref))
2,656,362✔
840
        return ref;
2,438,910✔
841

842
    if (!NodeHeader::get_is_inner_bptree_node_from_header(alloc.translate(ref))) {
217,452✔
843
        // leaf
844
        return leaf_write_func(ref, out, alloc);
217,071✔
845
    }
217,071✔
846

847
    Array node(alloc);
381✔
848
    node.init_from_ref(ref);
381✔
849
    REALM_ASSERT_DEBUG(node.has_refs());
381✔
850
    TempArray written_node(node.size(), NodeHeader::type_InnerBptreeNode, node.get_context_flag());
381✔
851
    for (unsigned j = 0; j < node.size(); ++j) {
2,181✔
852
        RefOrTagged rot = node.get_as_ref_or_tagged(j);
1,800✔
853
        if (rot.is_ref() && rot.get_as_ref()) {
1,800✔
854
            if (j == 0) {
900✔
855
                // keys (ArrayUnsigned me thinks)
NEW
856
                Array a(alloc);
×
NEW
857
                a.init_from_ref(rot.get_as_ref());
×
NEW
858
                written_node.set_as_ref(j, a.write(out, false, out.only_modified, false));
×
NEW
859
            }
×
860
            else {
900✔
861
                written_node.set_as_ref(j, BPlusTreeBase::typed_write(rot.get_as_ref(), out, alloc, leaf_write_func));
900✔
862
            }
900✔
863
        }
900✔
864
        else
900✔
865
            written_node.set(j, rot);
900✔
866
    }
1,800✔
867
    return written_node.write(out);
381✔
868
}
217,452✔
869

870
void BPlusTreeBase::typed_print(std::string prefix, Allocator& alloc, ref_type root, ColumnType col_type)
NEW
871
{
×
NEW
872
    char* header = alloc.translate(root);
×
NEW
873
    Array a(alloc);
×
NEW
874
    a.init_from_ref(root);
×
NEW
875
    if (NodeHeader::get_is_inner_bptree_node_from_header(header)) {
×
NEW
876
        std::cout << "{" << std::endl;
×
NEW
877
        REALM_ASSERT(a.has_refs());
×
NEW
878
        for (unsigned j = 0; j < a.size(); ++j) {
×
NEW
879
            auto pref = prefix + "  " + std::to_string(j) + ":\t";
×
NEW
880
            RefOrTagged rot = a.get_as_ref_or_tagged(j);
×
NEW
881
            if (rot.is_ref() && rot.get_as_ref()) {
×
NEW
882
                if (j == 0) {
×
NEW
883
                    std::cout << pref << "BPTree offsets as ArrayUnsigned as ";
×
NEW
884
                    Array a(alloc);
×
NEW
885
                    a.init_from_ref(rot.get_as_ref());
×
NEW
886
                    a.typed_print(prefix);
×
NEW
887
                }
×
NEW
888
                else {
×
NEW
889
                    std::cout << pref << "Subtree beeing ";
×
NEW
890
                    BPlusTreeBase::typed_print(pref, alloc, rot.get_as_ref(), col_type);
×
NEW
891
                }
×
NEW
892
            }
×
NEW
893
        }
×
NEW
894
    }
×
NEW
895
    else {
×
NEW
896
        std::cout << "BPTree Leaf[" << col_type << "] as ";
×
NEW
897
        a.typed_print(prefix);
×
NEW
898
    }
×
NEW
899
}
×
900

901
size_t BPlusTreeBase::size_from_header(const char* header)
902
{
232,320✔
903
    auto node_size = Array::get_size_from_header(header);
232,320✔
904
    if (Array::get_is_inner_bptree_node_from_header(header)) {
232,320✔
905
        auto data = Array::get_data_from_header(header);
6✔
906
        auto width = Array::get_width_from_header(header);
6✔
907
        node_size = size_t(get_direct(data, width, node_size - 1)) >> 1;
6✔
908
    }
6✔
909
    return node_size;
232,320✔
910
}
232,320✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc