• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / 1816

04 Nov 2023 12:29AM UTC coverage: 91.648% (-0.01%) from 91.66%
1816

push

Evergreen

web-flow
Use a single write transaction for DiscardLocal client resets on FLX realms (#7110)

Updating the subscription store in a separate write transaction from the
recovery means that we temporarily commit an invalid state. If the application
crashes between committing the client reset diff and updating the subscription
store, the next launch of the application would try to use the now-invalid
pending subscriptions that should have been discarded.

92128 of 168844 branches covered (0.0%)

141 of 146 new or added lines in 7 files covered. (96.58%)

84 existing lines in 15 files now uncovered.

230681 of 251702 relevant lines covered (91.65%)

6383138.69 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

75.72
/src/realm/bplustree.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2018 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <realm/bplustree.hpp>
20
#include <realm/impl/destroy_guard.hpp>
21
#include <realm/array_unsigned.hpp>
22

23
using namespace realm;
24

25
namespace realm {
26

27
bool BPlusTreeNode::get_context_flag() const noexcept
28
{
6,657,999✔
29
    auto ref = get_ref();
6,657,999✔
30
    MemRef mem(ref, m_tree->get_alloc());
6,657,999✔
31
    return Array::get_context_flag_from_header(mem.get_addr());
6,657,999✔
32
}
6,657,999✔
33

34
void BPlusTreeNode::set_context_flag(bool cf) noexcept
35
{
19,185✔
36
    auto ref = get_ref();
19,185✔
37
    MemRef mem(ref, m_tree->get_alloc());
19,185✔
38
    if (Array::get_context_flag_from_header(mem.get_addr()) != cf) {
19,185✔
39
        Array arr(m_tree->get_alloc());
522✔
40
        arr.init_from_mem(mem);
522✔
41
        arr.set_context_flag(cf);
522✔
42
        if (auto new_ref = arr.get_ref(); new_ref != ref) {
522✔
43
            init_from_ref(new_ref);
6✔
44
            update_parent();
6✔
45
        }
6✔
46
    }
522✔
47
}
19,185✔
48

49

50
/*****************************************************************************/
51
/* BPlusTreeInner                                                            */
52
/* All interior nodes is of this class                                       */
53
/*****************************************************************************/
54
class BPlusTreeInner : public BPlusTreeNode, private Array {
public:
    using Array::destroy_deep;

    BPlusTreeInner(BPlusTreeBase* tree);
    ~BPlusTreeInner() override;

    // Underlying Array layout (as used by the accessors below):
    //   slot 0          : tagged elems-per-child (compact form) OR ref to
    //                     offsets array (general form) — distinguished by bit 0
    //   slots 1..N      : child refs
    //   last slot       : tagged total element count of the subtree
    void init_from_mem(MemRef mem);
    void create(size_t elems_per_child);

    /******** Implementation of abstract interface *******/
    bool is_leaf() const override
    {
        return false;
    }
    // Compact form is indicated by a tagged (odd) value in slot 0.
    bool is_compact() const override
    {
        return (Array::get(0) & 1) != 0;
    }
    ref_type get_ref() const override
    {
        return Array::get_ref();
    }

    void init_from_ref(ref_type ref) noexcept override
    {
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    void bp_set_parent(ArrayParent* p, size_t n) override
    {
        Array::set_parent(p, n);
    }
    void update_parent() override
    {
        Array::update_parent();
    }

    // Number of children — total slots minus the header slot and the size slot.
    size_t get_node_size() const override
    {
        return Array::size() - 2;
    }
    // Total elements in subtree — stored tagged in the last slot.
    size_t get_tree_size() const override
    {
        return size_t(back()) >> 1;
    }

    ref_type bptree_insert(size_t n, State& state, InsertFunc) override;
    void bptree_access(size_t n, AccessFunc) override;
    size_t bptree_erase(size_t n, EraseFunc) override;
    bool bptree_traverse(TraverseFunc) override;
    void verify() const override;

    // Other modifiers

    // Append the tagged tree-size slot at the end of the array.
    void append_tree_size(size_t sz)
    {
        Array::add((sz << 1) + 1);
    }
    // Append a child ref; when on the general form, also record its offset.
    void add_bp_node_ref(ref_type ref, int64_t offset = 0)
    {
        Array::add(from_ref(ref));
        REALM_ASSERT_DEBUG(offset >= 0);
        if (offset && m_offsets.is_attached()) {
            m_offsets.add(offset);
        }
    }
    // Reset first (and only!) child ref and return the previous value
    ref_type clear_first_bp_node_ref()
    {
        REALM_ASSERT(get_node_size() == 1);
        ref_type old_ref = Array::get_as_ref(1);
        Array::set(1, 0);

        return old_ref;
    }
    void ensure_offsets();

private:
    // Per-child cumulative offsets; attached only on the general form.
    ArrayUnsigned m_offsets;
    // Element offset of this node within the whole tree.
    size_t m_my_offset;

    void move(BPlusTreeNode* new_node, size_t ndx, int64_t offset_adj) override;
    void set_offset(size_t offs)
    {
        m_my_offset = offs;
    }
    // Rewrite the tagged tree-size slot in place.
    void set_tree_size(size_t sz)
    {
        Array::set(m_size - 1, (sz << 1) + 1);
    }
    size_t get_elems_per_child() const
    {
        // Only relevant when in compact form
        REALM_ASSERT_DEBUG(!m_offsets.is_attached());
        return size_t(Array::get(0)) >> 1;
    }
    // Should not be mixed up with Array::get_child_ref
    ref_type get_bp_node_ref(size_t ndx) const noexcept
    {
        return Array::get_as_ref(ndx + 1);
    }
    void insert_bp_node_ref(size_t ndx, ref_type ref)
    {
        Array::insert(ndx + 1, from_ref(ref));
    }
    BPlusTreeLeaf* cache_leaf(MemRef mem, size_t ndx, size_t offset);
    void erase_and_destroy_bp_node(size_t ndx);
    ref_type insert_bp_node(size_t child_ndx, ref_type new_sibling_ref, State& state);
    // First element offset of the given child (0 for the first child).
    size_t get_bp_node_offset(size_t child_ndx) const
    {
        return (child_ndx) > 0 ? size_t(m_offsets.get(child_ndx - 1)) : 0;
    }
};
} // namespace realm
170

171
/****************************** BPlusTreeNode ********************************/
172

173
// Out-of-line definition anchors the vtable for the abstract base.
BPlusTreeNode::~BPlusTreeNode()
{
}
176

177
/****************************** BPlusTreeLeaf ********************************/
178

179
// Insert an element into this leaf via 'func'. Returns 0 when the element
// fit in the leaf; otherwise splits the leaf, fills in 'state' with the
// split point and new subtree size, and returns the ref of the new sibling
// leaf (the caller must link it into the parent).
ref_type BPlusTreeLeaf::bptree_insert(size_t ndx, State& state, InsertFunc func)
{
    size_t leaf_size = get_node_size();
    REALM_ASSERT_DEBUG(leaf_size <= REALM_MAX_BPNODE_SIZE);
    // npos (or any out-of-range index) means append at the end
    if (ndx > leaf_size)
        ndx = leaf_size;
    if (REALM_LIKELY(leaf_size < REALM_MAX_BPNODE_SIZE)) {
        func(this, ndx);
        m_tree->adjust_leaf_bounds(1);
        return 0; // Leaf was not split
    }

    // Split leaf node
    auto new_leaf = m_tree->create_leaf_node();
    if (ndx == leaf_size) {
        // Appending: new element goes alone into the new sibling
        func(new_leaf.get(), 0);
        state.split_offset = ndx;
    }
    else {
        // Inserting in the middle: move the tail [ndx..) to the sibling,
        // then insert into this (now shortened) leaf
        move(new_leaf.get(), ndx, 0);
        func(this, ndx);
        state.split_offset = ndx + 1;
        // Invalidate cached leaf
        m_tree->invalidate_leaf_cache();
    }
    state.split_size = leaf_size + 1;

    return new_leaf->get_ref();
}
208

209
// Leaf level of the access recursion: simply apply 'func' at the local index.
void BPlusTreeLeaf::bptree_access(size_t ndx, AccessFunc func)
{
    func(this, ndx);
}
213

214
// Leaf level of the erase recursion: 'func' performs the erase and returns
// the resulting leaf size, which we propagate to the caller.
size_t BPlusTreeLeaf::bptree_erase(size_t ndx, EraseFunc func)
{
    size_t remaining = func(this, ndx);
    return remaining;
}
218

219
// Leaf level of the traversal: invoke 'func' once and report whether the
// caller requested the traversal to stop.
bool BPlusTreeLeaf::bptree_traverse(TraverseFunc func)
{
    auto control = func(this, 0);
    return control == IteratorControl::Stop;
}
223

224
/****************************** BPlusTreeInner *******************************/
225

226
// Construct an (unattached) inner-node accessor for 'tree'. The offsets
// array is parented at slot 0 of this node, matching the layout where slot 0
// holds the offsets ref on the general form.
BPlusTreeInner::BPlusTreeInner(BPlusTreeBase* tree)
    : BPlusTreeNode(tree)
    , Array(tree->get_alloc())
    , m_offsets(tree->get_alloc())
    , m_my_offset(0)
{
    m_offsets.set_parent(this, 0);
}
234

235
// Create the node in compact form: slot 0 holds the elems-per-child value
// tagged with bit 0 set (refs are even, so a set LSB marks a plain integer).
void BPlusTreeInner::create(size_t elems_per_child)
{
    // Born only with room for number of elements per child
    uint64_t tagged = (elems_per_child << 1) + 1;
    Array::create(Array::type_InnerBptreeNode, false, 1, tagged);
}
241

242
// Accessor only — destroying it does not free the underlying node storage.
BPlusTreeInner::~BPlusTreeInner()
{
}
245

246
// Attach this accessor to the node at 'mem'. Slot 0 discriminates the form:
// an even value is a ref to the offsets array (general form), an odd value
// is the tagged elems-per-child count (compact form).
void BPlusTreeInner::init_from_mem(MemRef mem)
{
    Array::init_from_mem(mem);
    auto rot = Array::get(0);
    if ((rot & 1) == 0) {
        // rot is a ref
        m_offsets.init_from_ref(to_ref(rot));
    }
    else {
        // Compact form — make sure no stale offsets accessor is attached
        m_offsets.detach();
    }
}
258

259
// Locate element 'n' within this subtree and apply 'func' to the leaf that
// holds it (with the leaf-local index). Child lookup uses the offsets array
// on the general form, or simple division on the compact form.
void BPlusTreeInner::bptree_access(size_t n, AccessFunc func)
{
    size_t child_ndx;
    size_t child_offset;
    if (m_offsets.is_attached()) {
        // General form: binary search in the cumulative offsets
        child_ndx = m_offsets.upper_bound(n);
        child_offset = get_bp_node_offset(child_ndx);
        REALM_ASSERT_3(child_ndx, <, get_node_size());
    }
    else {
        // Compact form: all children (except possibly the last) are full
        auto elems_per_child = get_elems_per_child();
        child_ndx = n / elems_per_child;
        child_offset = child_ndx * elems_per_child;
    }

    ref_type child_ref = get_bp_node_ref(child_ndx);
    char* child_header = m_alloc.translate(child_ref);
    MemRef mem(child_header, child_ref, m_alloc);
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
    if (child_is_leaf) {
        auto leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
        func(leaf, n - child_offset);
    }
    else {
        // Recurse with a stack-local accessor for the child inner node
        BPlusTreeInner node(m_tree);
        node.set_parent(this, child_ndx + 1);
        node.init_from_mem(mem);
        node.set_offset(child_offset + m_my_offset);
        node.bptree_access(n - child_offset, func);
    }
}
290

291
// Insert an element at tree position 'ndx' (npos means append). Returns 0 if
// the insertion was absorbed below or in this node; otherwise this node was
// split and the new sibling's ref is returned for the caller to link in.
ref_type BPlusTreeInner::bptree_insert(size_t ndx, State& state, InsertFunc func)
{
    size_t child_ndx;
    size_t child_offset;
    if (ndx != npos) {
        // Mid-tree insert requires the general form so offsets can be adjusted
        ensure_offsets();
        child_ndx = m_offsets.upper_bound(ndx);
        child_offset = get_bp_node_offset(child_ndx);
        ndx -= child_offset;
        REALM_ASSERT_3(child_ndx, <, get_node_size());
    }
    else {
        // Append: always descend into the last child
        child_ndx = get_node_size() - 1;
        if (m_offsets.is_attached()) {
            child_offset = get_bp_node_offset(child_ndx);
            REALM_ASSERT_3(child_ndx, <, get_node_size());
        }
        else {
            auto elems_per_child = get_elems_per_child();
            child_offset = child_ndx * elems_per_child;
        }
    }

    ref_type child_ref = get_bp_node_ref(child_ndx);
    char* child_header = m_alloc.translate(child_ref);
    MemRef mem(child_header, child_ref, m_alloc);
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
    ref_type new_sibling_ref;
    if (child_is_leaf) {
        auto leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
        new_sibling_ref = leaf->bptree_insert(ndx, state, func);
    }
    else {
        BPlusTreeInner node(m_tree);
        node.set_parent(this, child_ndx + 1);
        node.init_from_mem(mem);
        node.set_offset(child_offset + m_my_offset);
        new_sibling_ref = node.bptree_insert(ndx, state, func);
    }

    if (!new_sibling_ref) {
        // No split below: just bump the tagged tree size (+2 because tagged)
        adjust(size() - 1, +2); // Throws
        if (m_offsets.is_attached()) {
            // All offsets after the insertion point shift by one element
            m_offsets.adjust(child_ndx, m_offsets.size(), 1);
        }
        return 0;
    }

    // Child was split: link the new sibling in (may split this node too)
    return insert_bp_node(child_ndx, new_sibling_ref, state);
}
341

342
// Erase the element at tree position 'n'. Returns the resulting number of
// children in this node (0 signals to the caller that this node became empty
// and should be removed as well). Underfull children may be merged with
// their next sibling on the way back up.
size_t BPlusTreeInner::bptree_erase(size_t n, EraseFunc func)
{
    // Erase always needs per-child offsets, since sizes become irregular
    ensure_offsets();

    size_t child_ndx = m_offsets.upper_bound(n);
    size_t child_offset = get_bp_node_offset(child_ndx);
    REALM_ASSERT_3(child_ndx, <, get_node_size());

    // Call bptree_erase recursively and ultimately call 'func' on leaf
    size_t erase_node_size;
    ref_type child_ref = get_bp_node_ref(child_ndx);
    char* child_header = m_alloc.translate(child_ref);
    MemRef mem(child_header, child_ref, m_alloc);
    BPlusTreeLeaf* leaf = nullptr;
    BPlusTreeInner node(m_tree);
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
    if (child_is_leaf) {
        leaf = cache_leaf(mem, child_ndx, child_offset + m_my_offset);
        erase_node_size = func(leaf, n - child_offset);
        if (erase_node_size == 0) {
            // Leaf became empty and will be destroyed below
            m_tree->invalidate_leaf_cache();
        }
        else {
            m_tree->adjust_leaf_bounds(-1);
        }
    }
    else {
        node.set_parent(this, child_ndx + 1);
        node.init_from_mem(mem);
        node.set_offset(child_offset + m_my_offset);
        erase_node_size = node.bptree_erase(n - child_offset, func);
    }

    // Decrement tagged tree size (-2 because of the tag bit) and shift
    // all offsets after the erased position down by one
    adjust(size() - 1, -2);
    m_offsets.adjust(child_ndx, m_offsets.size(), -1);

    // Check if some nodes could be merged
    size_t num_children = get_node_size();
    if (erase_node_size == 0) {
        if (num_children == 1) {
            // Only child empty - delete this one too
            return 0;
        }
        // Child leaf is empty - destroy it!
        erase_and_destroy_bp_node(child_ndx);
        return num_children - 1;
    }
    else if (erase_node_size < REALM_MAX_BPNODE_SIZE / 2 && child_ndx < (num_children - 1)) {
        // Candidate for merge. First calculate if the combined size of current and
        // next sibling is small enough.
        size_t sibling_ndx = child_ndx + 1;
        ref_type sibling_ref = get_bp_node_ref(sibling_ndx);
        std::unique_ptr<BPlusTreeLeaf> sibling_leaf;
        BPlusTreeInner node2(m_tree);
        BPlusTreeNode* sibling_node;
        if (child_is_leaf) {
            sibling_leaf = m_tree->init_leaf_node(sibling_ref);
            sibling_node = sibling_leaf.get();
        }
        else {
            node2.init_from_ref(sibling_ref);
            sibling_node = &node2;
        }
        sibling_node->bp_set_parent(this, sibling_ndx + 1);

        size_t combined_size = sibling_node->get_node_size() + erase_node_size;

        if (combined_size < REALM_MAX_BPNODE_SIZE * 3 / 4) {
            // Combined size is small enough for the nodes to be merged
            // Move all content from the next sibling into current node
            int64_t offs_adj = 0;
            if (child_is_leaf) {
                REALM_ASSERT(leaf);
                if (sibling_ndx < m_offsets.size())
                    m_offsets.set(sibling_ndx - 1, m_offsets.get(sibling_ndx));
                sibling_node->move(leaf, 0, 0);
                // Invalidate cached leaf
                m_tree->invalidate_leaf_cache();
            }
            else {
                node.ensure_offsets();
                node2.ensure_offsets();
                // Offset adjustment is negative as the sibling has bigger offsets
                auto sibling_offs = m_offsets.get(sibling_ndx - 1);
                auto erase_node_offs = get_bp_node_offset(child_ndx);
                offs_adj = erase_node_offs - sibling_offs;
                if (sibling_ndx < m_offsets.size())
                    m_offsets.set(sibling_ndx - 1, m_offsets.get(sibling_ndx));

                size_t orig_size = node.get_tree_size();
                size_t size_of_moved = node2.get_tree_size();
                // Remove size field from node as the move operation will just add
                node.erase(node.size() - 1);
                node2.move(&node, 0, offs_adj);
                node.append_tree_size(orig_size + size_of_moved);
            }

            // Destroy sibling
            erase_and_destroy_bp_node(sibling_ndx);
            num_children--;
        }
    }

    return num_children;
}
447

448
// Visit every leaf in this subtree in order, passing each leaf and its
// absolute element offset to 'func'. Returns true if the traversal was
// stopped early by 'func'.
bool BPlusTreeInner::bptree_traverse(TraverseFunc func)
{
    size_t sz = get_node_size();
    for (size_t i = 0; i < sz; i++) {
        size_t child_offset;
        if (m_offsets.is_attached()) {
            child_offset = get_bp_node_offset(i);
        }
        else {
            // Compact form: children are evenly sized
            auto elems_per_child = get_elems_per_child();
            child_offset = i * elems_per_child;
        }

        bool done;
        ref_type child_ref = get_bp_node_ref(i);
        char* child_header = m_alloc.translate(child_ref);
        MemRef mem(child_header, child_ref, m_alloc);
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
        if (child_is_leaf) {
            auto leaf = cache_leaf(mem, i, child_offset + m_my_offset);
            done = (func(leaf, child_offset + m_my_offset) == IteratorControl::Stop);
        }
        else {
            BPlusTreeInner node(m_tree);
            node.set_parent(this, i + 1);
            node.init_from_mem(mem);
            node.set_offset(child_offset + m_my_offset);
            // std::cout << "BPlusTreeInner offset: " << child_offset << std::endl;
            done = node.bptree_traverse(func);
        }
        if (done) {
            return true;
        }
    }
    return false;
}
484

485
// Move children [ndx..end) from this node into 'new_node', rebasing their
// offsets by 'adj'. Afterwards this node is truncated to its first 'ndx'
// children (note: truncation also strips the trailing tree-size slot).
void BPlusTreeInner::move(BPlusTreeNode* new_node, size_t ndx, int64_t adj)
{
    BPlusTreeInner* dst(static_cast<BPlusTreeInner*>(new_node));
    size_t sz = get_node_size();

    // Copy refs
    for (size_t i = ndx; i < sz; i++) {
        size_t offs = get_bp_node_offset(i);
        dst->add_bp_node_ref(get_bp_node_ref(i), offs - adj);
    }
    // +1 skips the slot-0 header when truncating the ref slots
    truncate(ndx + 1);
    if (ndx > 0)
        m_offsets.truncate(ndx - 1);
}
499

500
// Convert this node from the compact form to the general form by
// materializing an offsets array. No-op if the node is already general.
void BPlusTreeInner::ensure_offsets()
{
    if (!m_offsets.is_attached()) {
        size_t elems_per_child = get_elems_per_child();
        size_t sz = size();
        // Children occupy sz-2 slots; there is one offset per child minus one
        size_t num_offsets = (sz > 2) ? sz - 3 : 0;
        size_t offs = 0;

        m_offsets.create(num_offsets, num_offsets * elems_per_child);
        for (size_t i = 0; i != num_offsets; ++i) {
            offs += elems_per_child;
            m_offsets.set(i, offs);
        }
        // Replace the tagged elems-per-child in slot 0 with the offsets ref
        Array::set_as_ref(0, m_offsets.get_ref());
    }
}
516

517
// Attach the tree's cached leaf accessor to the child at 'ndx', parent it to
// this node, and record the leaf's absolute element bounds on the tree.
inline BPlusTreeLeaf* BPlusTreeInner::cache_leaf(MemRef mem, size_t ndx, size_t offset)
{
    BPlusTreeLeaf* leaf = m_tree->cache_leaf(mem);
    leaf->bp_set_parent(this, ndx + 1);
    m_tree->set_leaf_bounds(offset, offset + leaf->get_node_size());
    return leaf;
}
526

527
// Remove the child at 'ndx' from this node, free its whole subtree, and keep
// the offsets array consistent (one offset per child minus one).
void BPlusTreeInner::erase_and_destroy_bp_node(size_t ndx)
{
    ref_type ref = get_bp_node_ref(ndx);
    erase(ndx + 1); // +1 skips the slot-0 header
    Array::destroy_deep(ref, m_tree->get_alloc());
    REALM_ASSERT_DEBUG(m_offsets.is_attached());
    size_t sz = m_offsets.size();
    if (sz) {
        // in this case there will always be an offset to erase
        if (ndx < sz) {
            m_offsets.erase(ndx);
        }
        else {
            // Erased the last child: drop the trailing offset instead
            m_offsets.erase(sz - 1);
        }
    }
    REALM_ASSERT_DEBUG(m_offsets.size() == get_node_size() - 1);
}
545

546
// Link the ref of a freshly split child in after position 'child_ndx'.
// Returns 0 when there was room in this node; otherwise splits this node
// too, updates 'state' for the next level up, and returns the new sibling's
// ref.
ref_type BPlusTreeInner::insert_bp_node(size_t child_ndx, ref_type new_sibling_ref, State& state)
{
    size_t new_ref_ndx = child_ndx + 1;

    size_t sz = get_node_size();
    if (sz < REALM_MAX_BPNODE_SIZE) {
        // Room in current node for the new child
        adjust(size() - 1, +2); // Throws
        if (m_offsets.is_attached()) {
            size_t elem_ndx_offset = get_bp_node_offset(child_ndx);
            m_offsets.insert(child_ndx, elem_ndx_offset + state.split_offset); // Throws
            m_offsets.adjust(child_ndx + 1, m_offsets.size(), +1);             // Throws
        }
        insert_bp_node_ref(new_ref_ndx, new_sibling_ref);
        return ref_type(0);
    }

    // This node has to be split
    BPlusTreeInner new_sibling(m_tree);

    size_t elem_ndx_offset = 0;
    if (m_offsets.is_attached()) {
        // General form: sibling must be general too
        new_sibling.create(0);
        new_sibling.ensure_offsets();
        elem_ndx_offset = get_bp_node_offset(child_ndx);
    }
    else {
        // Compact form: sibling inherits the same elems-per-child
        size_t elems_per_child = get_elems_per_child();
        elem_ndx_offset = child_ndx * elems_per_child;
        new_sibling.create(elems_per_child);
    }

    size_t new_split_offset;
    size_t new_split_size;
    if (new_ref_ndx == sz) {
        // Case 1/2: The split child was the last child of the parent
        // to be split. In this case the parent may or may not be on
        // the compact form.
        new_split_offset = size_t(elem_ndx_offset + state.split_offset);
        new_split_size = elem_ndx_offset + state.split_size;
        new_sibling.add_bp_node_ref(new_sibling_ref); // Throws
        set_tree_size(new_split_offset);             // Throws
    }
    else {
        // Case 2/2: The split child was not the last child of the
        // parent to be split. Since this is not possible during
        // 'append', we can safely assume that the parent node is on
        // the general form.
        new_split_offset = size_t(elem_ndx_offset + state.split_size);
        new_split_size = get_tree_size() + 1;

        move(&new_sibling, new_ref_ndx, (new_split_offset - 1));               // Strips off tree size
        add_bp_node_ref(new_sibling_ref, elem_ndx_offset + state.split_offset); // Throws
        append_tree_size(new_split_offset);                                    // Throws
    }

    new_sibling.append_tree_size(new_split_size - new_split_offset); // Throws

    // Propagate the split description to the level above
    state.split_offset = new_split_offset;
    state.split_size = new_split_size;

    return new_sibling.get_ref();
}
609

610
// Debug-only consistency check: validates node type, child counts, the
// offsets array (general form) or uniform child sizes (compact form), and
// that the stored tree size matches the recursive element count.
void BPlusTreeInner::verify() const
{
#ifdef REALM_DEBUG
    Array::verify();

    // This node must not be a leaf
    REALM_ASSERT_3(Array::get_type(), ==, Array::type_InnerBptreeNode);

    REALM_ASSERT_3(Array::size(), >=, 2);
    size_t num_children = get_node_size();

    // Verify invar:bptree-nonempty-inner
    REALM_ASSERT_3(num_children, >=, 1);

    size_t elems_per_child = 0;
    if (m_offsets.is_attached()) {
        REALM_ASSERT_DEBUG(m_offsets.size() == num_children - 1);
    }
    else {
        elems_per_child = get_elems_per_child();
    }

    size_t num_elems = 0;
    for (size_t i = 0; i < num_children; i++) {
        ref_type child_ref = get_bp_node_ref(i);
        char* child_header = m_alloc.translate(child_ref);
        MemRef mem(child_header, child_ref, m_alloc);
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
        size_t elems_in_child;
        if (child_is_leaf) {
            // const_cast is safe: cache_leaf only mutates accessor state
            auto leaf = const_cast<BPlusTreeInner*>(this)->cache_leaf(mem, i, 0);
            elems_in_child = leaf->get_node_size();
            leaf->verify();
        }
        else {
            BPlusTreeInner node(m_tree);
            node.set_parent(const_cast<BPlusTreeInner*>(this), i + 1);
            node.init_from_mem(mem);
            node.verify();
            elems_in_child = node.get_tree_size();
        }
        num_elems += elems_in_child;
        if (m_offsets.is_attached()) {
            if (i < num_children - 1) {
                // Cumulative count must match the stored offset
                REALM_ASSERT_DEBUG(num_elems == m_offsets.get(i));
            }
        }
        else {
            if (i < num_children - 1) {
                // Compact form: all but the last child must be exactly full
                REALM_ASSERT_DEBUG(elems_in_child == elems_per_child);
            }
            else {
                REALM_ASSERT_DEBUG(elems_in_child <= elems_per_child);
            }
        }
    }
    REALM_ASSERT_DEBUG(get_tree_size() == num_elems);
    // cache_leaf above clobbered the leaf bounds; reset the cache
    m_tree->invalidate_leaf_cache();
#endif
}
670

671
/****************************** BPlusTreeBase ********************************/
672

673
// Out-of-line definition anchors the vtable; node storage is not freed here.
BPlusTreeBase::~BPlusTreeBase()
{
}
676

677
// Create the tree's storage as a single empty leaf root. Idempotent: does
// nothing if a root already exists.
void BPlusTreeBase::create()
{
    if (!m_root) {                   // Idempotent
        m_root = create_leaf_node(); // Throws
        if (m_parent) {
            ref_type ref = get_ref();
            // Guard frees the new leaf if updating the parent ref throws
            _impl::DeepArrayRefDestroyGuard destroy_guard{ref, get_alloc()};
            m_parent->update_child_ref(m_ndx_in_parent, ref); // Throws
            destroy_guard.release();
        }
        m_root->bp_set_parent(m_parent, m_ndx_in_parent);
    }
}
690

691
void BPlusTreeBase::destroy()
692
{
3,444✔
693
    if (is_attached()) {
3,444✔
694
        ref_type ref = m_root->get_ref();
3,444✔
695
        Array::destroy_deep(ref, m_alloc);
3,444✔
696
        m_root = nullptr;
3,444✔
697
    }
3,444✔
698
    invalidate_leaf_cache();
3,444✔
699
}
3,444✔
700

701
// Install a new root node, wiring it to this tree's parent slot and writing
// its ref back to the parent before taking ownership.
void BPlusTreeBase::replace_root(std::unique_ptr<BPlusTreeNode> new_root)
{
    // Maintain parent.
    new_root->bp_set_parent(m_parent, m_ndx_in_parent);
    new_root->update_parent(); // Throws

    m_root = std::move(new_root);
}
709

710
// Insert an element at position 'n' (the element itself is written by
// 'func'). If the insertion splits the root, grow the tree by one level
// with a new inner root holding the old root and the new sibling.
void BPlusTreeBase::bptree_insert(size_t n, BPlusTreeNode::InsertFunc func)
{
    size_t bptree_size = m_root->get_tree_size();
    if (n == bptree_size) {
        // Inserting at the end is the 'append' fast path
        n = npos;
    }
    BPlusTreeNode::State state;
    ref_type new_sibling_ref = m_root->bptree_insert(n, state, func);
    if (REALM_UNLIKELY(new_sibling_ref)) {
        // Root split: the new root stays compact only for pure appends on a
        // compact old root; otherwise it needs explicit offsets
        bool compact_form = (n == npos) && m_root->is_compact();
        auto new_root = std::make_unique<BPlusTreeInner>(this);
        if (!compact_form) {
            new_root->create(0);
            new_root->ensure_offsets();
        }
        else {
            new_root->create(size_t(state.split_offset));
        }

        new_root->add_bp_node_ref(m_root->get_ref());                   // Throws
        new_root->add_bp_node_ref(new_sibling_ref, state.split_offset); // Throws
        new_root->append_tree_size(state.split_size);
        replace_root(std::move(new_root));
    }
}
735

736
// Erase the element at position 'n' (the erase itself is done by 'func').
// While an inner root is left with a single child, collapse the tree by one
// level, promoting the lone child to be the new root.
void BPlusTreeBase::bptree_erase(size_t n, BPlusTreeNode::EraseFunc func)
{
    size_t root_size = m_root->bptree_erase(n, func);
    while (!m_root->is_leaf() && root_size == 1) {
        BPlusTreeInner* node = static_cast<BPlusTreeInner*>(m_root.get());

        // Detach the child ref first so destroy_deep won't free the child
        ref_type new_root_ref = node->clear_first_bp_node_ref();
        node->destroy_deep();

        auto new_root = create_root_from_ref(new_root_ref);

        replace_root(std::move(new_root));
        root_size = m_root->get_node_size();
    }
}
751

752
// Build a root accessor for the node at 'ref'. Reuses the existing root
// accessor when it is of the matching kind (leaf vs inner); otherwise
// allocates a fresh one of the right type.
std::unique_ptr<BPlusTreeNode> BPlusTreeBase::create_root_from_ref(ref_type ref)
{
    char* header = m_alloc.translate(ref);
    bool is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
    bool reuse_root = m_root && m_root->is_leaf() == is_leaf;

    if (reuse_root) {
        m_root->init_from_ref(ref);
        return std::move(m_root);
    }

    if (is_leaf) {
        return init_leaf_node(ref);
    }
    else {
        std::unique_ptr<BPlusTreeNode> new_root = std::make_unique<BPlusTreeInner>(this);
        new_root->init_from_ref(ref);
        return new_root;
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc