
realm / realm-core / jorgen.edelbo_402

21 Aug 2024 11:10AM UTC coverage: 91.054% (-0.03%) from 91.085%

Pull Request #7803: Feature/string compression (Evergreen)

jedelbo: Small fix to Table::typed_write

When writing the Realm to a new file from a write transaction,
the Table may have been copied-on-write (COW), so its top ref may have
changed. Therefore, don't use the ref that was present in the group when
the operation started.

103494 of 181580 branches covered (57.0%)

1929 of 1999 new or added lines in 46 files covered. (96.5%)

695 existing lines in 51 files now uncovered.

220142 of 241772 relevant lines covered (91.05%)

7344461.76 hits per line

Source File: /src/realm/cluster_tree.cpp (87.63% covered)
1
/*************************************************************************
2
 *
3
 * Copyright 2020 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include "realm/cluster_tree.hpp"
20
#include "realm/group.hpp"
21
#include "realm/replication.hpp"
22
#include "realm/array_key.hpp"
23
#include "realm/array_integer.hpp"
24
#include "realm/array_backlink.hpp"
25
#include "realm/array_typed_link.hpp"
26
#include "realm/array_timestamp.hpp"
27
#include "realm/array_bool.hpp"
28
#include "realm/array_string.hpp"
29
#include "realm/array_mixed.hpp"
30
#include "realm/array_fixed_bytes.hpp"
31

32
#include <iostream>
33

34
/*
35
 * Node-splitting is done in the way that if the new element comes after all the
36
 * current elements, then the new element is added to the new node as the only
37
 * element and the old node is untouched. Here split key is the key of the new
38
 * element.
39
 * Otherwise the node is split so that the new element can be added to the old node.
40
 * So all elements that should come after the new element are moved to the new node.
41
 * Split key is the key of the first element that is moved. (First key that comes
42
 * after the new element).
43
 * Merging is done when a node is less than half full and the combined size will be
44
 * less than 3/4 of the max size.
45
 */
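/*
 * A small worked example of the rule above, assuming a hypothetical max node
 * size of 4 (the real limit is cluster_node_size): a full leaf holds keys
 * {0, 2, 4, 6}.
 *  - Inserting key 9, which comes after all current keys, places {9} alone in
 *    the new node, leaves the old node untouched, and the split key is 9.
 *  - Inserting key 3 moves {4, 6} to the new node and adds 3 to the old node,
 *    giving old = {0, 2, 3} and new = {4, 6}; the split key is 4, the first
 *    key that was moved.
 *  - Merging: a node holding a single element may later be merged with its
 *    next sibling as long as the combined size stays below 3 (3/4 of 4).
 */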
46

47
namespace realm {
48
/*
49
 * The inner nodes are organized so that the main array has a ref to the
50
 * (optional) key array in position 0, the subtree depth in position 1 and the
51
 * subtree size in position 2. After that follow the refs to the subordinate nodes.
52
 */
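/*
 * A minimal sketch of the slot layout and the tagged encoding used by the
 * accessors below (slot names are the constants defined in this class):
 *
 *   slot 0 (s_key_ref_index)         ref to the optional key array, or 0
 *   slot 1 (s_sub_tree_depth_index)  depth, stored tagged: (depth << 1) | 1
 *   slot 2 (s_sub_tree_size)         size, stored tagged:  (size << 1) | 1
 *   slot 3.. (s_first_node_index)    refs to the subordinate nodes
 *
 * Tagging sets the low bit so the value cannot be mistaken for a ref; it is
 * read back with a right shift, e.g. size_t(Array::get(s_sub_tree_size)) >> 1.
 */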
53
class ClusterNodeInner : public ClusterNode {
54
public:
55
    ClusterNodeInner(Allocator& allocator, const ClusterTree& tree_top);
56
    ~ClusterNodeInner() override;
57

58
    void create(int sub_tree_depth);
59
    void init(MemRef mem) override;
60
    void update_from_parent() noexcept override;
61
    MemRef ensure_writeable(RowKey k) override;
62
    void update_ref_in_parent(RowKey, ref_type) override;
63

64
    bool is_leaf() const override
65
    {
47,459,796✔
66
        return false;
47,459,796✔
67
    }
47,459,796✔
68

69
    int get_sub_tree_depth() const override
70
    {
72✔
71
        return m_sub_tree_depth;
72✔
72
    }
72✔
73

74
    bool traverse(ClusterTree::TraverseFunction func, int64_t) const;
75
    void update(ClusterTree::UpdateFunction func, int64_t);
76

77
    size_t node_size() const override
78
    {
84,777,750✔
79
        return Array::size() - s_first_node_index;
84,777,750✔
80
    }
84,777,750✔
81
    size_t get_tree_size() const override
82
    {
44,797,833✔
83
        return size_t(Array::get(s_sub_tree_size)) >> 1;
44,797,833✔
84
    }
44,797,833✔
85
    void set_tree_size(size_t sub_tree_size)
86
    {
41,990,772✔
87
        Array::set(s_sub_tree_size, sub_tree_size << 1 | 1);
41,990,772✔
88
    }
41,990,772✔
89
    size_t update_sub_tree_size();
90

91
    int64_t get_last_key_value() const override;
92

93
    void ensure_general_form() override;
94
    void insert_column(ColKey col) override;
95
    void remove_column(ColKey col) override;
96
    size_t nb_columns() const override;
97
    ref_type insert(RowKey k, const FieldValues& init_values, State& state) override;
98
    bool try_get(RowKey k, State& state) const noexcept override;
99
    ObjKey get(size_t ndx, State& state) const override;
100
    size_t get_ndx(RowKey key, size_t ndx) const noexcept override;
101
    size_t erase(RowKey k, CascadeState& state) override;
102
    void nullify_incoming_links(RowKey key, CascadeState& state) override;
103
    void add(ref_type ref, int64_t key_value = 0);
104

105
    // Reset first (and only!) child ref and return node based on the previous value
106
    std::unique_ptr<ClusterNode> return_and_clear_first_child()
107
    {
240✔
108
        REALM_ASSERT(node_size() == 1);
240✔
109
        auto new_root = m_tree_top.get_node(this, s_first_node_index);
240✔
110
        Array::set(s_first_node_index, 0); // The node no longer belongs to this
240✔
111
        return new_root;
240✔
112
    }
240✔
113

114
    int64_t get_first_key_value()
115
    {
240✔
116
        return m_keys.is_attached() ? m_keys.get(0) : 0;
240✔
117
    }
240✔
118

119
    bool get_leaf(RowKey key, ClusterNode::IteratorState& state) const noexcept;
120

121
    void dump_objects(int64_t key_offset, std::string lead) const override;
122

123
    virtual ref_type typed_write(ref_type ref, _impl::ArrayWriterBase& out) const override
124
    {
105,687✔
125
        REALM_ASSERT_DEBUG(ref == get_mem().get_ref());
105,687✔
126
        if (out.only_modified && m_alloc.is_read_only(ref)) {
105,687✔
127
            return ref;
6✔
128
        }
6✔
129
        REALM_ASSERT_DEBUG(get_is_inner_bptree_node_from_header(get_header()));
105,681✔
130
        REALM_ASSERT_DEBUG(!get_context_flag_from_header(get_header()));
105,681✔
131
        REALM_ASSERT_DEBUG(has_refs());
105,681✔
132
        TempArray written_node(size(), type_InnerBptreeNode);
105,681✔
133
        for (unsigned j = 0; j < size(); ++j) {
2,714,415✔
134
            RefOrTagged rot = get_as_ref_or_tagged(j);
2,608,734✔
135
            if (rot.is_ref() && rot.get_as_ref()) {
2,608,734✔
136
                if (out.only_modified && m_alloc.is_read_only(rot.get_as_ref())) {
2,373,900✔
137
                    written_node.set(j, rot);
1,989,798✔
138
                    continue;
1,989,798✔
139
                }
1,989,798✔
140
                if (j == 0) {
384,102✔
141
                    // keys (ArrayUnsigned, me thinks)
142
                    Array array_unsigned(m_alloc);
1,623✔
143
                    array_unsigned.init_from_ref(rot.get_as_ref());
1,623✔
144
                    written_node.set_as_ref(j, array_unsigned.write(out, false, out.only_modified, false));
1,623✔
145
                }
1,623✔
146
                else {
382,479✔
147
                    auto header = m_alloc.translate(rot.get_as_ref());
382,479✔
148
                    MemRef m(header, rot.get_as_ref(), m_alloc);
382,479✔
149
                    if (get_is_inner_bptree_node_from_header(header)) {
382,479✔
150
                        ClusterNodeInner inner_node(m_alloc, m_tree_top);
7,659✔
151
                        inner_node.init(m);
7,659✔
152
                        written_node.set_as_ref(j, inner_node.typed_write(rot.get_as_ref(), out));
7,659✔
153
                    }
7,659✔
154
                    else {
374,820✔
155
                        Cluster cluster(j, m_alloc, m_tree_top);
374,820✔
156
                        cluster.init(m);
374,820✔
157
                        written_node.set_as_ref(j, cluster.typed_write(rot.get_as_ref(), out));
374,820✔
158
                    }
374,820✔
159
                }
382,479✔
160
            }
384,102✔
161
            else { // not a ref, just copy value over
234,834✔
162
                written_node.set(j, rot);
234,834✔
163
            }
234,834✔
164
        }
2,608,734✔
165
        return written_node.write(out);
105,681✔
166
    }
105,687✔
167

168
private:
169
    static constexpr size_t s_key_ref_index = 0;
170
    static constexpr size_t s_sub_tree_depth_index = 1;
171
    static constexpr size_t s_sub_tree_size = 2;
172
    static constexpr size_t s_first_node_index = 3;
173

174
    int m_sub_tree_depth = 0;
175
    int m_shift_factor = 0;
176

177
    struct ChildInfo {
178
        size_t ndx;
179
        uint64_t offset;
180
        RowKey key;
181
        MemRef mem;
182
    };
183
    bool find_child(RowKey key, ChildInfo& ret) const noexcept
184
    {
399,106,356✔
185
        if (m_keys.is_attached()) {
399,106,356✔
186
            auto upper = m_keys.upper_bound(key.value);
343,158,504✔
187
            // The first entry in m_keys will always be smaller than or equal
188
            // to all keys in this subtree. If you get zero returned here, the
189
            // key is not in the tree
190
            if (upper == 0) {
343,158,504✔
191
                return false;
×
192
            }
×
193
            ret.ndx = upper - 1;
343,158,504✔
194
            ret.offset = m_keys.get(ret.ndx);
343,158,504✔
195
        }
343,158,504✔
196
        else {
55,947,852✔
197
            size_t sz = node_size();
55,947,852✔
198
            REALM_ASSERT_DEBUG(sz > 0);
55,947,852✔
199
            size_t max_ndx = sz - 1;
55,947,852✔
200
            ret.ndx = std::min(size_t(key.value >> m_shift_factor), max_ndx);
55,947,852✔
201
            ret.offset = ret.ndx << m_shift_factor;
55,947,852✔
202
        }
55,947,852✔
203
        ret.key = RowKey(key.value - ret.offset);
399,106,356✔
204
        ref_type child_ref = _get_child_ref(ret.ndx);
399,106,356✔
205
        char* child_header = m_alloc.translate(child_ref);
399,106,356✔
206
        ret.mem = MemRef(child_header, child_ref, m_alloc);
399,106,356✔
207
        return true;
399,106,356✔
208
    }
399,106,356✔
209

210
    ref_type _get_child_ref(size_t ndx) const noexcept
211
    {
478,885,815✔
212
        return Array::get_as_ref(ndx + s_first_node_index);
478,885,815✔
213
    }
478,885,815✔
214
    void _insert_child_ref(size_t ndx, ref_type ref)
215
    {
92,187✔
216
        Array::insert(ndx + s_first_node_index, from_ref(ref));
92,187✔
217
    }
92,187✔
218
    void _erase_child_ref(size_t ndx)
219
    {
21,135✔
220
        Array::erase(ndx + s_first_node_index);
21,135✔
221
    }
21,135✔
222
    void move(size_t ndx, ClusterNode* new_node, int64_t key_adj) override;
223

224
    template <class T, class F>
225
    T recurse(RowKey key, F func);
226

227
    template <class T, class F>
228
    T recurse(ChildInfo& child_info, F func);
229
    // Adjust key offset values by adding offset
230
    void adjust_keys(int64_t offset);
231
    // Make sure the first key offset value is 0
232
    // This is done by adjusting the child node by current first offset
233
    // and setting it to 0 thereafter
234
    void adjust_keys_first_child(int64_t adj);
235
};
236

237
/***************************** ClusterNodeInner ******************************/
238

239
ClusterNodeInner::ClusterNodeInner(Allocator& allocator, const ClusterTree& tree_top)
240
    : ClusterNode(0, allocator, tree_top)
49,063,668✔
241
{
98,129,799✔
242
}
98,129,799✔
243

244
ClusterNodeInner::~ClusterNodeInner() {}
98,129,433✔
245

246
void ClusterNodeInner::create(int sub_tree_depth)
247
{
2,811✔
248
    Array::create(Array::type_InnerBptreeNode, false, s_first_node_index);
2,811✔
249

250
    Array::set(s_key_ref_index, 0);
2,811✔
251

252
    Array::set(s_sub_tree_depth_index, RefOrTagged::make_tagged(sub_tree_depth));
2,811✔
253
    Array::set(s_sub_tree_size, 1); // sub_tree_size = 0 (as tagged value)
2,811✔
254
    m_sub_tree_depth = sub_tree_depth;
2,811✔
255
    m_shift_factor = m_sub_tree_depth * node_shift_factor;
2,811✔
256
}
2,811✔
257

258
void ClusterNodeInner::init(MemRef mem)
259
{
96,074,160✔
260
    Array::init_from_mem(mem);
96,074,160✔
261
    m_keys.set_parent(this, s_key_ref_index);
96,074,160✔
262
    ref_type ref = Array::get_as_ref(s_key_ref_index);
96,074,160✔
263
    if (ref) {
96,074,160✔
264
        m_keys.init_from_ref(ref);
81,994,623✔
265
    }
81,994,623✔
266
    else {
14,079,537✔
267
        m_keys.detach();
14,079,537✔
268
    }
14,079,537✔
269
    m_sub_tree_depth = int(Array::get(s_sub_tree_depth_index)) >> 1;
96,074,160✔
270
    m_shift_factor = m_sub_tree_depth * node_shift_factor;
96,074,160✔
271
}
96,074,160✔
272

273
void ClusterNodeInner::update_from_parent() noexcept
274
{
87,399✔
275
    Array::update_from_parent();
87,399✔
276
    ref_type ref = Array::get_as_ref(s_key_ref_index);
87,399✔
277
    if (ref) {
87,399✔
278
        m_keys.update_from_parent();
74,982✔
279
    }
74,982✔
280
    m_sub_tree_depth = int(Array::get(s_sub_tree_depth_index)) >> 1;
87,399✔
281
}
87,399✔
282

283
template <class T, class F>
284
T ClusterNodeInner::recurse(RowKey key, F func)
285
{
50,486,511✔
286
    ChildInfo child_info;
50,486,511✔
287
    if (!find_child(key, child_info)) {
50,486,511✔
288
        throw KeyNotFound("Child not found in recurse");
×
289
    }
×
290
    return recurse<T>(child_info, func);
50,486,511✔
291
}
50,486,511✔
292

293
template <class T, class F>
294
T ClusterNodeInner::recurse(ChildInfo& child_info, F func)
295
{
397,065,777✔
296
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_info.mem.get_addr());
397,065,777✔
297
    if (child_is_leaf) {
397,065,777✔
298
        Cluster leaf(child_info.offset + m_offset, m_alloc, m_tree_top);
307,476,018✔
299
        leaf.set_parent(this, child_info.ndx + s_first_node_index);
307,476,018✔
300
        leaf.init(child_info.mem);
307,476,018✔
301
        return func(&leaf, child_info);
307,476,018✔
302
    }
307,476,018✔
303
    else {
89,589,759✔
304
        ClusterNodeInner node(m_alloc, m_tree_top);
89,589,759✔
305
        node.set_parent(this, child_info.ndx + s_first_node_index);
89,589,759✔
306
        node.init(child_info.mem);
89,589,759✔
307
        node.set_offset(child_info.offset + m_offset);
89,589,759✔
308
        return func(&node, child_info);
89,589,759✔
309
    }
89,589,759✔
310
}
397,065,777✔
311

312
MemRef ClusterNodeInner::ensure_writeable(RowKey key)
313
{
×
314
    return recurse<MemRef>(key, [](ClusterNode* node, ChildInfo& child_info) {
×
315
        return node->ensure_writeable(child_info.key);
×
316
    });
×
317
}
×
318

319
void ClusterNodeInner::update_ref_in_parent(RowKey key, ref_type ref)
320
{
487,428✔
321
    ChildInfo child_info;
487,428✔
322
    if (!find_child(key, child_info)) {
487,428✔
323
        throw KeyNotFound("Child not found in update_ref_in_parent");
×
324
    }
×
325
    if (this->m_sub_tree_depth == 1) {
487,428✔
326
        set(child_info.ndx + s_first_node_index, ref);
270,492✔
327
    }
270,492✔
328
    else {
216,936✔
329
        ClusterNodeInner node(m_alloc, m_tree_top);
216,936✔
330
        node.set_parent(this, child_info.ndx + s_first_node_index);
216,936✔
331
        node.init(child_info.mem);
216,936✔
332
        node.set_offset(child_info.offset + m_offset);
216,936✔
333
        node.update_ref_in_parent(child_info.key, ref);
216,936✔
334
    }
216,936✔
335
}
487,428✔
336

337
ref_type ClusterNodeInner::insert(RowKey row_key, const FieldValues& init_values, ClusterNode::State& state)
338
{
33,676,158✔
339
    return recurse<ref_type>(row_key, [this, &state, &init_values](ClusterNode* node, ChildInfo& child_info) {
33,676,158✔
340
        ref_type new_sibling_ref = node->insert(child_info.key, init_values, state);
33,592,350✔
341

342
        set_tree_size(get_tree_size() + 1);
33,592,350✔
343

344
        if (!new_sibling_ref) {
33,592,350✔
345
            return ref_type(0);
33,531,477✔
346
        }
33,531,477✔
347

348
        size_t new_ref_ndx = child_info.ndx + 1;
60,873✔
349

350
        int64_t split_key_value = state.split_key + child_info.offset;
60,873✔
351
        uint64_t sz = node_size();
60,873✔
352
        if (sz < cluster_node_size) {
92,187✔
353
            if (m_keys.is_attached()) {
92,187✔
354
                m_keys.insert(new_ref_ndx, split_key_value);
82,044✔
355
            }
82,044✔
356
            else {
10,143✔
357
                if (uint64_t(split_key_value) != sz << m_shift_factor) {
10,143✔
358
                    ensure_general_form();
294✔
359
                    m_keys.insert(new_ref_ndx, split_key_value);
294✔
360
                }
294✔
361
            }
10,143✔
362
            _insert_child_ref(new_ref_ndx, new_sibling_ref);
92,187✔
363
            return ref_type(0);
92,187✔
364
        }
92,187✔
365

366
        ClusterNodeInner child(m_alloc, m_tree_top);
4,294,967,294✔
367
        child.create(m_sub_tree_depth);
4,294,967,294✔
368
        if (new_ref_ndx == sz) {
4,294,967,294✔
369
            child.add(new_sibling_ref);
186✔
370
            state.split_key = split_key_value;
186✔
371
        }
186✔
372
        else {
4,294,967,294✔
373
            int64_t first_key_value = m_keys.get(new_ref_ndx);
4,294,967,294✔
374
            child.ensure_general_form();
4,294,967,294✔
375
            move(new_ref_ndx, &child, first_key_value);
4,294,967,294✔
376
            add(new_sibling_ref, split_key_value); // Throws
4,294,967,294✔
377
            state.split_key = first_key_value;
4,294,967,294✔
378
        }
4,294,967,294✔
379

380
        // Some objects have been moved out of this tree - find out how many
381
        size_t child_sub_tree_size = child.update_sub_tree_size();
4,294,967,294✔
382
        set_tree_size(get_tree_size() - child_sub_tree_size);
4,294,967,294✔
383

384
        return child.get_ref();
4,294,967,294✔
385
    });
60,873✔
386
}
33,676,158✔
387

388
bool ClusterNodeInner::try_get(RowKey key, ClusterNode::State& state) const noexcept
389
{
348,651,576✔
390
    ChildInfo child_info;
348,651,576✔
391
    if (!find_child(key, child_info)) {
348,651,576✔
392
        return false;
×
393
    }
×
394
    return const_cast<ClusterNodeInner*>(this)->recurse<bool>(child_info,
348,651,576✔
395
                                                              [&state](const ClusterNode* node, ChildInfo& info) {
348,651,576✔
396
                                                                  return node->try_get(info.key, state);
345,842,496✔
397
                                                              });
345,842,496✔
398
}
348,651,576✔
399

400
ObjKey ClusterNodeInner::get(size_t ndx, ClusterNode::State& state) const
401
{
12,006,942✔
402
    size_t sz = node_size();
12,006,942✔
403
    size_t child_ndx = 0;
12,006,942✔
404
    while (child_ndx < sz) {
74,806,164✔
405
        int64_t key_offset = m_keys.is_attached() ? m_keys.get(child_ndx) : (child_ndx << m_shift_factor);
74,792,586✔
406

407
        ref_type child_ref = _get_child_ref(child_ndx);
74,792,586✔
408
        char* child_header = m_alloc.translate(child_ref);
74,792,586✔
409
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
74,792,586✔
410
        size_t sub_tree_size;
74,792,586✔
411
        if (child_is_leaf) {
74,792,586✔
412
            sub_tree_size = Cluster::node_size_from_header(m_alloc, child_header);
72,112,449✔
413
            if (ndx < sub_tree_size) {
72,112,449✔
414
                Cluster leaf(key_offset + m_offset, m_alloc, m_tree_top);
9,401,280✔
415
                leaf.init(MemRef(child_header, child_ref, m_alloc));
9,401,280✔
416
                REALM_ASSERT(sub_tree_size == leaf.get_tree_size());
9,401,280✔
417
                return leaf.get(ndx, state);
9,401,280✔
418
            }
9,401,280✔
419
        }
72,112,449✔
420
        else {
2,680,137✔
421
            ClusterNodeInner node(m_alloc, m_tree_top);
2,680,137✔
422
            node.init(MemRef(child_header, child_ref, m_alloc));
2,680,137✔
423
            node.set_offset(key_offset + m_offset);
2,680,137✔
424
            sub_tree_size = node.get_tree_size();
2,680,137✔
425
            if (ndx < sub_tree_size) {
2,680,137✔
426
                return node.get(ndx, state);
2,592,690✔
427
            }
2,592,690✔
428
        }
2,680,137✔
429
        child_ndx++;
62,798,616✔
430
        ndx -= sub_tree_size;
62,798,616✔
431
    }
62,798,616✔
432
    return {};
2,147,497,225✔
433
}
12,006,942✔
434

435
size_t ClusterNodeInner::get_ndx(RowKey key, size_t ndx) const noexcept
436
{
24,723✔
437
    ChildInfo child_info;
24,723✔
438
    if (!find_child(key, child_info)) {
24,723✔
439
        return realm::npos;
×
440
    }
×
441

442
    // First figure out how many objects there are in nodes before actual one
443
    // then descent in tree
444

445
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_info.mem.get_addr());
24,723✔
446
    if (child_is_leaf) {
24,723✔
447
        for (unsigned i = 0; i < child_info.ndx; i++) {
51,075✔
448
            ref_type ref = _get_child_ref(i);
26,352✔
449
            char* header = m_alloc.translate(ref);
26,352✔
450
            ndx += Cluster::node_size_from_header(m_alloc, header);
26,352✔
451
        }
26,352✔
452
        Cluster leaf(child_info.offset + m_offset, m_alloc, m_tree_top);
24,723✔
453
        leaf.init(child_info.mem);
24,723✔
454
        return leaf.get_ndx(child_info.key, ndx);
24,723✔
455
    }
24,723✔
456
    else {
×
457
        for (unsigned i = 0; i < child_info.ndx; i++) {
×
458
            char* header = m_alloc.translate(_get_child_ref(i));
×
459
            ndx += size_t(Array::get(header, s_sub_tree_size)) >> 1;
×
460
        }
×
461
        ClusterNodeInner node(m_alloc, m_tree_top);
×
462
        node.init(child_info.mem);
×
463
        node.set_offset(child_info.offset + m_offset);
×
464
        return node.get_ndx(child_info.key, ndx);
×
465
    }
×
466
}
24,723✔
467

468
void ClusterNodeInner::adjust_keys(int64_t adj)
469
{
30✔
470
    ensure_general_form();
30✔
471
    REALM_ASSERT(m_keys.get(0) == 0);
30✔
472
    m_keys.adjust(0, m_keys.size(), adj);
30✔
473

474
    // Now the first key offset value is 'adj' - it must be 0
475
    adjust_keys_first_child(adj);
30✔
476
}
30✔
477

478
void ClusterNodeInner::adjust_keys_first_child(int64_t adj)
479
{
12,060✔
480
    ref_type child_ref = _get_child_ref(0);
12,060✔
481
    char* child_header = m_alloc.translate(child_ref);
12,060✔
482
    auto mem = MemRef(child_header, child_ref, m_alloc);
12,060✔
483
    if (Array::get_is_inner_bptree_node_from_header(child_header)) {
12,060✔
484
        ClusterNodeInner node(m_alloc, m_tree_top);
30✔
485
        node.set_parent(this, s_first_node_index);
30✔
486
        node.init(mem);
30✔
487
        node.adjust_keys(adj);
30✔
488
    }
30✔
489
    else {
12,030✔
490
        Cluster node(0, m_alloc, m_tree_top);
12,030✔
491
        node.set_parent(this, s_first_node_index);
12,030✔
492
        node.init(mem);
12,030✔
493
        node.adjust_keys(adj);
12,030✔
494
    }
12,030✔
495
    m_keys.set(0, 0);
12,060✔
496
}
12,060✔
497

498
size_t ClusterNodeInner::erase(RowKey key, CascadeState& state)
499
{
8,433,669✔
500
    return recurse<size_t>(key, [this, &state](ClusterNode* erase_node, ChildInfo& child_info) {
8,433,669✔
501
        size_t erase_node_size = erase_node->erase(child_info.key, state);
8,433,243✔
502
        bool is_leaf = erase_node->is_leaf();
8,433,243✔
503
        set_tree_size(get_tree_size() - 1);
8,433,243✔
504

505
        if (erase_node_size == 0) {
8,433,243✔
506
            erase_node->destroy_deep();
13,350✔
507

508
            ensure_general_form();
13,350✔
509
            _erase_child_ref(child_info.ndx);
13,350✔
510
            m_keys.erase(child_info.ndx);
13,350✔
511
            if (child_info.ndx == 0 && m_keys.size() > 0) {
13,350✔
512
                auto first_offset = m_keys.get(0);
12,030✔
513
                // Adjust all key values in new first node
514
                // We have to make sure that the first key offset value
515
                // in all inner nodes is 0
516
                adjust_keys_first_child(first_offset);
12,030✔
517
            }
12,030✔
518
        }
13,350✔
519
        else if (erase_node_size < cluster_node_size / 2 && child_info.ndx < (node_size() - 1)) {
8,419,893✔
520
            // Candidate for merge. First calculate if the combined size of current and
521
            // next sibling is small enough.
522
            size_t sibling_ndx = child_info.ndx + 1;
3,314,937✔
523
            Cluster l2(child_info.offset, m_alloc, m_tree_top);
3,314,937✔
524
            ClusterNodeInner n2(m_alloc, m_tree_top);
3,314,937✔
525
            ClusterNode* sibling_node = is_leaf ? static_cast<ClusterNode*>(&l2) : static_cast<ClusterNode*>(&n2);
3,314,937✔
526
            sibling_node->set_parent(this, sibling_ndx + s_first_node_index);
3,314,937✔
527
            sibling_node->init_from_parent();
3,314,937✔
528

529
            size_t combined_size = sibling_node->node_size() + erase_node_size;
3,314,937✔
530

531
            if (combined_size < cluster_node_size * 3 / 4) {
3,314,937✔
532
                // Calculate value that must be subtracted from the moved keys
533
                // (will be negative as the sibling has bigger keys)
534
                int64_t key_adj = m_keys.is_attached() ? (m_keys.get(child_info.ndx) - m_keys.get(sibling_ndx))
7,785✔
535
                                                       : 0 - (1 << m_shift_factor);
7,785✔
536
                // And then move all elements into current node
537
                sibling_node->ensure_general_form();
7,785✔
538
                erase_node->ensure_general_form();
7,785✔
539
                sibling_node->move(0, erase_node, key_adj);
7,785✔
540

541
                if (!erase_node->is_leaf()) {
7,785✔
542
                    static_cast<ClusterNodeInner*>(erase_node)->update_sub_tree_size();
39✔
543
                }
39✔
544

545
                // Destroy sibling
546
                sibling_node->destroy_deep();
7,785✔
547

548
                ensure_general_form();
7,785✔
549
                _erase_child_ref(sibling_ndx);
7,785✔
550
                m_keys.erase(sibling_ndx);
7,785✔
551
            }
7,785✔
552
        }
3,314,937✔
553

554
        return node_size();
8,433,243✔
555
    });
8,433,243✔
556
}
8,433,669✔
557

558
void ClusterNodeInner::nullify_incoming_links(RowKey key, CascadeState& state)
559
{
8,363,217✔
560
    recurse<void>(key, [&state](ClusterNode* node, ChildInfo& child_info) {
8,363,217✔
561
        node->nullify_incoming_links(child_info.key, state);
8,362,761✔
562
    });
8,362,761✔
563
}
8,363,217✔
564

565
void ClusterNodeInner::ensure_general_form()
566
{
23,625✔
567
    if (!m_keys.is_attached()) {
23,625✔
568
        size_t current_size = node_size();
2,568✔
569
        m_keys.create(current_size, (current_size - 1) << m_shift_factor);
2,568✔
570
        m_keys.update_parent();
2,568✔
571
        for (size_t i = 0; i < current_size; i++) {
9,486✔
572
            m_keys.set(i, i << m_shift_factor);
6,918✔
573
        }
6,918✔
574
    }
2,568✔
575
}
23,625✔
576

577
void ClusterNodeInner::insert_column(ColKey col)
578
{
162✔
579
    size_t sz = node_size();
162✔
580
    for (size_t i = 0; i < sz; i++) {
24,252✔
581
        std::shared_ptr<ClusterNode> node = m_tree_top.get_node(this, i + s_first_node_index);
24,090✔
582
        node->insert_column(col);
24,090✔
583
    }
24,090✔
584
}
162✔
585

586
void ClusterNodeInner::remove_column(ColKey col)
587
{
6✔
588
    size_t sz = node_size();
6✔
589
    for (size_t i = 0; i < sz; i++) {
18✔
590
        std::shared_ptr<ClusterNode> node = m_tree_top.get_node(this, i + s_first_node_index);
12✔
591
        node->remove_column(col);
12✔
592
    }
12✔
593
}
6✔
594

595
size_t ClusterNodeInner::nb_columns() const
596
{
×
597
    ref_type ref = _get_child_ref(0);
×
598
    char* header = m_alloc.translate(ref);
×
599
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
×
600
    MemRef mem(header, ref, m_alloc);
×
601
    if (child_is_leaf) {
×
602
        Cluster leaf(0, m_alloc, m_tree_top);
×
603
        leaf.init(mem);
×
604
        return leaf.nb_columns();
×
605
    }
×
606
    else {
×
607
        ClusterNodeInner node(m_alloc, m_tree_top);
×
608
        node.init(mem);
×
609
        return node.nb_columns();
×
610
    }
×
611
}
×
612

613
void ClusterNodeInner::add(ref_type ref, int64_t key_value)
614
{
5,409✔
615
    if (m_keys.is_attached()) {
5,409✔
616
        m_keys.add(key_value);
27✔
617
    }
27✔
618
    else {
5,382✔
619
        if (uint64_t(key_value) != (uint64_t(node_size()) << m_shift_factor)) {
5,382✔
620
            ensure_general_form();
2,061✔
621
            m_keys.add(key_value);
2,061✔
622
        }
2,061✔
623
    }
5,382✔
624
    Array::add(from_ref(ref));
5,409✔
625
}
5,409✔
626

627
// Find the leaf that contains the object identified by key. If this does not exist, return the
628
// leaf that contains the next object
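// For example (with hypothetical keys): if the tree holds keys {10, 20, 30}
// and get_leaf is called with key 25, the returned state points at the leaf
// and index holding key 30, the next object in key order.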
629
bool ClusterNodeInner::get_leaf(RowKey key, ClusterNode::IteratorState& state) const noexcept
630
{
2,692,248✔
631
    size_t child_ndx;
2,692,248✔
632
    if (m_keys.is_attached()) {
2,692,248✔
633
        child_ndx = m_keys.upper_bound(key.value);
2,655,012✔
634
        if (child_ndx > 0)
2,655,012✔
635
            child_ndx--;
2,655,030✔
636
    }
2,655,012✔
637
    else {
37,236✔
638
        REALM_ASSERT_DEBUG(node_size() > 0);
37,236✔
639
        size_t max_ndx = node_size() - 1;
37,236✔
640
        child_ndx = std::min(size_t(key.value >> m_shift_factor), max_ndx);
37,236✔
641
    }
37,236✔
642

643
    size_t sz = node_size();
2,692,248✔
644
    while (child_ndx < sz) {
2,754,765✔
645
        uint64_t key_offset = m_keys.is_attached() ? m_keys.get(child_ndx) : (child_ndx << m_shift_factor);
2,746,848✔
646
        RowKey new_key(key_offset < key.value ? key.value - key_offset : 0);
2,746,848✔
647
        state.m_key_offset += key_offset;
2,746,848✔
648

649
        ref_type child_ref = _get_child_ref(child_ndx);
2,746,848✔
650
        char* child_header = m_alloc.translate(child_ref);
2,746,848✔
651
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
2,746,848✔
652
        if (child_is_leaf) {
2,746,848✔
653
            state.m_current_leaf.init(MemRef(child_header, child_ref, m_alloc));
1,542,132✔
654
            state.m_current_leaf.set_offset(state.m_key_offset);
1,542,132✔
655
            state.m_current_index = state.m_current_leaf.lower_bound_key(new_key);
1,542,132✔
656
            if (state.m_current_index < state.m_current_leaf.node_size())
1,542,132✔
657
                return true;
1,479,639✔
658
        }
1,542,132✔
659
        else {
1,204,716✔
660
            ClusterNodeInner node(m_alloc, m_tree_top);
1,204,716✔
661
            node.init(MemRef(child_header, child_ref, m_alloc));
1,204,716✔
662
            if (node.get_leaf(new_key, state))
1,204,716✔
663
                return true;
1,204,692✔
664
        }
1,204,716✔
665
        state.m_key_offset -= key_offset;
62,517✔
666
        child_ndx++;
62,517✔
667
    }
62,517✔
668
    return false;
7,917✔
669
}
2,692,248✔
670

671
// LCOV_EXCL_START
672
void ClusterNodeInner::dump_objects(int64_t key_offset, std::string lead) const
673
{
×
674
    std::cout << lead << "node" << std::endl;
×
675
    if (!m_keys.is_attached()) {
×
676
        std::cout << lead << "compact form" << std::endl;
×
677
    }
×
678
    size_t sz = node_size();
×
679
    for (unsigned i = 0; i < sz; i++) {
×
680
        int64_t key_value;
×
681
        if (m_keys.is_attached()) {
×
682
            key_value = m_keys.get(i) + key_offset;
×
683
        }
×
684
        else {
×
685
            key_value = (i << m_shift_factor) + key_offset;
×
686
        }
×
687
        std::cout << lead << std::hex << "split: " << key_value << std::dec << std::endl;
×
688
        m_tree_top.get_node(const_cast<ClusterNodeInner*>(this), i + s_first_node_index)
×
689
            ->dump_objects(key_value, lead + "   ");
×
690
    }
×
691
}
×
692
// LCOV_EXCL_STOP
693
void ClusterNodeInner::move(size_t ndx, ClusterNode* new_node, int64_t key_adj)
694
{
66✔
695
    auto new_cluster_node_inner = static_cast<ClusterNodeInner*>(new_node);
66✔
696
    for (size_t i = ndx; i < node_size(); i++) {
7,866✔
697
        new_cluster_node_inner->Array::add(_get_child_ref(i));
7,800✔
698
    }
7,800✔
699
    for (size_t i = ndx; i < m_keys.size(); i++) {
7,866✔
700
        new_cluster_node_inner->m_keys.add(m_keys.get(i) - key_adj);
7,800✔
701
    }
7,800✔
702
    truncate(ndx + s_first_node_index);
66✔
703
    if (m_keys.is_attached()) {
66✔
704
        m_keys.truncate(ndx);
66✔
705
    }
66✔
706
}
66✔
707

708
size_t ClusterNodeInner::update_sub_tree_size()
709
{
2,850✔
710
    size_t sub_tree_size = 0;
2,850✔
711
    auto sz = node_size();
2,850✔
712

713
    for (unsigned i = 0; i < sz; i++) {
19,320✔
714
        ref_type ref = _get_child_ref(i);
16,470✔
715
        char* header = m_alloc.translate(ref);
16,470✔
716
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
16,470✔
717
        if (child_is_leaf) {
16,470✔
718
            sub_tree_size += Cluster::node_size_from_header(m_alloc, header);
16,326✔
719
        }
16,326✔
720
        else {
144✔
721
            sub_tree_size += size_t(Array::get(header, s_sub_tree_size)) >> 1;
144✔
722
        }
144✔
723
    }
16,470✔
724
    set_tree_size(sub_tree_size);
2,850✔
725
    return sub_tree_size;
2,850✔
726
}
2,850✔
727

728
bool ClusterNodeInner::traverse(ClusterTree::TraverseFunction func, int64_t key_offset) const
729
{
1,678,380✔
730
    auto sz = node_size();
1,678,380✔
731

732
    for (unsigned i = 0; i < sz; i++) {
8,339,952✔
733
        ref_type ref = _get_child_ref(i);
6,690,693✔
734
        char* header = m_alloc.translate(ref);
6,690,693✔
735
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
6,690,693✔
736
        MemRef mem(header, ref, m_alloc);
6,690,693✔
737
        int64_t offs = (m_keys.is_attached() ? m_keys.get(i) : i << m_shift_factor) + key_offset;
6,690,693✔
738
        if (child_is_leaf) {
6,690,693✔
739
            Cluster leaf(offs, m_alloc, m_tree_top);
6,493,902✔
740
            leaf.init(mem);
6,493,902✔
741
            if (func(&leaf) == IteratorControl::Stop) {
6,493,902✔
742
                return true;
29,121✔
743
            }
29,121✔
744
        }
6,493,902✔
745
        else {
196,791✔
746
            ClusterNodeInner node(m_alloc, m_tree_top);
196,791✔
747
            node.init(mem);
196,791✔
748
            if (node.traverse(func, offs)) {
196,791✔
749
                return true;
×
750
            }
×
751
        }
196,791✔
752
    }
6,690,693✔
753
    return false;
1,649,259✔
754
}
1,678,380✔
755

756
void ClusterNodeInner::update(ClusterTree::UpdateFunction func, int64_t key_offset)
UNCOV
757
{
×
UNCOV
758
    auto sz = node_size();
×
759

UNCOV
760
    for (unsigned i = 0; i < sz; i++) {
×
UNCOV
761
        ref_type ref = _get_child_ref(i);
×
UNCOV
762
        char* header = m_alloc.translate(ref);
×
UNCOV
763
        bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
×
UNCOV
764
        MemRef mem(header, ref, m_alloc);
×
UNCOV
765
        int64_t offs = (m_keys.is_attached() ? m_keys.get(i) : i << m_shift_factor) + key_offset;
×
UNCOV
766
        if (child_is_leaf) {
×
UNCOV
767
            Cluster leaf(offs, m_alloc, m_tree_top);
×
UNCOV
768
            leaf.init(mem);
×
UNCOV
769
            leaf.set_parent(this, i + s_first_node_index);
×
UNCOV
770
            func(&leaf);
×
UNCOV
771
        }
×
772
        else {
×
773
            ClusterNodeInner node(m_alloc, m_tree_top);
×
774
            node.init(mem);
×
775
            node.set_parent(this, i + s_first_node_index);
×
776
            node.update(func, offs);
×
777
        }
×
UNCOV
778
    }
×
UNCOV
779
}
×
780

781
int64_t ClusterNodeInner::get_last_key_value() const
782
{
×
783
    auto last_ndx = node_size() - 1;
×
784

785
    ref_type ref = _get_child_ref(last_ndx);
×
786
    char* header = m_alloc.translate(ref);
×
787
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
×
788
    MemRef mem(header, ref, m_alloc);
×
789
    int64_t offset = m_keys.is_attached() ? m_keys.get(last_ndx) : last_ndx << m_shift_factor;
×
790
    if (child_is_leaf) {
×
791
        Cluster leaf(offset, m_alloc, m_tree_top);
×
792
        leaf.init(mem);
×
793
        return offset + leaf.get_last_key_value();
×
794
    }
×
795
    else {
×
796
        ClusterNodeInner node(m_alloc, m_tree_top);
×
797
        node.init(mem);
×
798
        return offset + node.get_last_key_value();
×
799
    }
×
800
}
×
801

802
ClusterTree::ClusterTree(Table* owner, Allocator& alloc, size_t top_position_for_cluster_tree)
803
    : m_alloc(alloc)
27,111✔
804
    , m_owner(owner)
27,111✔
805
    , m_top_position_for_cluster_tree(top_position_for_cluster_tree)
27,111✔
806
{
53,556✔
807
}
53,556✔
808

809
ClusterTree::~ClusterTree() {}
31,740✔
810

811
std::unique_ptr<ClusterNode> ClusterTree::create_root_from_parent(ArrayParent* parent, size_t ndx_in_parent)
812
{
2,580,519✔
813
    ref_type ref = parent->get_child_ref(ndx_in_parent);
2,580,519✔
814
    if (!ref)
2,580,519✔
815
        return nullptr;
×
816

817
    MemRef mem{m_alloc.translate(ref), ref, m_alloc};
2,580,519✔
818
    const char* header = mem.get_addr();
2,580,519✔
819
    bool is_leaf = !Array::get_is_inner_bptree_node_from_header(header);
2,580,519✔
820

821
    bool can_reuse_root_accessor = m_root && m_root->is_leaf() == is_leaf;
2,580,519✔
822
    if (can_reuse_root_accessor) {
2,580,519✔
823
        m_root->init(mem);
2,445,894✔
824
        return std::move(m_root); // Same root will be reinstalled.
2,445,894✔
825
    }
2,445,894✔
826

827
    // Not reusing the root node, allocating a new one.
828
    std::unique_ptr<ClusterNode> new_root;
134,625✔
829
    if (is_leaf) {
134,625✔
830
        new_root = std::make_unique<Cluster>(0, m_alloc, *this);
90,672✔
831
    }
90,672✔
832
    else {
43,953✔
833
        new_root = std::make_unique<ClusterNodeInner>(m_alloc, *this);
43,953✔
834
    }
43,953✔
835
    new_root->init(mem);
134,625✔
836
    new_root->set_parent(parent, ndx_in_parent);
134,625✔
837

838
    return new_root;
134,625✔
839
}
2,580,519✔
840

841
std::unique_ptr<ClusterNode> ClusterTree::get_node(ArrayParent* parent, size_t ndx_in_parent) const
842
{
24,342✔
843
    ref_type ref = parent->get_child_ref(ndx_in_parent);
24,342✔
844

845
    std::unique_ptr<ClusterNode> node;
24,342✔
846

847
    char* child_header = static_cast<char*>(m_alloc.translate(ref));
24,342✔
848
    bool child_is_leaf = !Array::get_is_inner_bptree_node_from_header(child_header);
24,342✔
849
    if (child_is_leaf) {
24,342✔
850
        node = std::make_unique<Cluster>(0, m_alloc, *this);
24,228✔
851
    }
24,228✔
852
    else {
114✔
853
        node = std::make_unique<ClusterNodeInner>(m_alloc, *this);
114✔
854
    }
114✔
855
    node->init(MemRef(child_header, ref, m_alloc));
24,342✔
856
    node->set_parent(parent, ndx_in_parent);
24,342✔
857

858
    return node;
24,342✔
859
}
24,342✔
860

861
void ClusterTree::clear(CascadeState& state)
862
{
5,568✔
863
    m_owner->clear_indexes();
5,568✔
864

865
    if (state.m_group) {
5,568✔
866
        remove_all_links(state); // This will also delete objects losing their last strong link
5,262✔
867
    }
5,262✔
868

869
    // We no longer have "clear table" instruction, so we have to report the removal of each
870
    // object individually
871
    if (Replication* repl = m_owner->get_repl()) {
5,568✔
872
        // Go through all clusters
873
        traverse([repl, this](const Cluster* cluster) {
4,107✔
874
            auto sz = cluster->node_size();
4,107✔
875
            for (size_t i = 0; i < sz; i++) {
105,978✔
876
                repl->remove_object(m_owner, cluster->get_real_key(i));
101,871✔
877
            }
101,871✔
878
            return IteratorControl::AdvanceToNext;
4,107✔
879
        });
4,107✔
880
    }
3,753✔
881

882
    m_root->destroy_deep();
5,568✔
883

884
    auto leaf = std::make_unique<Cluster>(0, m_root->get_alloc(), *this);
5,568✔
885
    leaf->create();
5,568✔
886
    replace_root(std::move(leaf));
5,568✔
887

888
    bump_content_version();
5,568✔
889
    bump_storage_version();
5,568✔
890

891
    m_size = 0;
5,568✔
892
}
5,568✔
893

894
void ClusterTree::replace_root(std::unique_ptr<ClusterNode> new_root)
895
{
8,406✔
896
    if (new_root != m_root) {
8,406✔
897
        // Maintain parent.
898
        new_root->set_parent(m_root->get_parent(), m_root->get_ndx_in_parent());
8,406✔
899
        new_root->update_parent(); // Throws
8,406✔
900
        m_root = std::move(new_root);
8,406✔
901
    }
8,406✔
902
}
8,406✔
903

904
bool ClusterTree::init_from_parent()
905
{
2,576,829✔
906
    m_root = get_root_from_parent();
2,576,829✔
907
    if (m_root) {
2,582,463✔
908
        m_size = m_root->get_tree_size();
2,582,463✔
909
        return true;
2,582,463✔
910
    }
2,582,463✔
911
    m_size = 0;
4,294,967,294✔
912
    return false;
4,294,967,294✔
913
}
2,576,829✔
914

915
void ClusterTree::update_from_parent() noexcept
916
{
1,108,086✔
917
    m_root->update_from_parent();
1,108,086✔
918
    m_size = m_root->get_tree_size();
1,108,086✔
919
}
1,108,086✔
920

921
void ClusterTree::insert_fast(ObjKey k, const FieldValues& init_values, ClusterNode::State& state)
922
{
24,611,973✔
923
    ref_type new_sibling_ref = m_root->insert(ClusterNode::RowKey(k), init_values, state);
24,611,973✔
924
    if (REALM_UNLIKELY(new_sibling_ref)) {
24,611,973✔
925
        auto new_root = std::make_unique<ClusterNodeInner>(m_root->get_alloc(), *this);
2,598✔
926
        new_root->create(m_root->get_sub_tree_depth() + 1);
2,598✔
927

928
        new_root->add(m_root->get_ref());                // Throws
2,598✔
929
        new_root->add(new_sibling_ref, state.split_key); // Throws
2,598✔
930
        new_root->update_sub_tree_size();
2,598✔
931

932
        replace_root(std::move(new_root));
2,598✔
933
    }
2,598✔
934
    m_size++;
24,611,973✔
935
}
24,611,973✔
936

937
Obj ClusterTree::insert(ObjKey k, const FieldValues& init_values)
938
{
24,586,905✔
939
    ClusterNode::State state;
24,586,905✔
940

941
    insert_fast(k, init_values, state);
24,586,905✔
942
    m_owner->update_indexes(k, init_values);
24,586,905✔
943

944
    bump_content_version();
24,586,905✔
945
    bump_storage_version();
24,586,905✔
946

947
    // Replicate setting of values
948
    if (Replication* repl = m_owner->get_repl()) {
24,586,905✔
949
        auto pk_col = m_owner->get_primary_key_column();
9,556,611✔
950
        for (const auto& v : init_values) {
9,556,611✔
951
            if (v.col_key != pk_col)
435,135✔
952
                repl->set(m_owner, v.col_key, k, v.value, v.is_default ? _impl::instr_SetDefault : _impl::instr_Set);
36,228✔
953
        }
435,135✔
954
    }
9,556,611✔
955

956
    return Obj(get_table_ref(), state.mem, k, state.index);
24,586,905✔
957
}
24,586,905✔
958

959
bool ClusterTree::is_valid(ObjKey k) const noexcept
960
{
85,390,695✔
961
    if (m_size == 0)
85,390,695✔
962
        return false;
218,106✔
963

964
    ClusterNode::State state;
85,172,589✔
965
    return m_root->try_get(ClusterNode::RowKey(k), state);
85,172,589✔
966
}
85,390,695✔
967

968
ClusterNode::State ClusterTree::try_get(ObjKey k) const noexcept
969
{
35,633,388✔
970
    ClusterNode::State state;
35,633,388✔
971
    if (!(k && m_root->try_get(ClusterNode::RowKey(k), state)))
35,769,129✔
972
        state.index = realm::npos;
53,496✔
973
    return state;
35,633,388✔
974
}
35,633,388✔
975

976
ClusterNode::State ClusterTree::get(size_t ndx, ObjKey& k) const
977
{
11,914,749✔
978
    if (ndx >= m_size) {
11,914,749✔
979
        throw Exception(ErrorCodes::InvalidatedObject, "Object was deleted");
×
980
    }
×
981
    ClusterNode::State state;
11,914,749✔
982
    k = m_root->get(ndx, state);
11,914,749✔
983
    return state;
11,914,749✔
984
}
11,914,749✔
985

986
size_t ClusterTree::get_ndx(ObjKey k) const noexcept
987
{
36,750✔
988
    return m_root->get_ndx(ClusterNode::RowKey(k), 0);
36,750✔
989
}
36,750✔
990

991
void ClusterTree::erase(ObjKey k, CascadeState& state)
992
{
5,317,320✔
993
    if (!k.is_unresolved()) {
5,317,320✔
994
        if (auto table = get_owning_table()) {
5,277,696✔
995
            if (Replication* repl = table->get_repl()) {
5,277,678✔
996
                repl->remove_object(table, k);
4,834,182✔
997
            }
4,834,182✔
998
        }
5,277,678✔
999
    }
5,277,696✔
1000
    m_owner->free_local_id_after_hash_collision(k);
5,317,320✔
1001
    m_owner->erase_from_search_indexes(k);
5,317,320✔
1002

1003
    size_t root_size = m_root->erase(ClusterNode::RowKey(k), state);
5,317,320✔
1004

1005
    bump_content_version();
5,317,320✔
1006
    bump_storage_version();
5,317,320✔
1007
    m_size--;
5,317,320✔
1008
    while (!m_root->is_leaf() && root_size == 1) {
5,317,560✔
1009
        ClusterNodeInner* node = static_cast<ClusterNodeInner*>(m_root.get());
240✔
1010

1011
        REALM_ASSERT(node->get_first_key_value() == 0);
240✔
1012
        auto new_root = node->return_and_clear_first_child();
240✔
1013
        node->destroy_deep();
240✔
1014

1015
        replace_root(std::move(new_root));
240✔
1016
        root_size = m_root->node_size();
240✔
1017
    }
240✔
1018
}
5,317,320✔
1019

1020
bool ClusterTree::get_leaf(ObjKey key, ClusterNode::IteratorState& state) const noexcept
1021
{
2,132,787✔
1022
    state.clear();
2,132,787✔
1023
    ClusterNode::RowKey row_key(key);
2,132,787✔
1024
    if (m_root->is_leaf()) {
2,132,787✔
1025
        Cluster* node = static_cast<Cluster*>(m_root.get());
645,243✔
1026
        REALM_ASSERT_DEBUG(node->get_offset() == 0);
645,243✔
1027
        state.m_key_offset = 0;
645,243✔
1028
        state.m_current_leaf.init(node->get_mem());
645,243✔
1029
        state.m_current_leaf.set_offset(state.m_key_offset);
645,243✔
1030
        state.m_current_index = node->lower_bound_key(row_key);
645,243✔
1031
        return state.m_current_index < state.m_current_leaf.node_size();
645,243✔
1032
    }
645,243✔
1033
    else {
1,487,544✔
1034
        ClusterNodeInner* node = static_cast<ClusterNodeInner*>(m_root.get());
1,487,544✔
1035
        return node->get_leaf(row_key, state);
1,487,544✔
1036
    }
1,487,544✔
1037
}
2,132,787✔
1038

1039
bool ClusterTree::traverse(TraverseFunction func) const
1040
{
2,424,180✔
1041
    if (m_root->is_leaf()) {
2,424,180✔
1042
        return func(static_cast<Cluster*>(m_root.get())) == IteratorControl::Stop;
738,414✔
1043
    }
738,414✔
1044
    else {
1,685,766✔
1045
        return static_cast<ClusterNodeInner*>(m_root.get())->traverse(func, 0);
1,685,766✔
1046
    }
1,685,766✔
1047
}
2,424,180✔
1048

1049
void ClusterTree::update(UpdateFunction func)
1050
{
282✔
1051
    if (m_root->is_leaf()) {
282✔
1052
        func(static_cast<Cluster*>(m_root.get()));
282✔
1053
    }
282✔
UNCOV
1054
    else {
×
UNCOV
1055
        static_cast<ClusterNodeInner*>(m_root.get())->update(func, 0);
×
UNCOV
1056
    }
×
1057
}
282✔
1058

1059
void ClusterTree::set_string_interner(ArrayPayload& arr, ColKey col_key) const
1060
{
3,479,325✔
1061
    // Check for owner. This function may be called in context of DictionaryClusterTree
1062
    // in which case m_owner is null (and spec never needed).
1063
    if (m_owner) {
3,479,346✔
1064
        arr.set_string_interner(m_owner->get_string_interner(col_key));
3,479,346✔
1065
    }
3,479,346✔
1066
}
3,479,325✔
1067

1068
TableRef ClusterTree::get_table_ref() const
1069
{
251,724,273✔
1070
    REALM_ASSERT(m_owner != nullptr);
251,724,273✔
1071
    // as safe as storing the TableRef locally in the ClusterTree,
1072
    // because the cluster tree and the table is basically one object :-O
1073
    return m_owner->m_own_ref;
251,724,273✔
1074
}
251,724,273✔
1075

1076
std::unique_ptr<ClusterNode> ClusterTree::get_root_from_parent()
1077
{
2,578,428✔
1078
    return create_root_from_parent(&m_owner->m_top, m_top_position_for_cluster_tree);
2,578,428✔
1079
}
2,578,428✔
1080

1081
void ClusterTree::verify() const
1082
{
252,552✔
1083
#ifdef REALM_DEBUG
252,552✔
1084
    traverse([](const Cluster* cluster) {
372,567✔
1085
        cluster->verify();
372,567✔
1086
        return IteratorControl::AdvanceToNext;
372,567✔
1087
    });
372,567✔
1088
#endif
252,552✔
1089
}
252,552✔
1090

1091
void ClusterTree::nullify_incoming_links(ObjKey obj_key, CascadeState& state)
1092
{
5,192,238✔
1093
    REALM_ASSERT(state.m_group);
5,192,238✔
1094
    m_root->nullify_incoming_links(ClusterNode::RowKey(obj_key), state);
5,192,238✔
1095
}
5,192,238✔
1096

1097
void ClusterTree::remove_all_links(CascadeState& state)
1098
{
5,262✔
1099
    Allocator& alloc = get_alloc();
5,262✔
1100
    auto origin_table = get_owning_table();
5,262✔
1101
    // This function will add objects that should be deleted to 'state'
1102
    auto func = [&](const Cluster* cluster) {
5,640✔
1103
        auto remove_link_from_column = [&](ColKey col_key) {
69,843✔
1104
            // Prevent making changes to table that is going to be removed anyway
1105
            // Furthermore it is a prerequisite for using 'traverse' that the tree
1106
            // is not modified
1107
            if (origin_table->links_to_self(col_key)) {
69,843✔
1108
                return IteratorControl::AdvanceToNext;
228✔
1109
            }
228✔
1110
            auto col_type = col_key.get_type();
69,615✔
1111
            if (col_key.is_collection()) {
69,615✔
1112
                ArrayInteger values(alloc);
5,421✔
1113
                cluster->init_leaf(col_key, &values);
5,421✔
1114
                size_t sz = values.size();
5,421✔
1115

1116
                for (size_t i = 0; i < sz; i++) {
18,597✔
1117
                    if (ref_type ref = values.get_as_ref(i)) {
13,176✔
1118
                        ObjKey origin_key = cluster->get_real_key(i);
390✔
1119
                        if (col_key.is_list() || col_key.is_set()) {
390✔
1120
                            if (col_type == col_type_Link) {
318✔
1121
                                BPlusTree<ObjKey> links(alloc);
258✔
1122
                                links.init_from_ref(ref);
258✔
1123
                                if (links.size() > 0) {
258✔
1124
                                    cluster->do_remove_backlinks(origin_key, col_key, links.get_all(), state);
246✔
1125
                                }
246✔
1126
                            }
258✔
1127
                            else if (col_type == col_type_TypedLink) {
60✔
1128
                                BPlusTree<ObjLink> links(alloc);
×
1129
                                links.init_from_ref(ref);
×
1130
                                if (links.size() > 0) {
×
1131
                                    cluster->do_remove_backlinks(origin_key, col_key, links.get_all(), state);
×
1132
                                }
×
1133
                            }
×
1134
                            else if (col_type == col_type_Mixed) {
60✔
1135
                                BPlusTree<Mixed> mix_arr(alloc);
60✔
1136
                                mix_arr.init_from_ref(ref);
60✔
1137
                                auto sz = mix_arr.size();
60✔
1138
                                std::vector<ObjLink> links;
60✔
1139
                                for (size_t j = 0; j < sz; j++) {
180✔
1140
                                    auto mix = mix_arr.get(j);
120✔
1141
                                    if (mix.is_type(type_TypedLink)) {
120✔
1142
                                        links.push_back(mix.get<ObjLink>());
120✔
1143
                                    }
120✔
1144
                                }
120✔
1145
                                if (links.size())
60✔
1146
                                    cluster->do_remove_backlinks(origin_key, col_key, links, state);
60✔
1147
                            }
60✔
1148
                        }
318✔
1149
                        else if (col_key.is_dictionary()) {
72✔
1150
                            std::vector<ObjLink> links;
72✔
1151

1152
                            Array top(alloc);
72✔
1153
                            top.set_parent(&values, i);
72✔
1154
                            top.init_from_parent();
72✔
1155
                            BPlusTree<Mixed> values(alloc);
72✔
1156
                            values.set_parent(&top, 1);
72✔
1157
                            values.init_from_parent();
72✔
1158

1159
                            // Iterate through values and insert all link values
1160
                            values.for_all([&](Mixed val) {
108✔
1161
                                if (val.is_type(type_TypedLink)) {
108✔
1162
                                    links.push_back(val.get<ObjLink>());
108✔
1163
                                }
108✔
1164
                            });
108✔
1165

1166
                            if (links.size() > 0) {
72✔
1167
                                cluster->do_remove_backlinks(origin_key, col_key, links, state);
72✔
1168
                            }
72✔
1169
                        }
72✔
1170
                    }
390✔
1171
                }
13,176✔
1172
            }
5,421✔
1173
            else {
64,194✔
1174
                if (col_type == col_type_Link) {
64,194✔
1175
                    ArrayKey values(alloc);
1,512✔
1176
                    cluster->init_leaf(col_key, &values);
1,512✔
1177
                    size_t sz = values.size();
1,512✔
1178
                    for (size_t i = 0; i < sz; i++) {
4,644✔
1179
                        if (ObjKey key = values.get(i)) {
3,132✔
1180
                            cluster->do_remove_backlinks(cluster->get_real_key(i), col_key, std::vector<ObjKey>{key},
522✔
1181
                                                         state);
522✔
1182
                        }
522✔
1183
                    }
3,132✔
1184
                }
1,512✔
1185
                else if (col_type == col_type_TypedLink) {
62,682✔
1186
                    ArrayTypedLink values(alloc);
×
1187
                    cluster->init_leaf(col_key, &values);
×
1188
                    size_t sz = values.size();
×
1189
                    for (size_t i = 0; i < sz; i++) {
×
1190
                        if (ObjLink link = values.get(i)) {
×
1191
                            cluster->do_remove_backlinks(cluster->get_real_key(i), col_key,
×
1192
                                                         std::vector<ObjLink>{link}, state);
×
1193
                        }
×
1194
                    }
×
1195
                }
×
1196
                else if (col_type == col_type_Mixed) {
62,682✔
1197
                    ArrayMixed values(alloc);
108✔
1198
                    cluster->init_leaf(col_key, &values);
108✔
1199
                    size_t sz = values.size();
108✔
1200
                    for (size_t i = 0; i < sz; i++) {
648✔
1201
                        Mixed mix = values.get(i);
540✔
1202
                        if (mix.is_type(type_TypedLink)) {
540✔
1203
                            cluster->do_remove_backlinks(cluster->get_real_key(i), col_key,
12✔
1204
                                                         std::vector<ObjLink>{mix.get<ObjLink>()}, state);
12✔
1205
                        }
12✔
1206
                    }
540✔
1207
                }
108✔
1208
                else if (col_type == col_type_BackLink) {
62,574✔
1209
                    ArrayBacklink values(alloc);
282✔
1210
                    cluster->init_leaf(col_key, &values);
282✔
1211
                    values.set_parent(const_cast<Cluster*>(cluster),
282✔
1212
                                      col_key.get_index().val + Cluster::s_first_col_index);
282✔
1213
                    size_t sz = values.size();
282✔
1214
                    for (size_t i = 0; i < sz; i++) {
2,106✔
1215
                        values.nullify_fwd_links(i, state);
1,824✔
1216
                    }
1,824✔
1217
                }
282✔
1218
            }
64,194✔
1219
            return IteratorControl::AdvanceToNext;
69,615✔
1220
        };
69,843✔
1221
        m_owner->for_each_and_every_column(remove_link_from_column);
5,640✔
1222
        return IteratorControl::AdvanceToNext;
5,640✔
1223
    };
5,640✔
1224

1225
    // Go through all clusters
1226
    traverse(func);
5,262✔
1227

1228
    const_cast<Table*>(get_owning_table())->remove_recursive(state);
5,262✔
1229
}
5,262✔
1230

1231
/**************************  ClusterTree::Iterator  **************************/

ClusterTree::Iterator::Iterator(const ClusterTree& t, size_t ndx)
    : m_tree(t)
    , m_leaf(0, t.get_alloc(), t)
    , m_state(m_leaf)
    , m_instance_version(t.get_instance_version())
    , m_leaf_invalid(false)
    , m_position(ndx)
{
    auto sz = t.size();
    if (ndx >= sz) {
        // end
        m_position = sz;
        m_leaf_invalid = true;
    }
    else if (ndx == 0) {
        // begin
        m_key = load_leaf(ObjKey(0));
        m_leaf_start_pos = 0;
    }
    else {
        auto s = const_cast<ClusterTree&>(m_tree).get(ndx, m_key);
        m_state.init(s, m_key);
        m_leaf_start_pos = ndx - m_state.m_current_index;
    }
}

ClusterTree::Iterator::Iterator(const Iterator& other)
    : m_tree(other.m_tree)
    , m_leaf(0, m_tree.get_alloc(), m_tree)
    , m_state(m_leaf)
    , m_instance_version(m_tree.get_instance_version())
    , m_key(other.m_key)
    , m_leaf_invalid(other.m_leaf_invalid)
    , m_position(other.m_position)
{
    m_leaf_start_pos = m_position - m_state.m_current_index;
}
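
// Bookkeeping note: m_leaf_start_pos is the absolute position of the first
// object in the currently loaded leaf, so whenever the leaf is valid
//     m_position == m_leaf_start_pos + m_state.m_current_index
// Both constructors establish this invariant; load_leaf() and go() below
// re-establish it whenever a different leaf is loaded.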

size_t ClusterTree::Iterator::get_position()
{
    auto ndx = m_tree.get_ndx(m_key);
    if (ndx == realm::npos) {
        throw StaleAccessor("Stale iterator");
    }
    return ndx;
}

ObjKey ClusterTree::Iterator::load_leaf(ObjKey key) const
{
    m_storage_version = m_tree.get_storage_version(m_instance_version);
    // 'key' may or may not exist. If it does not exist, state is updated
    // to point to the next object in line.
    if (m_tree.get_leaf(key, m_state)) {
        m_leaf_start_pos = m_position - m_state.m_current_index;
        // Get the actual key value
        return m_leaf.get_real_key(m_state.m_current_index);
    }
    else {
        // end of table
        return null_key;
    }
}

void ClusterTree::Iterator::go(size_t abs_pos)
{
    size_t sz = m_tree.size();
    if (abs_pos >= sz) {
        throw OutOfBounds("go() on Iterator", abs_pos, sz);
    }

    m_position = abs_pos;

    // If the position is within the current leaf then just set the iterator to that position
    if (!m_leaf_invalid && m_storage_version == m_tree.get_storage_version(m_instance_version)) {
        if (abs_pos >= m_leaf_start_pos && abs_pos < (m_leaf_start_pos + m_leaf.node_size())) {
            m_state.m_current_index = abs_pos - m_leaf_start_pos;
            m_key = m_leaf.get_real_key(m_state.m_current_index);
            return;
        }
    }

    // Find cluster holding requested position
    auto s = m_tree.get(abs_pos, m_key);
    m_state.init(s, m_key);
    m_leaf_start_pos = abs_pos - s.index;
    m_leaf_invalid = false;
}

bool ClusterTree::Iterator::update() const
{
    if (m_leaf_invalid || m_storage_version != m_tree.get_storage_version(m_instance_version)) {
        ObjKey k = load_leaf(m_key);
        m_leaf_invalid = !k || (k != m_key);
        if (m_leaf_invalid) {
            throw StaleAccessor("Stale iterator");
        }
        return true;
    }

    REALM_ASSERT(m_leaf.is_attached());
    return false;
}
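
// update() is the refresh hook used by operator->() further down: when the
// storage version has moved on (or the leaf was never valid), the leaf is
// reloaded via load_leaf() and true is returned so the caller rebuilds its
// cached Obj. If the iterator's key no longer exists at all, the iterator is
// considered stale and an exception is thrown.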

ClusterTree::Iterator& ClusterTree::Iterator::operator++()
{
    if (m_leaf_invalid || m_storage_version != m_tree.get_storage_version(m_instance_version)) {
        ObjKey k = load_leaf(m_key);
        if (k != m_key) {
            // Object was deleted; k points to the next object
            m_key = k;
            m_leaf_invalid = !m_key;
            return *this;
        }
    }

    m_state.m_current_index++;
    m_position++;
    if (m_state.m_current_index == m_leaf.node_size()) {
        m_key = load_leaf(ObjKey(m_key.value + 1));
        m_leaf_invalid = !m_key;
    }
    else {
        m_key = m_leaf.get_real_key(m_state.m_current_index);
    }
    return *this;
}

ClusterTree::Iterator& ClusterTree::Iterator::operator+=(ptrdiff_t adj)
{
    // If you have to jump far ahead and thus have to load many leaves,
    // this function will be slow
    REALM_ASSERT(adj >= 0);
    if (adj == 0) {
        return *this;
    }

    size_t n = size_t(adj);
    if (m_leaf_invalid || m_storage_version != m_tree.get_storage_version(m_instance_version)) {
        ObjKey k = load_leaf(m_key);
        if (k != m_key) {
            // Object was deleted; k points to the next object
            m_key = k;
            m_position = m_key ? m_tree.get_ndx(m_key) : m_tree.size();
            n--;
        }
    }
    if (n > 0) {
        m_position += n;
        size_t left_in_leaf = m_leaf.node_size() - m_state.m_current_index;
        if (n < left_in_leaf) {
            m_state.m_current_index += n;
            m_key = m_leaf.get_real_key(m_state.m_current_index);
        }
        else {
            if (m_position < m_tree.size()) {
                auto s = const_cast<ClusterTree&>(m_tree).get(m_position, m_key);
                m_state.init(s, m_key);
                m_leaf_start_pos = m_position - m_state.m_current_index;
            }
            else {
                m_key = ObjKey();
                m_position = m_tree.size();
            }
        }
    }
    m_leaf_invalid = !m_key;
    return *this;
}

ClusterTree::Iterator::pointer ClusterTree::Iterator::operator->() const
{
    if (update() || m_key != m_obj.get_key()) {
        new (&m_obj) Obj(m_tree.get_table_ref(), m_leaf.get_mem(), m_key, m_state.m_current_index);
    }

    return &m_obj;
}

} // namespace realm
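
For orientation, here is a minimal sketch (not part of cluster_tree.cpp) of how this iterator is normally reached through the public Table API. It assumes a Group that already contains a table named "class_Foo"; the table name and the helper function are illustrative only.

    #include <realm/group.hpp>
    #include <realm/table.hpp>

    // Count the objects in a table by walking Table's iterator, which is backed
    // by ClusterTree::Iterator: operator++ advances within the current leaf
    // cluster and loads the next cluster when the leaf is exhausted.
    size_t count_objects(realm::Group& group)
    {
        realm::TableRef table = group.get_table("class_Foo"); // assumed to exist
        size_t n = 0;
        for (auto it = table->begin(); it != table->end(); ++it) {
            ++n; // each step corresponds to one Obj position, visited in key order
        }
        return n;
    }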