• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / 1868

27 Nov 2023 10:27AM UTC coverage: 91.681% (-0.003%) from 91.684%
1868

push

Evergreen

web-flow
Support wildcard notation for key path arrays (#7163)

* Use Realm::create_key_path_array in 'object' test
* Add bindgen entry

92398 of 169340 branches covered (54.57%)

265 of 284 new or added lines in 4 files covered. (93.31%)

63 existing lines in 14 files now uncovered.

231841 of 252877 relevant lines covered (91.68%)

6599244.58 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.55
/src/realm/group.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <new>
20
#include <algorithm>
21
#include <fstream>
22

23
#ifdef REALM_DEBUG
24
#include <iostream>
25
#include <iomanip>
26
#endif
27

28
#include <realm/util/file_mapper.hpp>
29
#include <realm/util/memory_stream.hpp>
30
#include <realm/util/thread.hpp>
31
#include <realm/impl/destroy_guard.hpp>
32
#include <realm/utilities.hpp>
33
#include <realm/exceptions.hpp>
34
#include <realm/group_writer.hpp>
35
#include <realm/transaction.hpp>
36
#include <realm/replication.hpp>
37

38
using namespace realm;
39
using namespace realm::util;
40

41
namespace {
42

43
class Initialization {
44
public:
45
    Initialization()
46
    {
24✔
47
        realm::cpuid_init();
24✔
48
    }
24✔
49
};
50

51
Initialization initialization;
52

53
} // anonymous namespace
54

55
// Construct a free-standing, empty Group backed by its own slab allocator.
Group::Group()
    : m_local_alloc(new SlabAlloc)
    , m_alloc(*m_local_alloc) // Throws
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
    m_alloc.attach_empty(); // Throws
    m_file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    // top_ref == 0 means there is no persisted state: instantiate a new
    // empty group in memory.
    ref_type top_ref = 0;
    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
}
3,996✔
70

71

72
// Open a free-standing Group from a Realm file, optionally encrypted.
// The file itself is mapped read-only and is never created by this path.
Group::Group(const std::string& file_path, const char* encryption_key)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();

    SlabAlloc::Config cfg;
    cfg.read_only = true;
    cfg.no_create = true;
    cfg.encryption_key = encryption_key;
    ref_type top_ref = m_alloc.attach_file(file_path, cfg); // Throws

    // Non-Transaction Groups always allow writing and simply don't allow
    // committing when opened in read-only mode.
    m_alloc.set_read_only(false);

    open(top_ref, file_path);
}
849✔
92

93

94
// Open a free-standing Group from an in-memory buffer. When take_ownership
// is true the allocator assumes responsibility for freeing the buffer.
Group::Group(BinaryData buffer, bool take_ownership)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    REALM_ASSERT(buffer.data());

    init_array_parents();
    ref_type top_ref = m_alloc.attach_buffer(buffer.data(), buffer.size()); // Throws

    open(top_ref, {});

    // Take ownership only after open() succeeded, so a throwing open does
    // not leave us freeing a buffer we never owned.
    if (take_ownership)
        m_alloc.own_buffer();
}
48✔
111

112
// Construct a Group on top of an externally owned allocator (used by
// Transaction); note that m_local_alloc stays null here.
Group::Group(SlabAlloc* alloc) noexcept
    : m_alloc(*alloc)
    , // Throws
    m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
}
2,752,200✔
121

122
namespace {
123

124
class TableRecycler : public std::vector<Table*> {
125
public:
126
    ~TableRecycler()
127
    {
×
128
        REALM_UNREACHABLE();
129
        // if ever enabled, remember to release Tables:
×
130
        // for (auto t : *this) {
×
131
        //    delete t;
×
132
        //}
×
133
    }
×
134
};
135

136
// We use the classic approach to construct a FIFO from two LIFO's,
137
// insertion is done into recycler_1, removal is done from recycler_2,
138
// and when recycler_2 is empty, recycler_1 is reversed into recycler_2.
139
// this i O(1) for each entry.
140
auto& g_table_recycler_1 = *new TableRecycler;
141
auto& g_table_recycler_2 = *new TableRecycler;
142
// number of tables held back before being recycled. We hold back recycling
143
// the latest to increase the probability of detecting race conditions
144
// without crashing.
145
const static int g_table_recycling_delay = 100;
146
auto& g_table_recycler_mutex = *new std::mutex;
147

148
} // namespace
149

150
// Advance to the next occupied table slot and cache its key.
TableKeyIterator& TableKeyIterator::operator++()
{
    m_pos++;
    m_index_in_group++;
    load_key();
    return *this;
}
5,122,947✔
157

158
// Dereference: lazily resolve the key if it has not been loaded yet
// (a default-constructed TableKey converts to false).
TableKey TableKeyIterator::operator*()
{
    if (!bool(m_table_key)) {
        load_key();
    }
    return m_table_key;
}
5,257,941✔
165

166
void TableKeyIterator::load_key()
167
{
6,433,224✔
168
    const Group& g = *m_group;
6,433,224✔
169
    size_t max_index_in_group = g.m_table_names.size();
6,433,224✔
170
    while (m_index_in_group < max_index_in_group) {
6,788,901✔
171
        RefOrTagged rot = g.m_tables.get_as_ref_or_tagged(m_index_in_group);
5,613,687✔
172
        if (rot.is_ref()) {
5,613,687✔
173
            Table* t;
5,258,010✔
174
            if (m_index_in_group < g.m_table_accessors.size() &&
5,258,010✔
175
                (t = load_atomic(g.m_table_accessors[m_index_in_group], std::memory_order_acquire))) {
5,258,013✔
176
                m_table_key = t->get_key();
1,171,320✔
177
            }
1,171,320✔
178
            else {
4,086,690✔
179
                m_table_key = Table::get_key_direct(g.m_tables.get_alloc(), rot.get_as_ref());
4,086,690✔
180
            }
4,086,690✔
181
            return;
5,258,010✔
182
        }
5,258,010✔
183
        m_index_in_group++;
355,677✔
184
    }
355,677✔
185
    m_table_key = TableKey();
3,811,407✔
186
}
1,175,214✔
187

188
// Random access emulated on top of the forward iterator: rewind to the
// start when seeking backwards, then step forward to position p.
TableKey TableKeys::operator[](size_t p) const
{
    if (p < m_iter.m_pos) {
        m_iter = TableKeyIterator(m_iter.m_group, 0);
    }
    while (m_iter.m_pos < p) {
        ++m_iter;
    }
    return *m_iter;
}
780✔
198

199
// Number of tables in the group (cached; maintained by set_size() and the
// add/remove paths).
size_t Group::size() const noexcept
{
    return m_num_tables;
}
1,776,768✔
203

204

205
void Group::set_size() const noexcept
206
{
3,027,327✔
207
    int retval = 0;
3,027,327✔
208
    if (is_attached() && m_table_names.is_attached()) {
3,027,435✔
209
        size_t max_index = m_tables.size();
2,875,485✔
210
        REALM_ASSERT_EX(max_index < (1 << 16), max_index);
2,875,485✔
211
        for (size_t j = 0; j < max_index; ++j) {
12,071,829✔
212
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(j);
9,196,344✔
213
            if (rot.is_ref() && rot.get_as_ref()) {
9,196,344✔
214
                ++retval;
8,863,158✔
215
            }
8,863,158✔
216
        }
9,196,344✔
217
    }
2,875,485✔
218
    m_num_tables = retval;
3,027,327✔
219
}
3,027,327✔
220

221
// Build a table -> primary-key-column map from a legacy "pk" metadata
// table, whose rows pair a class name ("pk_table") with the name of its
// primary-key property ("pk_property").
std::map<TableRef, ColKey> Group::get_primary_key_columns_from_pk_table(TableRef pk_table)
{
    std::map<TableRef, ColKey> ret;
    REALM_ASSERT(pk_table);
    ColKey col_table = pk_table->get_column_key("pk_table");
    ColKey col_prop = pk_table->get_column_key("pk_property");
    for (auto pk_obj : *pk_table) {
        auto object_type = pk_obj.get<String>(col_table);
        // Stored names lack the internal class prefix; prepend it to find
        // the actual table.
        auto name = std::string(g_class_name_prefix) + std::string(object_type);
        auto table = get_table(name);
        auto pk_col_name = pk_obj.get<String>(col_prop);
        auto pk_col = table->get_column_key(pk_col_name);
        ret.emplace(table, pk_col);
    }

    return ret;
}
54✔
238

239
// Translate a slot index into the corresponding TableKey.
// Throws NoSuchTable if the slot is vacant.
TableKey Group::ndx2key(size_t ndx) const
{
    REALM_ASSERT(is_attached());
    // Fast path: a live accessor already caches the key.
    Table* accessor = load_atomic(m_table_accessors[ndx], std::memory_order_acquire);
    if (accessor)
        return accessor->get_key();

    // Slow path: read the key directly from the table's root array.
    RefOrTagged rot = m_tables.get_as_ref_or_tagged(ndx);
    if (rot.is_tagged())
        throw NoSuchTable();
    ref_type ref = rot.get_as_ref();
    REALM_ASSERT(ref);
    return Table::get_key_direct(m_tables.get_alloc(), ref);
}
5,868✔
254

255
// Translate a TableKey into its slot index, verifying that the key is
// actually present. Throws NoSuchTable on a stale or unknown key.
size_t Group::key2ndx_checked(TableKey key) const
{
    size_t idx = key2ndx(key);
    // Early out via the accessor cache.
    // Note: don't lock when accessing m_table_accessors, because if we miss a
    // concurrently introduced table accessor, we'll just fall through to the
    // slow path. Table accessors can be introduced concurrently, but never
    // removed. The following is only safe because 'm_table_accessors' will not
    // be relocated concurrently. (We aim to be safe in face of concurrent
    // access to a frozen transaction, where tables cannot be added or removed.
    // All other races are undefined behaviour.)
    if (idx < m_table_accessors.size()) {
        Table* tbl = load_atomic(m_table_accessors[idx], std::memory_order_acquire);
        if (tbl && tbl->get_key() == key)
            return idx;
    }
    // The notion of a const group as it is now, is not really useful. It is
    // linked to a distinction between a read and a write transaction. This
    // distinction is no longer a compile time aspect (it's not const anymore).
    Allocator* alloc = const_cast<SlabAlloc*>(&m_alloc);
    if (m_tables.is_attached() && idx < m_tables.size()) {
        RefOrTagged rot = m_tables.get_as_ref_or_tagged(idx);
        if (rot.is_ref() && rot.get_as_ref() && (Table::get_key_direct(*alloc, rot.get_as_ref()) == key)) {
            return idx;
        }
    }
    throw NoSuchTable();
}
2,147,583,391✔
283

284

285
int Group::get_file_format_version() const noexcept
286
{
1,198,857✔
287
    return m_file_format_version;
1,198,857✔
288
}
1,198,857✔
289

290

291
void Group::set_file_format_version(int file_format) noexcept
292
{
2,750,235✔
293
    m_file_format_version = file_format;
2,750,235✔
294
}
2,750,235✔
295

296

297
int Group::get_committed_file_format_version() const noexcept
298
{
×
299
    return m_alloc.get_committed_file_format_version();
×
300
}
×
301

302
std::optional<int> Group::fake_target_file_format;
303

304
void _impl::GroupFriend::fake_target_file_format(const std::optional<int> format) noexcept
305
{
72✔
306
    Group::fake_target_file_format = format;
72✔
307
}
72✔
308

309
// Choose the file format a new session should run under. If the returned
// version differs from `current_file_format_version`, a file format upgrade
// will be triggered.
int Group::get_target_file_format_version_for_session(int current_file_format_version,
                                                      int requested_history_type) noexcept
{
    // Test override takes absolute precedence.
    if (Group::fake_target_file_format) {
        return *Group::fake_target_file_format;
    }

    // Note: `current_file_format_version` may be zero at this time, which
    // means that the file format is not yet decided (only possible for empty
    // Realms where top-ref is zero).

    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.

    if (requested_history_type == Replication::hist_None) {
        if (current_file_format_version == 23) {
            // We are able to open this file format in RO mode without an
            // upgrade.
            return current_file_format_version;
        }
    }

    return g_current_file_format_version;
}
149,361✔
336

337
void Group::get_version_and_history_info(const Array& top, _impl::History::version_type& version, int& history_type,
338
                                         int& history_schema_version) noexcept
339
{
777,909✔
340
    using version_type = _impl::History::version_type;
777,909✔
341
    version_type version_2 = 0;
777,909✔
342
    int history_type_2 = 0;
777,909✔
343
    int history_schema_version_2 = 0;
777,909✔
344
    if (top.is_attached()) {
777,909✔
345
        if (top.size() > s_version_ndx) {
726,483✔
346
            version_2 = version_type(top.get_as_ref_or_tagged(s_version_ndx).get_as_int());
726,135✔
347
        }
726,135✔
348
        if (top.size() > s_hist_type_ndx) {
726,483✔
349
            history_type_2 = int(top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
723,099✔
350
        }
723,099✔
351
        if (top.size() > s_hist_version_ndx) {
726,483✔
352
            history_schema_version_2 = int(top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
723,072✔
353
        }
723,072✔
354
    }
726,483✔
355
    // Version 0 is not a legal initial version, so it has to be set to 1
387,534✔
356
    // instead.
387,534✔
357
    if (version_2 == 0)
777,909✔
358
        version_2 = 1;
53,766✔
359
    version = version_2;
777,909✔
360
    history_type = history_type_2;
777,909✔
361
    history_schema_version = history_schema_version_2;
777,909✔
362
}
777,909✔
363

364
int Group::get_history_schema_version() noexcept
365
{
23,571✔
366
    bool history_schema_version = (m_top.is_attached() && m_top.size() > s_hist_version_ndx);
23,571✔
367
    if (history_schema_version) {
23,571✔
368
        return int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
759✔
369
    }
759✔
370
    return 0;
22,812✔
371
}
22,812✔
372

373
uint64_t Group::get_sync_file_id() const noexcept
374
{
13,494,180✔
375
    if (m_top.is_attached() && m_top.size() > s_sync_file_id_ndx) {
13,494,180✔
376
        return uint64_t(m_top.get_as_ref_or_tagged(s_sync_file_id_ndx).get_as_int());
6,083,310✔
377
    }
6,083,310✔
378
    auto repl = get_replication();
7,410,870✔
379
    if (repl && repl->get_history_type() == Replication::hist_SyncServer) {
7,410,870✔
380
        return 1;
2,706✔
381
    }
2,706✔
382
    return 0;
7,408,164✔
383
}
7,408,164✔
384

385
// Validate that a file can be opened without an upgrade and return its
// committed file format version.
//
// It is not possible to open prior file format versions without an upgrade,
// and a Realm file cannot be upgraded when opened in this mode (we may be
// unable to write to the file), so only format 0 (empty file) and the current
// format are accepted. Throws FileFormatUpgradeRequired otherwise.
int Group::read_only_version_check(SlabAlloc& alloc, ref_type top_ref, const std::string& path)
{
    auto file_format_version = alloc.get_committed_file_format_version();

    bool file_format_ok = false;
    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.
    switch (file_format_version) {
        case 0:
            // Format "undecided" is only acceptable for an empty file.
            file_format_ok = (top_ref == 0);
            break;
        case g_current_file_format_version:
            file_format_ok = true;
            break;
    }
    if (REALM_UNLIKELY(!file_format_ok))
        throw FileAccessError(ErrorCodes::FileFormatUpgradeRequired,
                              util::format("Realm file at path '%1' cannot be opened in read-only mode because it "
                                           "has a file format version (%2) which requires an upgrade",
                                           path, file_format_version),
                              path);
    return file_format_version;
}
999✔
412

413
// Finish opening a free-standing Group after the allocator has been attached
// to a file or buffer: verify the file format, reset free-space tracking and
// attach the array accessors. The DetachGuard rolls back the allocator
// attachment if anything throws.
void Group::open(ref_type top_ref, const std::string& file_path)
{
    SlabAlloc::DetachGuard dg(m_alloc);
    m_file_format_version = read_only_version_check(m_alloc, top_ref, file_path);

    Replication::HistoryType history_type = Replication::hist_None;
    int target_file_format_version = get_target_file_format_version_for_session(m_file_format_version, history_type);
    if (m_file_format_version == 0) {
        // Empty file: adopt the session's target format directly.
        set_file_format_version(target_file_format_version);
    }
    else {
        // From a technical point of view, we could upgrade the Realm file
        // format in memory here, but since upgrading can be expensive, it is
        // currently disallowed.
        REALM_ASSERT(target_file_format_version == m_file_format_version);
    }

    // Make all dynamically allocated memory (space beyond the attached file)
    // available free-space.
    reset_free_space_tracking(); // Throws

    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
    dg.release();                                         // Do not detach after all
}
849✔
439

440
Group::~Group() noexcept
{
    // If this group accessor is detached at this point in time, it is either
    // because it is DB::m_group (m_is_shared), or it is a free-standing
    // group accessor that was never successfully opened.
    if (!m_top.is_attached())
        return;

    // Free-standing group accessor
    detach();

    // If a local allocator is set in m_local_alloc, then the destruction
    // of m_local_alloc will trigger destruction of the allocator, which will
    // verify that the allocator has been detached, so....
    if (m_local_alloc)
        m_local_alloc->detach();
}
6,216✔
457

458
// Re-map the file to its new size, refresh allocator wrappers and re-anchor
// all array accessors on the new top ref.
void Group::remap_and_update_refs(ref_type new_top_ref, size_t new_file_size, bool writable)
{
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // Track the allocator's mapping version so that ref->ptr translations
    // are refreshed when the mapping has changed.
    auto mapping_version = m_alloc.get_mapping_version();
    if (mapping_version != m_last_seen_mapping_version) {
        m_last_seen_mapping_version = mapping_version;
    }
    update_refs(new_top_ref);
}
408,636✔
470

471
void Group::validate_top_array(const Array& arr, const SlabAlloc& alloc, std::optional<size_t> read_lock_file_size,
472
                               std::optional<uint_fast64_t> read_lock_version)
473
{
2,955,954✔
474
    size_t top_size = arr.size();
2,955,954✔
475
    ref_type top_ref = arr.get_ref();
2,955,954✔
476

1,810,518✔
477
    switch (top_size) {
2,955,954✔
478
        // These are the valid sizes
479
        case 3:
1,810,365✔
480
        case 5:
1,810,365✔
481
        case 7:
1,859,685✔
482
        case 9:
1,859,712✔
483
        case 10:
1,859,829✔
484
        case 11:
2,950,605✔
485
        case 12: {
2,955,243✔
486
            ref_type table_names_ref = arr.get_as_ref_or_tagged(s_table_name_ndx).get_as_ref();
2,955,243✔
487
            ref_type tables_ref = arr.get_as_ref_or_tagged(s_table_refs_ndx).get_as_ref();
2,955,243✔
488
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
2,955,243✔
489

1,809,993✔
490
            // Logical file size must never exceed actual file size.
1,809,993✔
491
            auto file_size = alloc.get_baseline();
2,955,243✔
492
            if (logical_file_size > file_size) {
2,955,243✔
493
                std::string err = util::format("Invalid logical file size: %1, actual file size: %2, read lock file "
×
494
                                               "size: %3, read lock version: %4",
×
495
                                               logical_file_size, file_size, read_lock_file_size, read_lock_version);
×
496
                throw InvalidDatabase(err, "");
×
497
            }
×
498
            // First two entries must be valid refs pointing inside the file
1,809,993✔
499
            auto invalid_ref = [logical_file_size](ref_type ref) {
5,907,519✔
500
                return ref == 0 || (ref & 7) || ref > logical_file_size;
5,907,684✔
501
            };
5,907,519✔
502
            if (invalid_ref(table_names_ref) || invalid_ref(tables_ref)) {
2,955,324✔
503
                std::string err = util::format(
×
504
                    "Invalid top array (top_ref, [0], [1]): %1, %2, %3, read lock size: %4, read lock version: %5",
×
505
                    top_ref, table_names_ref, tables_ref, read_lock_file_size, read_lock_version);
×
506
                throw InvalidDatabase(err, "");
×
507
            }
×
508
            break;
2,955,243✔
509
        }
2,955,243✔
510
        default: {
1,809,993✔
511
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
×
512
            std::string err =
×
513
                util::format("Invalid top array size (ref: %1, array size: %2) file size: %3, read "
×
514
                             "lock size: %4, read lock version: %5",
×
515
                             top_ref, top_size, logical_file_size, read_lock_file_size, read_lock_version);
×
516
            throw InvalidDatabase(err, "");
×
517
            break;
1,809,993✔
518
        }
2,955,243✔
519
    }
2,955,954✔
520
}
2,955,954✔
521

522
void Group::attach(ref_type top_ref, bool writable, bool create_group_when_missing, size_t file_size,
523
                   uint_fast64_t version)
524
{
3,031,155✔
525
    REALM_ASSERT(!m_top.is_attached());
3,031,155✔
526
    if (create_group_when_missing)
3,031,155✔
527
        REALM_ASSERT(writable);
3,031,155✔
528

1,847,439✔
529
    // If this function throws, it must leave the group accesor in a the
1,847,439✔
530
    // unattached state.
1,847,439✔
531

1,847,439✔
532
    m_tables.detach();
3,031,155✔
533
    m_table_names.detach();
3,031,155✔
534
    m_is_writable = writable;
3,031,155✔
535

1,847,439✔
536
    if (top_ref != 0) {
3,031,155✔
537
        m_top.init_from_ref(top_ref);
2,865,303✔
538
        validate_top_array(m_top, m_alloc, file_size, version);
2,865,303✔
539
        m_table_names.init_from_parent();
2,865,303✔
540
        m_tables.init_from_parent();
2,865,303✔
541
    }
2,865,303✔
542
    else if (create_group_when_missing) {
165,852✔
543
        create_empty_group(); // Throws
14,334✔
544
    }
14,334✔
545
    m_attached = true;
3,031,155✔
546
    set_size();
3,031,155✔
547

1,847,439✔
548
    size_t sz = m_tables.is_attached() ? m_tables.size() : 0;
2,953,701✔
549
    while (m_table_accessors.size() > sz) {
3,031,257✔
550
        if (Table* t = m_table_accessors.back()) {
102✔
551
            t->detach(Table::cookie_void);
96✔
552
            recycle_table_accessor(t);
96✔
553
        }
96✔
554
        m_table_accessors.pop_back();
102✔
555
    }
102✔
556
    while (m_table_accessors.size() < sz) {
11,720,931✔
557
        m_table_accessors.emplace_back();
8,689,776✔
558
    }
8,689,776✔
559
}
3,031,155✔
560

561

562
void Group::detach() noexcept
563
{
2,752,047✔
564
    detach_table_accessors();
2,752,047✔
565
    m_table_accessors.clear();
2,752,047✔
566

1,663,848✔
567
    m_table_names.detach();
2,752,047✔
568
    m_tables.detach();
2,752,047✔
569
    m_top.detach();
2,752,047✔
570

1,663,848✔
571
    m_attached = false;
2,752,047✔
572
}
2,752,047✔
573

574
// Attach this (transaction-owned) group to a specific version of the shared
// file.
void Group::attach_shared(ref_type new_top_ref, size_t new_file_size, bool writable, VersionID version)
{
    REALM_ASSERT_3(new_top_ref, <, new_file_size);
    REALM_ASSERT(!is_attached());

    // Update the reader's view of memory first.
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // When `new_top_ref` is null, ask attach() to create a new node structure
    // for an empty group, but only during the initiation of write
    // transactions. When the transaction being initiated is a read
    // transaction, we instead have to leave array accessors m_top, m_tables,
    // and m_table_names in their detached state, as there are no underlying
    // array nodes to attach them to. In the case of write transactions, the
    // nodes have to be created, as they have to be ready for being modified.
    bool create_group_when_missing = writable;
    attach(new_top_ref, writable, create_group_when_missing, new_file_size, version.version); // Throws
}
2,751,336✔
593

594

595
void Group::detach_table_accessors() noexcept
596
{
2,750,856✔
597
    for (auto& table_accessor : m_table_accessors) {
9,028,365✔
598
        if (Table* t = table_accessor) {
9,028,365✔
599
            t->detach(Table::cookie_transaction_ended);
5,150,679✔
600
            recycle_table_accessor(t);
5,150,679✔
601
            table_accessor = nullptr;
5,150,679✔
602
        }
5,150,679✔
603
    }
9,028,365✔
604
}
2,750,856✔
605

606

607
void Group::create_empty_group()
608
{
80,655✔
609
    m_top.create(Array::type_HasRefs); // Throws
80,655✔
610
    _impl::DeepArrayDestroyGuard dg_top(&m_top);
80,655✔
611
    {
80,655✔
612
        m_table_names.create(); // Throws
80,655✔
613
        _impl::DestroyGuard<ArrayStringShort> dg(&m_table_names);
80,655✔
614
        m_top.add(m_table_names.get_ref()); // Throws
80,655✔
615
        dg.release();
80,655✔
616
    }
80,655✔
617
    {
80,655✔
618
        m_tables.create(Array::type_HasRefs); // Throws
80,655✔
619
        _impl::DestroyGuard<Array> dg(&m_tables);
80,655✔
620
        m_top.add(m_tables.get_ref()); // Throws
80,655✔
621
        dg.release();
80,655✔
622
    }
80,655✔
623
    size_t initial_logical_file_size = sizeof(SlabAlloc::Header);
80,655✔
624
    m_top.add(RefOrTagged::make_tagged(initial_logical_file_size)); // Throws
80,655✔
625
    dg_top.release();
80,655✔
626
}
80,655✔
627

628

629
// Fetch (or lazily create) the table accessor for a slot index.
Table* Group::do_get_table(size_t table_ndx)
{
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());
    // Lock-free fast path: accessor already cached.
    Table* table = load_atomic(m_table_accessors[table_ndx], std::memory_order_acquire);
    if (!table) {
        // Double-checked locking: re-probe under the mutex before creating.
        std::lock_guard<std::mutex> lock(m_accessor_mutex);
        table = m_table_accessors[table_ndx];
        if (!table)
            table = create_table_accessor(table_ndx); // Throws
    }
    return table;
}
33,504,384✔
643

644

645
// Look a table up by name; returns null when the group is unattached or the
// name is unknown.
Table* Group::do_get_table(StringData name)
{
    if (!m_table_names.is_attached())
        return 0;
    size_t table_ndx = m_table_names.find_first(name);
    if (table_ndx == not_found)
        return 0;

    return do_get_table(table_ndx); // Throws
}
8,049,516✔
656

657
// Add a new table together with its primary-key column in one step. The
// table creation and the pk column are replicated as a single
// add_class_with_primary_key instruction rather than individually.
TableRef Group::add_table_with_primary_key(StringData name, DataType pk_type, StringData pk_name, bool nullable,
                                           Table::Type table_type)
{
    check_attached();
    check_table_name_uniqueness(name);

    auto table = do_add_table(name, table_type, false);

    // Add pk column - without replication
    ColumnAttrMask attr;
    if (nullable)
        attr.set(col_attr_Nullable);
    ColKey pk_col = table->generate_col_key(ColumnType(pk_type), attr);
    table->do_insert_root_column(pk_col, ColumnType(pk_type), pk_name);
    table->do_set_primary_key_column(pk_col);

    if (Replication* repl = *get_repl())
        repl->add_class_with_primary_key(table->get_key(), name, pk_type, pk_name, nullable, table_type);

    return TableRef(table, table->m_alloc.get_instance_version());
}
136,467✔
678

679
// Create a new table, reusing the first vacant slot (tagged entry) in
// m_tables if one exists, otherwise appending. The TableKey encodes a
// per-slot reuse tag in the upper bits and the slot index in the lower 16.
Table* Group::do_add_table(StringData name, Table::Type table_type, bool do_repl)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");

    // Get new key and index: find the first empty (tagged) slot.
    uint32_t j;
    RefOrTagged rot = RefOrTagged::make_tagged(0);
    for (j = 0; j < m_tables.size(); ++j) {
        rot = m_tables.get_as_ref_or_tagged(j);
        if (!rot.is_ref())
            break;
    }
    bool gen_null_tag = (j == m_tables.size()); // new tags start at zero
    uint32_t tag = gen_null_tag ? 0 : uint32_t(rot.get_as_int());
    TableKey key = TableKey((tag << 16) | j);

    if (REALM_UNLIKELY(name.size() > max_table_name_length))
        throw InvalidArgument(ErrorCodes::InvalidName, util::format("Name too long: %1", name));

    using namespace _impl;
    size_t table_ndx = key2ndx(key);
    ref_type ref = Table::create_empty_table(m_alloc, key); // Throws
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());

    rot = RefOrTagged::make_ref(ref);
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());

    if (table_ndx == m_tables.size()) {
        m_tables.add(rot);
        m_table_names.add(name);
        // Need new slot for table accessor
        m_table_accessors.push_back(nullptr);
    }
    else {
        m_tables.set(table_ndx, rot);       // Throws
        m_table_names.set(table_ndx, name); // Throws
    }

    Replication* repl = *get_repl();
    if (do_repl && repl)
        repl->add_class(key, name, table_type);

    ++m_num_tables;

    Table* table = create_table_accessor(j);
    table->do_set_table_type(table_type);

    return table;
}
348,366✔
730

731

732
// Materialize a Table accessor for the given slot, preferring a recycled
// instance from the global two-stack FIFO over a fresh allocation.
Table* Group::create_table_accessor(size_t table_ndx)
{
    REALM_ASSERT(m_tables.size() == m_table_accessors.size());
    REALM_ASSERT(table_ndx < m_table_accessors.size());

    RefOrTagged rot = m_tables.get_as_ref_or_tagged(table_ndx);
    ref_type ref = rot.get_as_ref();
    if (ref == 0) {
        throw NoSuchTable();
    }
    Table* table = 0;
    {
        std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
        // Refill the removal stack from the insertion stack when empty
        // (reversing restores FIFO order).
        if (g_table_recycler_2.empty()) {
            while (!g_table_recycler_1.empty()) {
                auto t = g_table_recycler_1.back();
                g_table_recycler_1.pop_back();
                g_table_recycler_2.push_back(t);
            }
        }
        // Only reuse once enough accessors have accumulated; the delay makes
        // use-after-detach races more likely to be detected than to crash.
        if (g_table_recycler_2.size() + g_table_recycler_1.size() > g_table_recycling_delay) {
            table = g_table_recycler_2.back();
            table->fully_detach();
            g_table_recycler_2.pop_back();
        }
    }
    if (table) {
        table->revive(get_repl(), m_alloc, m_is_writable);
        table->init(ref, this, table_ndx, m_is_writable, is_frozen());
    }
    else {
        std::unique_ptr<Table> new_table(new Table(get_repl(), m_alloc));  // Throws
        new_table->init(ref, this, table_ndx, m_is_writable, is_frozen()); // Throws
        table = new_table.release();
    }
    table->refresh_index_accessors();
    // Must be atomic to allow concurrent probing of the m_table_accessors
    // vector (pairs with the acquire loads elsewhere in this file).
    store_atomic(m_table_accessors[table_ndx], table, std::memory_order_release);
    return table;
}
5,167,077✔
772

773

774
// Park a detached table accessor for later reuse rather than deleting it
// immediately. Recycled accessors go through a two-stage queue (see the
// accessor creation path), which delays actual destruction.
void Group::recycle_table_accessor(Table* to_be_recycled)
{
    std::lock_guard<std::mutex> guard(g_table_recycler_mutex);
    g_table_recycler_1.push_back(to_be_recycled);
}
779

780
// Remove the table with the given name.
// Throws NoSuchTable if no table of that name exists.
void Group::remove_table(StringData name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    remove_table(ndx, ndx2key(ndx)); // Throws
}
789

790

791
// Remove the table identified by `key`.
// key2ndx_checked() throws if the key does not identify a live table.
void Group::remove_table(TableKey key)
{
    check_attached();
    const size_t ndx = key2ndx_checked(key);
    remove_table(ndx, key); // Throws
}
798

799

800
// Core table-removal routine. Order matters here: columns are removed first
// (with replication disabled), the removal is replicated, the table's slot in
// m_tables is replaced by a tag carrying the next key generation, and only
// then is the accessor detached and the on-disk structure destroyed.
void Group::remove_table(size_t table_ndx, TableKey key)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    REALM_ASSERT(table_ndx < m_tables.size());
    TableRef table = get_table(key);

    // In principle we could remove a table even if it is the target of link
    // columns of other tables, however, to do that, we would have to
    // automatically remove the "offending" link columns from those other
    // tables. Such a behaviour is deemed too obscure, and we shall therefore
    // require that a removed table does not contain foreign origin backlink
    // columns.
    if (table->is_cross_table_link_target())
        throw CrossTableLinkTarget(table->get_name());

    {
        // We don't want to replicate the individual column removals along the
        // way as they're covered by the table removal
        Table::DisableReplication dr(*table);
        // Remove columns in reverse spec order to avoid index shifting.
        for (size_t i = table->get_column_count(); i > 0; --i) {
            ColKey col_key = table->spec_ndx2colkey(i - 1);
            table->remove_column(col_key);
        }
    }

    size_t prior_num_tables = m_tables.size();
    Replication* repl = *get_repl();
    if (repl)
        repl->erase_class(key, prior_num_tables); // Throws

    int64_t ref_64 = m_tables.get(table_ndx);
    REALM_ASSERT(!int_cast_has_overflow<ref_type>(ref_64));
    ref_type ref = ref_type(ref_64);

    // Replace entry in m_tables with next tag to use:
    // the tag encodes an incremented key generation so that a stale TableKey
    // for this slot can never match a table created here later.
    RefOrTagged rot = RefOrTagged::make_tagged((1 + (key.value >> 16)) & 0x7FFF);
    // Remove table
    m_tables.set(table_ndx, rot);     // Throws
    m_table_names.set(table_ndx, {}); // Throws
    m_table_accessors[table_ndx] = nullptr;
    --m_num_tables;

    table->detach(Table::cookie_removed);
    // Destroy underlying node structure
    Array::destroy_deep(ref, m_alloc);
    recycle_table_accessor(table.unchecked_ptr());
}
849

850

851
// Rename the table currently called `name` to `new_name`.
// Throws NoSuchTable if no table of that name exists.
void Group::rename_table(StringData name, StringData new_name, bool require_unique_name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    rename_table(ndx2key(ndx), new_name, require_unique_name); // Throws
}
859

860

861
// Rename the table identified by `key`. When require_unique_name is set,
// throws TableNameInUse if another table already has `new_name`.
void Group::rename_table(TableKey key, StringData new_name, bool require_unique_name)
{
    check_attached();
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    if (require_unique_name && has_table(new_name))
        throw TableNameInUse();
    const size_t ndx = key2ndx_checked(key);
    m_table_names.set(ndx, new_name);
    Replication* repl = *get_repl();
    if (repl)
        repl->rename_class(key, new_name); // Throws
}
874

875
// Resolve an ObjLink to a live object. Unresolved keys are looked up in the
// target table's tombstone tree instead of the regular cluster tree.
Obj Group::get_object(ObjLink link)
{
    auto table = get_table(link.get_table_key());
    const ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return table->m_tombstones->get(obj_key);
    return table->m_clusters.get(obj_key);
}
882

883
// Like get_object(), but returns a null Obj instead of throwing when the
// object does not exist. Unresolved keys are searched among tombstones.
Obj Group::try_get_object(ObjLink link) noexcept
{
    auto table = get_table(link.get_table_key());
    const ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return table->m_tombstones->try_get_obj(obj_key);
    return table->m_clusters.try_get_obj(obj_key);
}
890

891
// Check that an ObjLink may legally be stored: the target object must exist
// and the target table must be neither embedded nor asymmetric. A null table
// key (null link) is accepted without further checks.
void Group::validate(ObjLink link) const
{
    auto tk = link.get_table_key();
    if (!tk)
        return;

    const auto target_key = link.get_obj_key();
    auto target_table = get_table(tk);
    const ClusterTree* tree =
        target_key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
    if (!tree->is_valid(target_key)) {
        throw InvalidArgument(ErrorCodes::KeyNotFound, "Target object not found");
    }
    if (target_table->is_embedded()) {
        throw IllegalOperation("Cannot link to embedded object");
    }
    if (target_table->is_asymmetric()) {
        throw IllegalOperation("Cannot link to ephemeral object");
    }
}
909

910
// Stream the table-name array (deep, unconditionally) and return its ref.
ref_type Group::DefaultTableWriter::write_names(_impl::OutputStream& out)
{
    constexpr bool deep = true;              // write children too
    constexpr bool only_if_modified = false; // always write
    return m_group->m_table_names.write(out, deep, only_if_modified); // Throws
}
916
// Stream the array of tables (deep, unconditionally) and return its ref.
ref_type Group::DefaultTableWriter::write_tables(_impl::OutputStream& out)
{
    constexpr bool deep = true;              // write children too
    constexpr bool only_if_modified = false; // always write
    return m_group->m_tables.write(out, deep, only_if_modified); // Throws
}
922

923
// Stream the replication history (if any) and return a HistoryInfo describing
// what was written. Only sync-client/sync-server history is preserved; for
// other history types (or when history writing is disabled) an empty
// HistoryInfo is returned, carrying only the sync file ident.
auto Group::DefaultTableWriter::write_history(_impl::OutputStream& out) -> HistoryInfo
{
    bool deep = true;              // Deep
    bool only_if_modified = false; // Always
    ref_type history_ref = _impl::GroupFriend::get_history_ref(*m_group);
    HistoryInfo info;
    if (history_ref) {
        _impl::History::version_type version;
        int history_type, history_schema_version;
        // Read type/version info directly from the file's top array.
        _impl::GroupFriend::get_version_and_history_info(_impl::GroupFriend::get_alloc(*m_group),
                                                         m_group->m_top.get_ref(), version, history_type,
                                                         history_schema_version);
        REALM_ASSERT(history_type != Replication::hist_None);
        if (!m_should_write_history ||
            (history_type != Replication::hist_SyncClient && history_type != Replication::hist_SyncServer)) {
            return info; // Only sync history should be preserved when writing to a new file
        }
        info.type = history_type;
        info.version = history_schema_version;
        Array history{const_cast<Allocator&>(_impl::GroupFriend::get_alloc(*m_group))};
        history.init_from_ref(history_ref);
        info.ref = history.write(out, deep, only_if_modified); // Throws
    }
    info.sync_file_id = m_group->get_sync_file_id();
    return info;
}
949

950
void Group::write(std::ostream& out, bool pad) const
951
{
42✔
952
    DefaultTableWriter table_writer;
42✔
953
    write(out, pad, 0, table_writer);
42✔
954
}
42✔
955

956
void Group::write(std::ostream& out, bool pad_for_encryption, uint_fast64_t version_number, TableWriter& writer) const
957
{
582✔
958
    REALM_ASSERT(is_attached());
582✔
959
    writer.set_group(this);
582✔
960
    bool no_top_array = !m_top.is_attached();
582✔
961
    write(out, m_file_format_version, writer, no_top_array, pad_for_encryption, version_number); // Throws
582✔
962
}
582✔
963

964
// Stream the group into a freshly created (empty) file, optionally encrypted.
void Group::write(File& file, const char* encryption_key, uint_fast64_t version_number, TableWriter& writer) const
{
    REALM_ASSERT(file.get_size() == 0);

    file.set_encryption_key(encryption_key);

    // Choose a stream buffer of at least 1/256 of the expected payload size,
    // rounded up to a power of two, capped at 64 MiB.
    constexpr size_t upper_bound = 64 * 1024 * 1024;
    const size_t min_space = std::min(get_used_space() >> 8, upper_bound);
    size_t buffer_size = 4096;
    while (buffer_size < min_space)
        buffer_size <<= 1;

    File::Streambuf streambuf(&file, buffer_size);
    std::ostream out(&streambuf);
    out.exceptions(std::ios_base::failbit | std::ios_base::badbit);
    write(out, encryption_key != nullptr, version_number, writer);
    const int sync_status = streambuf.pubsync();
    REALM_ASSERT(sync_status == 0);
}
985

986
void Group::write(const std::string& path, const char* encryption_key, uint64_t version_number,
987
                  bool write_history) const
988
{
234✔
989
    File file;
234✔
990
    int flags = 0;
234✔
991
    file.open(path, File::access_ReadWrite, File::create_Must, flags);
234✔
992
    DefaultTableWriter table_writer(write_history);
234✔
993
    write(file, encryption_key, version_number, table_writer);
234✔
994
}
234✔
995

996

997
// Serialize the group into a newly allocated in-memory buffer.
// Ownership of the returned buffer passes to the caller.
BinaryData Group::write_to_mem() const
{
    REALM_ASSERT(is_attached());

    // The streamed form cannot exceed the total size managed by the allocator.
    const size_t max_size = m_alloc.get_total_size();

    std::unique_ptr<char[]> buffer(new (std::nothrow) char[max_size]);
    if (!buffer)
        throw Exception(ErrorCodes::OutOfMemory, "Could not allocate memory while dumping to memory");

    MemoryOutputStream out; // Throws
    out.set_buffer(buffer.get(), buffer.get() + max_size);
    write(out); // Throws
    const size_t written = out.size();
    return BinaryData(buffer.release(), written);
}
1013

1014

1015
// Core streaming serializer: writes header, payload (names, tables, optional
// free lists / history), top array and footer, in that exact order. The byte
// layout produced here is the Realm streaming file format, so the order of
// writes and the top-array slot layout must not change.
void Group::write(std::ostream& out, int file_format_version, TableWriter& table_writer, bool no_top_array,
                  bool pad_for_encryption, uint_fast64_t version_number)
{
    _impl::OutputStream out_2(out);

    // Write the file header
    SlabAlloc::Header streaming_header;
    if (no_top_array) {
        // File format 0 marks an empty file with no top array.
        file_format_version = 0;
    }
    else if (file_format_version == 0) {
        // Use current file format version
        file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    }
    SlabAlloc::init_streaming_header(&streaming_header, file_format_version);
    out_2.write(reinterpret_cast<const char*>(&streaming_header), sizeof streaming_header);

    ref_type top_ref = 0;
    size_t final_file_size = sizeof streaming_header;
    if (no_top_array) {
        // Accept version number 1 as that number is (unfortunately) also used
        // to denote the empty initial state of a Realm file.
        REALM_ASSERT(version_number == 0 || version_number == 1);
    }
    else {
        // Because we need to include the total logical file size in the
        // top-array, we have to start by writing everything except the
        // top-array, and then finally compute and write a correct version of
        // the top-array. The free-space information of the group will only be
        // included if a non-zero version number is given as parameter,
        // indicating that versioning info is to be saved. This is used from
        // DB to compact the database by writing only the live data
        // into a separate file.
        ref_type names_ref = table_writer.write_names(out_2);   // Throws
        ref_type tables_ref = table_writer.write_tables(out_2); // Throws
        SlabAlloc new_alloc;
        new_alloc.attach_empty(); // Throws
        Array top(new_alloc);
        top.create(Array::type_HasRefs); // Throws
        _impl::ShallowArrayDestroyGuard dg_top(&top);
        int_fast64_t value_1 = from_ref(names_ref);
        int_fast64_t value_2 = from_ref(tables_ref);
        top.add(value_1); // Throws
        top.add(value_2); // Throws
        top.add(0);       // Throws  (slot 2: logical file size, patched below)

        int top_size = 3;
        if (version_number) {
            TableWriter::HistoryInfo history_info = table_writer.write_history(out_2); // Throws

            // Empty free-space bookkeeping arrays: the compacted file has no
            // free space by construction.
            Array free_list(new_alloc);
            Array size_list(new_alloc);
            Array version_list(new_alloc);
            free_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_1(&free_list);
            size_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_2(&size_list);
            version_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_3(&version_list);
            bool deep = true;              // Deep
            bool only_if_modified = false; // Always
            ref_type free_list_ref = free_list.write(out_2, deep, only_if_modified);
            ref_type size_list_ref = size_list.write(out_2, deep, only_if_modified);
            ref_type version_list_ref = version_list.write(out_2, deep, only_if_modified);
            top.add(RefOrTagged::make_ref(free_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(size_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(version_list_ref));  // Throws
            top.add(RefOrTagged::make_tagged(version_number)); // Throws
            top_size = 7;

            if (history_info.type != Replication::hist_None) {
                top.add(RefOrTagged::make_tagged(history_info.type));
                top.add(RefOrTagged::make_ref(history_info.ref));
                top.add(RefOrTagged::make_tagged(history_info.version));
                top.add(RefOrTagged::make_tagged(history_info.sync_file_id));
                top_size = s_group_max_size;
                // ^ this is too large, since the evacuation point entry is not there:
                // (but the code below is self correcting)
            }
        }
        top_ref = out_2.get_ref_of_next_array();

        // Produce a preliminary version of the top array whose
        // representation is guaranteed to be able to hold the final file
        // size
        size_t max_top_byte_size = Array::get_max_byte_size(top_size);
        size_t max_final_file_size = size_t(top_ref) + max_top_byte_size;
        top.ensure_minimum_width(RefOrTagged::make_tagged(max_final_file_size)); // Throws

        // Finalize the top array by adding the projected final file size
        // to it
        size_t top_byte_size = top.get_byte_size();
        final_file_size = size_t(top_ref) + top_byte_size;
        top.set(2, RefOrTagged::make_tagged(final_file_size)); // Throws

        // Write the top array
        bool deep = false;                        // Shallow
        bool only_if_modified = false;            // Always
        top.write(out_2, deep, only_if_modified); // Throws
        REALM_ASSERT_3(size_t(out_2.get_ref_of_next_array()), ==, final_file_size);

        dg_top.reset(nullptr); // Destroy now
    }

    // encryption will pad the file to a multiple of the page, so ensure the
    // footer is aligned to the end of a page
    if (pad_for_encryption) {
#if REALM_ENABLE_ENCRYPTION
        size_t unrounded_size = final_file_size + sizeof(SlabAlloc::StreamingFooter);
        size_t rounded_size = round_up_to_page_size(unrounded_size);
        if (rounded_size != unrounded_size) {
            // Zero-filled padding up to the page boundary.
            std::unique_ptr<char[]> buffer(new char[rounded_size - unrounded_size]());
            out_2.write(buffer.get(), rounded_size - unrounded_size);
        }
#endif
    }

    // Write streaming footer
    SlabAlloc::StreamingFooter footer;
    footer.m_top_ref = top_ref;
    footer.m_magic_cookie = SlabAlloc::footer_magic_cookie;
    out_2.write(reinterpret_cast<const char*>(&footer), sizeof footer);
}
1138

1139

1140
// Re-anchor the group on a new top ref after a commit, then propagate the
// change to the direct children and every attached table accessor.
void Group::update_refs(ref_type top_ref) noexcept
{
    // After Group::commit() we will always have free space tracking
    // info, so the top array has at least 5 slots.
    REALM_ASSERT_3(m_top.size(), >=, 5);

    m_top.init_from_ref(top_ref);

    // Refresh the group-level child arrays...
    m_table_names.update_from_parent();
    m_tables.update_from_parent();

    // ...and any table accessor that is currently attached.
    for (auto& acc : m_table_accessors) {
        if (acc)
            acc->update_from_parent();
    }
}
1159

1160
bool Group::operator==(const Group& g) const
1161
{
66✔
1162
    for (auto tk : get_table_keys()) {
138✔
1163
        const StringData& table_name = get_table_name(tk);
138✔
1164

69✔
1165
        ConstTableRef table_1 = get_table(tk);
138✔
1166
        ConstTableRef table_2 = g.get_table(table_name);
138✔
1167
        if (!table_2)
138✔
1168
            return false;
12✔
1169
        if (table_1->get_primary_key_column().get_type() != table_2->get_primary_key_column().get_type()) {
126✔
1170
            return false;
×
1171
        }
×
1172
        if (table_1->is_embedded() != table_2->is_embedded())
126✔
1173
            return false;
×
1174
        if (table_1->is_embedded())
126✔
1175
            continue;
60✔
1176

33✔
1177
        if (*table_1 != *table_2)
66✔
1178
            return false;
18✔
1179
    }
66✔
1180
    return true;
51✔
1181
}
66✔
1182
void Group::schema_to_json(std::ostream& out, std::map<std::string, std::string>* opt_renames) const
1183
{
6✔
1184
    check_attached();
6✔
1185

3✔
1186
    std::map<std::string, std::string> renames;
6✔
1187
    if (opt_renames) {
6✔
1188
        renames = *opt_renames;
×
1189
    }
×
1190

3✔
1191
    out << "[" << std::endl;
6✔
1192

3✔
1193
    auto keys = get_table_keys();
6✔
1194
    int sz = int(keys.size());
6✔
1195
    for (int i = 0; i < sz; ++i) {
18✔
1196
        auto key = keys[i];
12✔
1197
        ConstTableRef table = get_table(key);
12✔
1198

6✔
1199
        table->schema_to_json(out, renames);
12✔
1200
        if (i < sz - 1)
12✔
1201
            out << ",";
6✔
1202
        out << std::endl;
12✔
1203
    }
12✔
1204

3✔
1205
    out << "]" << std::endl;
6✔
1206
}
6✔
1207

1208
void Group::to_json(std::ostream& out, size_t link_depth, std::map<std::string, std::string>* opt_renames,
1209
                    JSONOutputMode output_mode) const
1210
{
12✔
1211
    check_attached();
12✔
1212

6✔
1213
    std::map<std::string, std::string> renames;
12✔
1214
    if (opt_renames) {
12✔
1215
        renames = *opt_renames;
×
1216
    }
×
1217

6✔
1218
    out << "{" << std::endl;
12✔
1219

6✔
1220
    auto keys = get_table_keys();
12✔
1221
    bool first = true;
12✔
1222
    for (size_t i = 0; i < keys.size(); ++i) {
36✔
1223
        auto key = keys[i];
24✔
1224
        StringData name = get_table_name(key);
24✔
1225
        if (renames[name] != "")
24✔
1226
            name = renames[name];
×
1227

12✔
1228
        ConstTableRef table = get_table(key);
24✔
1229

12✔
1230
        if (!table->is_embedded()) {
24✔
1231
            if (!first)
24✔
1232
                out << ",";
12✔
1233
            out << "\"" << name << "\"";
24✔
1234
            out << ":";
24✔
1235
            table->to_json(out, link_depth, renames, output_mode);
24✔
1236
            out << std::endl;
24✔
1237
            first = false;
24✔
1238
        }
24✔
1239
    }
24✔
1240

6✔
1241
    out << "}" << std::endl;
12✔
1242
}
12✔
1243

1244
size_t Group::get_used_space() const noexcept
1245
{
558✔
1246
    if (!m_top.is_attached())
558✔
1247
        return 0;
12✔
1248

273✔
1249
    size_t used_space = (size_t(m_top.get(2)) >> 1);
546✔
1250

273✔
1251
    if (m_top.size() > 4) {
546✔
1252
        Array free_lengths(const_cast<SlabAlloc&>(m_alloc));
420✔
1253
        free_lengths.init_from_ref(ref_type(m_top.get(4)));
420✔
1254
        used_space -= size_t(free_lengths.get_sum());
420✔
1255
    }
420✔
1256

273✔
1257
    return used_space;
546✔
1258
}
546✔
1259

1260

1261
namespace {
1262
class TransactAdvancer : public _impl::NullInstructionObserver {
1263
public:
1264
    TransactAdvancer(Group&, bool& schema_changed)
1265
        : m_schema_changed(schema_changed)
1266
    {
43,938✔
1267
    }
43,938✔
1268

1269
    bool insert_group_level_table(TableKey) noexcept
1270
    {
12,225✔
1271
        m_schema_changed = true;
12,225✔
1272
        return true;
12,225✔
1273
    }
12,225✔
1274

1275
    bool erase_class(TableKey) noexcept
1276
    {
×
1277
        m_schema_changed = true;
×
1278
        return true;
×
1279
    }
×
1280

1281
    bool rename_class(TableKey) noexcept
1282
    {
×
1283
        m_schema_changed = true;
×
1284
        return true;
×
1285
    }
×
1286

1287
    bool insert_column(ColKey)
1288
    {
38,742✔
1289
        m_schema_changed = true;
38,742✔
1290
        return true;
38,742✔
1291
    }
38,742✔
1292

1293
    bool erase_column(ColKey)
1294
    {
×
1295
        m_schema_changed = true;
×
1296
        return true;
×
1297
    }
×
1298

1299
    bool rename_column(ColKey) noexcept
1300
    {
×
1301
        m_schema_changed = true;
×
1302
        return true; // No-op
×
1303
    }
×
1304

1305
private:
1306
    bool& m_schema_changed;
1307
};
1308
} // anonymous namespace
1309

1310

1311
void Group::update_allocator_wrappers(bool writable)
1312
{
4,865,472✔
1313
    m_is_writable = writable;
4,865,472✔
1314
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
9,449,382✔
1315
        auto table_accessor = m_table_accessors[i];
4,583,910✔
1316
        if (table_accessor) {
4,583,910✔
1317
            table_accessor->update_allocator_wrapper(writable);
2,845,326✔
1318
        }
2,845,326✔
1319
    }
4,583,910✔
1320
}
4,865,472✔
1321

1322
void Group::flush_accessors_for_commit()
1323
{
1,382,739✔
1324
    for (auto& acc : m_table_accessors)
1,382,739✔
1325
        if (acc)
5,575,293✔
1326
            acc->flush_for_commit();
3,997,665✔
1327
}
1,382,739✔
1328

1329
// Bring every attached table accessor back in sync with the underlying file
// after the group has moved to a new snapshot. Accessors whose table key no
// longer matches the on-disk table are detached and recycled.
void Group::refresh_dirty_accessors()
{
    // No tables array at all means the group itself is gone — drop everything.
    if (!m_tables.is_attached()) {
        m_table_accessors.clear();
        return;
    }

    // The array of Tables cannot have shrunk:
    REALM_ASSERT(m_tables.size() >= m_table_accessors.size());

    // but it may have grown - and if so, we must resize the accessor array to match
    if (m_tables.size() > m_table_accessors.size()) {
        m_table_accessors.resize(m_tables.size());
    }

    // Update all attached table accessors.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        auto& table_accessor = m_table_accessors[i];
        if (table_accessor) {
            // If the table has changed it's key in the file, it's a
            // new table. This will detach the old accessor and remove it.
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(i);
            bool same_table = false;
            if (rot.is_ref()) {
                // Slot holds a live table; compare its key with the accessor's.
                auto ref = rot.get_as_ref();
                TableKey new_key = Table::get_key_direct(m_alloc, ref);
                if (new_key == table_accessor->get_key())
                    same_table = true;
            }
            if (same_table) {
                table_accessor->refresh_accessor_tree();
            }
            else {
                // Slot is tagged (removed) or holds a different table:
                // retire the stale accessor.
                table_accessor->detach(Table::cookie_removed);
                recycle_table_accessor(table_accessor);
                m_table_accessors[i] = nullptr;
            }
        }
    }
}
1369

1370

1371
// Advance this group's accessors to the snapshot identified by new_top_ref,
// optionally scanning the transaction log `in` to detect schema changes.
void Group::advance_transact(ref_type new_top_ref, util::InputStream* in, bool writable)
{
    REALM_ASSERT(is_attached());
    // Exception safety: If this function throws, the group accessor and all of
    // its subordinate accessors are left in a state that may not be fully
    // consistent. Only minimal consistency is guaranteed (see
    // AccessorConsistencyLevels). In this case, the application is required to
    // either destroy the Group object, forcing all subordinate accessors to
    // become detached, or take some other equivalent action that involves a
    // call to Group::detach(), such as terminating the transaction in progress.
    // such actions will also lead to the detachment of all subordinate
    // accessors. Until then it is an error, and unsafe if the application
    // attempts to access the group one of its subordinate accessors.
    //
    // The purpose of this function is to refresh all attached accessors after
    // the underlying node structure has undergone arbitrary change, such as
    // when a read transaction has been advanced to a later snapshot of the
    // database.
    //
    // Initially, when this function is invoked, we cannot assume any
    // correspondence between the accessor state and the underlying node
    // structure. We can assume that the hierarchy is in a state of minimal
    // consistency, and that it can be brought to a state of structural
    // correspondence using information in the transaction logs. When structural
    // correspondence is achieved, we can reliably refresh the accessor hierarchy
    // (Table::refresh_accessor_tree()) to bring it back to a fully consistent
    // state. See AccessorConsistencyLevels.
    //
    // Much of the information in the transaction logs is not used in this
    // process, because the changes have already been applied to the underlying
    // node structure. All we need to do here is to bring the accessors back
    // into a state where they correctly reflect the underlying structure (or
    // detach them if the underlying object has been removed.)
    //
    // This is no longer needed in Core, but we need to compute "schema_changed",
    // for the benefit of ObjectStore.
    bool schema_changed = false;
    // Only parse the log when someone actually listens for schema changes.
    if (in && has_schema_change_notification_handler()) {
        TransactAdvancer advancer(*this, schema_changed);
        _impl::TransactLogParser parser; // Throws
        parser.parse(*in, advancer);     // Throws
    }

    m_top.detach();                                           // Soft detach
    bool create_group_when_missing = false;                   // See Group::attach_shared().
    attach(new_top_ref, writable, create_group_when_missing); // Throws
    refresh_dirty_accessors();                                // Throws

    if (schema_changed)
        send_schema_change_notification();
}
1422

1423
// Ensure the top array has the slots that carry history type, history ref,
// history schema version and sync file ident, growing it as needed and
// storing the given values. An existing non-None history must match the
// requested type/version.
void Group::prepare_top_for_history(int history_type, int history_schema_version, uint64_t file_ident)
{
    REALM_ASSERT(m_file_format_version >= 7);
    // Pad with zeros until the history-type slot index is reachable.
    while (m_top.size() < s_hist_type_ndx) {
        m_top.add(0); // Throws
    }

    if (m_top.size() > s_hist_version_ndx) {
        // History slots already exist — verify compatibility, then overwrite.
        int stored_history_type = int(m_top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        int stored_history_schema_version = int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        if (stored_history_type != Replication::hist_None) {
            REALM_ASSERT(stored_history_type == history_type);
            REALM_ASSERT(stored_history_schema_version == history_schema_version);
        }
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(history_type));              // Throws
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(history_schema_version)); // Throws
    }
    else {
        // No history yet
        REALM_ASSERT(m_top.size() == s_hist_type_ndx);
        ref_type history_ref = 0;                                    // No history yet
        m_top.add(RefOrTagged::make_tagged(history_type));           // Throws
        m_top.add(RefOrTagged::make_ref(history_ref));               // Throws
        m_top.add(RefOrTagged::make_tagged(history_schema_version)); // Throws
    }

    // Store (or append) the sync file ident slot.
    if (m_top.size() > s_sync_file_id_ndx) {
        m_top.set(s_sync_file_id_ndx, RefOrTagged::make_tagged(file_ident));
    }
    else {
        m_top.add(RefOrTagged::make_tagged(file_ident)); // Throws
    }
}
1456

1457
void Group::clear_history()
1458
{
36✔
1459
    bool has_history = (m_top.is_attached() && m_top.size() > s_hist_type_ndx);
36✔
1460
    if (has_history) {
36✔
1461
        auto hist_ref = m_top.get_as_ref(s_hist_ref_ndx);
12✔
1462
        Array::destroy_deep(hist_ref, m_top.get_alloc());
12✔
1463
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(Replication::hist_None)); // Throws
12✔
1464
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(0));                   // Throws
12✔
1465
        m_top.set(s_hist_ref_ndx, 0);                                                 // Throws
12✔
1466
    }
12✔
1467
}
36✔
1468

1469
#ifdef REALM_DEBUG // LCOV_EXCL_START ignore debug functions
1470

1471
class MemUsageVerifier : public Array::MemUsageHandler {
1472
public:
1473
    MemUsageVerifier(ref_type ref_begin, ref_type immutable_ref_end, ref_type mutable_ref_end, ref_type baseline)
1474
        : m_ref_begin(ref_begin)
1475
        , m_immutable_ref_end(immutable_ref_end)
1476
        , m_mutable_ref_end(mutable_ref_end)
1477
        , m_baseline(baseline)
1478
    {
1,279,404✔
1479
    }
1,279,404✔
1480
    void add_immutable(ref_type ref, size_t size)
1481
    {
12,644,385✔
1482
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
12,644,385✔
1483
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
12,644,385✔
1484
        REALM_ASSERT_3(size, >, 0);
12,644,385✔
1485
        REALM_ASSERT_3(ref, >=, m_ref_begin);
12,644,385✔
1486
        REALM_ASSERT_3(size, <=, m_immutable_ref_end - ref);
12,644,385✔
1487
        Chunk chunk;
12,644,385✔
1488
        chunk.ref = ref;
12,644,385✔
1489
        chunk.size = size;
12,644,385✔
1490
        m_chunks.push_back(chunk);
12,644,385✔
1491
    }
12,644,385✔
1492
    void add_mutable(ref_type ref, size_t size)
1493
    {
5,456,163✔
1494
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
5,456,163✔
1495
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
5,456,163✔
1496
        REALM_ASSERT_3(size, >, 0);
5,456,163✔
1497
        REALM_ASSERT_3(ref, >=, m_immutable_ref_end);
5,456,163✔
1498
        REALM_ASSERT_3(size, <=, m_mutable_ref_end - ref);
5,456,163✔
1499
        Chunk chunk;
5,456,163✔
1500
        chunk.ref = ref;
5,456,163✔
1501
        chunk.size = size;
5,456,163✔
1502
        m_chunks.push_back(chunk);
5,456,163✔
1503
    }
5,456,163✔
1504
    void add(ref_type ref, size_t size)
1505
    {
52,257,654✔
1506
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
52,257,654✔
1507
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
52,257,654✔
1508
        REALM_ASSERT_3(size, >, 0);
52,257,654✔
1509
        REALM_ASSERT_3(ref, >=, m_ref_begin);
52,257,654✔
1510
        REALM_ASSERT(size <= (ref < m_baseline ? m_immutable_ref_end : m_mutable_ref_end) - ref);
52,257,654✔
1511
        Chunk chunk;
52,257,654✔
1512
        chunk.ref = ref;
52,257,654✔
1513
        chunk.size = size;
52,257,654✔
1514
        m_chunks.push_back(chunk);
52,257,654✔
1515
    }
52,257,654✔
1516
    void add(const MemUsageVerifier& verifier)
1517
    {
1,916,718✔
1518
        m_chunks.insert(m_chunks.end(), verifier.m_chunks.begin(), verifier.m_chunks.end());
1,916,718✔
1519
    }
1,916,718✔
1520
    void handle(ref_type ref, size_t allocated, size_t) override
1521
    {
52,257,633✔
1522
        add(ref, allocated);
52,257,633✔
1523
    }
52,257,633✔
1524
    void canonicalize()
1525
    {
5,112,828✔
1526
        // Sort the chunks in order of increasing ref, then merge adjacent
2,556,570✔
1527
        // chunks while checking that there is no overlap
2,556,570✔
1528
        typedef std::vector<Chunk>::iterator iter;
5,112,828✔
1529
        iter i_1 = m_chunks.begin(), end = m_chunks.end();
5,112,828✔
1530
        iter i_2 = i_1;
5,112,828✔
1531
        sort(i_1, end);
5,112,828✔
1532
        if (i_1 != end) {
5,112,828✔
1533
            while (++i_2 != end) {
107,838,069✔
1534
                ref_type prev_ref_end = i_1->ref + i_1->size;
103,062,195✔
1535
                REALM_ASSERT_3(prev_ref_end, <=, i_2->ref);
103,062,195✔
1536
                if (i_2->ref == prev_ref_end) { // in-file
103,062,195✔
1537
                    i_1->size += i_2->size;     // Merge
69,718,677✔
1538
                }
69,718,677✔
1539
                else {
33,343,518✔
1540
                    *++i_1 = *i_2;
33,343,518✔
1541
                }
33,343,518✔
1542
            }
103,062,195✔
1543
            m_chunks.erase(i_1 + 1, end);
4,775,874✔
1544
        }
4,775,874✔
1545
    }
5,112,828✔
1546
    void clear()
1547
    {
1,916,718✔
1548
        m_chunks.clear();
1,916,718✔
1549
    }
1,916,718✔
1550
    void check_total_coverage()
1551
    {
639,699✔
1552
        REALM_ASSERT_3(m_chunks.size(), ==, 1);
639,699✔
1553
        REALM_ASSERT_3(m_chunks.front().ref, ==, m_ref_begin);
639,699✔
1554
        REALM_ASSERT_3(m_chunks.front().size, ==, m_mutable_ref_end - m_ref_begin);
639,699✔
1555
    }
639,699✔
1556

1557
private:
1558
    struct Chunk {
1559
        ref_type ref;
1560
        size_t size;
1561
        bool operator<(const Chunk& c) const
1562
        {
655,578,363✔
1563
            return ref < c.ref;
655,578,363✔
1564
        }
655,578,363✔
1565
    };
1566
    std::vector<Chunk> m_chunks;
1567
    ref_type m_ref_begin, m_immutable_ref_end, m_mutable_ref_end, m_baseline;
1568
};
1569

1570
#endif
1571

1572
// Debug-build consistency check of the whole group: verifies the allocator,
// every table, the history (when a Replication is configured), and finally
// that every byte managed by the slab allocator is accounted for exactly once
// (either in use by the tree, or on one of the free lists). No-op in
// non-debug builds.
void Group::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    m_alloc.verify();

    // A group without a top array (e.g. an empty/unattached file) has nothing
    // further to check.
    if (!m_top.is_attached()) {
        return;
    }

    // Verify tables
    {
        auto keys = get_table_keys();
        for (auto key : keys) {
            ConstTableRef table = get_table(key);
            // The table's own notion of its key must match the key we used to
            // look it up.
            REALM_ASSERT_3(table->get_key().value, ==, key.value);
            table->verify();
        }
    }

    // Verify history if present
    if (Replication* repl = *get_repl()) {
        if (auto hist = repl->_create_history_read()) {
            hist->set_group(const_cast<Group*>(this), false);
            _impl::History::version_type version = 0;
            int history_type = 0;
            int history_schema_version = 0;
            get_version_and_history_info(m_top, version, history_type, history_schema_version);
            // "No history" implies schema version 0.
            REALM_ASSERT(history_type != Replication::hist_None || history_schema_version == 0);
            ref_type hist_ref = get_history_ref(m_top);
            hist->update_from_ref_and_version(hist_ref, version);
            hist->verify();
        }
    }

    if (auto tr = dynamic_cast<const Transaction*>(this)) {
        // This is a transaction
        if (tr->get_transact_stage() == DB::TransactStage::transact_Reading) {
            // Verifying the memory cannot be done from a read transaction
            // There might be a write transaction running that has freed some
            // memory that is seen as being in use in this transaction
            return;
        }
    }
    // From here on: full memory accounting. Slot 2 of the top array holds the
    // logical file size (see the file format description).
    size_t logical_file_size = to_size_t(m_top.get_as_ref_or_tagged(2).get_as_int());
    size_t ref_begin = sizeof(SlabAlloc::Header);
    ref_type real_immutable_ref_end = logical_file_size;
    ref_type real_mutable_ref_end = m_alloc.get_total_size();
    ref_type real_baseline = m_alloc.get_baseline();
    // Fake that any empty area between the file and slab is part of the file (immutable):
    ref_type immutable_ref_end = m_alloc.align_size_to_section_boundary(real_immutable_ref_end);
    ref_type mutable_ref_end = m_alloc.align_size_to_section_boundary(real_mutable_ref_end);
    ref_type baseline = m_alloc.align_size_to_section_boundary(real_baseline);

    // Check the consistency of the allocation of used memory
    MemUsageVerifier mem_usage_1(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    m_top.report_memory_usage(mem_usage_1);
    mem_usage_1.canonicalize();

    // Check consistency of the allocation of the immutable memory that was
    // marked as free before the file was opened.
    MemUsageVerifier mem_usage_2(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    {
        // Valid top-array sizes per the file format (base, +free lists,
        // +versions, +history/sync slots).
        REALM_ASSERT_EX(m_top.size() == 3 || m_top.size() == 5 || m_top.size() == 7 || m_top.size() >= 10,
                        m_top.size());
        Allocator& alloc = m_top.get_alloc();
        // The in-file free list is stored as three parallel arrays:
        // positions, sizes and (optionally) versions.
        Array pos(alloc), len(alloc), ver(alloc);
        pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
        len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
        ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
        if (m_top.size() > s_free_pos_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
                pos.init_from_ref(ref);
        }
        if (m_top.size() > s_free_size_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
                len.init_from_ref(ref);
        }
        if (m_top.size() > s_free_version_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
                ver.init_from_ref(ref);
        }
        // Positions and sizes must be present/absent together; versions can
        // only be present when positions are.
        REALM_ASSERT(pos.is_attached() == len.is_attached());
        REALM_ASSERT(pos.is_attached() || !ver.is_attached()); // pos.is_attached() <== ver.is_attached()
        if (pos.is_attached()) {
            size_t n = pos.size();
            REALM_ASSERT_3(n, ==, len.size());
            if (ver.is_attached())
                REALM_ASSERT_3(n, ==, ver.size());
            for (size_t i = 0; i != n; ++i) {
                ref_type ref = to_ref(pos.get(i));
                size_t size_of_i = to_size_t(len.get(i));
                mem_usage_2.add_immutable(ref, size_of_i);
            }
            mem_usage_2.canonicalize();
            mem_usage_1.add(mem_usage_2);
            mem_usage_1.canonicalize();
            mem_usage_2.clear();
        }
    }

    // Check the consistency of the allocation of the immutable memory that has
    // been marked as free after the file was opened
    for (const auto& free_block : m_alloc.m_free_read_only) {
        mem_usage_2.add_immutable(free_block.first, free_block.second);
    }
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // Check the consistency of the allocation of the mutable memory that has
    // been marked as free
    m_alloc.for_all_free_entries([&](ref_type ref, size_t sz) {
        mem_usage_2.add_mutable(ref, sz);
    });
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // There may be a hole between the end of file and the beginning of the slab area.
    // We need to take that into account here.
    REALM_ASSERT_3(real_immutable_ref_end, <=, real_baseline);
    auto slab_start = immutable_ref_end;
    if (real_immutable_ref_end < slab_start) {
        // Account for the section-alignment gap as immutable space.
        ref_type ref = real_immutable_ref_end;
        size_t corrected_size = slab_start - real_immutable_ref_end;
        mem_usage_1.add_immutable(ref, corrected_size);
        mem_usage_1.canonicalize();
    }

    // At this point we have accounted for all memory managed by the slab
    // allocator
    mem_usage_1.check_total_coverage();
#endif
}
639,702✔
1710

1711
void Group::validate_primary_columns()
1712
{
480✔
1713
    auto table_keys = this->get_table_keys();
480✔
1714
    for (auto tk : table_keys) {
1,800✔
1715
        auto table = get_table(tk);
1,800✔
1716
        table->validate_primary_column();
1,800✔
1717
    }
1,800✔
1718
}
480✔
1719

1720
#ifdef REALM_DEBUG
1721

1722
// Debug helper: collect memory usage statistics for the tree below m_top.
MemStats Group::get_stats()
{
    MemStats stats;
    m_top.stats(stats);
    return stats;
}
×
1729

1730
// Debug helper: delegate to the slab allocator's own state dump.
void Group::print() const
{
    m_alloc.print();
}
×
1734

1735

1736
void Group::print_free() const
1737
{
×
1738
    Allocator& alloc = m_top.get_alloc();
×
1739
    Array pos(alloc), len(alloc), ver(alloc);
×
1740
    pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
×
1741
    len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
×
1742
    ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
×
1743
    if (m_top.size() > s_free_pos_ndx) {
×
1744
        if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
×
1745
            pos.init_from_ref(ref);
×
1746
    }
×
1747
    if (m_top.size() > s_free_size_ndx) {
×
1748
        if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
×
1749
            len.init_from_ref(ref);
×
1750
    }
×
1751
    if (m_top.size() > s_free_version_ndx) {
×
1752
        if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
×
1753
            ver.init_from_ref(ref);
×
1754
    }
×
1755

1756
    if (!pos.is_attached()) {
×
1757
        std::cout << "none\n";
×
1758
        return;
×
1759
    }
×
1760
    bool has_versions = ver.is_attached();
×
1761

1762
    size_t n = pos.size();
×
1763
    for (size_t i = 0; i != n; ++i) {
×
1764
        size_t offset = to_size_t(pos.get(i));
×
1765
        size_t size_of_i = to_size_t(len.get(i));
×
1766
        std::cout << i << ": " << offset << " " << size_of_i;
×
1767

1768
        if (has_versions) {
×
1769
            size_t version = to_size_t(ver.get(i));
×
1770
            std::cout << " " << version;
×
1771
        }
×
1772
        std::cout << "\n";
×
1773
    }
×
1774
    std::cout << "\n";
×
1775
}
×
1776
#endif
1777

1778
// LCOV_EXCL_STOP ignore debug functions
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc