• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / github_pull_request_275914

25 Sep 2023 03:10PM UTC coverage: 92.915% (+1.7%) from 91.215%
github_pull_request_275914

Pull #6073

Evergreen

jedelbo
Merge tag 'v13.21.0' into next-major

"Feature/Bugfix release"
Pull Request #6073: Merge next-major

96928 of 177706 branches covered (0.0%)

8324 of 8714 new or added lines in 122 files covered. (95.52%)

181 existing lines in 28 files now uncovered.

247505 of 266379 relevant lines covered (92.91%)

7164945.17 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.77
/src/realm/group.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <new>
20
#include <algorithm>
21
#include <fstream>
22

23
#ifdef REALM_DEBUG
24
#include <iostream>
25
#include <iomanip>
26
#endif
27

28
#include <realm/util/file_mapper.hpp>
29
#include <realm/util/memory_stream.hpp>
30
#include <realm/util/miscellaneous.hpp>
31
#include <realm/util/thread.hpp>
32
#include <realm/impl/destroy_guard.hpp>
33
#include <realm/utilities.hpp>
34
#include <realm/exceptions.hpp>
35
#include <realm/group_writer.hpp>
36
#include <realm/transaction.hpp>
37
#include <realm/replication.hpp>
38

39
using namespace realm;
40
using namespace realm::util;
41

42
namespace {

// RAII-style one-shot initializer: constructing the single static instance
// below runs cpuid detection exactly once, before main() begins.
class Initialization {
public:
    Initialization()
    {
        realm::cpuid_init();
    }
};

// File-local static whose constructor performs the CPU feature detection.
Initialization initialization;

} // anonymous namespace
55

56
// Default constructor: creates a free-standing Group backed by its own
// SlabAlloc with no file attached (attach_empty), then instantiates a new
// empty group structure (top_ref == 0) in writable mode.
Group::Group()
    : m_local_alloc(new SlabAlloc)
    , m_alloc(*m_local_alloc) // Throws
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
    m_alloc.attach_empty(); // Throws
    // No history: pick the session file format for an unversioned, empty Realm.
    m_file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    ref_type top_ref = 0; // Instantiate a new empty group
    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
}
71

72

73
// File constructor: opens an existing Realm file (no_create) read-only at the
// allocator level, optionally decrypting with `encryption_key`, then runs the
// common open() path on the file's top ref.
Group::Group(const std::string& file_path, const char* encryption_key)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();

    SlabAlloc::Config cfg;
    cfg.read_only = true;
    cfg.no_create = true;
    cfg.encryption_key = encryption_key;
    ref_type top_ref = m_alloc.attach_file(file_path, cfg); // Throws
    // Non-Transaction Groups always allow writing and simply don't allow
    // committing when opened in read-only mode
    m_alloc.set_read_only(false);

    open(top_ref, file_path);
}
93

94

95
// Buffer constructor: attaches the group to an in-memory serialized Realm.
// Ownership of the buffer is transferred to the allocator only after open()
// succeeds, so the caller keeps the buffer if opening throws.
Group::Group(BinaryData buffer, bool take_ownership)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    REALM_ASSERT(buffer.data());

    init_array_parents();
    ref_type top_ref = m_alloc.attach_buffer(buffer.data(), buffer.size()); // Throws

    open(top_ref, {}); // empty path: there is no file behind this group

    if (take_ownership)
        m_alloc.own_buffer();
}
112

113
// Shared-allocator constructor (used by transactions): the Group borrows an
// externally owned SlabAlloc (m_local_alloc stays null), so the destructor
// will not detach it. The group is left unattached; attach*() comes later.
Group::Group(SlabAlloc* alloc) noexcept
    : m_alloc(*alloc)
    , // Throws
    m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
}
122

123
namespace {

// Pool of detached Table accessors awaiting reuse. The destructor is
// unreachable by design: the recyclers below are intentionally leaked
// (allocated with `new`, never deleted) so they survive static teardown.
class TableRecycler : public std::vector<Table*> {
public:
    ~TableRecycler()
    {
        REALM_UNREACHABLE();
        // if ever enabled, remember to release Tables:
        // for (auto t : *this) {
        //    delete t;
        //}
    }
};

// We use the classic approach to construct a FIFO from two LIFO's,
// insertion is done into recycler_1, removal is done from recycler_2,
// and when recycler_2 is empty, recycler_1 is reversed into recycler_2.
// This is amortized O(1) for each entry.
auto& g_table_recycler_1 = *new TableRecycler;
auto& g_table_recycler_2 = *new TableRecycler;
// number of tables held back before being recycled. We hold back recycling
// the latest to increase the probability of detecting race conditions
// without crashing.
const static int g_table_recycling_delay = 100;
// Guards both recycler vectors; intentionally leaked like the recyclers.
auto& g_table_recycler_mutex = *new std::mutex;

} // namespace
150

151
// Pre-increment: step both the logical position and the raw slot index in the
// group, then eagerly resolve the key for the new position.
TableKeyIterator& TableKeyIterator::operator++()
{
    ++m_pos;
    ++m_index_in_group;
    load_key();
    return *this;
}
158

159
// Dereference: return the key at the current position, lazily resolving it
// if it has not been loaded yet (a null key signals "not loaded").
TableKey TableKeyIterator::operator*()
{
    if (!bool(m_table_key))
        load_key();
    return m_table_key;
}
166

167
// Resolve m_table_key for the current slot, skipping over free (tagged)
// slots in the group's table array. Sets a null TableKey when the end of the
// group is reached.
void TableKeyIterator::load_key()
{
    const Group& g = *m_group;
    size_t max_index_in_group = g.m_table_names.size();
    while (m_index_in_group < max_index_in_group) {
        RefOrTagged rot = g.m_tables.get_as_ref_or_tagged(m_index_in_group);
        if (rot.is_ref()) {
            Table* t;
            // Fast path: a live accessor already knows its key. The atomic
            // acquire load matches the release store in create_table_accessor.
            if (m_index_in_group < g.m_table_accessors.size() &&
                (t = load_atomic(g.m_table_accessors[m_index_in_group], std::memory_order_acquire))) {
                m_table_key = t->get_key();
            }
            else {
                // Slow path: read the key straight out of the table's on-disk ref.
                m_table_key = Table::get_key_direct(g.m_tables.get_alloc(), rot.get_as_ref());
            }
            return;
        }
        // Tagged slot => vacant table slot; keep scanning.
        m_index_in_group++;
    }
    m_table_key = TableKey();
}
188

189
// Random access on top of a forward-only iterator: rewind to the start if
// asked for an earlier position, then advance until position p is reached.
// Sequential access is O(1) amortized; random access degrades to O(n).
TableKey TableKeys::operator[](size_t p) const
{
    if (p < m_iter.m_pos) {
        m_iter = TableKeyIterator(m_iter.m_group, 0);
    }
    while (m_iter.m_pos < p) {
        ++m_iter;
    }
    return *m_iter;
}
199

200
/// Number of tables in the group (cached count maintained by set_size() and
/// table add/remove operations).
size_t Group::size() const noexcept
{
    return m_num_tables;
}
204

205

206
// Recompute the cached table count (m_num_tables) by scanning the m_tables
// array and counting occupied (ref-holding, non-null) slots. Yields 0 when
// the group is detached or has no table-name array.
void Group::set_size() const noexcept
{
    int retval = 0;
    if (is_attached() && m_table_names.is_attached()) {
        size_t max_index = m_tables.size();
        // Table indices are packed into the low 16 bits of a TableKey,
        // hence the hard upper bound.
        REALM_ASSERT_EX(max_index < (1 << 16), max_index);
        for (size_t j = 0; j < max_index; ++j) {
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(j);
            if (rot.is_ref() && rot.get_as_ref()) {
                ++retval;
            }
        }
    }
    m_num_tables = retval;
}
221

222
// Build a table -> primary-key-column map from a legacy "pk" metadata table,
// whose rows pair an object-type name ("pk_table") with the name of that
// type's primary-key property ("pk_property").
std::map<TableRef, ColKey> Group::get_primary_key_columns_from_pk_table(TableRef pk_table)
{
    std::map<TableRef, ColKey> ret;
    REALM_ASSERT(pk_table);
    ColKey col_table = pk_table->get_column_key("pk_table");
    ColKey col_prop = pk_table->get_column_key("pk_property");
    for (auto pk_obj : *pk_table) {
        auto object_type = pk_obj.get<String>(col_table);
        // The metadata stores bare object-type names; real table names carry
        // the class-name prefix.
        auto name = std::string(g_class_name_prefix) + std::string(object_type);
        auto table = get_table(name);
        auto pk_col_name = pk_obj.get<String>(col_prop);
        auto pk_col = table->get_column_key(pk_col_name);
        ret.emplace(table, pk_col);
    }

    return ret;
}
239

240
// Translate a table slot index into its TableKey. Uses a live accessor when
// one exists (fast path); otherwise reads the key directly from the table's
// ref. Throws NoSuchTable if the slot is vacant (tagged).
TableKey Group::ndx2key(size_t ndx) const
{
    REALM_ASSERT(is_attached());
    Table* accessor = load_atomic(m_table_accessors[ndx], std::memory_order_acquire);
    if (accessor)
        return accessor->get_key(); // fast path

    // slow path:
    RefOrTagged rot = m_tables.get_as_ref_or_tagged(ndx);
    if (rot.is_tagged())
        throw NoSuchTable();
    ref_type ref = rot.get_as_ref();
    REALM_ASSERT(ref);
    return Table::get_key_direct(m_tables.get_alloc(), ref);
}
255

256
// Translate a TableKey to its slot index, verifying the key actually
// identifies a live table (slot reuse means a stale key can map to an index
// now occupied by a different table). Throws NoSuchTable on mismatch.
size_t Group::key2ndx_checked(TableKey key) const
{
    size_t idx = key2ndx(key);
    // early out
    // note: don't lock when accessing m_table_accessors, because if we miss a concurrently introduced table
    // accessor, we'll just fall through to the slow path. Table accessors can be introduced concurrently,
    // but never removed. The following is only safe because 'm_table_accessors' will not be relocated
    // concurrently. (We aim to be safe in face of concurrent access to a frozen transaction, where tables
    // cannot be added or removed. All other races are undefined behaviour)
    if (idx < m_table_accessors.size()) {
        Table* tbl = load_atomic(m_table_accessors[idx], std::memory_order_acquire);
        if (tbl && tbl->get_key() == key)
            return idx;
    }
    // The notion of a const group as it is now, is not really
    // useful. It is linked to a distinction between a read
    // and a write transaction. This distinction is no longer
    // a compile time aspect (it's not const anymore)
    Allocator* alloc = const_cast<SlabAlloc*>(&m_alloc);
    if (m_tables.is_attached() && idx < m_tables.size()) {
        // Slow path: verify the key against the on-disk table data.
        RefOrTagged rot = m_tables.get_as_ref_or_tagged(idx);
        if (rot.is_ref() && rot.get_as_ref() && (Table::get_key_direct(*alloc, rot.get_as_ref()) == key)) {

            return idx;
        }
    }
    throw NoSuchTable();
}
284

285

286
int Group::get_file_format_version() const noexcept
287
{
1,191,927✔
288
    return m_file_format_version;
1,191,927✔
289
}
1,191,927✔
290

291

292
void Group::set_file_format_version(int file_format) noexcept
293
{
2,777,865✔
294
    m_file_format_version = file_format;
2,777,865✔
295
}
2,777,865✔
296

297

298
int Group::get_committed_file_format_version() const noexcept
299
{
×
300
    return m_alloc.get_committed_file_format_version();
×
301
}
×
302

303
// Test hook: when set (via _impl::GroupFriend::fake_target_file_format),
// overrides the target file format chosen for new sessions.
std::optional<int> Group::fake_target_file_format;
304

305
// Test-only setter for the file format override; pass std::nullopt to
// restore normal format selection.
void _impl::GroupFriend::fake_target_file_format(const std::optional<int> format) noexcept
{
    Group::fake_target_file_format = format;
}
309

310
// Decide which file format a new session should use. Returning a value
// different from `current_file_format_version` triggers a format upgrade.
int Group::get_target_file_format_version_for_session(int current_file_format_version,
                                                      int requested_history_type) noexcept
{
    // Test hook takes absolute precedence.
    if (Group::fake_target_file_format) {
        return *Group::fake_target_file_format;
    }
    // Note: This function is responsible for choosing the target file format
    // for a sessions. If it selects a file format that is different from
    // `current_file_format_version`, it will trigger a file format upgrade
    // process.

    // Note: `current_file_format_version` may be zero at this time, which means
    // that the file format it is not yet decided (only possible for empty
    // Realms where top-ref is zero).

    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.

    if (requested_history_type == Replication::hist_None) {
        if (current_file_format_version == 23) {
            // We are able to open these file formats in RO mode
            return current_file_format_version;
        }
    }

    return g_current_file_format_version;
}
337

338
// Extract version, history type and history schema version from a top array.
// Missing entries (short/old top arrays, or a detached top) yield zeros,
// except that version 0 is normalized to 1. All outputs are written through
// the reference parameters.
void Group::get_version_and_history_info(const Array& top, _impl::History::version_type& version, int& history_type,
                                         int& history_schema_version) noexcept
{
    using version_type = _impl::History::version_type;
    version_type version_2 = 0;
    int history_type_2 = 0;
    int history_schema_version_2 = 0;
    if (top.is_attached()) {
        // Each field only exists if the top array is long enough to hold it.
        if (top.size() > s_version_ndx) {
            version_2 = version_type(top.get_as_ref_or_tagged(s_version_ndx).get_as_int());
        }
        if (top.size() > s_hist_type_ndx) {
            history_type_2 = int(top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        }
        if (top.size() > s_hist_version_ndx) {
            history_schema_version_2 = int(top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        }
    }
    // Version 0 is not a legal initial version, so it has to be set to 1
    // instead.
    if (version_2 == 0)
        version_2 = 1;
    version = version_2;
    history_type = history_type_2;
    history_schema_version = history_schema_version_2;
}
364

365
int Group::get_history_schema_version() noexcept
366
{
24,111✔
367
    bool history_schema_version = (m_top.is_attached() && m_top.size() > s_hist_version_ndx);
24,111✔
368
    if (history_schema_version) {
24,111✔
369
        return int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
756✔
370
    }
756✔
371
    return 0;
23,355✔
372
}
23,355✔
373

374
// Sync client file identifier: read from the top array when present;
// otherwise 1 for a sync-server history, else 0 (no id assigned).
uint64_t Group::get_sync_file_id() const noexcept
{
    if (m_top.is_attached() && m_top.size() > s_sync_file_id_ndx) {
        return uint64_t(m_top.get_as_ref_or_tagged(s_sync_file_id_ndx).get_as_int());
    }
    auto repl = get_replication();
    if (repl && repl->get_history_type() == Replication::hist_SyncServer) {
        return 1;
    }
    return 0;
}
385

386
// Validate that the committed file format allows read-only opening and
// return it. Only format 0 (fresh, empty file) and the current format are
// acceptable; anything else would need an upgrade, which cannot happen in
// read-only mode, so a FileAccessError is thrown.
int Group::read_only_version_check(SlabAlloc& alloc, ref_type top_ref, const std::string& path)
{
    // Select file format if it is still undecided.
    auto file_format_version = alloc.get_committed_file_format_version();

    bool file_format_ok = false;
    // It is not possible to open prior file format versions without an upgrade.
    // Since a Realm file cannot be upgraded when opened in this mode
    // (we may be unable to write to the file), no earlier versions can be opened.
    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.
    switch (file_format_version) {
        case 0:
            // Format 0 is only valid for a completely empty file.
            file_format_ok = (top_ref == 0);
            break;
        case g_current_file_format_version:
            file_format_ok = true;
            break;
    }
    if (REALM_UNLIKELY(!file_format_ok))
        throw FileAccessError(ErrorCodes::FileFormatUpgradeRequired,
                              util::format("Realm file at path '%1' cannot be opened in read-only mode because it "
                                           "has a file format version (%2) which requires an upgrade",
                                           path, file_format_version),
                              path);
    return file_format_version;
}
413

594✔
414
// Common tail of the file/buffer constructors: verify the file format,
// select the session format, and attach the group to `top_ref`. The detach
// guard keeps the allocator consistent if anything below throws.
void Group::open(ref_type top_ref, const std::string& file_path)
{
    SlabAlloc::DetachGuard dg(m_alloc);
    m_file_format_version = read_only_version_check(m_alloc, top_ref, file_path);

    Replication::HistoryType history_type = Replication::hist_None;
    int target_file_format_version = get_target_file_format_version_for_session(m_file_format_version, history_type);
    if (m_file_format_version == 0) {
        // Empty file: adopt the session's chosen format outright.
        set_file_format_version(target_file_format_version);
    }
    else {
        // From a technical point of view, we could upgrade the Realm file
        // format in memory here, but since upgrading can be expensive, it is
        // currently disallowed.
        REALM_ASSERT(target_file_format_version == m_file_format_version);
    }

    // Make all dynamically allocated memory (space beyond the attached file) as
    // available free-space.
    reset_free_space_tracking(); // Throws

    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
    dg.release();                                         // Do not detach after all
}
440

522✔
441
Group::~Group() noexcept
{
    // If this group accessor is detached at this point in time, it is either
    // because it is DB::m_group (m_is_shared), or it is a free-standing
    // group accessor that was never successfully opened.
    if (!m_top.is_attached())
        return;

    // Free-standing group accessor
    detach();

    // if a local allocator is set in m_local_alloc, then the destruction
    // of m_local_alloc will trigger destruction of the allocator, which will
    // verify that the allocator has been detached, so....
    if (m_local_alloc)
        m_local_alloc->detach();
}
458

2,538✔
459
// Re-synchronize this (attached) group with a new top ref / file size:
// refresh the reader's view of the file, track mapping-version changes, and
// refresh all array accessors from the new top ref.
void Group::remap_and_update_refs(ref_type new_top_ref, size_t new_file_size, bool writable)
{
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // force update of all ref->ptr translations if the mapping has changed
    auto mapping_version = m_alloc.get_mapping_version();
    if (mapping_version != m_last_seen_mapping_version) {
        // Remember the version so a subsequent call with an unchanged
        // mapping can skip this branch.
        m_last_seen_mapping_version = mapping_version;
    }
    update_refs(new_top_ref);
}
471

194,547✔
472
void Group::validate_top_array(const Array& arr, const SlabAlloc& alloc, std::optional<size_t> read_lock_file_size,
473
                               std::optional<uint_fast64_t> read_lock_version)
474
{
1,800,807✔
475
    size_t top_size = arr.size();
2,927,286✔
476
    ref_type top_ref = arr.get_ref();
2,927,286✔
477

2,927,286✔
478
    switch (top_size) {
1,800,807✔
479
        // These are the valid sizes
1,126,479✔
480
        case 3:
1,800,492✔
481
        case 5:
1,800,771✔
482
        case 7:
1,800,771✔
483
        case 9:
1,850,946✔
484
        case 10:
1,850,946✔
485
        case 11:
1,850,946✔
486
        case 12: {
2,922,213✔
487
            ref_type table_names_ref = arr.get_as_ref_or_tagged(s_table_name_ndx).get_as_ref();
2,926,872✔
488
            ref_type tables_ref = arr.get_as_ref_or_tagged(s_table_refs_ndx).get_as_ref();
2,926,872✔
489
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
2,926,872✔
490

2,926,872✔
491
            // Logical file size must never exceed actual file size.
1,800,492✔
492
            auto file_size = alloc.get_baseline();
1,800,492✔
493
            if (logical_file_size > file_size) {
2,926,872✔
494
                std::string err = util::format("Invalid logical file size: %1, actual file size: %2, read lock file "
1,126,380✔
495
                                               "size: %3, read lock version: %4",
×
496
                                               logical_file_size, file_size, read_lock_file_size, read_lock_version);
×
497
                throw InvalidDatabase(err, "");
×
498
            }
×
499
            // First two entries must be valid refs pointing inside the file
1,800,492✔
500
            auto invalid_ref = [logical_file_size](ref_type ref) {
3,599,739✔
501
                return ref == 0 || (ref & 7) || ref > logical_file_size;
5,852,442✔
502
            };
5,852,478✔
503
            if (invalid_ref(table_names_ref) || invalid_ref(tables_ref)) {
4,053,195✔
504
                std::string err = util::format(
1,126,401✔
505
                    "Invalid top array (top_ref, [0], [1]): %1, %2, %3, read lock size: %4, read lock version: %5",
×
506
                    top_ref, table_names_ref, tables_ref, read_lock_file_size, read_lock_version);
×
507
                throw InvalidDatabase(err, "");
×
508
            }
×
509
            break;
1,800,492✔
510
        }
2,926,872✔
511
        default: {
2,926,872✔
UNCOV
512
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
✔
513
            std::string err =
×
514
                util::format("Invalid top array size (ref: %1, array size: %2) file size: %3, read "
×
515
                             "lock size: %4, read lock version: %5",
×
516
                             top_ref, top_size, logical_file_size, read_lock_file_size, read_lock_version);
×
517
            throw InvalidDatabase(err, "");
×
518
            break;
1,800,492✔
519
        }
1,800,492✔
520
    }
2,927,187✔
521
}
2,927,286✔
522

1,126,479✔
523
// Attach this group's array accessors to the node structure rooted at
// `top_ref`, or create a fresh empty group when `top_ref` is 0 and
// `create_group_when_missing` is set. Afterwards the accessor cache is
// resized to match the number of table slots.
void Group::attach(ref_type top_ref, bool writable, bool create_group_when_missing, size_t file_size,
                   uint_fast64_t version)
{
    REALM_ASSERT(!m_top.is_attached());
    if (create_group_when_missing)
        REALM_ASSERT(writable);

    // If this function throws, it must leave the group accessor in the
    // unattached state.

    m_tables.detach();
    m_table_names.detach();
    m_is_writable = writable;

    if (top_ref != 0) {
        m_top.init_from_ref(top_ref);
        // Reject corrupt top arrays before touching anything below them.
        validate_top_array(m_top, m_alloc, file_size, version);
        m_table_names.init_from_parent();
        m_tables.init_from_parent();
    }
    else if (create_group_when_missing) {
        create_empty_group(); // Throws
    }
    m_attached = true;
    set_size();

    // Shrink or grow the accessor cache to exactly one slot per table;
    // surplus accessors are detached and recycled.
    size_t sz = m_tables.is_attached() ? m_tables.size() : 0;
    while (m_table_accessors.size() > sz) {
        if (Table* t = m_table_accessors.back()) {
            t->detach(Table::cookie_void);
            recycle_table_accessor(t);
        }
        m_table_accessors.pop_back();
    }
    while (m_table_accessors.size() < sz) {
        m_table_accessors.emplace_back();
    }
}
561

1,164,264✔
562

563
// Detach the group from its node structure: first retire all table
// accessors (they reference the arrays below), then detach the arrays
// themselves, leaf-to-root.
void Group::detach() noexcept
{
    detach_table_accessors();
    m_table_accessors.clear();

    m_table_names.detach();
    m_tables.detach();
    m_top.detach();

    m_attached = false;
}
574

1,064,472✔
575
// Attach a shared (transaction) group to a specific version of the file.
// A null `new_top_ref` is only materialized into an empty group for write
// transactions; read transactions stay with detached array accessors.
void Group::attach_shared(ref_type new_top_ref, size_t new_file_size, bool writable, VersionID version)
{
    REALM_ASSERT_3(new_top_ref, <, new_file_size);
    REALM_ASSERT(!is_attached());

    // update readers view of memory
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // When `new_top_ref` is null, ask attach() to create a new node structure
    // for an empty group, but only during the initiation of write
    // transactions. When the transaction being initiated is a read transaction,
    // we instead have to leave array accessors m_top, m_tables, and
    // m_table_names in their detached state, as there are no underlying array
    // nodes to attached them to. In the case of write transactions, the nodes
    // have to be created, as they have to be ready for being modified.
    bool create_group_when_missing = writable;
    attach(new_top_ref, writable, create_group_when_missing, new_file_size, version.version); // Throws
}
594

1,061,985✔
595

596
// Detach every cached Table accessor and hand it to the recycler pool,
// nulling out the cache slot. Called when the group/transaction ends.
void Group::detach_table_accessors() noexcept
{
    for (auto& table_accessor : m_table_accessors) {
        if (Table* t = table_accessor) {
            t->detach(Table::cookie_transaction_ended);
            recycle_table_accessor(t);
            table_accessor = nullptr;
        }
    }
}
606

1,064,475✔
607

608
// Build the minimal node structure of an empty group: a top array holding
// [table-names, tables, logical-file-size]. Destroy guards unwind partially
// built arrays if any allocation throws.
void Group::create_empty_group()
{
    m_top.create(Array::type_HasRefs); // Throws
    _impl::DeepArrayDestroyGuard dg_top(&m_top);
    {
        m_table_names.create(); // Throws
        _impl::DestroyGuard<ArrayStringShort> dg(&m_table_names);
        m_top.add(m_table_names.get_ref()); // Throws
        dg.release(); // ownership now held via m_top
    }
    {
        m_tables.create(Array::type_HasRefs); // Throws
        _impl::DestroyGuard<Array> dg(&m_tables);
        m_top.add(m_tables.get_ref()); // Throws
        dg.release(); // ownership now held via m_top
    }
    // Initial logical file size is just the file header.
    size_t initial_logical_file_size = sizeof(SlabAlloc::Header);
    m_top.add(RefOrTagged::make_tagged(initial_logical_file_size)); // Throws
    dg_top.release();
}
628

39,618✔
629

630
// Fetch the Table accessor for a slot index, creating it on first access.
// Lock-free fast path via an atomic load; creation is serialized with the
// accessor mutex using the double-checked locking idiom (re-reads the slot
// under the lock before creating).
Table* Group::do_get_table(size_t table_ndx)
{
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());
    // Get table accessor from cache if it exists, else create
    Table* table = load_atomic(m_table_accessors[table_ndx], std::memory_order_acquire);
    if (!table) {
        // double-checked locking idiom
        std::lock_guard<std::mutex> lock(m_accessor_mutex);
        table = m_table_accessors[table_ndx];
        if (!table)
            table = create_table_accessor(table_ndx); // Throws
    }
    return table;
}
644

15,523,020✔
645

646
// Look a table up by name. Returns nullptr when the group has no table-name
// array (detached/empty) or no table with that name exists; otherwise
// delegates to the index-based overload (which may throw).
//
// Changes vs. original: `return 0;` -> `return nullptr;` for pointer returns
// (matches the file's existing use of nullptr), and the redundant local
// holding the result of do_get_table(table_ndx) was removed.
Table* Group::do_get_table(StringData name)
{
    if (!m_table_names.is_attached())
        return nullptr;
    size_t table_ndx = m_table_names.find_first(name);
    if (table_ndx == not_found)
        return nullptr;

    return do_get_table(table_ndx); // Throws
}
657

2,965,719✔
658
// Create a new table that has a primary-key column. The table is added
// without replication; the pk column is inserted directly; then the whole
// operation is replicated as a single add_class_with_primary_key entry.
TableRef Group::add_table_with_primary_key(StringData name, DataType pk_type, StringData pk_name, bool nullable,
                                           Table::Type table_type)
{
    check_attached();
    check_table_name_uniqueness(name);

    // do_repl=false: replication is handled once, below.
    auto table = do_add_table(name, table_type, false);

    // Add pk column - without replication
    ColumnAttrMask attr;
    if (nullable)
        attr.set(col_attr_Nullable);
    ColKey pk_col = table->generate_col_key(ColumnType(pk_type), attr);
    table->do_insert_root_column(pk_col, ColumnType(pk_type), pk_name);
    table->do_set_primary_key_column(pk_col);

    if (Replication* repl = *get_repl())
        repl->add_class_with_primary_key(table->get_key(), name, pk_type, pk_name, nullable, table_type);

    return TableRef(table, table->m_alloc.get_instance_version());
}
679

64,761✔
680
// Core table-creation routine: finds (or appends) a free slot, derives a
// TableKey embedding a reuse tag, creates the table node structure, wires up
// the name/ref arrays, optionally replicates, and returns a live accessor.
Table* Group::do_add_table(StringData name, Table::Type table_type, bool do_repl)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");

    // get new key and index
    // find first empty spot:
    uint32_t j;
    RefOrTagged rot = RefOrTagged::make_tagged(0);
    for (j = 0; j < m_tables.size(); ++j) {
        rot = m_tables.get_as_ref_or_tagged(j);
        if (!rot.is_ref())
            break; // tagged slot => vacant, reusable
    }
    bool gen_null_tag = (j == m_tables.size()); // new tags start at zero
    // Reusing a slot: take the tag stored in the vacant slot so stale keys
    // for the old table don't match the new one. Key = (tag << 16) | index.
    uint32_t tag = gen_null_tag ? 0 : uint32_t(rot.get_as_int());
    TableKey key = TableKey((tag << 16) | j);

    if (REALM_UNLIKELY(name.size() > max_table_name_length))
        throw InvalidArgument(ErrorCodes::InvalidName, util::format("Name too long: %1", name));

    using namespace _impl;
    size_t table_ndx = key2ndx(key);
    ref_type ref = Table::create_empty_table(m_alloc, key); // Throws
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());

    rot = RefOrTagged::make_ref(ref);
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());

    if (table_ndx == m_tables.size()) {
        // Appending at the end: all three parallel arrays grow together.
        m_tables.add(rot);
        m_table_names.add(name);
        // Need new slot for table accessor
        m_table_accessors.push_back(nullptr);
    }
    else {
        // Reusing a vacated slot.
        m_tables.set(table_ndx, rot);       // Throws
        m_table_names.set(table_ndx, name); // Throws
    }

    Replication* repl = *get_repl();
    if (do_repl && repl)
        repl->add_class(key, name, table_type);

    ++m_num_tables;

    Table* table = create_table_accessor(j);
    table->do_set_table_type(table_type);

    return table;
}
731

168,843✔
732

733
// Create (or revive from the recycler pool) the Table accessor for the slot
// at `table_ndx`, initialize it against the table's ref, and publish it into
// m_table_accessors with a release store so lock-free readers (see
// key2ndx_checked / load_key) observe a fully initialized accessor.
// Throws NoSuchTable when the slot holds no table.
//
// Change vs. original: `Table* table = 0;` -> `nullptr` (matches the file's
// existing nullptr usage); behavior is identical.
Table* Group::create_table_accessor(size_t table_ndx)
{
    REALM_ASSERT(m_tables.size() == m_table_accessors.size());
    REALM_ASSERT(table_ndx < m_table_accessors.size());

    RefOrTagged rot = m_tables.get_as_ref_or_tagged(table_ndx);
    ref_type ref = rot.get_as_ref();
    if (ref == 0) {
        throw NoSuchTable();
    }
    Table* table = nullptr;
    {
        std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
        // Refill the out-queue from the in-queue when empty (two-stack FIFO).
        if (g_table_recycler_2.empty()) {
            while (!g_table_recycler_1.empty()) {
                auto t = g_table_recycler_1.back();
                g_table_recycler_1.pop_back();
                g_table_recycler_2.push_back(t);
            }
        }
        // Only reuse an accessor once enough have accumulated; the hold-back
        // improves the odds of catching use-after-detach races (see
        // g_table_recycling_delay).
        if (g_table_recycler_2.size() + g_table_recycler_1.size() > g_table_recycling_delay) {
            table = g_table_recycler_2.back();
            table->fully_detach();
            g_table_recycler_2.pop_back();
        }
    }
    if (table) {
        table->revive(get_repl(), m_alloc, m_is_writable);
        table->init(ref, this, table_ndx, m_is_writable, is_frozen());
    }
    else {
        std::unique_ptr<Table> new_table(new Table(get_repl(), m_alloc));  // Throws
        new_table->init(ref, this, table_ndx, m_is_writable, is_frozen()); // Throws
        table = new_table.release();
    }
    table->refresh_index_accessors();
    // must be atomic to allow concurrent probing of the m_table_accessors vector.
    store_atomic(m_table_accessors[table_ndx], table, std::memory_order_release);
    return table;
}
773

2,288,559✔
774

775
// Park a detached table accessor on the global recycler list so a later
// lookup can revive it instead of allocating a fresh Table. The recycler
// lists are shared between all Group instances, hence the global mutex.
void Group::recycle_table_accessor(Table* to_be_recycled)
{
    std::lock_guard<std::mutex> lock(g_table_recycler_mutex);
    g_table_recycler_1.push_back(to_be_recycled);
}
5,204,208✔
780

2,288,544✔
781
// Remove the table with the given name.
// Throws NoSuchTable if no table of that name exists.
void Group::remove_table(StringData name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    remove_table(ndx, ndx2key(ndx)); // Throws
}
7,569✔
790

3,876✔
791

792
// Remove the table identified by `key`.
// key2ndx_checked() throws if the key does not denote a live table.
void Group::remove_table(TableKey key)
{
    check_attached();
    const size_t ndx = key2ndx_checked(key);
    remove_table(ndx, key);
}
174✔
799

69✔
800

801
// Remove the table at `table_ndx` (which must correspond to `key`).
// Throws LogicError if the group is read-only and CrossTableLinkTarget if
// other tables hold link columns targeting this table. On success the
// table's columns are removed, the change is replicated, the slot in
// m_tables is replaced by a tag encoding the next key version, and the
// accessor is detached and recycled.
void Group::remove_table(size_t table_ndx, TableKey key)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    REALM_ASSERT(table_ndx < m_tables.size());
    TableRef table = get_table(key);

    // In principle we could remove a table even if it is the target of link
    // columns of other tables, however, to do that, we would have to
    // automatically remove the "offending" link columns from those other
    // tables. Such a behaviour is deemed too obscure, and we shall therefore
    // require that a removed table does not contain foreign origin backlink
    // columns.
    if (table->is_cross_table_link_target())
        throw CrossTableLinkTarget(table->get_name());

    {
        // We don't want to replicate the individual column removals along the
        // way as they're covered by the table removal
        Table::DisableReplication dr(*table);
        // Remove columns back-to-front to avoid shifting spec indexes.
        for (size_t i = table->get_column_count(); i > 0; --i) {
            ColKey col_key = table->spec_ndx2colkey(i - 1);
            table->remove_column(col_key);
        }
    }

    size_t prior_num_tables = m_tables.size();
    Replication* repl = *get_repl();
    if (repl)
        repl->erase_class(key, prior_num_tables); // Throws

    int64_t ref_64 = m_tables.get(table_ndx);
    REALM_ASSERT(!int_cast_has_overflow<ref_type>(ref_64));
    ref_type ref = ref_type(ref_64);

    // Replace entry in m_tables with next tag to use:
    // the high bits of the old key are bumped so a future table created in
    // this slot gets a fresh key.
    RefOrTagged rot = RefOrTagged::make_tagged((1 + (key.value >> 16)) & 0x7FFF);
    // Remove table
    m_tables.set(table_ndx, rot);     // Throws
    m_table_names.set(table_ndx, {}); // Throws
    m_table_accessors[table_ndx] = nullptr;
    --m_num_tables;

    table->detach(Table::cookie_removed);
    // Destroy underlying node structure
    Array::destroy_deep(ref, m_alloc);
    recycle_table_accessor(table.unchecked_ptr());
}
7,725✔
850

3,936✔
851

852
// Rename the table called `name` to `new_name`.
// Throws NoSuchTable if no such table exists; the uniqueness check for the
// new name is performed by the key-based overload.
void Group::rename_table(StringData name, StringData new_name, bool require_unique_name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    rename_table(ndx2key(ndx), new_name, require_unique_name); // Throws
}
18✔
860

9✔
861

862
// Rename the table identified by `key` to `new_name`.
// Throws LogicError if the group is read-only, and TableNameInUse when a
// unique name is required but already taken. The change is replicated.
void Group::rename_table(TableKey key, StringData new_name, bool require_unique_name)
{
    check_attached();
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    if (require_unique_name && has_table(new_name))
        throw TableNameInUse();
    const size_t ndx = key2ndx_checked(key);
    m_table_names.set(ndx, new_name);
    Replication* repl = *get_repl();
    if (repl)
        repl->rename_class(key, new_name); // Throws
}
9✔
875

9✔
876
// Resolve `link` to the object it refers to. Unresolved keys are looked up
// in the target table's tombstone tree, live keys in its cluster tree.
Obj Group::get_object(ObjLink link)
{
    auto table = get_table(link.get_table_key());
    ObjKey obj_key = link.get_obj_key();
    ClusterTree* tree;
    if (obj_key.is_unresolved())
        tree = table->m_tombstones.get();
    else
        tree = &table->m_clusters;
    return tree->get(obj_key);
}
83,238✔
883

75,162✔
884
// Like get_object(), but returns a null Obj instead of throwing when the
// key is not present in the chosen tree.
Obj Group::try_get_object(ObjLink link) noexcept
{
    auto table = get_table(link.get_table_key());
    ObjKey obj_key = link.get_obj_key();
    ClusterTree* tree;
    if (obj_key.is_unresolved())
        tree = table->m_tombstones.get();
    else
        tree = &table->m_clusters;
    return tree->try_get_obj(obj_key);
}
66,348✔
891

892
// Verify that `link` may legally be stored: the target object must exist,
// and the target table must be neither embedded nor asymmetric. A null
// table key (null link) is always valid.
void Group::validate(ObjLink link) const
{
    auto tk = link.get_table_key();
    if (!tk)
        return;

    auto target_key = link.get_obj_key();
    auto target_table = get_table(tk);
    const ClusterTree* tree =
        target_key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
    if (!tree->is_valid(target_key))
        throw InvalidArgument(ErrorCodes::KeyNotFound, "Target object not found");
    if (target_table->is_embedded())
        throw IllegalOperation("Cannot link to embedded object");
    if (target_table->is_asymmetric())
        throw IllegalOperation("Cannot link to ephemeral object");
}
12,549✔
910

6,294✔
911
// Stream the table-name array to `out` (deep, unconditional) and return
// the ref of the written copy.
ref_type Group::DefaultTableWriter::write_names(_impl::OutputStream& out)
{
    constexpr bool deep = true;              // Deep
    constexpr bool only_if_modified = false; // Always
    return m_group->m_table_names.write(out, deep, only_if_modified); // Throws
}
462✔
917
// Stream the array of tables to `out` (deep, unconditional) and return
// the ref of the written copy.
ref_type Group::DefaultTableWriter::write_tables(_impl::OutputStream& out)
{
    constexpr bool deep = true;              // Deep
    constexpr bool only_if_modified = false; // Always
    return m_group->m_tables.write(out, deep, only_if_modified); // Throws
}
462✔
923

231✔
924
// Stream the replication history to `out`, if any should be preserved.
// Returns a HistoryInfo whose `ref` stays 0 when no history is written —
// either because the file carries none, history writing was disabled, or
// the history type is not a sync history (only sync histories survive a
// write to a new file). `sync_file_id` is filled in unconditionally.
auto Group::DefaultTableWriter::write_history(_impl::OutputStream& out) -> HistoryInfo
{
    bool deep = true;              // Deep
    bool only_if_modified = false; // Always
    ref_type history_ref = _impl::GroupFriend::get_history_ref(*m_group);
    HistoryInfo info;
    if (history_ref) {
        _impl::History::version_type version;
        int history_type, history_schema_version;
        _impl::GroupFriend::get_version_and_history_info(_impl::GroupFriend::get_alloc(*m_group),
                                                         m_group->m_top.get_ref(), version, history_type,
                                                         history_schema_version);
        REALM_ASSERT(history_type != Replication::hist_None);
        if (!m_should_write_history ||
            (history_type != Replication::hist_SyncClient && history_type != Replication::hist_SyncServer)) {
            return info; // Only sync history should be preserved when writing to a new file
        }
        info.type = history_type;
        info.version = history_schema_version;
        Array history{const_cast<Allocator&>(_impl::GroupFriend::get_alloc(*m_group))};
        history.init_from_ref(history_ref);
        info.ref = history.write(out, deep, only_if_modified); // Throws
    }
    info.sync_file_id = m_group->get_sync_file_id();
    return info;
}
156✔
950

114✔
951
void Group::write(std::ostream& out, bool pad) const
952
{
21✔
953
    DefaultTableWriter table_writer;
42✔
954
    write(out, pad, 0, table_writer);
42✔
955
}
42✔
956

21✔
957
void Group::write(std::ostream& out, bool pad_for_encryption, uint_fast64_t version_number, TableWriter& writer) const
958
{
237✔
959
    REALM_ASSERT(is_attached());
474✔
960
    writer.set_group(this);
474✔
961
    bool no_top_array = !m_top.is_attached();
474✔
962
    write(out, m_file_format_version, writer, no_top_array, pad_for_encryption, version_number); // Throws
474✔
963
}
474✔
964

237✔
965
// Stream the group into an (empty) file, optionally encrypted.
//
// The stream buffer is sized to at least 1/256 of the space the data is
// expected to occupy, capped at 64 MiB, so large groups are written with
// few syscalls without over-allocating for small ones.
//
// `encryption_key` may be null (no encryption). `writer` supplies the
// table/name/history payloads.
void Group::write(File& file, const char* encryption_key, uint_fast64_t version_number, TableWriter& writer) const
{
    REALM_ASSERT(file.get_size() == 0);

    file.set_encryption_key(encryption_key);

    // The aim is that the buffer size should be at least 1/256 of needed size but less than 64 Mb
    constexpr size_t upper_bound = 64 * 1024 * 1024;
    size_t min_space = std::min(get_used_space() >> 8, upper_bound);
    size_t buffer_size = 4096;
    while (buffer_size < min_space) {
        buffer_size <<= 1;
    }
    File::Streambuf streambuf(&file, buffer_size);

    std::ostream out(&streambuf);
    out.exceptions(std::ios_base::failbit | std::ios_base::badbit);
    // Pad for encryption only when a key is actually in use.
    // (idiom fix: compare the pointer against nullptr, not the literal 0)
    write(out, encryption_key != nullptr, version_number, writer);
    int sync_status = streambuf.pubsync();
    REALM_ASSERT(sync_status == 0);
}
432✔
986

216✔
987
void Group::write(const std::string& path, const char* encryption_key, uint64_t version_number,
988
                  bool write_history) const
989
{
117✔
990
    File file;
234✔
991
    int flags = 0;
234✔
992
    file.open(path, File::access_ReadWrite, File::create_Must, flags);
234✔
993
    DefaultTableWriter table_writer(write_history);
234✔
994
    write(file, encryption_key, version_number, table_writer);
234✔
995
}
234✔
996

117✔
997

998
// Serialize the group into a heap buffer and return it as BinaryData.
// Ownership of the buffer passes to the caller via the returned object.
BinaryData Group::write_to_mem() const
{
    REALM_ASSERT(is_attached());

    // The streamed form can never exceed the total size of the attached
    // file/buffer, so that is a safe upper bound for the allocation.
    const size_t max_size = m_alloc.get_total_size();

    std::unique_ptr<char[]> buffer(new (std::nothrow) char[max_size]);
    if (!buffer)
        throw Exception(ErrorCodes::OutOfMemory, "Could not allocate memory while dumping to memory");
    MemoryOutputStream out; // Throws
    out.set_buffer(buffer.get(), buffer.get() + max_size);
    write(out); // Throws
    const size_t bytes_written = out.size();
    return BinaryData(buffer.release(), bytes_written);
}
42✔
1014

21✔
1015

1016
// Core streaming writer: emits header, payload, optional free-space /
// history metadata, the top array, optional encryption padding, and the
// streaming footer. With `no_top_array` only a format-0 header and a
// footer with top_ref == 0 are produced (empty Realm).
void Group::write(std::ostream& out, int file_format_version, TableWriter& table_writer, bool no_top_array,
                  bool pad_for_encryption, uint_fast64_t version_number)
{
    _impl::OutputStream out_2(out);

    // Write the file header
    SlabAlloc::Header streaming_header;
    if (no_top_array) {
        file_format_version = 0;
    }
    else if (file_format_version == 0) {
        // Use current file format version
        file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    }
    SlabAlloc::init_streaming_header(&streaming_header, file_format_version);
    out_2.write(reinterpret_cast<const char*>(&streaming_header), sizeof streaming_header);

    ref_type top_ref = 0;
    size_t final_file_size = sizeof streaming_header;
    if (no_top_array) {
        // Accept version number 1 as that number is (unfortunately) also used
        // to denote the empty initial state of a Realm file.
        REALM_ASSERT(version_number == 0 || version_number == 1);
    }
    else {
        // Because we need to include the total logical file size in the
        // top-array, we have to start by writing everything except the
        // top-array, and then finally compute and write a correct version of
        // the top-array. The free-space information of the group will only be
        // included if a non-zero version number is given as parameter,
        // indicating that versioning info is to be saved. This is used from
        // DB to compact the database by writing only the live data
        // into a separate file.
        ref_type names_ref = table_writer.write_names(out_2);   // Throws
        ref_type tables_ref = table_writer.write_tables(out_2); // Throws
        // Build the top array in a scratch allocator; it is destroyed again
        // once written (see dg_top below).
        SlabAlloc new_alloc;
        new_alloc.attach_empty(); // Throws
        Array top(new_alloc);
        top.create(Array::type_HasRefs); // Throws
        _impl::ShallowArrayDestroyGuard dg_top(&top);
        int_fast64_t value_1 = from_ref(names_ref);
        int_fast64_t value_2 = from_ref(tables_ref);
        top.add(value_1); // Throws
        top.add(value_2); // Throws
        top.add(0);       // Throws (slot 2: final file size, patched below)

        int top_size = 3;
        if (version_number) {
            TableWriter::HistoryInfo history_info = table_writer.write_history(out_2); // Throws

            // Emit empty free-space tracking arrays (a freshly written file
            // has no free space).
            Array free_list(new_alloc);
            Array size_list(new_alloc);
            Array version_list(new_alloc);
            free_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_1(&free_list);
            size_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_2(&size_list);
            version_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_3(&version_list);
            bool deep = true;              // Deep
            bool only_if_modified = false; // Always
            ref_type free_list_ref = free_list.write(out_2, deep, only_if_modified);
            ref_type size_list_ref = size_list.write(out_2, deep, only_if_modified);
            ref_type version_list_ref = version_list.write(out_2, deep, only_if_modified);
            top.add(RefOrTagged::make_ref(free_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(size_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(version_list_ref));  // Throws
            top.add(RefOrTagged::make_tagged(version_number)); // Throws
            top_size = 7;

            if (history_info.type != Replication::hist_None) {
                top.add(RefOrTagged::make_tagged(history_info.type));
                top.add(RefOrTagged::make_ref(history_info.ref));
                top.add(RefOrTagged::make_tagged(history_info.version));
                top.add(RefOrTagged::make_tagged(history_info.sync_file_id));
                top_size = s_group_max_size;
                // ^ this is too large, since the evacuation point entry is not there:
                // (but the code below is self correcting)
            }
        }
        top_ref = out_2.get_ref_of_next_array();

        // Produce a preliminary version of the top array whose
        // representation is guaranteed to be able to hold the final file
        // size
        size_t max_top_byte_size = Array::get_max_byte_size(top_size);
        size_t max_final_file_size = size_t(top_ref) + max_top_byte_size;
        top.ensure_minimum_width(RefOrTagged::make_tagged(max_final_file_size)); // Throws

        // Finalize the top array by adding the projected final file size
        // to it
        size_t top_byte_size = top.get_byte_size();
        final_file_size = size_t(top_ref) + top_byte_size;
        top.set(2, RefOrTagged::make_tagged(final_file_size)); // Throws

        // Write the top array
        bool deep = false;                        // Shallow
        bool only_if_modified = false;            // Always
        top.write(out_2, deep, only_if_modified); // Throws
        REALM_ASSERT_3(size_t(out_2.get_ref_of_next_array()), ==, final_file_size);

        dg_top.reset(nullptr); // Destroy now
    }

    // encryption will pad the file to a multiple of the page, so ensure the
    // footer is aligned to the end of a page
    if (pad_for_encryption) {
#if REALM_ENABLE_ENCRYPTION
        size_t unrounded_size = final_file_size + sizeof(SlabAlloc::StreamingFooter);
        size_t rounded_size = round_up_to_page_size(unrounded_size);
        if (rounded_size != unrounded_size) {
            // Zero-filled padding up to the page boundary.
            std::unique_ptr<char[]> buffer(new char[rounded_size - unrounded_size]());
            out_2.write(buffer.get(), rounded_size - unrounded_size);
        }
#endif
    }

    // Write streaming footer
    SlabAlloc::StreamingFooter footer;
    footer.m_top_ref = top_ref;
    footer.m_magic_cookie = SlabAlloc::footer_magic_cookie;
    out_2.write(reinterpret_cast<const char*>(&footer), sizeof footer);
}
474✔
1139

237✔
1140

1141
// Re-anchor the accessor hierarchy on a new top ref after a commit.
void Group::update_refs(ref_type top_ref) noexcept
{
    // After Group::commit() the top array always carries free-space
    // tracking info, i.e. at least 5 slots.
    REALM_ASSERT_3(m_top.size(), >=, 5);

    m_top.init_from_ref(top_ref);

    // Re-sync the direct children of the top array...
    m_table_names.update_from_parent();
    m_tables.update_from_parent();

    // ...and every table accessor that is currently attached.
    for (auto& acc : m_table_accessors) {
        if (acc)
            acc->update_from_parent();
    }
}
854,856✔
1160

194,547✔
1161
// Compare two groups table by table: for each table in *this, a table of
// the same name must exist in `g` with matching primary-key column type,
// matching embeddedness, and (for non-embedded tables) equal contents.
// NOTE(review): only tables present in *this are examined — a table that
// exists only in `g` does not make the groups compare unequal. Confirm
// this asymmetry is intended by callers.
bool Group::operator==(const Group& g) const
{
    for (auto tk : get_table_keys()) {
        const StringData& table_name = get_table_name(tk);

        ConstTableRef table_1 = get_table(tk);
        ConstTableRef table_2 = g.get_table(table_name);
        if (!table_2)
            return false;
        if (table_1->get_primary_key_column().get_type() != table_2->get_primary_key_column().get_type()) {
            return false;
        }
        if (table_1->is_embedded() != table_2->is_embedded())
            return false;
        // Embedded tables are compared implicitly through their parents.
        if (table_1->is_embedded())
            continue;

        if (*table_1 != *table_2)
            return false;
    }
    return true;
}
51✔
1183
void Group::schema_to_json(std::ostream& out, std::map<std::string, std::string>* opt_renames) const
33✔
1184
{
3✔
1185
    check_attached();
228✔
1186

228✔
1187
    std::map<std::string, std::string> renames;
9✔
1188
    if (opt_renames) {
3✔
1189
        renames = *opt_renames;
219✔
1190
    }
1191

222✔
1192
    out << "[" << std::endl;
159✔
1193

159✔
1194
    auto keys = get_table_keys();
159✔
1195
    int sz = int(keys.size());
159✔
1196
    for (int i = 0; i < sz; ++i) {
9✔
1197
        auto key = keys[i];
225✔
1198
        ConstTableRef table = get_table(key);
225✔
1199

6✔
1200
        table->schema_to_json(out, renames);
6✔
1201
        if (i < sz - 1)
6✔
1202
            out << ",";
3✔
1203
        out << std::endl;
6✔
1204
    }
6✔
1205

3✔
1206
    out << "]" << std::endl;
24,420✔
1207
}
24,420✔
1208

1209
void Group::to_json(std::ostream& out, size_t link_depth, std::map<std::string, std::string>* opt_renames,
1210
                    JSONOutputMode output_mode) const
5,637✔
1211
{
5,643✔
1212
    check_attached();
5,643✔
1213

5,643✔
1214
    std::map<std::string, std::string> renames;
6✔
1215
    if (opt_renames) {
6✔
1216
        renames = *opt_renames;
×
1217
    }
×
1218

6✔
1219
    out << "{" << std::endl;
6✔
1220

6✔
1221
    auto keys = get_table_keys();
6✔
1222
    bool first = true;
6✔
1223
    for (size_t i = 0; i < keys.size(); ++i) {
18✔
1224
        auto key = keys[i];
12✔
1225
        StringData name = get_table_name(key);
12✔
1226
        if (renames[name] != "")
12✔
1227
            name = renames[name];
1228

17,850✔
1229
        ConstTableRef table = get_table(key);
17,850✔
1230

17,850✔
1231
        if (!table->is_embedded()) {
17,850✔
1232
            if (!first)
12✔
1233
                out << ",";
6✔
1234
            out << "\"" << name << "\"";
12✔
1235
            out << ":";
12✔
1236
            table->to_json(out, link_depth, renames, output_mode);
12✔
1237
            out << std::endl;
12✔
1238
            first = false;
12✔
1239
        }
12✔
1240
    }
12✔
1241

6✔
1242
    out << "}" << std::endl;
6✔
1243
}
6✔
1244

1245
// Return the number of bytes of the file that carry live data: the logical
// file size minus the total size of tracked free blocks.
size_t Group::get_used_space() const noexcept
{
    // Unattached group => nothing stored.
    if (!m_top.is_attached())
        return 0;

    // Slot 2 of the top array holds the logical file size as a tagged
    // integer; shifting right by one strips the tag bit.
    size_t used_space = (size_t(m_top.get(2)) >> 1);

    if (m_top.size() > 4) {
        // Slot 4 refs the array of free-block lengths; subtract their sum.
        Array free_lengths(const_cast<SlabAlloc&>(m_alloc));
        free_lengths.init_from_ref(ref_type(m_top.get(4)));
        used_space -= size_t(free_lengths.get_sum());
    }

    return used_space;
}
1,735,116✔
1260

1,650,276✔
1261

1262
namespace {
// Instruction observer fed to the transaction-log parser by
// Group::advance_transact(). It ignores all instructions except the
// schema-changing ones below, for which it sets `schema_changed` so the
// group can fire its schema-change notification afterwards. The method
// signatures must match what _impl::TransactLogParser dispatches to;
// each returns true to continue parsing.
class TransactAdvancer : public _impl::NullInstructionObserver {
public:
    TransactAdvancer(Group&, bool& schema_changed)
        : m_schema_changed(schema_changed)
    {
    }

    bool insert_group_level_table(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool erase_class(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool rename_class(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool insert_column(ColKey)
    {
        m_schema_changed = true;
        return true;
    }

    bool erase_column(ColKey)
    {
        m_schema_changed = true;
        return true;
    }

    bool rename_column(ColKey) noexcept
    {
        m_schema_changed = true;
        return true; // No-op
    }

private:
    // Reference to the caller's flag; set when any schema-changing
    // instruction is seen.
    bool& m_schema_changed;
};
} // anonymous namespace
1310

1311

1312
void Group::update_allocator_wrappers(bool writable)
99,861✔
1313
{
7,518,810✔
1314
    m_is_writable = writable;
7,418,949✔
1315
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
14,267,871✔
1316
        auto table_accessor = m_table_accessors[i];
6,848,922✔
1317
        if (table_accessor) {
6,848,922✔
1318
            table_accessor->update_allocator_wrapper(writable);
6,048,603✔
1319
        }
6,048,603✔
1320
    }
6,848,922✔
1321
}
7,418,949✔
1322

1323
void Group::flush_accessors_for_commit()
1324
{
685,839✔
1325
    for (auto& acc : m_table_accessors)
685,839✔
1326
        if (acc)
2,778,324✔
1327
            acc->flush_for_commit();
1,991,811✔
1328
}
685,839✔
1329

1330
// Bring all attached table accessors back in sync with the underlying
// node structure after the group has moved to a new snapshot. Accessors
// whose table key no longer matches the on-file table (i.e. the slot now
// holds a different/removed table) are detached and recycled.
void Group::refresh_dirty_accessors()
{
    // No table array at all => drop every accessor.
    if (!m_tables.is_attached()) {
        m_table_accessors.clear();
        return;
    }

    // The array of Tables cannot have shrunk:
    REALM_ASSERT(m_tables.size() >= m_table_accessors.size());

    // but it may have grown - and if so, we must resize the accessor array to match
    if (m_tables.size() > m_table_accessors.size()) {
        m_table_accessors.resize(m_tables.size());
    }

    // Update all attached table accessors.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        auto& table_accessor = m_table_accessors[i];
        if (table_accessor) {
            // If the table has changed its key in the file, it's a
            // new table. This will detach the old accessor and remove it.
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(i);
            bool same_table = false;
            if (rot.is_ref()) {
                auto ref = rot.get_as_ref();
                TableKey new_key = Table::get_key_direct(m_alloc, ref);
                if (new_key == table_accessor->get_key())
                    same_table = true;
            }
            if (same_table) {
                table_accessor->refresh_accessor_tree();
            }
            else {
                table_accessor->detach(Table::cookie_removed);
                recycle_table_accessor(table_accessor);
                m_table_accessors[i] = nullptr;
            }
        }
    }
}
116,397✔
1370

34,761✔
1371

102✔
1372
// Advance this group to the snapshot identified by `new_top_ref`,
// optionally scanning the transaction log `in` for schema changes so the
// schema-change notification can be delivered.
void Group::advance_transact(ref_type new_top_ref, util::InputStream* in, bool writable)
{
    REALM_ASSERT(is_attached());
    // Exception safety: If this function throws, the group accessor and all of
    // its subordinate accessors are left in a state that may not be fully
    // consistent. Only minimal consistency is guaranteed (see
    // AccessorConsistencyLevels). In this case, the application is required to
    // either destroy the Group object, forcing all subordinate accessors to
    // become detached, or take some other equivalent action that involves a
    // call to Group::detach(), such as terminating the transaction in progress.
    // such actions will also lead to the detachment of all subordinate
    // accessors. Until then it is an error, and unsafe if the application
    // attempts to access the group one of its subordinate accessors.
    //
    // The purpose of this function is to refresh all attached accessors after
    // the underlying node structure has undergone arbitrary change, such as
    // when a read transaction has been advanced to a later snapshot of the
    // database.
    //
    // Initially, when this function is invoked, we cannot assume any
    // correspondence between the accessor state and the underlying node
    // structure. We can assume that the hierarchy is in a state of minimal
    // consistency, and that it can be brought to a state of structural
    // correspondence using information in the transaction logs. When structural
    // correspondence is achieved, we can reliably refresh the accessor hierarchy
    // (Table::refresh_accessor_tree()) to bring it back to a fully consistent
    // state. See AccessorConsistencyLevels.
    //
    // Much of the information in the transaction logs is not used in this
    // process, because the changes have already been applied to the underlying
    // node structure. All we need to do here is to bring the accessors back
    // into a state where they correctly reflect the underlying structure (or
    // detach them if the underlying object has been removed.)
    //
    // This is no longer needed in Core, but we need to compute "schema_changed",
    // for the benefit of ObjectStore.
    bool schema_changed = false;
    // Only parse the log when someone actually listens for schema changes.
    if (in && has_schema_change_notification_handler()) {
        TransactAdvancer advancer(*this, schema_changed);
        _impl::TransactLogParser parser; // Throws
        parser.parse(*in, advancer);     // Throws
    }

    m_top.detach();                                           // Soft detach
    bool create_group_when_missing = false;                   // See Group::attach_shared().
    attach(new_top_ref, writable, create_group_when_missing); // Throws
    refresh_dirty_accessors();                                // Throws

    if (schema_changed)
        send_schema_change_notification();
}
6,458,325✔
1423

6,341,829✔
1424
// Ensure the top array carries the history-related slots (history type,
// history ref, history schema version, sync file id), creating them when
// absent and updating them when present. An existing history must match
// the requested type and schema version.
void Group::prepare_top_for_history(int history_type, int history_schema_version, uint64_t file_ident)
{
    REALM_ASSERT(m_file_format_version >= 7);
    // Pad the top array with zeros until the history-type slot is next.
    while (m_top.size() < s_hist_type_ndx) {
        m_top.add(0); // Throws
    }

    if (m_top.size() > s_hist_version_ndx) {
        // History slots already exist — verify and overwrite.
        int stored_history_type = int(m_top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        int stored_history_schema_version = int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        if (stored_history_type != Replication::hist_None) {
            REALM_ASSERT(stored_history_type == history_type);
            REALM_ASSERT(stored_history_schema_version == history_schema_version);
        }
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(history_type));              // Throws
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(history_schema_version)); // Throws
    }
    else {
        // No history yet
        REALM_ASSERT(m_top.size() == s_hist_type_ndx);
        ref_type history_ref = 0;                                    // No history yet
        m_top.add(RefOrTagged::make_tagged(history_type));           // Throws
        m_top.add(RefOrTagged::make_ref(history_ref));               // Throws
        m_top.add(RefOrTagged::make_tagged(history_schema_version)); // Throws
    }

    // Store (or append) the sync file identifier.
    if (m_top.size() > s_sync_file_id_ndx) {
        m_top.set(s_sync_file_id_ndx, RefOrTagged::make_tagged(file_ident));
    }
    else {
        m_top.add(RefOrTagged::make_tagged(file_ident)); // Throws
    }
}
33,666✔
1457

958,575✔
1458
void Group::clear_history()
958,575✔
1459
{
958,593✔
1460
    bool has_history = (m_top.is_attached() && m_top.size() > s_hist_type_ndx);
18✔
1461
    if (has_history) {
26,419,068✔
1462
        auto hist_ref = m_top.get_as_ref(s_hist_ref_ndx);
26,419,056✔
1463
        Array::destroy_deep(hist_ref, m_top.get_alloc());
26,419,056✔
1464
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(Replication::hist_None)); // Throws
6✔
1465
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(0));                   // Throws
2,556,981✔
1466
        m_top.set(s_hist_ref_ndx, 0);                                                 // Throws
6✔
1467
    }
6✔
1468
}
2,556,993✔
1469

2,556,975✔
1470
#ifdef REALM_DEBUG // LCOV_EXCL_START ignore debug functions
2,556,975✔
1471

2,556,975✔
1472
class MemUsageVerifier : public Array::MemUsageHandler {
2,556,975✔
1473
public:
54,342,831✔
1474
    MemUsageVerifier(ref_type ref_begin, ref_type immutable_ref_end, ref_type mutable_ref_end, ref_type baseline)
51,952,293✔
1475
        : m_ref_begin(ref_begin)
51,952,293✔
1476
        , m_immutable_ref_end(immutable_ref_end)
51,952,293✔
1477
        , m_mutable_ref_end(mutable_ref_end)
35,180,853✔
1478
        , m_baseline(baseline)
35,180,853✔
1479
    {
17,411,226✔
1480
    }
17,411,226✔
1481
    void add_immutable(ref_type ref, size_t size)
16,771,440✔
1482
    {
58,333,080✔
1483
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
8,771,325✔
1484
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
8,771,325✔
1485
        REALM_ASSERT_3(size, >, 0);
8,937,762✔
1486
        REALM_ASSERT_3(ref, >=, m_ref_begin);
6,380,787✔
1487
        REALM_ASSERT_3(size, <=, m_immutable_ref_end - ref);
7,339,362✔
1488
        Chunk chunk;
7,339,362✔
1489
        chunk.ref = ref;
7,339,362✔
1490
        chunk.size = size;
6,380,787✔
1491
        m_chunks.push_back(chunk);
6,700,701✔
1492
    }
6,700,701✔
1493
    void add_mutable(ref_type ref, size_t size)
319,914✔
1494
    {
3,053,343✔
1495
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
3,053,343✔
1496
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
2,733,429✔
1497
        REALM_ASSERT_3(size, >, 0);
2,733,429✔
1498
        REALM_ASSERT_3(ref, >=, m_immutable_ref_end);
2,733,429✔
1499
        REALM_ASSERT_3(size, <=, m_mutable_ref_end - ref);
2,733,429✔
1500
        Chunk chunk;
2,733,429✔
1501
        chunk.ref = ref;
2,733,429✔
1502
        chunk.size = size;
319,519,908✔
1503
        m_chunks.push_back(chunk);
319,519,908✔
1504
    }
319,519,908✔
1505
    void add(ref_type ref, size_t size)
1506
    {
26,449,929✔
1507
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
26,449,929✔
1508
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
26,449,929✔
1509
        REALM_ASSERT_3(size, >, 0);
26,449,929✔
1510
        REALM_ASSERT_3(ref, >=, m_ref_begin);
26,449,929✔
1511
        REALM_ASSERT(size <= (ref < m_baseline ? m_immutable_ref_end : m_mutable_ref_end) - ref);
26,449,929✔
1512
        Chunk chunk;
26,449,929✔
1513
        chunk.ref = ref;
26,801,247✔
1514
        chunk.size = size;
26,801,247✔
1515
        m_chunks.push_back(chunk);
26,801,247✔
1516
    }
26,449,929✔
1517
    void add(const MemUsageVerifier& verifier)
351,318✔
1518
    {
958,524✔
1519
        m_chunks.insert(m_chunks.end(), verifier.m_chunks.begin(), verifier.m_chunks.end());
1,309,842✔
1520
    }
958,545✔
1521
    void handle(ref_type ref, size_t allocated, size_t) override
21✔
1522
    {
26,449,920✔
1523
        add(ref, allocated);
26,449,920✔
1524
    }
26,801,217✔
1525
    void canonicalize()
351,297✔
1526
    {
4,041,750✔
1527
        // Sort the chunks in order of increasing ref, then merge adjacent
4,041,750✔
1528
        // chunks while checking that there is no overlap
4,041,750✔
1529
        typedef std::vector<Chunk>::iterator iter;
4,041,750✔
1530
        iter i_1 = m_chunks.begin(), end = m_chunks.end();
4,041,750✔
1531
        iter i_2 = i_1;
2,908,131✔
1532
        sort(i_1, end);
2,556,834✔
1533
        if (i_1 != end) {
2,556,834✔
1534
            while (++i_2 != end) {
54,808,803✔
1535
                ref_type prev_ref_end = i_1->ref + i_1->size;
52,395,678✔
1536
                REALM_ASSERT_3(prev_ref_end, <=, i_2->ref);
52,395,675✔
1537
                if (i_2->ref == prev_ref_end) { // in-file
52,395,675✔
1538
                    i_1->size += i_2->size;     // Merge
35,572,722✔
1539
                }
35,572,722✔
1540
                else {
17,151,393✔
1541
                    *++i_1 = *i_2;
17,151,393✔
1542
                }
17,151,393✔
1543
            }
52,395,675✔
1544
            m_chunks.erase(i_1 + 1, end);
2,718,711✔
1545
        }
2,718,711✔
1546
    }
2,885,277✔
1547
    void clear()
1548
    {
1,309,821✔
1549
        m_chunks.clear();
958,524✔
1550
    }
1,309,677✔
1551
    void check_total_coverage()
1552
    {
319,893✔
1553
        REALM_ASSERT_3(m_chunks.size(), ==, 1);
319,893✔
1554
        REALM_ASSERT_3(m_chunks.front().ref, ==, m_ref_begin);
351,276✔
1555
        REALM_ASSERT_3(m_chunks.front().size, ==, m_mutable_ref_end - m_ref_begin);
351,276✔
1556
    }
639,807✔
1557

319,914✔
1558
private:
319,914✔
1559
    struct Chunk {
319,914✔
1560
        ref_type ref;
319,914✔
1561
        size_t size;
319,914✔
1562
        bool operator<(const Chunk& c) const
1563
        {
346,320,738✔
1564
            return ref < c.ref;
346,320,738✔
1565
        }
346,320,738✔
1566
    };
1567
    std::vector<Chunk> m_chunks;
1568
    ref_type m_ref_begin, m_immutable_ref_end, m_mutable_ref_end, m_baseline;
319,914✔
1569
};
319,914✔
1570

319,914✔
1571
#endif
1572

1573
// Exhaustive consistency check of the group (debug builds only; a no-op
// without REALM_DEBUG). Verifies the allocator, every table, the history
// (when a Replication instance provides one), and finally that all memory
// managed by the slab allocator is accounted for exactly once.
void Group::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    m_alloc.verify();

    if (!m_top.is_attached()) {
        return;
    }

    // Verify tables
    {
        auto keys = get_table_keys();
        for (auto key : keys) {
            ConstTableRef table = get_table(key);
            REALM_ASSERT_3(table->get_key().value, ==, key.value);
            table->verify();
        }
    }

    // Verify history if present
    if (Replication* repl = *get_repl()) {
        if (auto hist = repl->_create_history_read()) {
            hist->set_group(const_cast<Group*>(this), false);
            _impl::History::version_type version = 0;
            int history_type = 0;
            int history_schema_version = 0;
            get_version_and_history_info(m_top, version, history_type, history_schema_version);
            // A file without history must have schema version 0.
            REALM_ASSERT(history_type != Replication::hist_None || history_schema_version == 0);
            ref_type hist_ref = get_history_ref(m_top);
            hist->update_from_ref_and_version(hist_ref, version);
            hist->verify();
        }
    }

    if (auto tr = dynamic_cast<const Transaction*>(this)) {
        // This is a transaction
        if (tr->get_transact_stage() == DB::TransactStage::transact_Reading) {
            // Verifying the memory cannot be done from a read transaction
            // There might be a write transaction running that has freed some
            // memory that is seen as being in use in this transaction
            return;
        }
    }
    // Top slot 2 holds the logical file size.
    size_t logical_file_size = to_size_t(m_top.get_as_ref_or_tagged(2).get_as_int());
    size_t ref_begin = sizeof(SlabAlloc::Header);
    ref_type real_immutable_ref_end = logical_file_size;
    ref_type real_mutable_ref_end = m_alloc.get_total_size();
    ref_type real_baseline = m_alloc.get_baseline();
    // Fake that any empty area between the file and slab is part of the file (immutable):
    ref_type immutable_ref_end = m_alloc.align_size_to_section_boundary(real_immutable_ref_end);
    ref_type mutable_ref_end = m_alloc.align_size_to_section_boundary(real_mutable_ref_end);
    ref_type baseline = m_alloc.align_size_to_section_boundary(real_baseline);

    // Check the consistency of the allocation of used memory
    MemUsageVerifier mem_usage_1(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    m_top.report_memory_usage(mem_usage_1);
    mem_usage_1.canonicalize();

    // Check consistency of the allocation of the immutable memory that was
    // marked as free before the file was opened.
    MemUsageVerifier mem_usage_2(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    {
        REALM_ASSERT_EX(m_top.size() == 3 || m_top.size() == 5 || m_top.size() == 7 || m_top.size() >= 10,
                        m_top.size());
        Allocator& alloc = m_top.get_alloc();
        // The in-file free lists: positions, sizes and (optionally) versions.
        Array pos(alloc), len(alloc), ver(alloc);
        pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
        len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
        ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
        if (m_top.size() > s_free_pos_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
                pos.init_from_ref(ref);
        }
        if (m_top.size() > s_free_size_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
                len.init_from_ref(ref);
        }
        if (m_top.size() > s_free_version_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
                ver.init_from_ref(ref);
        }
        REALM_ASSERT(pos.is_attached() == len.is_attached());
        REALM_ASSERT(pos.is_attached() || !ver.is_attached()); // pos.is_attached() <== ver.is_attached()
        if (pos.is_attached()) {
            size_t n = pos.size();
            REALM_ASSERT_3(n, ==, len.size());
            if (ver.is_attached())
                REALM_ASSERT_3(n, ==, ver.size());
            for (size_t i = 0; i != n; ++i) {
                ref_type ref = to_ref(pos.get(i));
                size_t size_of_i = to_size_t(len.get(i));
                mem_usage_2.add_immutable(ref, size_of_i);
            }
            mem_usage_2.canonicalize();
            mem_usage_1.add(mem_usage_2);
            mem_usage_1.canonicalize();
            mem_usage_2.clear();
        }
    }

    // Check the consistency of the allocation of the immutable memory that has
    // been marked as free after the file was opened
    for (const auto& free_block : m_alloc.m_free_read_only) {
        mem_usage_2.add_immutable(free_block.first, free_block.second);
    }
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // Check the consistency of the allocation of the mutable memory that has
    // been marked as free
    m_alloc.for_all_free_entries([&](ref_type ref, size_t sz) {
        mem_usage_2.add_mutable(ref, sz);
    });
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // There may be a hole between the end of file and the beginning of the slab area.
    // We need to take that into account here.
    REALM_ASSERT_3(real_immutable_ref_end, <=, real_baseline);
    auto slab_start = immutable_ref_end;
    if (real_immutable_ref_end < slab_start) {
        ref_type ref = real_immutable_ref_end;
        size_t corrected_size = slab_start - real_immutable_ref_end;
        mem_usage_1.add_immutable(ref, corrected_size);
        mem_usage_1.canonicalize();
    }

    // At this point we have accounted for all memory managed by the slab
    // allocator
    mem_usage_1.check_total_coverage();
#endif
}
319,893✔
1711

1712
void Group::validate_primary_columns()
1713
{
240✔
1714
    auto table_keys = this->get_table_keys();
240✔
1715
    for (auto tk : table_keys) {
900✔
1716
        auto table = get_table(tk);
900✔
1717
        table->validate_primary_column();
900✔
1718
    }
900✔
1719
}
240✔
1720

1721
#ifdef REALM_DEBUG
1722

1723
// Collect memory usage statistics for the whole group tree (debug aid).
MemStats Group::get_stats()
{
    MemStats stats;
    m_top.stats(stats);
    return stats;
}
1730

1731

1732
void Group::print() const
1733
{
1734
    m_alloc.print();
1735
}
1736

1737

1738
void Group::print_free() const
1739
{
1740
    Allocator& alloc = m_top.get_alloc();
1741
    Array pos(alloc), len(alloc), ver(alloc);
1742
    pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
1743
    len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
1744
    ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
1745
    if (m_top.size() > s_free_pos_ndx) {
1746
        if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
1747
            pos.init_from_ref(ref);
1748
    }
1749
    if (m_top.size() > s_free_size_ndx) {
1750
        if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
1751
            len.init_from_ref(ref);
1752
    }
1753
    if (m_top.size() > s_free_version_ndx) {
1754
        if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
1755
            ver.init_from_ref(ref);
1756
    }
1757

1758
    if (!pos.is_attached()) {
1759
        std::cout << "none\n";
1760
        return;
1761
    }
1762
    bool has_versions = ver.is_attached();
1763

1764
    size_t n = pos.size();
1765
    for (size_t i = 0; i != n; ++i) {
1766
        size_t offset = to_size_t(pos.get(i));
1767
        size_t size_of_i = to_size_t(len.get(i));
1768
        std::cout << i << ": " << offset << " " << size_of_i;
1769

1770
        if (has_versions) {
1771
            size_t version = to_size_t(ver.get(i));
1772
            std::cout << " " << version;
1773
        }
1774
        std::cout << "\n";
1775
    }
1776
    std::cout << "\n";
1777
}
1778
#endif
1779

1780
// LCOV_EXCL_STOP ignore debug functions
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc