• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / 2093

02 Mar 2024 12:43AM UTC coverage: 90.92%. Remained the same
2093

push

Evergreen

web-flow
Use updated curl on evergreen windows hosts (#7409)

93896 of 173116 branches covered (54.24%)

238389 of 262196 relevant lines covered (90.92%)

5713411.51 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

91.23
/src/realm/group.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <new>
20
#include <algorithm>
21
#include <fstream>
22

23
#ifdef REALM_DEBUG
24
#include <iostream>
25
#include <iomanip>
26
#endif
27

28
#include <realm/util/file_mapper.hpp>
29
#include <realm/util/memory_stream.hpp>
30
#include <realm/util/thread.hpp>
31
#include <realm/impl/destroy_guard.hpp>
32
#include <realm/utilities.hpp>
33
#include <realm/exceptions.hpp>
34
#include <realm/group_writer.hpp>
35
#include <realm/transaction.hpp>
36
#include <realm/replication.hpp>
37

38
using namespace realm;
39
using namespace realm::util;
40

41
namespace {
42

43
class Initialization {
44
public:
45
    Initialization()
46
    {
24✔
47
        realm::cpuid_init();
24✔
48
    }
24✔
49
};
50

51
Initialization initialization;
52

53
} // anonymous namespace
54

55
// Default constructor: creates a free-standing Group backed by a private,
// empty SlabAlloc (no file), attached as a fresh empty group.
Group::Group()
    : m_local_alloc(new SlabAlloc)
    , m_alloc(*m_local_alloc) // Throws
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
    m_alloc.attach_empty(); // Throws
    // No file means no committed format yet; pick the session target for an
    // undecided (0) format with no history.
    m_file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    ref_type top_ref = 0; // Instantiate a new empty group
    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
}
70

71

72
// Constructs a Group from a Realm file opened read-only (the file is never
// created). `encryption_key` may be null for unencrypted files.
Group::Group(const std::string& file_path, const char* encryption_key)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();

    // Open the underlying file strictly read-only and require it to exist.
    SlabAlloc::Config cfg;
    cfg.read_only = true;
    cfg.no_create = true;
    cfg.encryption_key = encryption_key;
    ref_type top_ref = m_alloc.attach_file(file_path, cfg); // Throws
    // Non-Transaction Groups always allow writing and simply don't allow
    // committing when opened in read-only mode
    m_alloc.set_read_only(false);

    open(top_ref, file_path);
}
92

93

94
// Constructs a Group from an in-memory serialized Realm. When
// `take_ownership` is true the allocator takes over freeing the buffer.
Group::Group(BinaryData buffer, bool take_ownership)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    REALM_ASSERT(buffer.data());

    init_array_parents();
    ref_type top_ref = m_alloc.attach_buffer(buffer.data(), buffer.size()); // Throws

    // No file path is associated with a buffer-backed group.
    open(top_ref, {});

    // Ownership is transferred only after open() succeeds, so a throwing
    // open() leaves the buffer with the caller.
    if (take_ownership)
        m_alloc.own_buffer();
}
111

112
// Constructs a Group on top of an externally owned allocator (used by
// Transaction); no m_local_alloc is set, so nothing is detached on destruction.
Group::Group(SlabAlloc* alloc) noexcept
    : m_alloc(*alloc) // Throws
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
}
121

122
namespace {

// Holds Table accessors awaiting reuse. The recyclers below are allocated
// with `*new` and never destroyed, so this destructor must never run.
class TableRecycler : public std::vector<Table*> {
public:
    ~TableRecycler()
    {
        REALM_UNREACHABLE();
        // if ever enabled, remember to release Tables:
        // for (auto t : *this) {
        //    delete t;
        //}
    }
};

// We use the classic approach to construct a FIFO from two LIFO's,
// insertion is done into recycler_1, removal is done from recycler_2,
// and when recycler_2 is empty, recycler_1 is reversed into recycler_2.
// this is amortized O(1) for each entry.
auto& g_table_recycler_1 = *new TableRecycler;
auto& g_table_recycler_2 = *new TableRecycler;
// number of tables held back before being recycled. We hold back recycling
// the latest to increase the probability of detecting race conditions
// without crashing.
const static int g_table_recycling_delay = 100;
// Guards both recycler vectors above.
auto& g_table_recycler_mutex = *new std::mutex;

} // namespace
149

150
// Advances to the next table slot and eagerly loads its key (skipping
// free slots — see load_key()).
TableKeyIterator& TableKeyIterator::operator++()
{
    m_pos++;
    m_index_in_group++;
    load_key();
    return *this;
}
157

158
// Returns the key at the current position, lazily loading it if the
// cached key is still null (e.g. on a freshly constructed iterator).
TableKey TableKeyIterator::operator*()
{
    if (!bool(m_table_key)) {
        load_key();
    }
    return m_table_key;
}
165

166
// Loads m_table_key for the table at m_index_in_group, advancing past any
// free (tagged) slots. Sets a null TableKey when the end is reached.
void TableKeyIterator::load_key()
{
    const Group& g = *m_group;
    size_t max_index_in_group = g.m_table_names.size();
    while (m_index_in_group < max_index_in_group) {
        RefOrTagged rot = g.m_tables.get_as_ref_or_tagged(m_index_in_group);
        if (rot.is_ref()) {
            Table* t;
            // Fast path: take the key from a live accessor if one exists.
            // Note the assignment inside the condition — the accessor is
            // loaded atomically, since accessors may appear concurrently.
            if (m_index_in_group < g.m_table_accessors.size() &&
                (t = load_atomic(g.m_table_accessors[m_index_in_group], std::memory_order_acquire))) {
                m_table_key = t->get_key();
            }
            else {
                // Slow path: read the key directly from storage.
                m_table_key = Table::get_key_direct(g.m_tables.get_alloc(), rot.get_as_ref());
            }
            return;
        }
        // Tagged entry marks an unused slot; skip it.
        m_index_in_group++;
    }
    m_table_key = TableKey();
}
187

188
// Random access by advancing the cached forward iterator. Going backwards
// requires restarting the iterator from position 0, so sequential forward
// access is the efficient usage pattern.
TableKey TableKeys::operator[](size_t p) const
{
    if (p < m_iter.m_pos) {
        m_iter = TableKeyIterator(m_iter.m_group, 0);
    }
    while (m_iter.m_pos < p) {
        ++m_iter;
    }
    return *m_iter;
}
198

199
// Number of tables in the group (cached value maintained by set_size()
// and the add/remove paths).
size_t Group::size() const noexcept
{
    return m_num_tables;
}
203

204

205
void Group::set_size() const noexcept
206
{
2,303,457✔
207
    int retval = 0;
2,303,457✔
208
    if (is_attached() && m_table_names.is_attached()) {
2,303,484✔
209
        size_t max_index = m_tables.size();
2,169,258✔
210
        REALM_ASSERT_EX(max_index < (1 << 16), max_index);
2,169,258✔
211
        for (size_t j = 0; j < max_index; ++j) {
6,786,849✔
212
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(j);
4,617,591✔
213
            if (rot.is_ref() && rot.get_as_ref()) {
4,617,591✔
214
                ++retval;
4,602,399✔
215
            }
4,602,399✔
216
        }
4,617,591✔
217
    }
2,169,258✔
218
    m_num_tables = retval;
2,303,457✔
219
}
2,303,457✔
220

221
// Builds a table -> primary-key-column map from a legacy "pk" metadata
// table whose rows pair an object-type name ("pk_table") with a property
// name ("pk_property").
std::map<TableRef, ColKey> Group::get_primary_key_columns_from_pk_table(TableRef pk_table)
{
    std::map<TableRef, ColKey> ret;
    REALM_ASSERT(pk_table);
    ColKey col_table = pk_table->get_column_key("pk_table");
    ColKey col_prop = pk_table->get_column_key("pk_property");
    for (auto pk_obj : *pk_table) {
        auto object_type = pk_obj.get<String>(col_table);
        // Stored names omit the class prefix; re-add it to find the table.
        auto name = std::string(g_class_name_prefix) + std::string(object_type);
        auto table = get_table(name);
        auto pk_col_name = pk_obj.get<String>(col_prop);
        auto pk_col = table->get_column_key(pk_col_name);
        ret.emplace(table, pk_col);
    }

    return ret;
}
238

239
// Maps a table index to its TableKey. Prefers a live accessor; otherwise
// reads the key directly from storage. Throws NoSuchTable when the slot
// does not hold a table.
TableKey Group::ndx2key(size_t ndx) const
{
    REALM_ASSERT(is_attached());
    Table* accessor = load_atomic(m_table_accessors[ndx], std::memory_order_acquire);
    if (accessor)
        return accessor->get_key(); // fast path

    // slow path:
    RefOrTagged rot = m_tables.get_as_ref_or_tagged(ndx);
    // A tagged slot is a free slot — no table lives here.
    if (rot.is_tagged())
        throw NoSuchTable();
    ref_type ref = rot.get_as_ref();
    REALM_ASSERT(ref);
    return Table::get_key_direct(m_tables.get_alloc(), ref);
}
254

255
// Maps a TableKey to its index, verifying the key actually identifies a
// live table at that slot. Throws NoSuchTable on a stale or invalid key.
size_t Group::key2ndx_checked(TableKey key) const
{
    size_t idx = key2ndx(key);
    // early out
    // note: don't lock when accessing m_table_accessors, because if we miss a concurrently introduced table
    // accessor, we'll just fall through to the slow path. Table accessors can be introduced concurrently,
    // but never removed. The following is only safe because 'm_table_accessors' will not be relocated
    // concurrently. (We aim to be safe in face of concurrent access to a frozen transaction, where tables
    // cannot be added or removed. All other races are undefined behaviour)
    if (idx < m_table_accessors.size()) {
        Table* tbl = load_atomic(m_table_accessors[idx], std::memory_order_acquire);
        if (tbl && tbl->get_key() == key)
            return idx;
    }
    // The notion of a const group as it is now, is not really
    // useful. It is linked to a distinction between a read
    // and a write transaction. This distinction is no longer
    // a compile time aspect (it's not const anymore)
    Allocator* alloc = const_cast<SlabAlloc*>(&m_alloc);
    if (m_tables.is_attached() && idx < m_tables.size()) {
        RefOrTagged rot = m_tables.get_as_ref_or_tagged(idx);
        // Verify against the key stored on disk to reject stale keys that
        // happen to map to a reused slot.
        if (rot.is_ref() && rot.get_as_ref() && (Table::get_key_direct(*alloc, rot.get_as_ref()) == key)) {

            return idx;
        }
    }
    throw NoSuchTable();
}
283

284

285
// In-memory file format version chosen for this session (may differ from
// the committed on-disk version before an upgrade is written back).
int Group::get_file_format_version() const noexcept
{
    return m_file_format_version;
}
289

290

291
// Sets the in-memory file format version for this session.
void Group::set_file_format_version(int file_format) noexcept
{
    m_file_format_version = file_format;
}
295

296

297
// File format version as recorded in the committed (on-disk) file,
// forwarded from the allocator.
int Group::get_committed_file_format_version() const noexcept
{
    return m_alloc.get_committed_file_format_version();
}
301

302
// Test-only override of the target file format; set via
// _impl::GroupFriend::fake_target_file_format(). Empty in production use.
std::optional<int> Group::fake_target_file_format;
303

304
// Test hook: forces get_target_file_format_version_for_session() to return
// `format`; pass std::nullopt to restore normal behavior.
void _impl::GroupFriend::fake_target_file_format(const std::optional<int> format) noexcept
{
    Group::fake_target_file_format = format;
}
308

309
// Chooses the file format version a new session should use, given the
// file's current format and the requested history type.
int Group::get_target_file_format_version_for_session(int current_file_format_version,
                                                      int requested_history_type) noexcept
{
    // Test override takes precedence over all selection logic.
    if (Group::fake_target_file_format) {
        return *Group::fake_target_file_format;
    }
    // Note: This function is responsible for choosing the target file format
    // for a session. If it selects a file format that is different from
    // `current_file_format_version`, it will trigger a file format upgrade
    // process.

    // Note: `current_file_format_version` may be zero at this time, which means
    // that the file format is not yet decided (only possible for empty
    // Realms where top-ref is zero).

    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.

    if (requested_history_type == Replication::hist_None) {
        if (current_file_format_version == 24) {
            // We are able to open these file formats in RO mode
            return current_file_format_version;
        }
    }

    return g_current_file_format_version;
}
336

337
// Reads version and history metadata out of a top array. Slots that are
// absent (older/smaller top arrays, or a detached top) default to zero.
void Group::get_version_and_history_info(const Array& top, _impl::History::version_type& version, int& history_type,
                                         int& history_schema_version) noexcept
{
    using version_type = _impl::History::version_type;
    // Compute into locals first; outputs are assigned together at the end.
    version_type version_2 = 0;
    int history_type_2 = 0;
    int history_schema_version_2 = 0;
    if (top.is_attached()) {
        if (top.size() > s_version_ndx) {
            version_2 = version_type(top.get_as_ref_or_tagged(s_version_ndx).get_as_int());
        }
        if (top.size() > s_hist_type_ndx) {
            history_type_2 = int(top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        }
        if (top.size() > s_hist_version_ndx) {
            history_schema_version_2 = int(top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        }
    }
    // Version 0 is not a legal initial version, so it has to be set to 1
    // instead.
    if (version_2 == 0)
        version_2 = 1;
    version = version_2;
    history_type = history_type_2;
    history_schema_version = history_schema_version_2;
}
363

364
int Group::get_history_schema_version() noexcept
365
{
26,901✔
366
    bool history_schema_version = (m_top.is_attached() && m_top.size() > s_hist_version_ndx);
26,901✔
367
    if (history_schema_version) {
26,901✔
368
        return int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
969✔
369
    }
969✔
370
    return 0;
25,932✔
371
}
25,932✔
372

373
// Returns the sync client file ident stored in the top array. Falls back
// to 1 for sync-server histories without a stored ident, else 0.
uint64_t Group::get_sync_file_id() const noexcept
{
    if (m_top.is_attached() && m_top.size() > s_sync_file_id_ndx) {
        return uint64_t(m_top.get_as_ref_or_tagged(s_sync_file_id_ndx).get_as_int());
    }
    auto repl = get_replication();
    if (repl && repl->get_history_type() == Replication::hist_SyncServer) {
        return 1;
    }
    return 0;
}
384

385
// Total free space recorded in the file, computed as the sum of the
// free-list size entries referenced from the top array; 0 when absent.
size_t Group::get_free_space_size(const Array& top) noexcept
{
    if (top.is_attached() && top.size() > s_free_size_ndx) {
        auto ref = top.get_as_ref(s_free_size_ndx);
        Array free_list_sizes(top.get_alloc());
        free_list_sizes.init_from_ref(ref);
        return size_t(free_list_sizes.get_sum());
    }
    return 0;
}
395

396
// Deep byte size of the history data referenced from the top array; 0
// when the top array has no history slot.
size_t Group::get_history_size(const Array& top) noexcept
{
    if (top.is_attached() && top.size() > s_hist_ref_ndx) {
        auto ref = top.get_as_ref(s_hist_ref_ndx);
        Array hist(top.get_alloc());
        hist.init_from_ref(ref);
        return hist.get_byte_size_deep();
    }
    return 0;
}
406

407
// Validates that a file opened read-only has an acceptable file format:
// either the current format, or format 0 for an empty file (top_ref == 0).
// Throws FileFormatUpgradeRequired otherwise; returns the committed format.
int Group::read_only_version_check(SlabAlloc& alloc, ref_type top_ref, const std::string& path)
{
    // Select file format if it is still undecided.
    auto file_format_version = alloc.get_committed_file_format_version();

    bool file_format_ok = false;
    // It is not possible to open prior file format versions without an upgrade.
    // Since a Realm file cannot be upgraded when opened in this mode
    // (we may be unable to write to the file), no earlier versions can be opened.
    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.
    switch (file_format_version) {
        case 0:
            // Format 0 means "undecided"; only acceptable for an empty file.
            file_format_ok = (top_ref == 0);
            break;
        case g_current_file_format_version:
            file_format_ok = true;
            break;
    }
    if (REALM_UNLIKELY(!file_format_ok))
        throw FileAccessError(ErrorCodes::FileFormatUpgradeRequired,
                              util::format("Realm file at path '%1' cannot be opened in read-only mode because it "
                                           "has a file format version (%2) which requires an upgrade",
                                           path, file_format_version),
                              path);
    return file_format_version;
}
434

435
// Finishes opening a file- or buffer-backed Group: verifies the file
// format (read-only rules), resets free-space tracking, and attaches the
// accessor hierarchy. On failure the allocator is detached by the guard.
void Group::open(ref_type top_ref, const std::string& file_path)
{
    SlabAlloc::DetachGuard dg(m_alloc);
    m_file_format_version = read_only_version_check(m_alloc, top_ref, file_path);

    Replication::HistoryType history_type = Replication::hist_None;
    int target_file_format_version = get_target_file_format_version_for_session(m_file_format_version, history_type);
    if (m_file_format_version == 0) {
        // Empty file: adopt the session's target format directly.
        set_file_format_version(target_file_format_version);
    }
    else {
        // From a technical point of view, we could upgrade the Realm file
        // format in memory here, but since upgrading can be expensive, it is
        // currently disallowed.
        REALM_ASSERT(target_file_format_version == m_file_format_version);
    }

    // Make all dynamically allocated memory (space beyond the attached file)
    // available as free-space.
    reset_free_space_tracking(); // Throws

    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
    dg.release();                                         // Do not detach after all
}
461

462
Group::~Group() noexcept
{
    // If this group accessor is detached at this point in time, it is either
    // because it is DB::m_group (m_is_shared), or it is a free-standing
    // group accessor that was never successfully opened.
    if (!m_top.is_attached())
        return;

    // Free-standing group accessor
    detach();

    // if a local allocator is set in m_local_alloc, then the destruction
    // of m_local_alloc will trigger destruction of the allocator, which will
    // verify that the allocator has been detached, so....
    if (m_local_alloc)
        m_local_alloc->detach();
}
479

480
// Refreshes the reader's view after the file grew or the top ref moved:
// updates the allocator mapping, tracks mapping-version changes (which
// invalidate ref->pointer translations), and re-reads all array refs.
void Group::remap_and_update_refs(ref_type new_top_ref, size_t new_file_size, bool writable)
{
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // force update of all ref->ptr translations if the mapping has changed
    auto mapping_version = m_alloc.get_mapping_version();
    if (mapping_version != m_last_seen_mapping_version) {
        m_last_seen_mapping_version = mapping_version;
    }
    update_refs(new_top_ref);
}
492

493
void Group::validate_top_array(const Array& arr, const SlabAlloc& alloc, std::optional<size_t> read_lock_file_size,
494
                               std::optional<uint_fast64_t> read_lock_version)
495
{
2,203,203✔
496
    size_t top_size = arr.size();
2,203,203✔
497
    ref_type top_ref = arr.get_ref();
2,203,203✔
498

1,536,978✔
499
    switch (top_size) {
2,203,203✔
500
        // These are the valid sizes
501
        case 3:
1,536,933✔
502
        case 5:
1,536,933✔
503
        case 7:
1,593,654✔
504
        case 9:
1,593,654✔
505
        case 10:
1,593,654✔
506
        case 11:
2,197,395✔
507
        case 12: {
2,202,735✔
508
            ref_type table_names_ref = arr.get_as_ref_or_tagged(s_table_name_ndx).get_as_ref();
2,202,735✔
509
            ref_type tables_ref = arr.get_as_ref_or_tagged(s_table_refs_ndx).get_as_ref();
2,202,735✔
510
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
2,202,735✔
511

1,536,654✔
512
            // Logical file size must never exceed actual file size.
1,536,654✔
513
            auto file_size = alloc.get_baseline();
2,202,735✔
514
            if (logical_file_size > file_size) {
2,202,735✔
515
                std::string err = util::format("Invalid logical file size: %1, actual file size: %2, read lock file "
×
516
                                               "size: %3, read lock version: %4",
×
517
                                               logical_file_size, file_size, read_lock_file_size, read_lock_version);
×
518
                throw InvalidDatabase(err, "");
×
519
            }
×
520
            // First two entries must be valid refs pointing inside the file
1,536,654✔
521
            auto invalid_ref = [logical_file_size](ref_type ref) {
4,405,461✔
522
                return ref == 0 || (ref & 7) || ref > logical_file_size;
4,406,166✔
523
            };
4,405,461✔
524
            if (invalid_ref(table_names_ref) || invalid_ref(tables_ref)) {
2,202,993✔
525
                std::string err = util::format(
×
526
                    "Invalid top array (top_ref, [0], [1]): %1, %2, %3, read lock size: %4, read lock version: %5",
×
527
                    top_ref, table_names_ref, tables_ref, read_lock_file_size, read_lock_version);
×
528
                throw InvalidDatabase(err, "");
×
529
            }
×
530
            break;
2,202,735✔
531
        }
2,202,735✔
532
        default: {
1,536,654✔
533
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
×
534
            std::string err =
×
535
                util::format("Invalid top array size (ref: %1, array size: %2) file size: %3, read "
×
536
                             "lock size: %4, read lock version: %5",
×
537
                             top_ref, top_size, logical_file_size, read_lock_file_size, read_lock_version);
×
538
            throw InvalidDatabase(err, "");
×
539
            break;
1,536,654✔
540
        }
2,202,735✔
541
    }
2,203,203✔
542
}
2,203,203✔
543

544
// Attaches the accessor hierarchy (m_top, m_table_names, m_tables) to the
// node structure at `top_ref`, validating the top array first. When
// `top_ref` is 0 and `create_group_when_missing` is set, a fresh empty
// group is created instead (which requires `writable`). Finally the
// table-accessor vector is resized to match the number of table slots.
void Group::attach(ref_type top_ref, bool writable, bool create_group_when_missing, size_t file_size,
                   uint_fast64_t version)
{
    REALM_ASSERT(!m_top.is_attached());
    if (create_group_when_missing)
        REALM_ASSERT(writable);

    // If this function throws, it must leave the group accessor in the
    // unattached state.

    m_tables.detach();
    m_table_names.detach();
    m_is_writable = writable;

    if (top_ref != 0) {
        m_top.init_from_ref(top_ref);
        validate_top_array(m_top, m_alloc, file_size, version);
        m_table_names.init_from_parent();
        m_tables.init_from_parent();
    }
    else if (create_group_when_missing) {
        create_empty_group(); // Throws
    }
    m_attached = true;
    set_size();

    // Resize m_table_accessors to match the table count: shrink by
    // detaching and recycling surplus accessors, grow with null slots.
    size_t sz = m_tables.is_attached() ? m_tables.size() : 0;
    while (m_table_accessors.size() > sz) {
        if (Table* t = m_table_accessors.back()) {
            t->detach(Table::cookie_void);
            recycle_table_accessor(t);
        }
        m_table_accessors.pop_back();
    }
    while (m_table_accessors.size() < sz) {
        m_table_accessors.emplace_back();
    }
}
582

583

584
// Detaches the whole accessor hierarchy: recycles all table accessors,
// then detaches the three top-level arrays and marks the group unattached.
void Group::detach() noexcept
{
    detach_table_accessors();
    m_table_accessors.clear();

    m_table_names.detach();
    m_tables.detach();
    m_top.detach();

    m_attached = false;
}
595

596
// Attaches a shared (transaction-owned) group to a specific version of the
// file: refreshes the reader's memory view, then attaches at the new top ref.
void Group::attach_shared(ref_type new_top_ref, size_t new_file_size, bool writable, VersionID version)
{
    REALM_ASSERT_3(new_top_ref, <, new_file_size);
    REALM_ASSERT(!is_attached());

    // update readers view of memory
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // When `new_top_ref` is null, ask attach() to create a new node structure
    // for an empty group, but only during the initiation of write
    // transactions. When the transaction being initiated is a read transaction,
    // we instead have to leave array accessors m_top, m_tables, and
    // m_table_names in their detached state, as there are no underlying array
    // nodes to attached them to. In the case of write transactions, the nodes
    // have to be created, as they have to be ready for being modified.
    bool create_group_when_missing = writable;
    attach(new_top_ref, writable, create_group_when_missing, new_file_size, version.version); // Throws
}
615

616

617
// Detaches every live table accessor (marking it as ended-by-transaction)
// and hands it to the recycler, nulling out the slot.
void Group::detach_table_accessors() noexcept
{
    for (auto& table_accessor : m_table_accessors) {
        if (Table* t = table_accessor) {
            t->detach(Table::cookie_transaction_ended);
            recycle_table_accessor(t);
            table_accessor = nullptr;
        }
    }
}
627

628

629
// Creates the minimal node structure for an empty group: a 3-slot top
// array holding the table-names array, the tables array, and the initial
// logical file size. Destroy guards provide exception safety while the
// structure is being wired up.
void Group::create_empty_group()
{
    m_top.create(Array::type_HasRefs); // Throws
    _impl::DeepArrayDestroyGuard dg_top(&m_top);
    {
        m_table_names.create(); // Throws
        _impl::DestroyGuard<ArrayStringShort> dg(&m_table_names);
        m_top.add(m_table_names.get_ref()); // Throws
        dg.release();
    }
    {
        m_tables.create(Array::type_HasRefs); // Throws
        _impl::DestroyGuard<Array> dg(&m_tables);
        m_top.add(m_tables.get_ref()); // Throws
        dg.release();
    }
    // Initially the logical file is just the header.
    size_t initial_logical_file_size = sizeof(SlabAlloc::Header);
    m_top.add(RefOrTagged::make_tagged(initial_logical_file_size)); // Throws
    dg_top.release();
}
649

650

651
// Returns the table accessor for the given index, creating it on demand.
// Uses the double-checked locking idiom: a lock-free atomic probe first,
// then a locked re-check before constructing the accessor.
Table* Group::do_get_table(size_t table_ndx)
{
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());
    // Get table accessor from cache if it exists, else create
    Table* table = load_atomic(m_table_accessors[table_ndx], std::memory_order_acquire);
    if (!table) {
        // double-checked locking idiom
        std::lock_guard<std::mutex> lock(m_accessor_mutex);
        table = m_table_accessors[table_ndx];
        if (!table)
            table = create_table_accessor(table_ndx); // Throws
    }
    return table;
}
665

666

667
// Looks up a table accessor by name. Returns nullptr when the group has no
// tables attached or no table with that name exists.
Table* Group::do_get_table(StringData name)
{
    if (!m_table_names.is_attached())
        return nullptr; // was `return 0;` — use nullptr for pointers
    size_t table_ndx = m_table_names.find_first(name);
    if (table_ndx == not_found)
        return nullptr;

    Table* table = do_get_table(table_ndx); // Throws
    return table;
}
678

679
// Adds a new table with a primary-key column. The table itself is created
// without replication, the pk column is inserted directly, and then a
// single combined add_class_with_primary_key instruction is replicated.
TableRef Group::add_table_with_primary_key(StringData name, DataType pk_type, StringData pk_name, bool nullable,
                                           Table::Type table_type)
{
    check_attached();
    check_table_name_uniqueness(name);

    // `false`: suppress per-step replication; replicated as one op below.
    auto table = do_add_table(name, table_type, false);

    // Add pk column - without replication
    ColumnAttrMask attr;
    if (nullable)
        attr.set(col_attr_Nullable);
    ColKey pk_col = table->generate_col_key(ColumnType(pk_type), attr);
    table->do_insert_root_column(pk_col, ColumnType(pk_type), pk_name);
    table->do_set_primary_key_column(pk_col);

    if (Replication* repl = *get_repl())
        repl->add_class_with_primary_key(table->get_key(), name, pk_type, pk_name, nullable, table_type);

    return TableRef(table, table->m_alloc.get_instance_version());
}
700

701
// Core table-creation routine: finds a slot (reusing the first free one),
// derives a TableKey from the slot's tag and index, creates the table node
// structure, registers name/ref, optionally replicates, and returns the
// new accessor. Throws when the group is read-only or the name is too long.
Table* Group::do_add_table(StringData name, Table::Type table_type, bool do_repl)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");

    // get new key and index
    // find first empty spot:
    uint32_t j;
    RefOrTagged rot = RefOrTagged::make_tagged(0);
    for (j = 0; j < m_tables.size(); ++j) {
        rot = m_tables.get_as_ref_or_tagged(j);
        // A tagged (non-ref) entry marks a free slot left by a removal.
        if (!rot.is_ref())
            break;
    }
    bool gen_null_tag = (j == m_tables.size()); // new tags start at zero
    uint32_t tag = gen_null_tag ? 0 : uint32_t(rot.get_as_int());
    // Key layout: high bits carry the reuse tag, low 16 bits the slot index.
    TableKey key = TableKey((tag << 16) | j);

    if (REALM_UNLIKELY(name.size() > max_table_name_length))
        throw InvalidArgument(ErrorCodes::InvalidName, util::format("Name too long: %1", name));

    using namespace _impl;
    size_t table_ndx = key2ndx(key);
    ref_type ref = Table::create_empty_table(m_alloc, key); // Throws
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());

    rot = RefOrTagged::make_ref(ref);
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());

    if (table_ndx == m_tables.size()) {
        // Appending at the end: grow all three parallel containers.
        m_tables.add(rot);
        m_table_names.add(name);
        // Need new slot for table accessor
        m_table_accessors.push_back(nullptr);
    }
    else {
        // Reusing a free slot.
        m_tables.set(table_ndx, rot);       // Throws
        m_table_names.set(table_ndx, name); // Throws
    }

    Replication* repl = *get_repl();
    if (do_repl && repl)
        repl->add_class(key, name, table_type);

    ++m_num_tables;

    Table* table = create_table_accessor(j);
    table->do_set_table_type(table_type);

    return table;
}
752

753
// Creates (or revives from the global recycler) a Table accessor for the
// table stored at `table_ndx` and publishes it atomically into
// m_table_accessors. Throws NoSuchTable when the slot holds no table.
Table* Group::create_table_accessor(size_t table_ndx)
{
    REALM_ASSERT(m_tables.size() == m_table_accessors.size());
    REALM_ASSERT(table_ndx < m_table_accessors.size());

    RefOrTagged rot = m_tables.get_as_ref_or_tagged(table_ndx);
    ref_type ref = rot.get_as_ref();
    if (ref == 0) {
        throw NoSuchTable();
    }
    Table* table = nullptr; // was `= 0` — use nullptr for pointers
    {
        std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
        // FIFO built from two LIFOs: refill recycler_2 by reversing
        // recycler_1 when it runs dry.
        if (g_table_recycler_2.empty()) {
            while (!g_table_recycler_1.empty()) {
                auto t = g_table_recycler_1.back();
                g_table_recycler_1.pop_back();
                g_table_recycler_2.push_back(t);
            }
        }
        // Only reuse an accessor once enough are queued (recycling delay
        // helps surface use-after-recycle races).
        if (g_table_recycler_2.size() + g_table_recycler_1.size() > g_table_recycling_delay) {
            table = g_table_recycler_2.back();
            table->fully_detach();
            g_table_recycler_2.pop_back();
        }
    }
    if (table) {
        table->revive(get_repl(), m_alloc, m_is_writable);
        table->init(ref, this, table_ndx, m_is_writable, is_frozen());
    }
    else {
        std::unique_ptr<Table> new_table(new Table(get_repl(), m_alloc));  // Throws
        new_table->init(ref, this, table_ndx, m_is_writable, is_frozen()); // Throws
        table = new_table.release();
    }
    table->refresh_index_accessors();
    // must be atomic to allow concurrent probing of the m_table_accessors vector.
    store_atomic(m_table_accessors[table_ndx], table, std::memory_order_release);
    return table;
}
793

794

795
// Hand a no-longer-needed table accessor to the recycler. It is parked in
// stage-1 of the recycler queue (under the global recycler mutex) and may
// later be revived for a different table instead of being deallocated.
void Group::recycle_table_accessor(Table* to_be_recycled)
{
    std::lock_guard<std::mutex> lock{g_table_recycler_mutex};
    g_table_recycler_1.push_back(to_be_recycled);
}
800

801
// Remove the table with the given name.
// Throws NoSuchTable if no table with that name exists.
void Group::remove_table(StringData name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    // Delegate to the (index, key) overload that does the actual work.
    remove_table(ndx, ndx2key(ndx)); // Throws
}
810

811

812
// Remove the table identified by `key`.
// key2ndx_checked() throws if the key does not refer to an existing table.
void Group::remove_table(TableKey key)
{
    check_attached();
    remove_table(key2ndx_checked(key), key);
}
819

820

821
// Core table-removal routine. Removes the table at `table_ndx` (whose key is
// `key`) from the group: strips its columns (without replicating each column
// removal), replicates the class erasure, frees the table's node structure,
// and recycles the accessor.
// Throws: LogicError(ReadOnlyDB) if the group is not writable,
// CrossTableLinkTarget if other tables hold links into this one.
void Group::remove_table(size_t table_ndx, TableKey key)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    REALM_ASSERT(table_ndx < m_tables.size());
    TableRef table = get_table(key);

    // In principle we could remove a table even if it is the target of link
    // columns of other tables, however, to do that, we would have to
    // automatically remove the "offending" link columns from those other
    // tables. Such a behaviour is deemed too obscure, and we shall therefore
    // require that a removed table does not contain foreign origin backlink
    // columns.
    if (table->is_cross_table_link_target())
        throw CrossTableLinkTarget(table->get_name());

    {
        // We don't want to replicate the individual column removals along the
        // way as they're covered by the table removal
        Table::DisableReplication dr(*table);
        // Remove columns back-to-front so spec indexes stay valid.
        for (size_t i = table->get_column_count(); i > 0; --i) {
            ColKey col_key = table->spec_ndx2colkey(i - 1);
            table->remove_column(col_key);
        }
    }

    size_t prior_num_tables = m_tables.size();
    Replication* repl = *get_repl();
    if (repl)
        repl->erase_class(key, table->get_name(), prior_num_tables); // Throws

    int64_t ref_64 = m_tables.get(table_ndx);
    REALM_ASSERT(!int_cast_has_overflow<ref_type>(ref_64));
    ref_type ref = ref_type(ref_64);

    // Replace entry in m_tables with next tag to use:
    // (the tagged value bumps the upper bits of the old key so a future table
    // created in this slot gets a fresh TableKey)
    RefOrTagged rot = RefOrTagged::make_tagged((1 + (key.value >> 16)) & 0x7FFF);
    // Remove table
    m_tables.set(table_ndx, rot);     // Throws
    m_table_names.set(table_ndx, {}); // Throws
    m_table_accessors[table_ndx] = nullptr;
    --m_num_tables;

    table->detach(Table::cookie_removed);
    // Destroy underlying node structure
    Array::destroy_deep(ref, m_alloc);
    recycle_table_accessor(table.unchecked_ptr());
}
870

871

872
// Rename the table currently called `name` to `new_name`.
// Throws NoSuchTable if no table with `name` exists.
void Group::rename_table(StringData name, StringData new_name, bool require_unique_name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    // Forward to the key-based overload, which performs the rename.
    rename_table(ndx2key(ndx), new_name, require_unique_name); // Throws
}
880

881

882
// Rename the table identified by `key` to `new_name`. When
// `require_unique_name` is set, a clash with an existing table name throws
// TableNameInUse. The rename is replicated when replication is active.
void Group::rename_table(TableKey key, StringData new_name, bool require_unique_name)
{
    check_attached();
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    if (require_unique_name && has_table(new_name))
        throw TableNameInUse();
    const size_t ndx = key2ndx_checked(key);
    m_table_names.set(ndx, new_name);
    Replication* repl = *get_repl();
    if (repl)
        repl->rename_class(key, new_name); // Throws
}
895

896
// Fetch the object an ObjLink points at. Unresolved keys are looked up in
// the table's tombstone tree, live keys in the normal cluster tree.
// ClusterTree::get throws if the key is not present.
Obj Group::get_object(ObjLink link)
{
    auto target_table = get_table(link.get_table_key());
    ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return target_table->m_tombstones->get(obj_key);
    return target_table->m_clusters.get(obj_key);
}
903

904
// Like get_object(), but returns a null Obj instead of throwing when the
// key is not found.
Obj Group::try_get_object(ObjLink link) noexcept
{
    auto target_table = get_table(link.get_table_key());
    ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return target_table->m_tombstones->try_get_obj(obj_key);
    return target_table->m_clusters.try_get_obj(obj_key);
}
911

912
// Validate that an ObjLink may be stored: the target object must exist and
// the target table must be neither embedded nor asymmetric (ephemeral).
// A null link (no table key) is always acceptable.
void Group::validate(ObjLink link) const
{
    auto tk = link.get_table_key();
    if (!tk)
        return;

    auto target_key = link.get_obj_key();
    auto target_table = get_table(tk);
    // Unresolved keys live in the tombstone tree, live keys in the clusters.
    const ClusterTree* tree = target_key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
    if (!tree->is_valid(target_key)) {
        throw InvalidArgument(ErrorCodes::KeyNotFound, "Target object not found");
    }
    if (target_table->is_embedded()) {
        throw IllegalOperation("Cannot link to embedded object");
    }
    if (target_table->is_asymmetric()) {
        throw IllegalOperation("Cannot link to ephemeral object");
    }
}
930

931
// Stream the group's table-name array (deeply, unconditionally) and return
// the ref it was written at.
ref_type Group::DefaultTableWriter::write_names(_impl::OutputStream& out)
{
    constexpr bool deep = true;              // Deep
    constexpr bool only_if_modified = false; // Always
    return m_group->m_table_names.write(out, deep, only_if_modified); // Throws
}
937
// Stream the group's tables array (deeply, unconditionally) and return the
// ref it was written at.
ref_type Group::DefaultTableWriter::write_tables(_impl::OutputStream& out)
{
    constexpr bool deep = true;              // Deep
    constexpr bool only_if_modified = false; // Always
    return m_group->m_tables.write(out, deep, only_if_modified); // Throws
}
943

944
// Stream the group's replication history, if any, and describe it in the
// returned HistoryInfo. Only sync-client/sync-server histories are preserved
// (and only when m_should_write_history is set); any other history type is
// dropped, leaving the returned info at its default (hist_None) values.
// The sync file ident is always copied into the result.
auto Group::DefaultTableWriter::write_history(_impl::OutputStream& out) -> HistoryInfo
{
    bool deep = true;              // Deep
    bool only_if_modified = false; // Always
    ref_type history_ref = _impl::GroupFriend::get_history_ref(*m_group);
    HistoryInfo info;
    if (history_ref) {
        _impl::History::version_type version;
        int history_type, history_schema_version;
        _impl::GroupFriend::get_version_and_history_info(_impl::GroupFriend::get_alloc(*m_group),
                                                         m_group->m_top.get_ref(), version, history_type,
                                                         history_schema_version);
        REALM_ASSERT(history_type != Replication::hist_None);
        if (!m_should_write_history ||
            (history_type != Replication::hist_SyncClient && history_type != Replication::hist_SyncServer)) {
            return info; // Only sync history should be preserved when writing to a new file
        }
        info.type = history_type;
        info.version = history_schema_version;
        Array history{const_cast<Allocator&>(_impl::GroupFriend::get_alloc(*m_group))};
        history.init_from_ref(history_ref);
        info.ref = history.write(out, deep, only_if_modified); // Throws
    }
    info.sync_file_id = m_group->get_sync_file_id();
    return info;
}
970

971
void Group::write(std::ostream& out, bool pad) const
972
{
42✔
973
    DefaultTableWriter table_writer;
42✔
974
    write(out, pad, 0, table_writer);
42✔
975
}
42✔
976

977
void Group::write(std::ostream& out, bool pad_for_encryption, uint_fast64_t version_number, TableWriter& writer) const
978
{
630✔
979
    REALM_ASSERT(is_attached());
630✔
980
    writer.set_group(this);
630✔
981
    bool no_top_array = !m_top.is_attached();
630✔
982
    write(out, m_file_format_version, writer, no_top_array, pad_for_encryption, version_number); // Throws
630✔
983
}
630✔
984

985
// Write this group to an (empty) file, optionally encrypted.
// The output is buffered; the buffer is sized to roughly 1/256 of the space
// the group occupies, clamped to [4 KiB, 64 MiB].
// Fix: compare the encryption-key pointer against nullptr instead of the
// literal 0 (modern C++ idiom; behavior unchanged).
void Group::write(File& file, const char* encryption_key, uint_fast64_t version_number, TableWriter& writer) const
{
    REALM_ASSERT(file.get_size() == 0);

    file.set_encryption_key(encryption_key);

    // Force the file system to allocate a node so we get a stable unique id.
    // See File::get_unique_id(). This is used to distinguish encrypted mappings.
    file.resize(1);

    // The aim is that the buffer size should be at least 1/256 of needed size but less than 64 Mb
    constexpr size_t upper_bound = 64 * 1024 * 1024;
    size_t min_space = std::min(get_used_space() >> 8, upper_bound);
    size_t buffer_size = 4096;
    while (buffer_size < min_space) {
        buffer_size <<= 1;
    }
    File::Streambuf streambuf(&file, buffer_size);

    std::ostream out(&streambuf);
    // Turn stream errors into exceptions so write failures are not silent.
    out.exceptions(std::ios_base::failbit | std::ios_base::badbit);
    // Pad for encryption exactly when a key was supplied.
    write(out, encryption_key != nullptr, version_number, writer);
    int sync_status = streambuf.pubsync();
    REALM_ASSERT(sync_status == 0);
}
1010

1011
void Group::write(const std::string& path, const char* encryption_key, uint64_t version_number,
1012
                  bool write_history) const
1013
{
234✔
1014
    File file;
234✔
1015
    int flags = 0;
234✔
1016
    file.open(path, File::access_ReadWrite, File::create_Must, flags);
234✔
1017
    DefaultTableWriter table_writer(write_history);
234✔
1018
    write(file, encryption_key, version_number, table_writer);
234✔
1019
}
234✔
1020

1021

1022
// Serialize this group into a freshly allocated memory buffer.
// Ownership of the buffer passes to the caller via the returned BinaryData.
// Throws Exception(OutOfMemory) if the buffer cannot be allocated.
BinaryData Group::write_to_mem() const
{
    REALM_ASSERT(is_attached());

    // The streamed form can never exceed the total size of the attached
    // file/buffer, so that serves as an upper bound for the allocation.
    const size_t max_size = m_alloc.get_total_size();

    std::unique_ptr<char[]> buffer{new (std::nothrow) char[max_size]};
    if (!buffer)
        throw Exception(ErrorCodes::OutOfMemory, "Could not allocate memory while dumping to memory");
    MemoryOutputStream out; // Throws
    out.set_buffer(buffer.get(), buffer.get() + max_size);
    write(out); // Throws
    const size_t written = out.size();
    return BinaryData(buffer.release(), written);
}
1038

1039

1040
// Static streaming writer: serialize a group into `out` in the streaming
// file format (header, payload arrays, top array, footer).
// - `no_top_array`: write an "empty realm" stream (header + footer only).
// - `version_number` non-zero: include (empty) free-list/version arrays and
//   the version, plus history info when present — used for compaction.
// - `pad_for_encryption`: pad so the footer ends on a page boundary.
void Group::write(std::ostream& out, int file_format_version, TableWriter& table_writer, bool no_top_array,
                  bool pad_for_encryption, uint_fast64_t version_number)
{
    _impl::OutputStream out_2(out);

    // Write the file header
    SlabAlloc::Header streaming_header;
    if (no_top_array) {
        file_format_version = 0;
    }
    else if (file_format_version == 0) {
        // Use current file format version
        file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    }
    SlabAlloc::init_streaming_header(&streaming_header, file_format_version);
    out_2.write(reinterpret_cast<const char*>(&streaming_header), sizeof streaming_header);

    ref_type top_ref = 0;
    size_t final_file_size = sizeof streaming_header;
    if (no_top_array) {
        // Accept version number 1 as that number is (unfortunately) also used
        // to denote the empty initial state of a Realm file.
        REALM_ASSERT(version_number == 0 || version_number == 1);
    }
    else {
        // Because we need to include the total logical file size in the
        // top-array, we have to start by writing everything except the
        // top-array, and then finally compute and write a correct version of
        // the top-array. The free-space information of the group will only be
        // included if a non-zero version number is given as parameter,
        // indicating that versioning info is to be saved. This is used from
        // DB to compact the database by writing only the live data
        // into a separate file.
        ref_type names_ref = table_writer.write_names(out_2);   // Throws
        ref_type tables_ref = table_writer.write_tables(out_2); // Throws
        SlabAlloc new_alloc;
        new_alloc.attach_empty(); // Throws
        Array top(new_alloc);
        top.create(Array::type_HasRefs); // Throws
        _impl::ShallowArrayDestroyGuard dg_top(&top);
        int_fast64_t value_1 = from_ref(names_ref);
        int_fast64_t value_2 = from_ref(tables_ref);
        top.add(value_1); // Throws
        top.add(value_2); // Throws
        top.add(0);       // Throws  (slot 2: logical file size, patched below)

        int top_size = 3;
        if (version_number) {
            TableWriter::HistoryInfo history_info = table_writer.write_history(out_2); // Throws

            // Fresh, empty free-list/size/version arrays — the compacted
            // file starts out with no free space.
            Array free_list(new_alloc);
            Array size_list(new_alloc);
            Array version_list(new_alloc);
            free_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_1(&free_list);
            size_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_2(&size_list);
            version_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_3(&version_list);
            bool deep = true;              // Deep
            bool only_if_modified = false; // Always
            ref_type free_list_ref = free_list.write(out_2, deep, only_if_modified);
            ref_type size_list_ref = size_list.write(out_2, deep, only_if_modified);
            ref_type version_list_ref = version_list.write(out_2, deep, only_if_modified);
            top.add(RefOrTagged::make_ref(free_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(size_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(version_list_ref));  // Throws
            top.add(RefOrTagged::make_tagged(version_number)); // Throws
            top_size = 7;

            if (history_info.type != Replication::hist_None) {
                top.add(RefOrTagged::make_tagged(history_info.type));
                top.add(RefOrTagged::make_ref(history_info.ref));
                top.add(RefOrTagged::make_tagged(history_info.version));
                top.add(RefOrTagged::make_tagged(history_info.sync_file_id));
                top_size = s_group_max_size;
                // ^ this is too large, since the evacuation point entry is not there:
                // (but the code below is self correcting)
            }
        }
        top_ref = out_2.get_ref_of_next_array();

        // Produce a preliminary version of the top array whose
        // representation is guaranteed to be able to hold the final file
        // size
        size_t max_top_byte_size = Array::get_max_byte_size(top_size);
        size_t max_final_file_size = size_t(top_ref) + max_top_byte_size;
        top.ensure_minimum_width(RefOrTagged::make_tagged(max_final_file_size)); // Throws

        // Finalize the top array by adding the projected final file size
        // to it
        size_t top_byte_size = top.get_byte_size();
        final_file_size = size_t(top_ref) + top_byte_size;
        top.set(2, RefOrTagged::make_tagged(final_file_size)); // Throws

        // Write the top array
        bool deep = false;                        // Shallow
        bool only_if_modified = false;            // Always
        top.write(out_2, deep, only_if_modified); // Throws
        REALM_ASSERT_3(size_t(out_2.get_ref_of_next_array()), ==, final_file_size);

        dg_top.reset(nullptr); // Destroy now
    }

    // encryption will pad the file to a multiple of the page, so ensure the
    // footer is aligned to the end of a page
    if (pad_for_encryption) {
#if REALM_ENABLE_ENCRYPTION
        size_t unrounded_size = final_file_size + sizeof(SlabAlloc::StreamingFooter);
        size_t rounded_size = round_up_to_page_size(unrounded_size);
        if (rounded_size != unrounded_size) {
            std::unique_ptr<char[]> buffer(new char[rounded_size - unrounded_size]());
            out_2.write(buffer.get(), rounded_size - unrounded_size);
        }
#endif
    }

    // Write streaming footer
    SlabAlloc::StreamingFooter footer;
    footer.m_top_ref = top_ref;
    footer.m_magic_cookie = SlabAlloc::footer_magic_cookie;
    out_2.write(reinterpret_cast<const char*>(&footer), sizeof footer);
}
1163

1164

1165
// Re-anchor the group at a new top ref after a commit and propagate the
// change to the name/table arrays and all materialized table accessors.
void Group::update_refs(ref_type top_ref) noexcept
{
    // After Group::commit() we will always have free space tracking info.
    REALM_ASSERT_3(m_top.size(), >=, 5);

    m_top.init_from_ref(top_ref);

    // Refresh the direct children of the top array...
    m_table_names.update_from_parent();
    m_tables.update_from_parent();

    // ...and every attached table accessor.
    for (auto& acc : m_table_accessors) {
        if (acc)
            acc->update_from_parent();
    }
}
1184

1185
// Structural equality between two groups: every table in `*this` must have a
// same-named table in `g` with the same primary-key column type and the same
// "embeddedness", and non-embedded tables must compare equal element-wise.
// NOTE(review): embedded tables are only checked for matching embeddedness
// here — presumably their contents are covered when comparing the parent
// tables; confirm against Table::operator==. Also note the comparison is
// one-directional over this group's keys; extra tables present only in `g`
// would not be detected by this loop — verify callers rely on symmetric use.
bool Group::operator==(const Group& g) const
{
    for (auto tk : get_table_keys()) {
        const StringData& table_name = get_table_name(tk);

        ConstTableRef table_1 = get_table(tk);
        ConstTableRef table_2 = g.get_table(table_name);
        if (!table_2)
            return false;
        // Primary-key column types must agree.
        if (table_1->get_primary_key_column().get_type() != table_2->get_primary_key_column().get_type()) {
            return false;
        }
        if (table_1->is_embedded() != table_2->is_embedded())
            return false;
        if (table_1->is_embedded())
            continue;

        if (*table_1 != *table_2)
            return false;
    }
    return true;
}
1207
// Return the number of bytes of live data in the group: the logical file
// size (tagged value in top-array slot 2) minus the total free space
// (sum of the free-lengths array in slot 4, when present).
size_t Group::get_used_space() const noexcept
{
    if (!m_top.is_attached())
        return 0;

    // Slot 2 holds the logical file size as a tagged integer.
    size_t used_space = size_t(m_top.get(2)) >> 1;

    if (m_top.size() > 4) {
        // Subtract the sum of all free-chunk lengths.
        Array free_lengths(const_cast<SlabAlloc&>(m_alloc));
        free_lengths.init_from_ref(ref_type(m_top.get(4)));
        used_space -= size_t(free_lengths.get_sum());
    }

    return used_space;
}
1222

1223

1224
namespace {
// Minimal transaction-log observer used by Group::advance_transact() to
// detect whether an advanced-over changeset contained schema changes
// (table or column insertion/removal/rename). All other instructions fall
// through to NullInstructionObserver's no-op handlers. The result is
// reported through the `schema_changed` flag supplied by the caller.
class TransactAdvancer : public _impl::NullInstructionObserver {
public:
    // The Group parameter is unused; only the schema-changed flag is kept.
    TransactAdvancer(Group&, bool& schema_changed)
        : m_schema_changed(schema_changed)
    {
    }

    bool insert_group_level_table(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool erase_class(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool rename_class(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool insert_column(ColKey)
    {
        m_schema_changed = true;
        return true;
    }

    bool erase_column(ColKey)
    {
        m_schema_changed = true;
        return true;
    }

    bool rename_column(ColKey) noexcept
    {
        m_schema_changed = true;
        return true; // No-op
    }

private:
    bool& m_schema_changed;
};
} // anonymous namespace
1272

1273

1274
void Group::update_allocator_wrappers(bool writable)
1275
{
4,600,914✔
1276
    m_is_writable = writable;
4,600,914✔
1277
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
8,606,736✔
1278
        auto table_accessor = m_table_accessors[i];
4,005,822✔
1279
        if (table_accessor) {
4,005,822✔
1280
            table_accessor->update_allocator_wrapper(writable);
3,075,108✔
1281
        }
3,075,108✔
1282
    }
4,005,822✔
1283
}
4,600,914✔
1284

1285
void Group::flush_accessors_for_commit()
1286
{
602,568✔
1287
    for (auto& acc : m_table_accessors)
602,568✔
1288
        if (acc)
1,583,634✔
1289
            acc->flush_for_commit();
992,856✔
1290
}
602,568✔
1291

1292
// Reconcile the table-accessor vector with the underlying m_tables array
// after the group has been re-attached to a (possibly different) snapshot.
// Accessors whose slot now holds a table with a different key are detached
// and recycled; all others get their accessor tree refreshed.
void Group::refresh_dirty_accessors()
{
    // If the whole group is gone, drop every accessor.
    if (!m_tables.is_attached()) {
        m_table_accessors.clear();
        return;
    }

    // The array of Tables cannot have shrunk:
    REALM_ASSERT(m_tables.size() >= m_table_accessors.size());

    // but it may have grown - and if so, we must resize the accessor array to match
    if (m_tables.size() > m_table_accessors.size()) {
        m_table_accessors.resize(m_tables.size());
    }

    // Update all attached table accessors.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        auto& table_accessor = m_table_accessors[i];
        if (table_accessor) {
            // If the table has changed it's key in the file, it's a
            // new table. This will detach the old accessor and remove it.
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(i);
            bool same_table = false;
            if (rot.is_ref()) {
                auto ref = rot.get_as_ref();
                TableKey new_key = Table::get_key_direct(m_alloc, ref);
                if (new_key == table_accessor->get_key())
                    same_table = true;
            }
            if (same_table) {
                table_accessor->refresh_accessor_tree();
            }
            else {
                // Slot now holds a tag (removed table) or a different table:
                // the old accessor is stale and must go.
                table_accessor->detach(Table::cookie_removed);
                recycle_table_accessor(table_accessor);
                m_table_accessors[i] = nullptr;
            }
        }
    }
}
1332

1333

1334
// Advance this group's accessors to the snapshot rooted at `new_top_ref`.
// `in` optionally carries the transaction log covering the advance; it is
// parsed only to compute the schema-changed flag (and only when a
// schema-change notification handler is installed). `writable` selects the
// mode used when re-attaching.
void Group::advance_transact(ref_type new_top_ref, util::InputStream* in, bool writable)
{
    REALM_ASSERT(is_attached());
    // Exception safety: If this function throws, the group accessor and all of
    // its subordinate accessors are left in a state that may not be fully
    // consistent. Only minimal consistency is guaranteed (see
    // AccessorConsistencyLevels). In this case, the application is required to
    // either destroy the Group object, forcing all subordinate accessors to
    // become detached, or take some other equivalent action that involves a
    // call to Group::detach(), such as terminating the transaction in progress.
    // such actions will also lead to the detachment of all subordinate
    // accessors. Until then it is an error, and unsafe if the application
    // attempts to access the group one of its subordinate accessors.
    //
    // The purpose of this function is to refresh all attached accessors after
    // the underlying node structure has undergone arbitrary change, such as
    // when a read transaction has been advanced to a later snapshot of the
    // database.
    //
    // Initially, when this function is invoked, we cannot assume any
    // correspondence between the accessor state and the underlying node
    // structure. We can assume that the hierarchy is in a state of minimal
    // consistency, and that it can be brought to a state of structural
    // correspondence using information in the transaction logs. When structural
    // correspondence is achieved, we can reliably refresh the accessor hierarchy
    // (Table::refresh_accessor_tree()) to bring it back to a fully consistent
    // state. See AccessorConsistencyLevels.
    //
    // Much of the information in the transaction logs is not used in this
    // process, because the changes have already been applied to the underlying
    // node structure. All we need to do here is to bring the accessors back
    // into a state where they correctly reflect the underlying structure (or
    // detach them if the underlying object has been removed.)
    //
    // This is no longer needed in Core, but we need to compute "schema_changed",
    // for the benefit of ObjectStore.
    bool schema_changed = false;
    if (in && has_schema_change_notification_handler()) {
        TransactAdvancer advancer(*this, schema_changed);
        _impl::TransactLogParser parser; // Throws
        parser.parse(*in, advancer);     // Throws
    }

    m_top.detach();                                           // Soft detach
    bool create_group_when_missing = false;                   // See Group::attach_shared().
    attach(new_top_ref, writable, create_group_when_missing); // Throws
    refresh_dirty_accessors();                                // Throws

    if (schema_changed)
        send_schema_change_notification();
}
1385

1386
// Ensure the top array contains the history-related slots (history type,
// history ref, history schema version, sync file ident) and store the given
// values into them. Missing slots up to the first history slot are padded
// with zeros. When history slots already exist and carry a real history,
// the stored type/schema-version must match the requested ones.
void Group::prepare_top_for_history(int history_type, int history_schema_version, uint64_t file_ident)
{
    REALM_ASSERT(m_file_format_version >= 7);
    // Pad the top array out to the first history slot.
    while (m_top.size() < s_hist_type_ndx) {
        m_top.add(0); // Throws
    }

    if (m_top.size() > s_hist_version_ndx) {
        // History slots already present — overwrite, but assert consistency
        // with any previously recorded history.
        int stored_history_type = int(m_top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        int stored_history_schema_version = int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        if (stored_history_type != Replication::hist_None) {
            REALM_ASSERT(stored_history_type == history_type);
            REALM_ASSERT(stored_history_schema_version == history_schema_version);
        }
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(history_type));              // Throws
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(history_schema_version)); // Throws
    }
    else {
        // No history yet
        REALM_ASSERT(m_top.size() == s_hist_type_ndx);
        ref_type history_ref = 0;                                    // No history yet
        m_top.add(RefOrTagged::make_tagged(history_type));           // Throws
        m_top.add(RefOrTagged::make_ref(history_ref));               // Throws
        m_top.add(RefOrTagged::make_tagged(history_schema_version)); // Throws
    }

    if (m_top.size() > s_sync_file_id_ndx) {
        m_top.set(s_sync_file_id_ndx, RefOrTagged::make_tagged(file_ident));
    }
    else {
        m_top.add(RefOrTagged::make_tagged(file_ident)); // Throws
    }
}
1419

1420
void Group::clear_history()
1421
{
36✔
1422
    bool has_history = (m_top.is_attached() && m_top.size() > s_hist_type_ndx);
36✔
1423
    if (has_history) {
36✔
1424
        auto hist_ref = m_top.get_as_ref(s_hist_ref_ndx);
36✔
1425
        Array::destroy_deep(hist_ref, m_top.get_alloc());
36✔
1426
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(Replication::hist_None)); // Throws
36✔
1427
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(0));                   // Throws
36✔
1428
        m_top.set(s_hist_ref_ndx, 0);                                                 // Throws
36✔
1429
    }
36✔
1430
}
36✔
1431

1432
#ifdef REALM_DEBUG // LCOV_EXCL_START ignore debug functions
1433

1434
class MemUsageVerifier : public Array::MemUsageHandler {
1435
public:
1436
    MemUsageVerifier(ref_type ref_begin, ref_type immutable_ref_end, ref_type mutable_ref_end, ref_type baseline)
1437
        : m_ref_begin(ref_begin)
1438
        , m_immutable_ref_end(immutable_ref_end)
1439
        , m_mutable_ref_end(mutable_ref_end)
1440
        , m_baseline(baseline)
1441
    {
113,829✔
1442
    }
113,829✔
1443
    void add_immutable(ref_type ref, size_t size)
1444
    {
2,514,561✔
1445
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
2,514,561✔
1446
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
2,514,561✔
1447
        REALM_ASSERT_3(size, >, 0);
2,514,561✔
1448
        REALM_ASSERT_3(ref, >=, m_ref_begin);
2,514,561✔
1449
        REALM_ASSERT_3(size, <=, m_immutable_ref_end - ref);
2,514,561✔
1450
        Chunk chunk;
2,514,561✔
1451
        chunk.ref = ref;
2,514,561✔
1452
        chunk.size = size;
2,514,561✔
1453
        m_chunks.push_back(chunk);
2,514,561✔
1454
    }
2,514,561✔
1455
    void add_mutable(ref_type ref, size_t size)
1456
    {
380,034✔
1457
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
380,034✔
1458
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
380,034✔
1459
        REALM_ASSERT_3(size, >, 0);
380,034✔
1460
        REALM_ASSERT_3(ref, >=, m_immutable_ref_end);
380,034✔
1461
        REALM_ASSERT_3(size, <=, m_mutable_ref_end - ref);
380,034✔
1462
        Chunk chunk;
380,034✔
1463
        chunk.ref = ref;
380,034✔
1464
        chunk.size = size;
380,034✔
1465
        m_chunks.push_back(chunk);
380,034✔
1466
    }
380,034✔
1467
    void add(ref_type ref, size_t size)
1468
    {
9,012,849✔
1469
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
9,012,849✔
1470
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
9,012,849✔
1471
        REALM_ASSERT_3(size, >, 0);
9,012,849✔
1472
        REALM_ASSERT_3(ref, >=, m_ref_begin);
9,012,849✔
1473
        REALM_ASSERT(size <= (ref < m_baseline ? m_immutable_ref_end : m_mutable_ref_end) - ref);
9,012,849✔
1474
        Chunk chunk;
9,012,849✔
1475
        chunk.ref = ref;
9,012,849✔
1476
        chunk.size = size;
9,012,849✔
1477
        m_chunks.push_back(chunk);
9,012,849✔
1478
    }
9,012,849✔
1479
    void add(const MemUsageVerifier& verifier)
1480
    {
169,326✔
1481
        m_chunks.insert(m_chunks.end(), verifier.m_chunks.begin(), verifier.m_chunks.end());
169,326✔
1482
    }
169,326✔
1483
    void handle(ref_type ref, size_t allocated, size_t) override
1484
    {
9,012,774✔
1485
        add(ref, allocated);
9,012,774✔
1486
    }
9,012,774✔
1487
    void canonicalize()
1488
    {
452,478✔
1489
        // Sort the chunks in order of increasing ref, then merge adjacent
226,158✔
1490
        // chunks while checking that there is no overlap
226,158✔
1491
        typedef std::vector<Chunk>::iterator iter;
452,478✔
1492
        iter i_1 = m_chunks.begin(), end = m_chunks.end();
452,478✔
1493
        iter i_2 = i_1;
452,478✔
1494
        sort(i_1, end);
452,478✔
1495
        if (i_1 != end) {
452,478✔
1496
            while (++i_2 != end) {
15,368,955✔
1497
                ref_type prev_ref_end = i_1->ref + i_1->size;
14,960,673✔
1498
                REALM_ASSERT_3(prev_ref_end, <=, i_2->ref);
14,960,673✔
1499
                if (i_2->ref == prev_ref_end) { // in-file
14,960,673✔
1500
                    i_1->size += i_2->size;     // Merge
11,851,554✔
1501
                }
11,851,554✔
1502
                else {
3,109,119✔
1503
                    *++i_1 = *i_2;
3,109,119✔
1504
                }
3,109,119✔
1505
            }
14,960,673✔
1506
            m_chunks.erase(i_1 + 1, end);
408,282✔
1507
        }
408,282✔
1508
    }
452,478✔
1509
    void clear()
1510
    {
169,326✔
1511
        m_chunks.clear();
169,326✔
1512
    }
169,326✔
1513
    void check_total_coverage()
1514
    {
56,916✔
1515
        REALM_ASSERT_3(m_chunks.size(), ==, 1);
56,916✔
1516
        REALM_ASSERT_3(m_chunks.front().ref, ==, m_ref_begin);
56,916✔
1517
        REALM_ASSERT_3(m_chunks.front().size, ==, m_mutable_ref_end - m_ref_begin);
56,916✔
1518
    }
56,916✔
1519

1520
private:
1521
    struct Chunk {
1522
        ref_type ref;
1523
        size_t size;
1524
        bool operator<(const Chunk& c) const
1525
        {
131,424,039✔
1526
            return ref < c.ref;
131,424,039✔
1527
        }
131,424,039✔
1528
    };
1529
    std::vector<Chunk> m_chunks;
1530
    ref_type m_ref_begin, m_immutable_ref_end, m_mutable_ref_end, m_baseline;
1531
};
1532

1533
#endif
1534

1535
// Exhaustive (debug-only) consistency check of the whole group.
//
// Verifies, in order: the slab allocator, every table, the replication
// history (when a Replication instance is present), and finally that the
// union of all reachable memory plus all free-list entries exactly tiles
// the file and slab regions with no overlap and no gap.
// Compiled to a no-op when REALM_DEBUG is not defined.
void Group::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    m_alloc.verify();

    // A group with no top array holds no data to verify.
    if (!m_top.is_attached()) {
        return;
    }

    // Verify tables
    {
        auto keys = get_table_keys();
        for (auto key : keys) {
            ConstTableRef table = get_table(key);
            // The table must agree with the key it was looked up under.
            REALM_ASSERT_3(table->get_key().value, ==, key.value);
            table->verify();
        }
    }

    // Verify history if present
    if (Replication* repl = *get_repl()) {
        if (auto hist = repl->_create_history_read()) {
            hist->set_group(const_cast<Group*>(this), false);
            _impl::History::version_type version = 0;
            int history_type = 0;
            int history_schema_version = 0;
            get_version_and_history_info(m_top, version, history_type, history_schema_version);
            // A schema version without a history type would be inconsistent.
            REALM_ASSERT(history_type != Replication::hist_None || history_schema_version == 0);
            ref_type hist_ref = get_history_ref(m_top);
            hist->update_from_ref_and_version(hist_ref, version);
            hist->verify();
        }
    }

    if (auto tr = dynamic_cast<const Transaction*>(this)) {
        // This is a transaction
        if (tr->get_transact_stage() == DB::TransactStage::transact_Reading) {
            // Verifying the memory cannot be done from a read transaction
            // There might be a write transaction running that has freed some
            // memory that is seen as being in use in this transaction
            return;
        }
    }
    // Slot 2 of the top array holds the logical file size.
    size_t logical_file_size = to_size_t(m_top.get_as_ref_or_tagged(2).get_as_int());
    size_t ref_begin = sizeof(SlabAlloc::Header);
    ref_type real_immutable_ref_end = logical_file_size;
    ref_type real_mutable_ref_end = m_alloc.get_total_size();
    ref_type real_baseline = m_alloc.get_baseline();
    // Fake that any empty area between the file and slab is part of the file (immutable):
    ref_type immutable_ref_end = m_alloc.align_size_to_section_boundary(real_immutable_ref_end);
    ref_type mutable_ref_end = m_alloc.align_size_to_section_boundary(real_mutable_ref_end);
    ref_type baseline = m_alloc.align_size_to_section_boundary(real_baseline);

    // Check the consistency of the allocation of used memory
    MemUsageVerifier mem_usage_1(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    m_top.report_memory_usage(mem_usage_1);
    mem_usage_1.canonicalize();

    // Check consistency of the allocation of the immutable memory that was
    // marked as free before the file was opened.
    MemUsageVerifier mem_usage_2(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    {
        // The top array only ever has one of these sizes, depending on which
        // optional slot groups (free lists, history, sync id) are present.
        REALM_ASSERT_EX(m_top.size() == 3 || m_top.size() == 5 || m_top.size() == 7 || m_top.size() >= 10,
                        m_top.size());
        Allocator& alloc = m_top.get_alloc();
        // Free-list arrays: chunk positions, chunk sizes and (optionally) the
        // version at which each chunk was released.
        Array pos(alloc), len(alloc), ver(alloc);
        pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
        len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
        ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
        // Attach each array only if its slot exists and holds a non-null ref.
        if (m_top.size() > s_free_pos_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
                pos.init_from_ref(ref);
        }
        if (m_top.size() > s_free_size_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
                len.init_from_ref(ref);
        }
        if (m_top.size() > s_free_version_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
                ver.init_from_ref(ref);
        }
        REALM_ASSERT(pos.is_attached() == len.is_attached());
        REALM_ASSERT(pos.is_attached() || !ver.is_attached()); // pos.is_attached() <== ver.is_attached()
        if (pos.is_attached()) {
            size_t n = pos.size();
            REALM_ASSERT_3(n, ==, len.size());
            if (ver.is_attached())
                REALM_ASSERT_3(n, ==, ver.size());
            for (size_t i = 0; i != n; ++i) {
                ref_type ref = to_ref(pos.get(i));
                size_t size_of_i = to_size_t(len.get(i));
                mem_usage_2.add_immutable(ref, size_of_i);
            }
            mem_usage_2.canonicalize();
            mem_usage_1.add(mem_usage_2);
            mem_usage_1.canonicalize();
            mem_usage_2.clear();
        }
    }

    // Check the consistency of the allocation of the immutable memory that has
    // been marked as free after the file was opened
    for (const auto& free_block : m_alloc.m_free_read_only) {
        mem_usage_2.add_immutable(free_block.first, free_block.second);
    }
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // Check the consistency of the allocation of the mutable memory that has
    // been marked as free
    m_alloc.for_all_free_entries([&](ref_type ref, size_t sz) {
        mem_usage_2.add_mutable(ref, sz);
    });
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // There may be a hole between the end of file and the beginning of the slab area.
    // We need to take that into account here.
    REALM_ASSERT_3(real_immutable_ref_end, <=, real_baseline);
    auto slab_start = immutable_ref_end;
    if (real_immutable_ref_end < slab_start) {
        ref_type ref = real_immutable_ref_end;
        size_t corrected_size = slab_start - real_immutable_ref_end;
        mem_usage_1.add_immutable(ref, corrected_size);
        mem_usage_1.canonicalize();
    }

    // At this point we have accounted for all memory managed by the slab
    // allocator
    mem_usage_1.check_total_coverage();
#endif
}
1673

1674
void Group::validate_primary_columns()
1675
{
480✔
1676
    auto table_keys = this->get_table_keys();
480✔
1677
    for (auto tk : table_keys) {
1,782✔
1678
        auto table = get_table(tk);
1,782✔
1679
        table->validate_primary_column();
1,782✔
1680
    }
1,782✔
1681
}
480✔
1682

1683
#ifdef REALM_DEBUG
1684

1685
// Debug helper: collect memory-usage statistics for the whole group tree.
MemStats Group::get_stats()
{
    MemStats stats;
    // Accumulate statistics over the entire tree reachable from the top array.
    m_top.stats(stats);
    return stats;
}
1692

1693
// Debug helper: dump the state of the slab allocator to stdout.
void Group::print() const
{
    m_alloc.print();
}
1697

1698

1699
void Group::print_free() const
1700
{
×
1701
    Allocator& alloc = m_top.get_alloc();
×
1702
    Array pos(alloc), len(alloc), ver(alloc);
×
1703
    pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
×
1704
    len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
×
1705
    ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
×
1706
    if (m_top.size() > s_free_pos_ndx) {
×
1707
        if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
×
1708
            pos.init_from_ref(ref);
×
1709
    }
×
1710
    if (m_top.size() > s_free_size_ndx) {
×
1711
        if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
×
1712
            len.init_from_ref(ref);
×
1713
    }
×
1714
    if (m_top.size() > s_free_version_ndx) {
×
1715
        if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
×
1716
            ver.init_from_ref(ref);
×
1717
    }
×
1718

1719
    if (!pos.is_attached()) {
×
1720
        std::cout << "none\n";
×
1721
        return;
×
1722
    }
×
1723
    bool has_versions = ver.is_attached();
×
1724

1725
    size_t n = pos.size();
×
1726
    for (size_t i = 0; i != n; ++i) {
×
1727
        size_t offset = to_size_t(pos.get(i));
×
1728
        size_t size_of_i = to_size_t(len.get(i));
×
1729
        std::cout << i << ": " << offset << " " << size_of_i;
×
1730

1731
        if (has_versions) {
×
1732
            size_t version = to_size_t(ver.get(i));
×
1733
            std::cout << " " << version;
×
1734
        }
×
1735
        std::cout << "\n";
×
1736
    }
×
1737
    std::cout << "\n";
×
1738
}
×
1739
#endif
1740

1741
// LCOV_EXCL_STOP ignore debug functions
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc