
realm / realm-core / jorgen.edelbo_402

21 Aug 2024 11:10AM UTC — coverage: 91.054% (-0.03%) from 91.085%

Pull Request #7803: Feature/string compression (CI: Evergreen)

jedelbo — Small fix to Table::typed_write

When writing the Realm to a new file from a write transaction, the Table may
be copied on write (COW) so that its top ref changes. So don't use the ref
that is present in the group when the operation starts.

103494 of 181580 branches covered (57.0%)

1929 of 1999 new or added lines in 46 files covered (96.5%)

695 existing lines in 51 files now uncovered.

220142 of 241772 relevant lines covered (91.05%)

7344461.76 hits per line

Source File: /src/realm/group.cpp — 89.82% line coverage
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <new>
#include <algorithm>
#include <fstream>

#ifdef REALM_DEBUG
#include <iostream>
#include <iomanip>
#endif

#include <realm/util/file_mapper.hpp>
#include <realm/util/memory_stream.hpp>
#include <realm/util/thread.hpp>
#include <realm/impl/destroy_guard.hpp>
#include <realm/utilities.hpp>
#include <realm/exceptions.hpp>
#include <realm/group_writer.hpp>
#include <realm/transaction.hpp>
#include <realm/replication.hpp>

using namespace realm;
using namespace realm::util;

namespace {

class Initialization {
public:
    Initialization()
    {
        realm::cpuid_init();
    }
};

Initialization initialization;

} // anonymous namespace

Group::Group()
    : m_local_alloc(new SlabAlloc)
    , m_alloc(*m_local_alloc) // Throws
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
    m_alloc.attach_empty(); // Throws
    m_file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    ref_type top_ref = 0; // Instantiate a new empty group
    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
}


Group::Group(const std::string& file_path, const char* encryption_key)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();

    SlabAlloc::Config cfg;
    cfg.read_only = true;
    cfg.no_create = true;
    cfg.encryption_key = encryption_key;
    ref_type top_ref = m_alloc.attach_file(file_path, cfg); // Throws
    // Non-Transaction Groups always allow writing and simply don't allow
    // committing when opened in read-only mode
    m_alloc.set_read_only(false);

    open(top_ref, file_path);
}


Group::Group(BinaryData buffer, bool take_ownership)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    REALM_ASSERT(buffer.data());

    init_array_parents();
    ref_type top_ref = m_alloc.attach_buffer(buffer.data(), buffer.size()); // Throws

    open(top_ref, {});

    if (take_ownership)
        m_alloc.own_buffer();
}

Group::Group(SlabAlloc* alloc) noexcept
    : m_alloc(*alloc) // Throws
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
}
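// Editor's usage sketch (not part of group.cpp): a free-standing Group
// attached read-only to a file via Group(path, encryption_key) above,
// enumerating its tables. The file path is hypothetical.
//
// #include <iostream>
// #include <string>
// #include <realm/group.hpp>
//
// void list_tables(const std::string& path)
// {
//     realm::Group g(path, /* encryption_key */ nullptr);
//     for (auto key : g.get_table_keys())
//         std::cout << g.get_table_name(key) << "\n";
// }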

namespace {

class TableRecycler : public std::vector<Table*> {
public:
    ~TableRecycler()
    {
        REALM_UNREACHABLE();
        // if ever enabled, remember to release Tables:
        // for (auto t : *this) {
        //    delete t;
        // }
    }
};

// We use the classic approach to construct a FIFO from two LIFOs:
// insertion is done into recycler_1, removal is done from recycler_2,
// and when recycler_2 is empty, recycler_1 is reversed into recycler_2.
// This is O(1) amortized for each entry.
auto& g_table_recycler_1 = *new TableRecycler;
auto& g_table_recycler_2 = *new TableRecycler;
// Number of tables held back before being recycled. We hold back recycling
// the latest to increase the probability of detecting race conditions
// without crashing.
const static int g_table_recycling_delay = 100;
auto& g_table_recycler_mutex = *new std::mutex;

} // namespace
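// Editor's sketch of the two-stack FIFO described above (hypothetical names,
// not realm's): push onto an inbox stack, pop from an outbox stack, and
// reverse the inbox into the outbox only when the outbox runs dry. Each
// element is moved at most twice, so the amortized cost per entry is O(1).
//
// #include <optional>
// #include <utility>
// #include <vector>
//
// template <typename T>
// class TwoStackFifo {
// public:
//     void push(T v) { m_in.push_back(std::move(v)); }
//     std::optional<T> pop()
//     {
//         if (m_out.empty()) {
//             while (!m_in.empty()) {          // reverse inbox into outbox
//                 m_out.push_back(std::move(m_in.back()));
//                 m_in.pop_back();
//             }
//         }
//         if (m_out.empty())
//             return std::nullopt;             // FIFO is empty
//         T v = std::move(m_out.back());
//         m_out.pop_back();
//         return v;
//     }
// private:
//     std::vector<T> m_in;  // insertion side (recycler_1's role)
//     std::vector<T> m_out; // removal side (recycler_2's role)
// };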

TableKeyIterator& TableKeyIterator::operator++()
{
    m_pos++;
    m_index_in_group++;
    load_key();
    return *this;
}

TableKey TableKeyIterator::operator*()
{
    if (!bool(m_table_key)) {
        load_key();
    }
    return m_table_key;
}

void TableKeyIterator::load_key()
{
    const Group& g = *m_group;
    size_t max_index_in_group = g.m_table_names.size();
    while (m_index_in_group < max_index_in_group) {
        RefOrTagged rot = g.m_tables.get_as_ref_or_tagged(m_index_in_group);
        if (rot.is_ref()) {
            Table* t;
            if (m_index_in_group < g.m_table_accessors.size() &&
                (t = load_atomic(g.m_table_accessors[m_index_in_group], std::memory_order_acquire))) {
                m_table_key = t->get_key();
            }
            else {
                m_table_key = Table::get_key_direct(g.m_tables.get_alloc(), rot.get_as_ref());
            }
            return;
        }
        m_index_in_group++;
    }
    m_table_key = TableKey();
}

TableKey TableKeys::operator[](size_t p) const
{
    if (p < m_iter.m_pos) {
        m_iter = TableKeyIterator(m_iter.m_group, 0);
    }
    while (m_iter.m_pos < p) {
        ++m_iter;
    }
    return *m_iter;
}

size_t Group::size() const noexcept
{
    return m_num_tables;
}


void Group::set_size() const noexcept
{
    int retval = 0;
    if (is_attached() && m_table_names.is_attached()) {
        size_t max_index = m_tables.size();
        REALM_ASSERT_EX(max_index < (1 << 16), max_index);
        for (size_t j = 0; j < max_index; ++j) {
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(j);
            if (rot.is_ref() && rot.get_as_ref()) {
                ++retval;
            }
        }
    }
    m_num_tables = retval;
}

std::map<TableRef, ColKey> Group::get_primary_key_columns_from_pk_table(TableRef pk_table)
{
    std::map<TableRef, ColKey> ret;
    REALM_ASSERT(pk_table);
    ColKey col_table = pk_table->get_column_key("pk_table");
    ColKey col_prop = pk_table->get_column_key("pk_property");
    for (auto pk_obj : *pk_table) {
        auto object_type = pk_obj.get<String>(col_table);
        auto name = std::string(g_class_name_prefix) + std::string(object_type);
        auto table = get_table(name);
        auto pk_col_name = pk_obj.get<String>(col_prop);
        auto pk_col = table->get_column_key(pk_col_name);
        ret.emplace(table, pk_col);
    }

    return ret;
}

TableKey Group::ndx2key(size_t ndx) const
{
    REALM_ASSERT(is_attached());
    Table* accessor = load_atomic(m_table_accessors[ndx], std::memory_order_acquire);
    if (accessor)
        return accessor->get_key(); // fast path

    // slow path:
    RefOrTagged rot = m_tables.get_as_ref_or_tagged(ndx);
    if (rot.is_tagged())
        throw NoSuchTable();
    ref_type ref = rot.get_as_ref();
    REALM_ASSERT(ref);
    return Table::get_key_direct(m_tables.get_alloc(), ref);
}

size_t Group::key2ndx_checked(TableKey key) const
{
    size_t idx = key2ndx(key);
    // early out
    // note: don't lock when accessing m_table_accessors, because if we miss a concurrently introduced table
    // accessor, we'll just fall through to the slow path. Table accessors can be introduced concurrently,
    // but never removed. The following is only safe because 'm_table_accessors' will not be relocated
    // concurrently. (We aim to be safe in face of concurrent access to a frozen transaction, where tables
    // cannot be added or removed. All other races are undefined behaviour.)
    if (idx < m_table_accessors.size()) {
        Table* tbl = load_atomic(m_table_accessors[idx], std::memory_order_acquire);
        if (tbl && tbl->get_key() == key)
            return idx;
    }
    // The notion of a const group as it is now is not really
    // useful. It is linked to a distinction between a read
    // and a write transaction. This distinction is no longer
    // a compile time aspect (it's not const anymore).
    Allocator* alloc = const_cast<SlabAlloc*>(&m_alloc);
    if (m_tables.is_attached() && idx < m_tables.size()) {
        RefOrTagged rot = m_tables.get_as_ref_or_tagged(idx);
        if (rot.is_ref() && rot.get_as_ref() && (Table::get_key_direct(*alloc, rot.get_as_ref()) == key)) {
            return idx;
        }
    }
    throw NoSuchTable();
}

int Group::get_file_format_version() const noexcept
{
    return m_file_format_version;
}


void Group::set_file_format_version(int file_format) noexcept
{
    m_file_format_version = file_format;
}


int Group::get_committed_file_format_version() const noexcept
{
    return m_alloc.get_committed_file_format_version();
}

std::optional<int> Group::fake_target_file_format;

void _impl::GroupFriend::fake_target_file_format(const std::optional<int> format) noexcept
{
    Group::fake_target_file_format = format;
}

int Group::get_target_file_format_version_for_session(int current_file_format_version,
                                                      int requested_history_type) noexcept
{
    if (Group::fake_target_file_format) {
        return *Group::fake_target_file_format;
    }
    // Note: This function is responsible for choosing the target file format
    // for a session. If it selects a file format that is different from
    // `current_file_format_version`, it will trigger a file format upgrade
    // process.

    // Note: `current_file_format_version` may be zero at this time, which means
    // that the file format is not yet decided (only possible for empty
    // Realms where top-ref is zero).

    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.

    if (requested_history_type == Replication::hist_None) {
        if (current_file_format_version == 24) {
            // We are able to open these file formats in RO mode
            return current_file_format_version;
        }
    }

    return g_current_file_format_version;
}

void Group::get_version_and_history_info(const Array& top, _impl::History::version_type& version, int& history_type,
                                         int& history_schema_version) noexcept
{
    using version_type = _impl::History::version_type;
    version_type version_2 = 0;
    int history_type_2 = 0;
    int history_schema_version_2 = 0;
    if (top.is_attached()) {
        if (top.size() > s_version_ndx) {
            version_2 = version_type(top.get_as_ref_or_tagged(s_version_ndx).get_as_int());
        }
        if (top.size() > s_hist_type_ndx) {
            history_type_2 = int(top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        }
        if (top.size() > s_hist_version_ndx) {
            history_schema_version_2 = int(top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        }
    }
    // Version 0 is not a legal initial version, so it has to be set to 1
    // instead.
    if (version_2 == 0)
        version_2 = 1;
    version = version_2;
    history_type = history_type_2;
    history_schema_version = history_schema_version_2;
}
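// Editor's note: the pattern above tolerates older files whose top array is
// shorter. Fields appended by newer formats are read only if top.size() is
// large enough, and absent fields fall back to defaults. A toy sketch of the
// same idea (the tagged-int decode and helper below are assumptions for
// illustration, not realm's API):
//
// #include <cstdint>
// #include <vector>
//
// uint64_t read_optional_slot(const std::vector<uint64_t>& top, size_t ndx, uint64_t dflt)
// {
//     if (top.size() > ndx)
//         return top[ndx] >> 1; // tagged ints carry their value shifted up one bit
//     return dflt;              // slot predates this file: use the default
// }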

int Group::get_history_schema_version() noexcept
{
    bool history_schema_version = (m_top.is_attached() && m_top.size() > s_hist_version_ndx);
    if (history_schema_version) {
        return int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
    }
    return 0;
}

uint64_t Group::get_sync_file_id() const noexcept
{
    if (m_top.is_attached() && m_top.size() > s_sync_file_id_ndx) {
        return uint64_t(m_top.get_as_ref_or_tagged(s_sync_file_id_ndx).get_as_int());
    }
    auto repl = get_replication();
    if (repl && repl->get_history_type() == Replication::hist_SyncServer) {
        return 1;
    }
    return 0;
}

size_t Group::get_free_space_size(const Array& top) noexcept
{
    if (top.is_attached() && top.size() > s_free_size_ndx) {
        auto ref = top.get_as_ref(s_free_size_ndx);
        Array free_list_sizes(top.get_alloc());
        free_list_sizes.init_from_ref(ref);
        return size_t(free_list_sizes.get_sum());
    }
    return 0;
}

size_t Group::get_history_size(const Array& top) noexcept
{
    if (top.is_attached() && top.size() > s_hist_ref_ndx) {
        auto ref = top.get_as_ref(s_hist_ref_ndx);
        Array hist(top.get_alloc());
        hist.init_from_ref(ref);
        return hist.get_byte_size_deep();
    }
    return 0;
}

int Group::read_only_version_check(SlabAlloc& alloc, ref_type top_ref, const std::string& path)
{
    // Select file format if it is still undecided.
    auto file_format_version = alloc.get_committed_file_format_version();

    bool file_format_ok = false;
    // It is not possible to open prior file format versions without an upgrade.
    // Since a Realm file cannot be upgraded when opened in this mode
    // (we may be unable to write to the file), no earlier versions can be opened.
    // Please see Group::get_file_format_version() for information about the
    // individual file format versions.
    switch (file_format_version) {
        case 0:
            file_format_ok = (top_ref == 0);
            break;
        case g_current_file_format_version:
            file_format_ok = true;
            break;
    }
    if (REALM_UNLIKELY(!file_format_ok))
        throw FileAccessError(ErrorCodes::FileFormatUpgradeRequired,
                              util::format("Realm file at path '%1' cannot be opened in read-only mode because it "
                                           "has a file format version (%2) which requires an upgrade",
                                           path, file_format_version),
                              path);
    return file_format_version;
}

void Group::open(ref_type top_ref, const std::string& file_path)
{
    SlabAlloc::DetachGuard dg(m_alloc);
    m_file_format_version = read_only_version_check(m_alloc, top_ref, file_path);

    Replication::HistoryType history_type = Replication::hist_None;
    int target_file_format_version = get_target_file_format_version_for_session(m_file_format_version, history_type);
    if (m_file_format_version == 0) {
        set_file_format_version(target_file_format_version);
    }
    else {
        // From a technical point of view, we could upgrade the Realm file
        // format in memory here, but since upgrading can be expensive, it is
        // currently disallowed.
        REALM_ASSERT(target_file_format_version == m_file_format_version);
    }

    // Make all dynamically allocated memory (space beyond the attached file)
    // available as free space.
    reset_free_space_tracking(); // Throws

    bool create_group_when_missing = true;
    bool writable = create_group_when_missing;
    attach(top_ref, writable, create_group_when_missing); // Throws
    dg.release();                                         // Do not detach after all
}

Group::~Group() noexcept
{
    // If this group accessor is detached at this point in time, it is either
    // because it is DB::m_group (m_is_shared), or it is a free-standing
    // group accessor that was never successfully opened.
    if (!m_top.is_attached())
        return;

    // Free-standing group accessor
    detach();

    // If a local allocator is set in m_local_alloc, then the destruction
    // of m_local_alloc will trigger destruction of the allocator, which will
    // verify that the allocator has been detached, so....
    if (m_local_alloc)
        m_local_alloc->detach();
}

void Group::remap_and_update_refs(ref_type new_top_ref, size_t new_file_size, bool writable)
{
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // force update of all ref->ptr translations if the mapping has changed
    auto mapping_version = m_alloc.get_mapping_version();
    if (mapping_version != m_last_seen_mapping_version) {
        m_last_seen_mapping_version = mapping_version;
    }
    update_refs(new_top_ref);
}

void Group::update_table_accessors()
{
    for (unsigned j = 0; j < m_table_accessors.size(); ++j) {
        Table* table = m_table_accessors[j];
        // this should be filtered further as an optimization
        if (table) {
            table->refresh_allocator_wrapper();
            table->update_from_parent();
        }
    }
}

void Group::validate_top_array(const Array& arr, const SlabAlloc& alloc, std::optional<size_t> read_lock_file_size,
                               std::optional<uint_fast64_t> read_lock_version)
{
    size_t top_size = arr.size();
    ref_type top_ref = arr.get_ref();

    switch (top_size) {
        // These are the valid sizes
        case 3:
        case 5:
        case 7:
        case 9:
        case 10:
        case 11:
        case 12: {
            ref_type table_names_ref = arr.get_as_ref_or_tagged(s_table_name_ndx).get_as_ref();
            ref_type tables_ref = arr.get_as_ref_or_tagged(s_table_refs_ndx).get_as_ref();
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();

            // Logical file size must never exceed actual file size.
            auto file_size = alloc.get_baseline();
            if (logical_file_size > file_size) {
                std::string err = util::format("Invalid logical file size: %1, actual file size: %2, read lock file "
                                               "size: %3, read lock version: %4",
                                               logical_file_size, file_size, read_lock_file_size, read_lock_version);
                throw InvalidDatabase(err, "");
            }
            // First two entries must be valid refs pointing inside the file
            auto invalid_ref = [logical_file_size](ref_type ref) {
                return ref == 0 || (ref & 7) || ref > logical_file_size;
            };
            if (invalid_ref(table_names_ref) || invalid_ref(tables_ref)) {
                std::string err = util::format(
                    "Invalid top array (top_ref, [0], [1]): %1, %2, %3, read lock size: %4, read lock version: %5",
                    top_ref, table_names_ref, tables_ref, read_lock_file_size, read_lock_version);
                throw InvalidDatabase(err, "");
            }
            break;
        }
        default: {
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
            std::string err =
                util::format("Invalid top array size (ref: %1, array size: %2) file size: %3, read "
                             "lock size: %4, read lock version: %5",
                             top_ref, top_size, logical_file_size, read_lock_file_size, read_lock_version);
            throw InvalidDatabase(err, "");
            break;
        }
    }
}
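// Editor's note: the invalid_ref lambda above encodes the invariant behind
// RefOrTagged slots: refs are 8-byte-aligned file offsets (nonzero low three
// bits means corruption), while tagged integers share the same slots with the
// value shifted up one bit and the low bit set (get_used_space() further down
// decodes slot 2 with ">> 1"). A hedged sketch of that encoding, as an
// illustration rather than realm's actual implementation:
//
// #include <cassert>
// #include <cstdint>
//
// struct Slot {
//     uint64_t raw;
//
//     static Slot from_ref(uint64_t ref)
//     {
//         assert(ref != 0 && (ref & 7) == 0); // same check invalid_ref() makes
//         return Slot{ref};
//     }
//     static Slot from_tagged(uint64_t value)
//     {
//         return Slot{(value << 1) | 1};      // low bit set => tagged integer
//     }
//     bool is_ref() const { return (raw & 1) == 0; }
//     uint64_t as_int() const
//     {
//         assert(!is_ref());
//         return raw >> 1;
//     }
// };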

void Group::attach(ref_type top_ref, bool writable, bool create_group_when_missing, size_t file_size,
                   uint_fast64_t version)
{
    REALM_ASSERT(!m_top.is_attached());
    if (create_group_when_missing)
        REALM_ASSERT(writable);

    // If this function throws, it must leave the group accessor in the
    // unattached state.

    m_tables.detach();
    m_table_names.detach();
    m_is_writable = writable;

    if (top_ref != 0) {
        m_top.init_from_ref(top_ref);
        validate_top_array(m_top, m_alloc, file_size, version);
        m_table_names.init_from_parent();
        m_tables.init_from_parent();
    }
    else if (create_group_when_missing) {
        create_empty_group(); // Throws
    }
    m_attached = true;
    set_size();

    size_t sz = m_tables.is_attached() ? m_tables.size() : 0;
    while (m_table_accessors.size() > sz) {
        if (Table* t = m_table_accessors.back()) {
            t->detach(Table::cookie_void);
            recycle_table_accessor(t);
        }
        m_table_accessors.pop_back();
    }
    while (m_table_accessors.size() < sz) {
        m_table_accessors.emplace_back();
    }
}

void Group::detach() noexcept
{
    detach_table_accessors();
    m_table_accessors.clear();

    m_table_names.detach();
    m_tables.detach();
    m_top.detach();

    m_attached = false;
}

void Group::attach_shared(ref_type new_top_ref, size_t new_file_size, bool writable, VersionID version)
{
    REALM_ASSERT_3(new_top_ref, <, new_file_size);
    REALM_ASSERT(!is_attached());

    // update readers view of memory
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // When `new_top_ref` is null, ask attach() to create a new node structure
    // for an empty group, but only during the initiation of write
    // transactions. When the transaction being initiated is a read transaction,
    // we instead have to leave array accessors m_top, m_tables, and
    // m_table_names in their detached state, as there are no underlying array
    // nodes to attach them to. In the case of write transactions, the nodes
    // have to be created, as they have to be ready for being modified.
    bool create_group_when_missing = writable;
    attach(new_top_ref, writable, create_group_when_missing, new_file_size, version.version); // Throws
}


void Group::detach_table_accessors() noexcept
{
    for (auto& table_accessor : m_table_accessors) {
        if (Table* t = table_accessor) {
            t->detach(Table::cookie_transaction_ended);
            recycle_table_accessor(t);
            table_accessor = nullptr;
        }
    }
}

void Group::create_empty_group()
{
    m_top.create(Array::type_HasRefs); // Throws
    _impl::DeepArrayDestroyGuard dg_top(&m_top);
    {
        m_table_names.create(); // Throws
        _impl::DestroyGuard<ArrayStringShort> dg(&m_table_names);
        m_top.add(m_table_names.get_ref()); // Throws
        dg.release();
    }
    {
        m_tables.create(Array::type_HasRefs); // Throws
        _impl::DestroyGuard<Array> dg(&m_tables);
        m_top.add(m_tables.get_ref()); // Throws
        dg.release();
    }
    size_t initial_logical_file_size = sizeof(SlabAlloc::Header);
    m_top.add(RefOrTagged::make_tagged(initial_logical_file_size)); // Throws
    dg_top.release();
}
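// Editor's note: each child array above is wrapped in a destroy guard so
// that, if a later allocation throws, already-created nodes are freed rather
// than leaked; the guard is disarmed with release() once ownership has passed
// to m_top. A minimal sketch of such a guard (hypothetical; realm's
// _impl::DestroyGuard is the real one):
//
// template <typename Node>
// class ScopedDestroyGuard {
// public:
//     explicit ScopedDestroyGuard(Node* n) : m_node(n) {}
//     ~ScopedDestroyGuard()
//     {
//         if (m_node)
//             m_node->destroy(); // only runs if release() was never called
//     }
//     void release() { m_node = nullptr; } // ownership handed off successfully
// private:
//     Node* m_node;
// };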

Table* Group::do_get_table(size_t table_ndx)
{
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());
    // Get table accessor from cache if it exists, else create
    Table* table = load_atomic(m_table_accessors[table_ndx], std::memory_order_acquire);
    if (!table) {
        // double-checked locking idiom
        std::lock_guard<std::mutex> lock(m_accessor_mutex);
        table = m_table_accessors[table_ndx];
        if (!table)
            table = create_table_accessor(table_ndx); // Throws
    }
    return table;
}
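// Editor's sketch of the double-checked locking idiom used above, with
// simplified types (std::atomic directly, rather than realm's
// load_atomic/store_atomic helpers, which are assumed to provide equivalent
// acquire/release semantics):
//
// #include <atomic>
// #include <mutex>
//
// struct TableStub {}; // stand-in for realm::Table
//
// TableStub* get_or_create(std::atomic<TableStub*>& slot, std::mutex& m)
// {
//     TableStub* t = slot.load(std::memory_order_acquire); // fast path, no lock
//     if (!t) {
//         std::lock_guard<std::mutex> lock(m);
//         t = slot.load(std::memory_order_relaxed); // re-check under the lock
//         if (!t) {
//             t = new TableStub();
//             slot.store(t, std::memory_order_release); // publish initialized object
//         }
//     }
//     return t;
// }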

Table* Group::do_get_table(StringData name)
{
    if (!m_table_names.is_attached())
        return 0;
    size_t table_ndx = m_table_names.find_first(name);
    if (table_ndx == not_found)
        return 0;

    Table* table = do_get_table(table_ndx); // Throws
    return table;
}

TableRef Group::add_table_with_primary_key(StringData name, DataType pk_type, StringData pk_name, bool nullable,
                                           Table::Type table_type)
{
    check_attached();
    check_table_name_uniqueness(name);

    auto table = do_add_table(name, table_type, false);

    // Add pk column - without replication
    ColumnAttrMask attr;
    if (nullable)
        attr.set(col_attr_Nullable);
    ColKey pk_col = table->generate_col_key(ColumnType(pk_type), attr);
    table->do_insert_root_column(pk_col, ColumnType(pk_type), pk_name);
    table->do_set_primary_key_column(pk_col);

    if (Replication* repl = *get_repl())
        repl->add_class_with_primary_key(table->get_key(), name, pk_type, pk_name, nullable, table_type);

    return TableRef(table, table->m_alloc.get_instance_version());
}
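// Editor's caller-side sketch against the signature above. The table and
// column names are made up, and Table::Type::TopLevel is assumed to be the
// ordinary table type:
//
// TableRef people = group.add_table_with_primary_key(
//     "class_Person",        // table name
//     type_String,           // primary key type
//     "_id",                 // primary key column name
//     /* nullable */ false,
//     Table::Type::TopLevel);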

Table* Group::do_add_table(StringData name, Table::Type table_type, bool do_repl)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");

    // get new key and index
    // find first empty spot:
    uint32_t j;
    RefOrTagged rot = RefOrTagged::make_tagged(0);
    for (j = 0; j < m_tables.size(); ++j) {
        rot = m_tables.get_as_ref_or_tagged(j);
        if (!rot.is_ref())
            break;
    }
    bool gen_null_tag = (j == m_tables.size()); // new tags start at zero
    uint32_t tag = gen_null_tag ? 0 : uint32_t(rot.get_as_int());
    TableKey key = TableKey((tag << 16) | j);

    if (REALM_UNLIKELY(name.size() > max_table_name_length))
        throw InvalidArgument(ErrorCodes::InvalidName, util::format("Name too long: %1", name));

    using namespace _impl;
    size_t table_ndx = key2ndx(key);
    ref_type ref = Table::create_empty_table(m_alloc, key); // Throws
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());

    rot = RefOrTagged::make_ref(ref);
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());

    if (table_ndx == m_tables.size()) {
        m_tables.add(rot);
        m_table_names.add(name);
        // Need new slot for table accessor
        m_table_accessors.push_back(nullptr);
    }
    else {
        m_tables.set(table_ndx, rot);       // Throws
        m_table_names.set(table_ndx, name); // Throws
    }

    Replication* repl = *get_repl();
    if (do_repl && repl)
        repl->add_class(key, name, table_type);

    ++m_num_tables;

    Table* table = create_table_accessor(j);
    table->do_set_table_type(table_type);

    return table;
}
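// Editor's illustration of the key scheme above, inferred from this file
// rather than realm's definitive layout: the low 16 bits of a TableKey are
// the slot index in m_tables, the high bits a generation tag. When a table
// is removed, the freed slot stores the next tag (see remove_table below),
// so a recycled slot cannot reproduce the removed table's key.
//
// #include <cstdint>
//
// uint32_t make_table_key(uint32_t tag, uint32_t index)
// {
//     return (tag << 16) | index; // matches TableKey((tag << 16) | j) above
// }
//
// uint32_t key_to_index(uint32_t key)
// {
//     return key & 0xFFFF; // presumed key2ndx(): strip the tag
// }
//
// uint32_t next_tag_after_removal(uint32_t key)
// {
//     return (1 + (key >> 16)) & 0x7FFF; // as stored by remove_table()
// }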

Table* Group::create_table_accessor(size_t table_ndx)
{
    REALM_ASSERT(m_tables.size() == m_table_accessors.size());
    REALM_ASSERT(table_ndx < m_table_accessors.size());

    RefOrTagged rot = m_tables.get_as_ref_or_tagged(table_ndx);
    ref_type ref = rot.get_as_ref();
    if (ref == 0) {
        throw NoSuchTable();
    }
    Table* table = 0;
    {
        std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
        if (g_table_recycler_2.empty()) {
            while (!g_table_recycler_1.empty()) {
                auto t = g_table_recycler_1.back();
                g_table_recycler_1.pop_back();
                g_table_recycler_2.push_back(t);
            }
        }
        if (g_table_recycler_2.size() + g_table_recycler_1.size() > g_table_recycling_delay) {
            table = g_table_recycler_2.back();
            table->fully_detach();
            g_table_recycler_2.pop_back();
        }
    }
    if (table) {
        table->revive(get_repl(), m_alloc, m_is_writable);
        table->init(ref, this, table_ndx, m_is_writable, is_frozen());
    }
    else {
        std::unique_ptr<Table> new_table(new Table(get_repl(), m_alloc));  // Throws
        new_table->init(ref, this, table_ndx, m_is_writable, is_frozen()); // Throws
        table = new_table.release();
    }
    table->refresh_index_accessors();
    // must be atomic to allow concurrent probing of the m_table_accessors vector.
    store_atomic(m_table_accessors[table_ndx], table, std::memory_order_release);
    return table;
}

void Group::recycle_table_accessor(Table* to_be_recycled)
{
    std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
    g_table_recycler_1.push_back(to_be_recycled);
}

void Group::remove_table(StringData name)
{
    check_attached();
    size_t table_ndx = m_table_names.find_first(name);
    if (table_ndx == not_found)
        throw NoSuchTable();
    auto key = ndx2key(table_ndx);
    remove_table(table_ndx, key); // Throws
}


void Group::remove_table(TableKey key)
{
    check_attached();

    size_t table_ndx = key2ndx_checked(key);
    remove_table(table_ndx, key);
}


void Group::remove_table(size_t table_ndx, TableKey key)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    REALM_ASSERT(table_ndx < m_tables.size());
    TableRef table = get_table(key);

    // In principle we could remove a table even if it is the target of link
    // columns of other tables, however, to do that, we would have to
    // automatically remove the "offending" link columns from those other
    // tables. Such a behaviour is deemed too obscure, and we shall therefore
    // require that a removed table does not contain foreign origin backlink
    // columns.
    if (table->is_cross_table_link_target())
        throw CrossTableLinkTarget(table->get_name());

    {
        // We don't want to replicate the individual column removals along the
        // way as they're covered by the table removal
        Table::DisableReplication dr(*table);
        table->remove_columns();
    }

    size_t prior_num_tables = m_tables.size();
    Replication* repl = *get_repl();
    if (repl)
        repl->erase_class(key, table->get_name(), prior_num_tables); // Throws

    int64_t ref_64 = m_tables.get(table_ndx);
    REALM_ASSERT(!int_cast_has_overflow<ref_type>(ref_64));
    ref_type ref = ref_type(ref_64);

    // Replace entry in m_tables with next tag to use:
    RefOrTagged rot = RefOrTagged::make_tagged((1 + (key.value >> 16)) & 0x7FFF);
    // Remove table
    m_tables.set(table_ndx, rot);     // Throws
    m_table_names.set(table_ndx, {}); // Throws
    m_table_accessors[table_ndx] = nullptr;
    --m_num_tables;

    table->detach(Table::cookie_removed);
    // Destroy underlying node structure
    Array::destroy_deep(ref, m_alloc);
    recycle_table_accessor(table.unchecked_ptr());
}

void Group::rename_table(StringData name, StringData new_name, bool require_unique_name)
{
    check_attached();
    size_t table_ndx = m_table_names.find_first(name);
    if (table_ndx == not_found)
        throw NoSuchTable();
    rename_table(ndx2key(table_ndx), new_name, require_unique_name); // Throws
}


void Group::rename_table(TableKey key, StringData new_name, bool require_unique_name)
{
    check_attached();
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    if (require_unique_name && has_table(new_name))
        throw TableNameInUse();
    size_t table_ndx = key2ndx_checked(key);
    m_table_names.set(table_ndx, new_name);
    if (Replication* repl = *get_repl())
        repl->rename_class(key, new_name); // Throws
}

Obj Group::get_object(ObjLink link)
{
    auto target_table = get_table(link.get_table_key());
    ObjKey key = link.get_obj_key();
    ClusterTree* ct = key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
    return ct->get(key);
}

Obj Group::try_get_object(ObjLink link) noexcept
{
    auto target_table = get_table(link.get_table_key());
    ObjKey key = link.get_obj_key();
    ClusterTree* ct = key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
    return ct->try_get_obj(key);
}

void Group::validate(ObjLink link) const
{
    if (auto tk = link.get_table_key()) {
        auto target_key = link.get_obj_key();
        auto target_table = get_table(tk);
        const ClusterTree* ct =
            target_key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
        if (!ct->is_valid(target_key)) {
            throw InvalidArgument(ErrorCodes::KeyNotFound, "Target object not found");
        }
        if (target_table->is_embedded()) {
            throw IllegalOperation("Cannot link to embedded object");
        }
        if (target_table->is_asymmetric()) {
            throw IllegalOperation("Cannot link to ephemeral object");
        }
    }
}

ref_type Group::typed_write_tables(_impl::ArrayWriterBase& out) const
{
    ref_type ref = m_top.get_as_ref(1);
    if (out.only_modified && m_alloc.is_read_only(ref))
        return ref;
    auto num_tables = m_tables.size();
    TempArray dest(num_tables);
    for (unsigned j = 0; j < num_tables; ++j) {
        RefOrTagged rot = m_tables.get_as_ref_or_tagged(j);
        if (rot.is_tagged()) {
            dest.set(j, rot);
        }
        else {
            auto table = do_get_table(j);
            REALM_ASSERT_DEBUG(table);
            dest.set_as_ref(j, table->typed_write(out));
        }
    }
    return dest.write(out);
}
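// Editor's note: this is where the PR's fix matters. Each table writes
// itself via typed_write(out), and the destination slot takes whatever ref
// that call returns, rather than reusing the ref recorded in the group when
// the operation started: per the PR description above, a write transaction
// may have COW'd the table and moved its top node. A toy model of the
// "only modified" short-circuit (an assumption-labeled sketch, not realm's
// allocator):
//
// #include <cstdint>
// #include <vector>
//
// struct ToyWriter {
//     uint64_t baseline;      // end of the persisted, read-only region
//     uint64_t next_out = 24; // next output position (past a toy header)
//     bool only_modified = true;
//
//     uint64_t write(uint64_t ref, const std::vector<char>& payload)
//     {
//         if (only_modified && ref < baseline)
//             return ref;     // unchanged subtree: keep the on-disk ref
//         uint64_t new_ref = next_out;
//         next_out += (payload.size() + 7) & ~uint64_t(7); // stay 8-byte aligned
//         return new_ref;     // modified subtree: freshly written, new ref
//     }
// };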

ref_type Group::DefaultTableWriter::write_names(_impl::OutputStream& out)
{
    bool deep = true;                                                           // Deep
    bool only_if_modified = false;                                              // Always
    bool compress = false;                                                      // true;
    return m_group->m_table_names.write(out, deep, only_if_modified, compress); // Throws
}

ref_type Group::DefaultTableWriter::write_tables(_impl::OutputStream& out)
{
    return m_group->typed_write_tables(out);
}

auto Group::DefaultTableWriter::write_history(_impl::OutputStream& out) -> HistoryInfo
{
    bool deep = true;              // Deep
    bool only_if_modified = false; // Always
    bool compress = false;
    ref_type history_ref = _impl::GroupFriend::get_history_ref(*m_group);
    HistoryInfo info;
    if (history_ref) {
        _impl::History::version_type version;
        int history_type, history_schema_version;
        _impl::GroupFriend::get_version_and_history_info(_impl::GroupFriend::get_alloc(*m_group),
                                                         m_group->m_top.get_ref(), version, history_type,
                                                         history_schema_version);
        REALM_ASSERT(history_type != Replication::hist_None);
        if (!m_should_write_history || history_type == Replication::hist_None) {
            return info; // Only sync history should be preserved when writing to a new file
        }
        info.type = history_type;
        info.version = history_schema_version;
        Array history{const_cast<Allocator&>(_impl::GroupFriend::get_alloc(*m_group))};
        history.init_from_ref(history_ref);
        info.ref = history.write(out, deep, only_if_modified, compress); // Throws
    }
    info.sync_file_id = m_group->get_sync_file_id();
    return info;
}

void Group::write(std::ostream& out, bool pad) const
{
    DefaultTableWriter table_writer;
    write(out, pad, 0, table_writer);
}

void Group::write(std::ostream& out, bool pad_for_encryption, uint_fast64_t version_number, TableWriter& writer) const
{
    REALM_ASSERT(is_attached());
    writer.set_group(this);
    bool no_top_array = !m_top.is_attached();
    write(out, m_file_format_version, writer, no_top_array, pad_for_encryption, version_number); // Throws
}

void Group::write(File& file, const char* encryption_key, uint_fast64_t version_number, TableWriter& writer) const
{
    REALM_ASSERT(file.get_size() == 0);

    file.set_encryption_key(encryption_key);

    // The aim is that the buffer size should be at least 1/256 of the needed size but less than 64 MB
    constexpr size_t upper_bound = 64 * 1024 * 1024;
    size_t min_space = std::min(get_used_space() >> 8, upper_bound);
    size_t buffer_size = page_size();
    while (buffer_size < min_space) {
        buffer_size <<= 1;
    }
    File::Streambuf streambuf(&file, buffer_size);

    std::ostream out(&streambuf);
    out.exceptions(std::ios_base::failbit | std::ios_base::badbit);
    write(out, encryption_key != 0, version_number, writer);
    int sync_status = streambuf.pubsync();
    REALM_ASSERT(sync_status == 0);
}
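// Editor's worked example of the buffer sizing above, assuming
// page_size() == 4096: a Realm using 1 GiB gives min_space = 1 GiB / 256
// = 4 MiB, under the 64 MB cap, so the buffer doubles from 4 KiB up to
// 4 MiB (10 doublings). A 32 GiB Realm would ask for 128 MiB but is
// clamped to 64 MiB before the doubling loop runs.
//
// #include <algorithm>
// #include <cstddef>
// #include <cstdio>
//
// int main()
// {
//     std::size_t used = std::size_t(1) << 30;                  // 1 GiB used
//     std::size_t min_space = std::min(used >> 8,               // 1/256 of needed size
//                                      std::size_t(64) << 20);  // 64 MB upper bound
//     std::size_t buffer_size = 4096;                           // assumed page size
//     while (buffer_size < min_space)
//         buffer_size <<= 1;
//     std::printf("%zu\n", buffer_size); // prints 4194304 (4 MiB)
// }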

void Group::write(const std::string& path, const char* encryption_key, uint64_t version_number,
                  bool write_history) const
{
    File file;
    int flags = 0;
    file.open(path, File::access_ReadWrite, File::create_Must, flags);
    DefaultTableWriter table_writer(write_history);
    write(file, encryption_key, version_number, table_writer);
}

BinaryData Group::write_to_mem() const
{
    REALM_ASSERT(is_attached());

    // Get max possible size of buffer
    size_t max_size = m_alloc.get_total_size();

    auto buffer = std::unique_ptr<char[]>(new (std::nothrow) char[max_size]);
    if (!buffer)
        throw Exception(ErrorCodes::OutOfMemory, "Could not allocate memory while dumping to memory");
    MemoryOutputStream out; // Throws
    out.set_buffer(buffer.get(), buffer.get() + max_size);
    write(out); // Throws
    size_t buffer_size = out.size();
    return BinaryData(buffer.release(), buffer_size);
}

void Group::write(std::ostream& out, int file_format_version, TableWriter& table_writer, bool no_top_array,
                  bool pad_for_encryption, uint_fast64_t version_number)
{
    _impl::OutputStream out_2(out);
    out_2.only_modified = false;

    // Write the file header
    SlabAlloc::Header streaming_header;
    if (no_top_array) {
        file_format_version = 0;
    }
    else if (file_format_version == 0) {
        // Use current file format version
        file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    }
    SlabAlloc::init_streaming_header(&streaming_header, file_format_version);
    out_2.write(reinterpret_cast<const char*>(&streaming_header), sizeof streaming_header);

    ref_type top_ref = 0;
    size_t final_file_size = sizeof streaming_header;
    if (no_top_array) {
        // Accept version number 1 as that number is (unfortunately) also used
        // to denote the empty initial state of a Realm file.
        REALM_ASSERT(version_number == 0 || version_number == 1);
    }
    else {
        // Because we need to include the total logical file size in the
        // top-array, we have to start by writing everything except the
        // top-array, and then finally compute and write a correct version of
        // the top-array. The free-space information of the group will only be
        // included if a non-zero version number is given as parameter,
        // indicating that versioning info is to be saved. This is used from
        // DB to compact the database by writing only the live data
        // into a separate file.
        ref_type names_ref = table_writer.write_names(out_2); // Throws
        ref_type tables_ref = table_writer.write_tables(out_2);

        SlabAlloc new_alloc;
        new_alloc.attach_empty(); // Throws
        Array top(new_alloc);
        top.create(Array::type_HasRefs); // Throws
        _impl::ShallowArrayDestroyGuard dg_top(&top);
        int_fast64_t value_1 = from_ref(names_ref);
        int_fast64_t value_2 = from_ref(tables_ref);
        top.add(value_1); // Throws
        top.add(value_2); // Throws
        top.add(0);       // Throws

        int top_size = 3;
        if (version_number) {
            TableWriter::HistoryInfo history_info = table_writer.write_history(out_2); // Throws

            Array free_list(new_alloc);
            Array size_list(new_alloc);
            Array version_list(new_alloc);
            free_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_1(&free_list);
            size_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_2(&size_list);
            version_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_3(&version_list);
            bool deep = true;              // Deep
            bool only_if_modified = false; // Always
            bool compress = false;
            ref_type free_list_ref = free_list.write(out_2, deep, only_if_modified, compress);
            ref_type size_list_ref = size_list.write(out_2, deep, only_if_modified, compress);
            ref_type version_list_ref = version_list.write(out_2, deep, only_if_modified, compress);
            top.add(RefOrTagged::make_ref(free_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(size_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(version_list_ref));  // Throws
            top.add(RefOrTagged::make_tagged(version_number)); // Throws
            top_size = 7;

            if (history_info.type != Replication::hist_None) {
                top.add(RefOrTagged::make_tagged(history_info.type));
                top.add(RefOrTagged::make_ref(history_info.ref));
                top.add(RefOrTagged::make_tagged(history_info.version));
                top.add(RefOrTagged::make_tagged(history_info.sync_file_id));
                top_size = s_group_max_size;
                // ^ this is too large, since the evacuation point entry is not there:
                // (but the code below is self correcting)
            }
        }
        top_ref = out_2.get_ref_of_next_array();

        // Produce a preliminary version of the top array whose
        // representation is guaranteed to be able to hold the final file
        // size
        size_t max_top_byte_size = Array::get_max_byte_size(top_size);
        size_t max_final_file_size = size_t(top_ref) + max_top_byte_size;
        top.ensure_minimum_width(RefOrTagged::make_tagged(max_final_file_size)); // Throws

        // Finalize the top array by adding the projected final file size
        // to it
        size_t top_byte_size = top.get_byte_size();
        final_file_size = size_t(top_ref) + top_byte_size;
        top.set(2, RefOrTagged::make_tagged(final_file_size)); // Throws

        // Write the top array
        bool deep = false;             // Shallow
        bool only_if_modified = false; // Always
        bool compress = false;
        top.write(out_2, deep, only_if_modified, compress); // Throws
        REALM_ASSERT_3(size_t(out_2.get_ref_of_next_array()), ==, final_file_size);

        dg_top.reset(nullptr); // Destroy now
    }

    // encryption will pad the file to a multiple of the page, so ensure the
    // footer is aligned to the end of a page
    if (pad_for_encryption) {
#if REALM_ENABLE_ENCRYPTION
        size_t unrounded_size = final_file_size + sizeof(SlabAlloc::StreamingFooter);
        size_t rounded_size = round_up_to_page_size(unrounded_size);
        if (rounded_size != unrounded_size) {
            std::unique_ptr<char[]> buffer(new char[rounded_size - unrounded_size]());
            out_2.write(buffer.get(), rounded_size - unrounded_size);
        }
#endif
    }

    // Write streaming footer
    SlabAlloc::StreamingFooter footer;
    footer.m_top_ref = top_ref;
    footer.m_magic_cookie = SlabAlloc::footer_magic_cookie;
    out_2.write(reinterpret_cast<const char*>(&footer), sizeof footer);
}
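// Editor's note on the two-pass sizing above: the final file size depends on
// the top array's byte size, which in turn depends on the element width
// needed to store that size. The cycle is broken by first widening the array
// for the largest size it could possibly be, then patching slot 2 with the
// real size; since the reserved width already covers it, the byte size
// cannot change. A toy model of that reserve-then-patch step (assumption:
// widths only grow, in power-of-two steps, as realm arrays are believed to
// behave):
//
// #include <cstdint>
//
// struct ToyTopArray {
//     unsigned width_bits = 8;
//     uint64_t slot2 = 0;
//
//     void ensure_minimum_width(uint64_t max_value)
//     {
//         while (width_bits < 64 && (max_value >> width_bits) != 0)
//             width_bits *= 2; // reserve: widen for the worst case
//     }
//     void set_slot2(uint64_t value) { slot2 = value; } // patch: width unchanged
//     uint64_t byte_size(uint64_t num_slots) const { return num_slots * width_bits / 8; }
// };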

void Group::update_refs(ref_type top_ref) noexcept
{
    // After Group::commit() we will always have free space tracking
    // info.
    REALM_ASSERT_3(m_top.size(), >=, 5);

    m_top.init_from_ref(top_ref);

    // Now we can update its child arrays
    m_table_names.update_from_parent();
    m_tables.update_from_parent();

    // Update all attached table accessors.
    for (auto& table_accessor : m_table_accessors) {
        if (table_accessor) {
            table_accessor->update_from_parent();
        }
    }
}

bool Group::operator==(const Group& g) const
{
    for (auto tk : get_table_keys()) {
        const StringData& table_name = get_table_name(tk);

        ConstTableRef table_1 = get_table(tk);
        ConstTableRef table_2 = g.get_table(table_name);
        if (!table_2)
            return false;
        if (table_1->get_primary_key_column().get_type() != table_2->get_primary_key_column().get_type()) {
            return false;
        }
        if (table_1->is_embedded() != table_2->is_embedded())
            return false;
        if (table_1->is_embedded())
            continue;

        if (*table_1 != *table_2)
            return false;
    }
    return true;
}

size_t Group::get_used_space() const noexcept
{
    if (!m_top.is_attached())
        return 0;

    size_t used_space = (size_t(m_top.get(2)) >> 1);

    if (m_top.size() > 4) {
        Array free_lengths(const_cast<SlabAlloc&>(m_alloc));
        free_lengths.init_from_ref(ref_type(m_top.get(4)));
        used_space -= size_t(free_lengths.get_sum());
    }

    return used_space;
}

namespace {
class TransactAdvancer : public _impl::NullInstructionObserver {
public:
    TransactAdvancer(Group&, bool& schema_changed)
        : m_schema_changed(schema_changed)
    {
    }

    bool insert_group_level_table(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool erase_class(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool rename_class(TableKey) noexcept
    {
        m_schema_changed = true;
        return true;
    }

    bool insert_column(ColKey)
    {
        m_schema_changed = true;
        return true;
    }

    bool erase_column(ColKey)
    {
        m_schema_changed = true;
        return true;
    }

    bool rename_column(ColKey) noexcept
    {
        m_schema_changed = true;
        return true; // No-op
    }

private:
    bool& m_schema_changed;
};
} // anonymous namespace

1304
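// Propagate the writable state to the allocator wrapper of every attached
// table accessor.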
void Group::update_allocator_wrappers(bool writable)
{
    m_is_writable = writable;
    // This is tempting:
    // m_alloc.set_read_only(!writable);
    // - but m_alloc may refer to the "global" allocator in the DB object.
    // Setting it here would cause different transactions to race for
    // changing the shared allocator setting. This is somewhat of a mess.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        auto table_accessor = m_table_accessors[i];
        if (table_accessor) {
            table_accessor->update_allocator_wrapper(writable);
        }
    }
}

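// Invoked ahead of a commit; gives each attached table accessor a chance to
// flush its pending state.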
void Group::flush_accessors_for_commit()
{
    for (auto& acc : m_table_accessors)
        if (acc)
            acc->flush_for_commit();
}

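// Bring the table accessors back in sync with the underlying file after the
// top array has moved. An accessor whose table key no longer matches what is
// stored in the file belongs to a removed table and is detached and recycled;
// all others are refreshed in place.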
void Group::refresh_dirty_accessors(bool writable)
{
    if (!m_tables.is_attached()) {
        m_table_accessors.clear();
        return;
    }

    // The array of Tables cannot have shrunk:
    REALM_ASSERT(m_tables.size() >= m_table_accessors.size());

    // but it may have grown - and if so, we must resize the accessor array to match
    if (m_tables.size() > m_table_accessors.size()) {
        m_table_accessors.resize(m_tables.size());
    }

    // Update all attached table accessors.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        auto& table_accessor = m_table_accessors[i];
        if (table_accessor) {
            // If the table has changed its key in the file, it's a
            // new table. This will detach the old accessor and remove it.
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(i);
            bool same_table = false;
            if (rot.is_ref()) {
                auto ref = rot.get_as_ref();
                TableKey new_key = Table::get_key_direct(m_alloc, ref);
                if (new_key == table_accessor->get_key())
                    same_table = true;
            }
            if (same_table) {
                table_accessor->refresh_accessor_tree(writable);
            }
            else {
                table_accessor->detach(Table::cookie_removed);
                recycle_table_accessor(table_accessor);
                m_table_accessors[i] = nullptr;
            }
        }
    }
}


void Group::advance_transact(ref_type new_top_ref, util::InputStream* in, bool writable)
{
    REALM_ASSERT(is_attached());
    // Exception safety: If this function throws, the group accessor and all of
    // its subordinate accessors are left in a state that may not be fully
    // consistent. Only minimal consistency is guaranteed (see
    // AccessorConsistencyLevels). In this case, the application is required to
    // either destroy the Group object, forcing all subordinate accessors to
    // become detached, or take some other equivalent action that involves a
    // call to Group::detach(), such as terminating the transaction in progress.
    // Such actions will also lead to the detachment of all subordinate
    // accessors. Until then it is an error, and unsafe, if the application
    // attempts to access the group or one of its subordinate accessors.
    //
    // The purpose of this function is to refresh all attached accessors after
    // the underlying node structure has undergone arbitrary change, such as
    // when a read transaction has been advanced to a later snapshot of the
    // database.
    //
    // Initially, when this function is invoked, we cannot assume any
    // correspondence between the accessor state and the underlying node
    // structure. We can assume that the hierarchy is in a state of minimal
    // consistency, and that it can be brought to a state of structural
    // correspondence using information in the transaction logs. When structural
    // correspondence is achieved, we can reliably refresh the accessor hierarchy
    // (Table::refresh_accessor_tree()) to bring it back to a fully consistent
    // state. See AccessorConsistencyLevels.
    //
    // Much of the information in the transaction logs is not used in this
    // process, because the changes have already been applied to the underlying
    // node structure. All we need to do here is to bring the accessors back
    // into a state where they correctly reflect the underlying structure (or
    // detach them if the underlying object has been removed.)
    //
    // This is no longer needed in Core, but we need to compute "schema_changed",
    // for the benefit of ObjectStore.
    bool schema_changed = false;
    if (in && has_schema_change_notification_handler()) {
        TransactAdvancer advancer(*this, schema_changed);
        _impl::TransactLogParser parser; // Throws
        parser.parse(*in, advancer);     // Throws
    }

    m_top.detach();                                           // Soft detach
    bool create_group_when_missing = false;                   // See Group::attach_shared().
    attach(new_top_ref, writable, create_group_when_missing); // Throws
    refresh_dirty_accessors(writable);                        // Throws

    if (schema_changed)
        send_schema_change_notification();
}

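// Make sure the top array has slots for the history information (history
// type, history ref, history schema version) followed by the sync file id,
// growing it and validating any previously stored values as needed.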
void Group::prepare_top_for_history(int history_type, int history_schema_version, uint64_t file_ident)
{
    REALM_ASSERT(m_file_format_version >= 7);
    while (m_top.size() < s_hist_type_ndx) {
        m_top.add(0); // Throws
    }

    if (m_top.size() > s_hist_version_ndx) {
        int stored_history_type = int(m_top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        int stored_history_schema_version = int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        if (stored_history_type != Replication::hist_None) {
            REALM_ASSERT(stored_history_type == history_type);
            REALM_ASSERT(stored_history_schema_version == history_schema_version);
        }
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(history_type));              // Throws
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(history_schema_version)); // Throws
    }
    else {
        // No history yet
        REALM_ASSERT(m_top.size() == s_hist_type_ndx);
        ref_type history_ref = 0;                                    // No history yet
        m_top.add(RefOrTagged::make_tagged(history_type));           // Throws
        m_top.add(RefOrTagged::make_ref(history_ref));               // Throws
        m_top.add(RefOrTagged::make_tagged(history_schema_version)); // Throws
    }

    if (m_top.size() > s_sync_file_id_ndx) {
        m_top.set(s_sync_file_id_ndx, RefOrTagged::make_tagged(file_ident));
    }
    else {
        m_top.add(RefOrTagged::make_tagged(file_ident)); // Throws
    }
}

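// Discard any history information stored in the top array: free the history
// tree and reset the history type, version and ref slots.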
void Group::clear_history()
{
    bool has_history = (m_top.is_attached() && m_top.size() > s_hist_type_ndx);
    if (has_history) {
        auto hist_ref = m_top.get_as_ref(s_hist_ref_ndx);
        Array::destroy_deep(hist_ref, m_top.get_alloc());
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(Replication::hist_None)); // Throws
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(0));                   // Throws
        m_top.set(s_hist_ref_ndx, 0);                                                 // Throws
    }
}

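// Debug-only helper for Group::verify(). It collects the memory chunks
// reported to it, sorts and merges adjacent ones (canonicalize()), and
// finally asserts that they tile the managed address range exactly, with no
// overlap and no holes (check_total_coverage()).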
#ifdef REALM_DEBUG // LCOV_EXCL_START ignore debug functions

class MemUsageVerifier : public Array::MemUsageHandler {
public:
    MemUsageVerifier(ref_type ref_begin, ref_type immutable_ref_end, ref_type mutable_ref_end, ref_type baseline)
        : m_ref_begin(ref_begin)
        , m_immutable_ref_end(immutable_ref_end)
        , m_mutable_ref_end(mutable_ref_end)
        , m_baseline(baseline)
    {
    }
    void add_immutable(ref_type ref, size_t size)
    {
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
        REALM_ASSERT_3(size, >, 0);
        REALM_ASSERT_3(ref, >=, m_ref_begin);
        REALM_ASSERT_3(size, <=, m_immutable_ref_end - ref);
        Chunk chunk;
        chunk.ref = ref;
        chunk.size = size;
        m_chunks.push_back(chunk);
    }
    void add_mutable(ref_type ref, size_t size)
    {
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
        REALM_ASSERT_3(size, >, 0);
        REALM_ASSERT_3(ref, >=, m_immutable_ref_end);
        REALM_ASSERT_3(size, <=, m_mutable_ref_end - ref);
        Chunk chunk;
        chunk.ref = ref;
        chunk.size = size;
        m_chunks.push_back(chunk);
    }
    void add(ref_type ref, size_t size)
    {
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
        REALM_ASSERT_3(size, >, 0);
        REALM_ASSERT_3(ref, >=, m_ref_begin);
        REALM_ASSERT(size <= (ref < m_baseline ? m_immutable_ref_end : m_mutable_ref_end) - ref);
        Chunk chunk;
        chunk.ref = ref;
        chunk.size = size;
        m_chunks.push_back(chunk);
    }
    void add(const MemUsageVerifier& verifier)
    {
        m_chunks.insert(m_chunks.end(), verifier.m_chunks.begin(), verifier.m_chunks.end());
    }
    void handle(ref_type ref, size_t allocated, size_t) override
    {
        add(ref, allocated);
    }
    void canonicalize()
    {
        // Sort the chunks in order of increasing ref, then merge adjacent
        // chunks while checking that there is no overlap
        typedef std::vector<Chunk>::iterator iter;
        iter i_1 = m_chunks.begin(), end = m_chunks.end();
        iter i_2 = i_1;
        sort(i_1, end);
        if (i_1 != end) {
            while (++i_2 != end) {
                ref_type prev_ref_end = i_1->ref + i_1->size;
                REALM_ASSERT_3(prev_ref_end, <=, i_2->ref);
                if (i_2->ref == prev_ref_end) { // in-file
                    i_1->size += i_2->size;     // Merge
                }
                else {
                    *++i_1 = *i_2;
                }
            }
            m_chunks.erase(i_1 + 1, end);
        }
    }
    void clear()
    {
        m_chunks.clear();
    }
    void check_total_coverage()
    {
        REALM_ASSERT_3(m_chunks.size(), ==, 1);
        REALM_ASSERT_3(m_chunks.front().ref, ==, m_ref_begin);
        REALM_ASSERT_3(m_chunks.front().size, ==, m_mutable_ref_end - m_ref_begin);
    }

private:
    struct Chunk {
        ref_type ref;
        size_t size;
        bool operator<(const Chunk& c) const
        {
            return ref < c.ref;
        }
    };
    std::vector<Chunk> m_chunks;
    ref_type m_ref_begin, m_immutable_ref_end, m_mutable_ref_end, m_baseline;
};

#endif

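// Structural consistency check (debug builds only): verifies each table, the
// history (when a replication instance is present), and, except in read
// transactions, that the used and free memory chunks together account for
// every byte managed by the slab allocator.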
void Group::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    m_alloc.verify();

    if (!m_top.is_attached()) {
        return;
    }

    // Verify tables
    {
        auto keys = get_table_keys();
        for (auto key : keys) {
            ConstTableRef table = get_table(key);
            REALM_ASSERT_3(table->get_key().value, ==, key.value);
            table->verify();
        }
    }

    // Verify history if present
    if (Replication* repl = *get_repl()) {
        if (auto hist = repl->_create_history_read()) {
            hist->set_group(const_cast<Group*>(this), false);
            _impl::History::version_type version = 0;
            int history_type = 0;
            int history_schema_version = 0;
            get_version_and_history_info(m_top, version, history_type, history_schema_version);
            REALM_ASSERT(history_type != Replication::hist_None || history_schema_version == 0);
            ref_type hist_ref = get_history_ref(m_top);
            hist->update_from_ref_and_version(hist_ref, version);
            hist->verify();
        }
    }

    if (auto tr = dynamic_cast<const Transaction*>(this)) {
        // This is a transaction
        if (tr->get_transact_stage() == DB::TransactStage::transact_Reading) {
            // Verifying the memory cannot be done from a read transaction.
            // There might be a write transaction running that has freed some
            // memory that is seen as being in use in this transaction.
            return;
        }
    }
    size_t logical_file_size = to_size_t(m_top.get_as_ref_or_tagged(2).get_as_int());
    size_t ref_begin = sizeof(SlabAlloc::Header);
    ref_type real_immutable_ref_end = logical_file_size;
    ref_type real_mutable_ref_end = m_alloc.get_total_size();
    ref_type real_baseline = m_alloc.get_baseline();
    // Fake that any empty area between the file and slab is part of the file (immutable):
    ref_type immutable_ref_end = m_alloc.align_size_to_section_boundary(real_immutable_ref_end);
    ref_type mutable_ref_end = m_alloc.align_size_to_section_boundary(real_mutable_ref_end);
    ref_type baseline = m_alloc.align_size_to_section_boundary(real_baseline);

    // Check the consistency of the allocation of used memory
    MemUsageVerifier mem_usage_1(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    m_top.report_memory_usage(mem_usage_1);
    mem_usage_1.canonicalize();

    // Check the consistency of the allocation of the immutable memory that was
    // marked as free before the file was opened.
    MemUsageVerifier mem_usage_2(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    {
        REALM_ASSERT_EX(m_top.size() == 3 || m_top.size() == 5 || m_top.size() == 7 || m_top.size() >= 10,
                        m_top.size());
        Allocator& alloc = m_top.get_alloc();
        Array pos(alloc), len(alloc), ver(alloc);
        pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
        len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
        ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
        if (m_top.size() > s_free_pos_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
                pos.init_from_ref(ref);
        }
        if (m_top.size() > s_free_size_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
                len.init_from_ref(ref);
        }
        if (m_top.size() > s_free_version_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
                ver.init_from_ref(ref);
        }
        REALM_ASSERT(pos.is_attached() == len.is_attached());
        REALM_ASSERT(pos.is_attached() || !ver.is_attached()); // pos.is_attached() <== ver.is_attached()
        if (pos.is_attached()) {
            size_t n = pos.size();
            REALM_ASSERT_3(n, ==, len.size());
            if (ver.is_attached())
                REALM_ASSERT_3(n, ==, ver.size());
            for (size_t i = 0; i != n; ++i) {
                ref_type ref = to_ref(pos.get(i));
                size_t size_of_i = to_size_t(len.get(i));
                mem_usage_2.add_immutable(ref, size_of_i);
            }
            mem_usage_2.canonicalize();
            mem_usage_1.add(mem_usage_2);
            mem_usage_1.canonicalize();
            mem_usage_2.clear();
        }
    }

    // Check the consistency of the allocation of the immutable memory that has
    // been marked as free after the file was opened
    for (const auto& free_block : m_alloc.m_free_read_only) {
        mem_usage_2.add_immutable(free_block.first, free_block.second);
    }
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // Check the consistency of the allocation of the mutable memory that has
    // been marked as free
    m_alloc.for_all_free_entries([&](ref_type ref, size_t sz) {
        mem_usage_2.add_mutable(ref, sz);
    });
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // There may be a hole between the end of file and the beginning of the slab area.
    // We need to take that into account here.
    REALM_ASSERT_3(real_immutable_ref_end, <=, real_baseline);
    auto slab_start = immutable_ref_end;
    if (real_immutable_ref_end < slab_start) {
        ref_type ref = real_immutable_ref_end;
        size_t corrected_size = slab_start - real_immutable_ref_end;
        mem_usage_1.add_immutable(ref, corrected_size);
        mem_usage_1.canonicalize();
    }

    // At this point we have accounted for all memory managed by the slab
    // allocator
    mem_usage_1.check_total_coverage();
#endif
}

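// Ask every table in the group to check that its primary key column is in a
// valid state.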
void Group::validate_primary_columns()
{
    auto table_keys = this->get_table_keys();
    for (auto tk : table_keys) {
        auto table = get_table(tk);
        table->validate_primary_column();
    }
}

#ifdef REALM_DEBUG

MemStats Group::get_stats()
{
    MemStats mem_stats;
    m_top.stats(mem_stats);

    return mem_stats;
}

void Group::print() const
{
    m_alloc.print();
}


void Group::print_free() const
{
    Allocator& alloc = m_top.get_alloc();
    Array pos(alloc), len(alloc), ver(alloc);
    pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
    len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
    ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
    if (m_top.size() > s_free_pos_ndx) {
        if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
            pos.init_from_ref(ref);
    }
    if (m_top.size() > s_free_size_ndx) {
        if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
            len.init_from_ref(ref);
    }
    if (m_top.size() > s_free_version_ndx) {
        if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
            ver.init_from_ref(ref);
    }

    if (!pos.is_attached()) {
        std::cout << "none\n";
        return;
    }
    bool has_versions = ver.is_attached();

    size_t n = pos.size();
    for (size_t i = 0; i != n; ++i) {
        size_t offset = to_size_t(pos.get(i));
        size_t size_of_i = to_size_t(len.get(i));
        std::cout << i << ": " << offset << " " << size_of_i;

        if (has_versions) {
            size_t version = to_size_t(ver.get(i));
            std::cout << " " << version;
        }
        std::cout << "\n";
    }
    std::cout << "\n";
}
#endif

// LCOV_EXCL_STOP ignore debug functions