• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / github_pull_request_281750

30 Oct 2023 03:37PM UTC coverage: 90.528% (-1.0%) from 91.571%
github_pull_request_281750

Pull #6073

Evergreen

jedelbo
Log free space and history sizes when opening file
Pull Request #6073: Merge next-major

95488 of 175952 branches covered (0.0%)

8973 of 12277 new or added lines in 149 files covered. (73.09%)

622 existing lines in 51 files now uncovered.

233503 of 257934 relevant lines covered (90.53%)

6533720.56 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.86
/src/realm/group.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <new>
20
#include <algorithm>
21
#include <fstream>
22

23
#ifdef REALM_DEBUG
24
#include <iostream>
25
#include <iomanip>
26
#endif
27

28
#include <realm/util/file_mapper.hpp>
29
#include <realm/util/memory_stream.hpp>
30
#include <realm/util/miscellaneous.hpp>
31
#include <realm/util/thread.hpp>
32
#include <realm/impl/destroy_guard.hpp>
33
#include <realm/utilities.hpp>
34
#include <realm/exceptions.hpp>
35
#include <realm/group_writer.hpp>
36
#include <realm/transaction.hpp>
37
#include <realm/replication.hpp>
38

39
using namespace realm;
40
using namespace realm::util;
41

42
namespace {
43

44
class Initialization {
45
public:
46
    Initialization()
47
    {
24✔
48
        realm::cpuid_init();
24✔
49
    }
24✔
50
};
51

52
Initialization initialization;
53

54
} // anonymous namespace
55

56
// Construct a free-standing, empty in-memory Group backed by its own
// SlabAlloc (no file attached).
Group::Group()
    : m_local_alloc(new SlabAlloc)
    , m_alloc(*m_local_alloc) // Throws
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
    m_alloc.attach_empty(); // Throws
    m_file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    // A zero top-ref means "no group yet" — ask attach() to create one.
    ref_type no_top_ref = 0;
    bool create_when_missing = true;
    bool writable = create_when_missing;
    attach(no_top_ref, writable, create_when_missing); // Throws
}
71

72

73
// Construct a Group by attaching to an existing Realm file. The file is
// mapped read-only and must already exist.
Group::Group(const std::string& file_path, const char* encryption_key)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();

    SlabAlloc::Config cfg;
    cfg.read_only = true;
    cfg.no_create = true;
    cfg.encryption_key = encryption_key;
    ref_type top_ref = m_alloc.attach_file(file_path, cfg); // Throws
    // Non-Transaction Groups always allow writing and simply don't allow
    // committing when opened in read-only mode
    m_alloc.set_read_only(false);

    open(top_ref, file_path);
}
93

94

95
// Construct a Group from an in-memory serialized buffer. When
// `take_ownership` is true, the allocator takes over freeing the buffer.
Group::Group(BinaryData buffer, bool take_ownership)
    : m_local_alloc(new SlabAlloc) // Throws
    , m_alloc(*m_local_alloc)
    , m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    REALM_ASSERT(buffer.data());

    init_array_parents();
    ref_type top_ref = m_alloc.attach_buffer(buffer.data(), buffer.size()); // Throws

    open(top_ref, {});

    // Only claim ownership after open() succeeded, so a failed open does
    // not free a caller-owned buffer.
    if (take_ownership)
        m_alloc.own_buffer();
}
112

113
// Construct a Group on top of an externally owned allocator (used by
// transactions). Note: m_local_alloc stays null, so the destructor will
// not detach the allocator.
Group::Group(SlabAlloc* alloc) noexcept
    : m_alloc(*alloc)
    , // Throws
    m_top(m_alloc)
    , m_tables(m_alloc)
    , m_table_names(m_alloc)
{
    init_array_parents();
}
122

123
namespace {
124

125
class TableRecycler : public std::vector<Table*> {
126
public:
127
    ~TableRecycler()
128
    {
×
129
        REALM_UNREACHABLE();
×
130
        // if ever enabled, remember to release Tables:
×
131
        // for (auto t : *this) {
×
132
        //    delete t;
×
133
        //}
×
134
    }
×
135
};
136

137
// We use the classic approach to construct a FIFO from two LIFO's,
138
// insertion is done into recycler_1, removal is done from recycler_2,
139
// and when recycler_2 is empty, recycler_1 is reversed into recycler_2.
140
// this i O(1) for each entry.
141
auto& g_table_recycler_1 = *new TableRecycler;
142
auto& g_table_recycler_2 = *new TableRecycler;
143
// number of tables held back before being recycled. We hold back recycling
144
// the latest to increase the probability of detecting race conditions
145
// without crashing.
146
const static int g_table_recycling_delay = 100;
147
auto& g_table_recycler_mutex = *new std::mutex;
148

149
} // namespace
150

151
// Advance to the next occupied table slot and pre-load its key.
TableKeyIterator& TableKeyIterator::operator++()
{
    ++m_pos;
    ++m_index_in_group;
    load_key();
    return *this;
}
158

159
// Dereference: lazily load the key if it has not been resolved yet.
TableKey TableKeyIterator::operator*()
{
    if (!bool(m_table_key))
        load_key();
    return m_table_key;
}
166

167
void TableKeyIterator::load_key()
168
{
6,333,759✔
169
    const Group& g = *m_group;
6,333,759✔
170
    size_t max_index_in_group = g.m_table_names.size();
6,333,759✔
171
    while (m_index_in_group < max_index_in_group) {
6,701,724✔
172
        RefOrTagged rot = g.m_tables.get_as_ref_or_tagged(m_index_in_group);
5,542,800✔
173
        if (rot.is_ref()) {
5,542,800✔
174
            Table* t;
5,174,835✔
175
            if (m_index_in_group < g.m_table_accessors.size() &&
5,174,835✔
176
                (t = load_atomic(g.m_table_accessors[m_index_in_group], std::memory_order_acquire))) {
5,174,835✔
177
                m_table_key = t->get_key();
1,124,058✔
178
            }
1,124,058✔
179
            else {
4,050,777✔
180
                m_table_key = Table::get_key_direct(g.m_tables.get_alloc(), rot.get_as_ref());
4,050,777✔
181
            }
4,050,777✔
182
            return;
5,174,835✔
183
        }
5,174,835✔
184
        m_index_in_group++;
367,965✔
185
    }
367,965✔
186
    m_table_key = TableKey();
3,760,770✔
187
}
1,158,924✔
188

189
// Random access by logical position. The cached iterator only moves
// forward, so a backwards request restarts it from the beginning.
TableKey TableKeys::operator[](size_t p) const
{
    if (p < m_iter.m_pos)
        m_iter = TableKeyIterator(m_iter.m_group, 0);
    while (m_iter.m_pos < p)
        ++m_iter;
    return *m_iter;
}
199

200
// Number of tables in the group (maintained by set_size()).
size_t Group::size() const noexcept
{
    return m_num_tables;
}
204

205

206
void Group::set_size() const noexcept
207
{
3,300,897✔
208
    int retval = 0;
3,300,897✔
209
    if (is_attached() && m_table_names.is_attached()) {
3,301,011✔
210
        size_t max_index = m_tables.size();
3,153,576✔
211
        REALM_ASSERT_EX(max_index < (1 << 16), max_index);
3,153,576✔
212
        for (size_t j = 0; j < max_index; ++j) {
12,525,453✔
213
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(j);
9,371,877✔
214
            if (rot.is_ref() && rot.get_as_ref()) {
9,371,877✔
215
                ++retval;
9,026,706✔
216
            }
9,026,706✔
217
        }
9,371,877✔
218
    }
3,153,576✔
219
    m_num_tables = retval;
3,300,897✔
220
}
3,300,897✔
221

222
// Build a table -> primary-key-column map from a legacy "pk" metadata
// table, whose rows pair a class name with its primary-key property name.
std::map<TableRef, ColKey> Group::get_primary_key_columns_from_pk_table(TableRef pk_table)
{
    std::map<TableRef, ColKey> result;
    REALM_ASSERT(pk_table);
    ColKey col_table = pk_table->get_column_key("pk_table");
    ColKey col_prop = pk_table->get_column_key("pk_property");
    for (auto pk_obj : *pk_table) {
        auto object_type = pk_obj.get<String>(col_table);
        // Class tables are stored under a prefixed name.
        auto table_name = std::string(g_class_name_prefix) + std::string(object_type);
        auto target = get_table(table_name);
        auto pk_col_name = pk_obj.get<String>(col_prop);
        result.emplace(target, target->get_column_key(pk_col_name));
    }

    return result;
}
239

240
// Map a slot index to its TableKey. Uses a live accessor when available,
// otherwise reads the key straight from storage.
TableKey Group::ndx2key(size_t ndx) const
{
    REALM_ASSERT(is_attached());
    if (Table* accessor = load_atomic(m_table_accessors[ndx], std::memory_order_acquire))
        return accessor->get_key(); // fast path

    // slow path: resolve via the underlying table node
    RefOrTagged slot = m_tables.get_as_ref_or_tagged(ndx);
    if (slot.is_tagged())
        throw NoSuchTable();
    ref_type ref = slot.get_as_ref();
    REALM_ASSERT(ref);
    return Table::get_key_direct(m_tables.get_alloc(), ref);
}
255

256
// Map a TableKey to its slot index, verifying the key actually matches the
// table stored there; throws NoSuchTable otherwise.
size_t Group::key2ndx_checked(TableKey key) const
{
    size_t idx = key2ndx(key);
    // Fast path via a live accessor.
    // Note: don't lock when accessing m_table_accessors, because if we miss a
    // concurrently introduced table accessor, we'll just fall through to the
    // slow path. Table accessors can be introduced concurrently, but never
    // removed. This is only safe because 'm_table_accessors' will not be
    // relocated concurrently. (We aim to be safe in face of concurrent access
    // to a frozen transaction, where tables cannot be added or removed. All
    // other races are undefined behaviour.)
    if (idx < m_table_accessors.size()) {
        Table* accessor = load_atomic(m_table_accessors[idx], std::memory_order_acquire);
        if (accessor && accessor->get_key() == key)
            return idx;
    }
    // Slow path: verify against the on-disk table node. The notion of a const
    // group is not really useful here; const-ness is no longer the mechanism
    // distinguishing read from write transactions, hence the const_cast.
    Allocator* alloc = const_cast<SlabAlloc*>(&m_alloc);
    if (m_tables.is_attached() && idx < m_tables.size()) {
        RefOrTagged slot = m_tables.get_as_ref_or_tagged(idx);
        if (slot.is_ref() && slot.get_as_ref() && (Table::get_key_direct(*alloc, slot.get_as_ref()) == key))
            return idx;
    }
    throw NoSuchTable();
}
284

285

286
int Group::get_file_format_version() const noexcept
287
{
1,194,405✔
288
    return m_file_format_version;
1,194,405✔
289
}
1,194,405✔
290

291

292
void Group::set_file_format_version(int file_format) noexcept
293
{
3,080,355✔
294
    m_file_format_version = file_format;
3,080,355✔
295
}
3,080,355✔
296

297

298
int Group::get_committed_file_format_version() const noexcept
299
{
×
300
    return m_alloc.get_committed_file_format_version();
×
301
}
×
302

303
std::optional<int> Group::fake_target_file_format;
304

305
void _impl::GroupFriend::fake_target_file_format(const std::optional<int> format) noexcept
306
{
72✔
307
    Group::fake_target_file_format = format;
72✔
308
}
72✔
309

310
int Group::get_target_file_format_version_for_session(int current_file_format_version,
311
                                                      int requested_history_type) noexcept
312
{
168,339✔
313
    if (Group::fake_target_file_format) {
168,339✔
314
        return *Group::fake_target_file_format;
72✔
315
    }
72✔
316
    // Note: This function is responsible for choosing the target file format
82,866✔
317
    // for a sessions. If it selects a file format that is different from
82,866✔
318
    // `current_file_format_version`, it will trigger a file format upgrade
82,866✔
319
    // process.
82,866✔
320

82,866✔
321
    // Note: `current_file_format_version` may be zero at this time, which means
82,866✔
322
    // that the file format it is not yet decided (only possible for empty
82,866✔
323
    // Realms where top-ref is zero).
82,866✔
324

82,866✔
325
    // Please see Group::get_file_format_version() for information about the
82,866✔
326
    // individual file format versions.
82,866✔
327

82,866✔
328
    if (requested_history_type == Replication::hist_None) {
168,267✔
329
        if (current_file_format_version == 23) {
34,011✔
330
            // We are able to open these file formats in RO mode
UNCOV
331
            return current_file_format_version;
×
UNCOV
332
        }
×
333
    }
168,267✔
334

82,866✔
335
    return g_current_file_format_version;
168,267✔
336
}
168,267✔
337

338
void Group::get_version_and_history_info(const Array& top, _impl::History::version_type& version, int& history_type,
339
                                         int& history_schema_version) noexcept
340
{
771,675✔
341
    using version_type = _impl::History::version_type;
771,675✔
342
    version_type version_2 = 0;
771,675✔
343
    int history_type_2 = 0;
771,675✔
344
    int history_schema_version_2 = 0;
771,675✔
345
    if (top.is_attached()) {
771,675✔
346
        if (top.size() > s_version_ndx) {
722,622✔
347
            version_2 = version_type(top.get_as_ref_or_tagged(s_version_ndx).get_as_int());
722,280✔
348
        }
722,280✔
349
        if (top.size() > s_hist_type_ndx) {
722,622✔
350
            history_type_2 = int(top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
719,244✔
351
        }
719,244✔
352
        if (top.size() > s_hist_version_ndx) {
722,622✔
353
            history_schema_version_2 = int(top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
719,241✔
354
        }
719,241✔
355
    }
722,622✔
356
    // Version 0 is not a legal initial version, so it has to be set to 1
384,735✔
357
    // instead.
384,735✔
358
    if (version_2 == 0)
771,675✔
359
        version_2 = 1;
51,375✔
360
    version = version_2;
771,675✔
361
    history_type = history_type_2;
771,675✔
362
    history_schema_version = history_schema_version_2;
771,675✔
363
}
771,675✔
364

365
int Group::get_history_schema_version() noexcept
366
{
23,571✔
367
    bool history_schema_version = (m_top.is_attached() && m_top.size() > s_hist_version_ndx);
23,571✔
368
    if (history_schema_version) {
23,571✔
369
        return int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
705✔
370
    }
705✔
371
    return 0;
22,866✔
372
}
22,866✔
373

374
// Sync client file ident stored in the top array; falls back to 1 for a
// sync-server history with no stored ident, else 0.
uint64_t Group::get_sync_file_id() const noexcept
{
    if (m_top.is_attached() && m_top.size() > s_sync_file_id_ndx)
        return uint64_t(m_top.get_as_ref_or_tagged(s_sync_file_id_ndx).get_as_int());
    auto repl = get_replication();
    if (repl && repl->get_history_type() == Replication::hist_SyncServer)
        return 1;
    return 0;
}
385

386
// Total free space recorded in the file, i.e. the sum of the free-list
// size entries hanging off the top array; 0 when not present.
size_t Group::get_free_space_size(const Array& top) noexcept
{
    if (top.is_attached() && top.size() > s_free_size_ndx) {
        auto sizes_ref = top.get_as_ref(s_free_size_ndx);
        Array free_list_sizes(top.get_alloc());
        free_list_sizes.init_from_ref(sizes_ref);
        return size_t(free_list_sizes.get_sum());
    }
    return 0;
}
396

397
// Deep byte size of the history tree referenced from the top array, or 0
// when no history ref is present.
size_t Group::get_history_size(const Array& top) noexcept
{
    if (top.is_attached() && top.size() > s_hist_ref_ndx) {
        auto hist_ref = top.get_as_ref(s_hist_ref_ndx);
        Array hist(top.get_alloc());
        hist.init_from_ref(hist_ref);
        return hist.get_byte_size_deep();
    }
    return 0;
}
407

408
// Verify that the committed file format can be opened without an upgrade
// (required because read-only mode cannot rewrite the file). Returns the
// committed file format version; throws FileAccessError otherwise.
int Group::read_only_version_check(SlabAlloc& alloc, ref_type top_ref, const std::string& path)
{
    auto file_format_version = alloc.get_committed_file_format_version();

    // It is not possible to open prior file format versions without an
    // upgrade, and a Realm file cannot be upgraded when opened in this mode
    // (we may be unable to write to the file). See
    // Group::get_file_format_version() for details on individual versions.
    bool file_format_ok = false;
    switch (file_format_version) {
        case 0:
            // Format 0 is only acceptable for a completely empty file.
            file_format_ok = (top_ref == 0);
            break;
        case 23:
        case g_current_file_format_version:
            file_format_ok = true;
            break;
    }
    if (REALM_UNLIKELY(!file_format_ok))
        throw FileAccessError(ErrorCodes::FileFormatUpgradeRequired,
                              util::format("Realm file at path '%1' cannot be opened in read-only mode because it "
                                           "has a file format version (%2) which requires an upgrade",
                                           path, file_format_version),
                              path);
    return file_format_version;
}
436

437
// Finish opening a Group after the allocator is attached: check the file
// format, reclaim slab memory, and attach the accessor tree.
void Group::open(ref_type top_ref, const std::string& file_path)
{
    // Guard detaches the allocator if anything below throws.
    SlabAlloc::DetachGuard dg(m_alloc);
    m_file_format_version = read_only_version_check(m_alloc, top_ref, file_path);

    Replication::HistoryType history_type = Replication::hist_None;
    int target_file_format_version = get_target_file_format_version_for_session(m_file_format_version, history_type);
    if (m_file_format_version == 0) {
        // Empty file: adopt the session's target format directly.
        set_file_format_version(target_file_format_version);
    }
    else {
        // From a technical point of view, we could upgrade the Realm file
        // format in memory here, but since upgrading can be expensive, it is
        // currently disallowed.
        REALM_ASSERT(target_file_format_version == m_file_format_version);
    }

    // Make all dynamically allocated memory (space beyond the attached file)
    // available as free-space.
    reset_free_space_tracking(); // Throws

    bool create_when_missing = true;
    bool writable = create_when_missing;
    attach(top_ref, writable, create_when_missing); // Throws
    dg.release();                                   // Do not detach after all
}
463

464
Group::~Group() noexcept
{
    // If this group accessor is detached at this point in time, it is either
    // because it is DB::m_group (m_is_shared), or it is a free-standing
    // group accessor that was never successfully opened.
    if (!m_top.is_attached())
        return;

    // Free-standing group accessor
    detach();

    // If a local allocator is held in m_local_alloc, destroying it will
    // verify that it has been detached — so detach it explicitly first.
    if (m_local_alloc)
        m_local_alloc->detach();
}
481

482
// Refresh the reader's view of the file after it grew or a new version was
// committed, then rebind all accessor refs to the new top.
void Group::remap_and_update_refs(ref_type new_top_ref, size_t new_file_size, bool writable)
{
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // Track the mapping version so ref->ptr translations are refreshed
    // whenever the underlying mapping has changed.
    auto mapping_version = m_alloc.get_mapping_version();
    if (mapping_version != m_last_seen_mapping_version)
        m_last_seen_mapping_version = mapping_version;
    update_refs(new_top_ref);
}
494

495
void Group::validate_top_array(const Array& arr, const SlabAlloc& alloc, std::optional<size_t> read_lock_file_size,
496
                               std::optional<uint_fast64_t> read_lock_version)
497
{
3,230,772✔
498
    size_t top_size = arr.size();
3,230,772✔
499
    ref_type top_ref = arr.get_ref();
3,230,772✔
500

2,063,664✔
501
    switch (top_size) {
3,230,772✔
502
        // These are the valid sizes
503
        case 3:
2,063,343✔
504
        case 5:
2,063,343✔
505
        case 7:
2,113,065✔
506
        case 9:
2,113,065✔
507
        case 10:
2,113,065✔
508
        case 11:
3,225,369✔
509
        case 12: {
3,230,028✔
510
            ref_type table_names_ref = arr.get_as_ref_or_tagged(s_table_name_ndx).get_as_ref();
3,230,028✔
511
            ref_type tables_ref = arr.get_as_ref_or_tagged(s_table_refs_ndx).get_as_ref();
3,230,028✔
512
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
3,230,028✔
513

2,063,064✔
514
            // Logical file size must never exceed actual file size.
2,063,064✔
515
            auto file_size = alloc.get_baseline();
3,230,028✔
516
            if (logical_file_size > file_size) {
3,230,028✔
517
                std::string err = util::format("Invalid logical file size: %1, actual file size: %2, read lock file "
×
518
                                               "size: %3, read lock version: %4",
×
519
                                               logical_file_size, file_size, read_lock_file_size, read_lock_version);
×
520
                throw InvalidDatabase(err, "");
×
521
            }
×
522
            // First two entries must be valid refs pointing inside the file
2,063,064✔
523
            auto invalid_ref = [logical_file_size](ref_type ref) {
6,453,348✔
524
                return ref == 0 || (ref & 7) || ref > logical_file_size;
6,455,166✔
525
            };
6,453,348✔
526
            if (invalid_ref(table_names_ref) || invalid_ref(tables_ref)) {
3,230,043✔
527
                std::string err = util::format(
×
528
                    "Invalid top array (top_ref, [0], [1]): %1, %2, %3, read lock size: %4, read lock version: %5",
×
529
                    top_ref, table_names_ref, tables_ref, read_lock_file_size, read_lock_version);
×
530
                throw InvalidDatabase(err, "");
×
531
            }
×
532
            break;
3,230,028✔
533
        }
3,230,028✔
534
        default: {
2,063,064✔
535
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
×
536
            std::string err =
×
537
                util::format("Invalid top array size (ref: %1, array size: %2) file size: %3, read "
×
538
                             "lock size: %4, read lock version: %5",
×
539
                             top_ref, top_size, logical_file_size, read_lock_file_size, read_lock_version);
×
540
            throw InvalidDatabase(err, "");
×
541
            break;
2,063,064✔
542
        }
3,230,028✔
543
    }
3,230,772✔
544
}
3,230,772✔
545

546
void Group::attach(ref_type top_ref, bool writable, bool create_group_when_missing, size_t file_size,
547
                   uint_fast64_t version)
548
{
3,304,602✔
549
    REALM_ASSERT(!m_top.is_attached());
3,304,602✔
550
    if (create_group_when_missing)
3,304,602✔
551
        REALM_ASSERT(writable);
3,304,602✔
552

2,099,721✔
553
    // If this function throws, it must leave the group accesor in a the
2,099,721✔
554
    // unattached state.
2,099,721✔
555

2,099,721✔
556
    m_tables.detach();
3,304,602✔
557
    m_table_names.detach();
3,304,602✔
558
    m_is_writable = writable;
3,304,602✔
559

2,099,721✔
560
    if (top_ref != 0) {
3,304,602✔
561
        m_top.init_from_ref(top_ref);
3,143,514✔
562
        validate_top_array(m_top, m_alloc, file_size, version);
3,143,514✔
563
        m_table_names.init_from_parent();
3,143,514✔
564
        m_tables.init_from_parent();
3,143,514✔
565
    }
3,143,514✔
566
    else if (create_group_when_missing) {
161,088✔
567
        create_empty_group(); // Throws
14,301✔
568
    }
14,301✔
569
    m_attached = true;
3,304,602✔
570
    set_size();
3,304,602✔
571

2,099,721✔
572
    size_t sz = m_tables.is_attached() ? m_tables.size() : 0;
3,229,542✔
573
    while (m_table_accessors.size() > sz) {
3,304,710✔
574
        if (Table* t = m_table_accessors.back()) {
108✔
575
            t->detach(Table::cookie_void);
99✔
576
            recycle_table_accessor(t);
99✔
577
        }
99✔
578
        m_table_accessors.pop_back();
108✔
579
    }
108✔
580
    while (m_table_accessors.size() < sz) {
12,226,701✔
581
        m_table_accessors.emplace_back();
8,922,099✔
582
    }
8,922,099✔
583
}
3,304,602✔
584

585

586
void Group::detach() noexcept
587
{
3,083,703✔
588
    detach_table_accessors();
3,083,703✔
589
    m_table_accessors.clear();
3,083,703✔
590

1,975,518✔
591
    m_table_names.detach();
3,083,703✔
592
    m_tables.detach();
3,083,703✔
593
    m_top.detach();
3,083,703✔
594

1,975,518✔
595
    m_attached = false;
3,083,703✔
596
}
3,083,703✔
597

598
// Attach a shared (transaction-owned) group to a specific version of the
// database file.
void Group::attach_shared(ref_type new_top_ref, size_t new_file_size, bool writable, VersionID version)
{
    REALM_ASSERT_3(new_top_ref, <, new_file_size);
    REALM_ASSERT(!is_attached());

    // update readers view of memory
    m_alloc.update_reader_view(new_file_size); // Throws
    update_allocator_wrappers(writable);

    // When `new_top_ref` is null, ask attach() to create a new node structure
    // for an empty group — but only when initiating a write transaction. For
    // a read transaction we must instead leave m_top, m_tables and
    // m_table_names detached, as there are no underlying array nodes to
    // attach them to. For write transactions the nodes have to exist, since
    // they must be ready for modification.
    bool create_group_when_missing = writable;
    attach(new_top_ref, writable, create_group_when_missing, new_file_size, version.version); // Throws
}
617

618

619
void Group::detach_table_accessors() noexcept
620
{
3,080,832✔
621
    for (auto& table_accessor : m_table_accessors) {
9,245,550✔
622
        if (Table* t = table_accessor) {
9,245,550✔
623
            t->detach(Table::cookie_transaction_ended);
5,435,070✔
624
            recycle_table_accessor(t);
5,435,070✔
625
            table_accessor = nullptr;
5,435,070✔
626
        }
5,435,070✔
627
    }
9,245,550✔
628
}
3,080,832✔
629

630

631
void Group::create_empty_group()
632
{
78,405✔
633
    m_top.create(Array::type_HasRefs); // Throws
78,405✔
634
    _impl::DeepArrayDestroyGuard dg_top(&m_top);
78,405✔
635
    {
78,405✔
636
        m_table_names.create(); // Throws
78,405✔
637
        _impl::DestroyGuard<ArrayStringShort> dg(&m_table_names);
78,405✔
638
        m_top.add(m_table_names.get_ref()); // Throws
78,405✔
639
        dg.release();
78,405✔
640
    }
78,405✔
641
    {
78,405✔
642
        m_tables.create(Array::type_HasRefs); // Throws
78,405✔
643
        _impl::DestroyGuard<Array> dg(&m_tables);
78,405✔
644
        m_top.add(m_tables.get_ref()); // Throws
78,405✔
645
        dg.release();
78,405✔
646
    }
78,405✔
647
    size_t initial_logical_file_size = sizeof(SlabAlloc::Header);
78,405✔
648
    m_top.add(RefOrTagged::make_tagged(initial_logical_file_size)); // Throws
78,405✔
649
    dg_top.release();
78,405✔
650
}
78,405✔
651

652

653
// Return the Table accessor for a slot, creating it on first use.
// Uses the double-checked locking idiom: a lock-free atomic probe first,
// then a locked re-check before constructing.
Table* Group::do_get_table(size_t table_ndx)
{
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());
    Table* table = load_atomic(m_table_accessors[table_ndx], std::memory_order_acquire);
    if (!table) {
        std::lock_guard<std::mutex> lock(m_accessor_mutex);
        table = m_table_accessors[table_ndx];
        if (!table)
            table = create_table_accessor(table_ndx); // Throws
    }
    return table;
}
667

668

669
// Look up a table by name; returns null when the group has no name array
// or no table with that name exists.
Table* Group::do_get_table(StringData name)
{
    if (!m_table_names.is_attached())
        return 0;
    size_t table_ndx = m_table_names.find_first(name);
    if (table_ndx == not_found)
        return 0;

    return do_get_table(table_ndx); // Throws
}
680

681
// Create a new table and give it a primary-key column in one operation.
// The table creation and pk-column setup are not replicated individually;
// a single add_class_with_primary_key instruction covers both.
TableRef Group::add_table_with_primary_key(StringData name, DataType pk_type, StringData pk_name, bool nullable,
                                           Table::Type table_type)
{
    check_attached();
    check_table_name_uniqueness(name);

    auto table = do_add_table(name, table_type, false);

    // Add pk column - without replication
    ColumnAttrMask attr;
    if (nullable)
        attr.set(col_attr_Nullable);
    ColKey pk_col = table->generate_col_key(ColumnType(pk_type), attr);
    table->do_insert_root_column(pk_col, ColumnType(pk_type), pk_name);
    table->do_set_primary_key_column(pk_col);

    if (Replication* repl = *get_repl())
        repl->add_class_with_primary_key(table->get_key(), name, pk_type, pk_name, nullable, table_type);

    return TableRef(table, table->m_alloc.get_instance_version());
}
702

703
// Core table-creation routine: allocate a key, create the table node,
// register name and ref, optionally replicate, and return a live accessor.
Table* Group::do_add_table(StringData name, Table::Type table_type, bool do_repl)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");

    // Pick the slot for the new table: the first vacant (tagged) slot, or
    // one past the end when none is free.
    uint32_t j;
    RefOrTagged rot = RefOrTagged::make_tagged(0);
    for (j = 0; j < m_tables.size(); ++j) {
        rot = m_tables.get_as_ref_or_tagged(j);
        if (!rot.is_ref())
            break;
    }
    // Build the key from a per-slot tag (upper 16 bits) and the slot index.
    // New slots start with tag zero; reused slots carry the stored tag.
    bool gen_null_tag = (j == m_tables.size());
    uint32_t tag = gen_null_tag ? 0 : uint32_t(rot.get_as_int());
    TableKey key = TableKey((tag << 16) | j);

    if (REALM_UNLIKELY(name.size() > max_table_name_length))
        throw InvalidArgument(ErrorCodes::InvalidName, util::format("Name too long: %1", name));

    using namespace _impl;
    size_t table_ndx = key2ndx(key);
    ref_type ref = Table::create_empty_table(m_alloc, key); // Throws
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());

    rot = RefOrTagged::make_ref(ref);
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());

    if (table_ndx == m_tables.size()) {
        // Append at the end; a new accessor slot is needed too.
        m_tables.add(rot);
        m_table_names.add(name);
        m_table_accessors.push_back(nullptr);
    }
    else {
        // Reuse the vacant slot.
        m_tables.set(table_ndx, rot);       // Throws
        m_table_names.set(table_ndx, name); // Throws
    }

    Replication* repl = *get_repl();
    if (do_repl && repl)
        repl->add_class(key, name, table_type);

    ++m_num_tables;

    Table* table = create_table_accessor(j);
    table->do_set_table_type(table_type);

    return table;
}
754

755

756
// Materialize a Table accessor for an occupied slot, preferring a recycled
// instance from the global pool over a fresh allocation. The pointer is
// published atomically so concurrent readers can probe the cache safely.
Table* Group::create_table_accessor(size_t table_ndx)
{
    REALM_ASSERT(m_tables.size() == m_table_accessors.size());
    REALM_ASSERT(table_ndx < m_table_accessors.size());

    RefOrTagged rot = m_tables.get_as_ref_or_tagged(table_ndx);
    ref_type ref = rot.get_as_ref();
    if (ref == 0)
        throw NoSuchTable();

    Table* table = 0;
    {
        std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
        // Refill the pop-side stack from the push-side stack when empty
        // (two-stack FIFO).
        if (g_table_recycler_2.empty()) {
            while (!g_table_recycler_1.empty()) {
                auto t = g_table_recycler_1.back();
                g_table_recycler_1.pop_back();
                g_table_recycler_2.push_back(t);
            }
        }
        // Only reuse once enough retired tables have accumulated; holding
        // some back improves the odds of catching use-after-detach races.
        if (g_table_recycler_2.size() + g_table_recycler_1.size() > g_table_recycling_delay) {
            table = g_table_recycler_2.back();
            table->fully_detach();
            g_table_recycler_2.pop_back();
        }
    }
    if (table) {
        table->revive(get_repl(), m_alloc, m_is_writable);
        table->init(ref, this, table_ndx, m_is_writable, is_frozen());
    }
    else {
        std::unique_ptr<Table> new_table(new Table(get_repl(), m_alloc));  // Throws
        new_table->init(ref, this, table_ndx, m_is_writable, is_frozen()); // Throws
        table = new_table.release();
    }
    table->refresh_index_accessors();
    // must be atomic to allow concurrent probing of the m_table_accessors vector.
    store_atomic(m_table_accessors[table_ndx], table, std::memory_order_release);
    return table;
}
796

797

798
// Park a no-longer-needed table accessor in the global recycler so a later
// table access can revive it instead of allocating a fresh Table.
void Group::recycle_table_accessor(Table* to_be_recycled)
{
    std::lock_guard<std::mutex> guard(g_table_recycler_mutex);
    g_table_recycler_1.push_back(to_be_recycled);
}
803

804
// Remove the table with the given name. Throws NoSuchTable if no table by
// that name exists.
void Group::remove_table(StringData name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    remove_table(ndx, ndx2key(ndx)); // Throws
}
813

814

815
// Remove the table identified by `key`. key2ndx_checked() throws if the key
// does not refer to an existing table.
void Group::remove_table(TableKey key)
{
    check_attached();
    remove_table(key2ndx_checked(key), key);
}
822

823

824
// Core table-removal routine. Strips the table's columns (unreplicated),
// replicates the class erasure, frees the table's node structure, and
// replaces its slot in m_tables with a tagged counter used to generate the
// next TableKey for this slot.
void Group::remove_table(size_t table_ndx, TableKey key)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    REALM_ASSERT(table_ndx < m_tables.size());
    TableRef table = get_table(key);

    // In principle we could remove a table even if it is the target of link
    // columns of other tables, however, to do that, we would have to
    // automatically remove the "offending" link columns from those other
    // tables. Such a behaviour is deemed too obscure, and we shall therefore
    // require that a removed table does not contain foreign origin backlink
    // columns.
    if (table->is_cross_table_link_target())
        throw CrossTableLinkTarget(table->get_name());

    {
        // We don't want to replicate the individual column removals along the
        // way as they're covered by the table removal
        Table::DisableReplication dr(*table);
        // Remove columns back-to-front so spec indexes stay valid.
        for (size_t i = table->get_column_count(); i > 0; --i) {
            ColKey col_key = table->spec_ndx2colkey(i - 1);
            table->remove_column(col_key);
        }
    }

    size_t prior_num_tables = m_tables.size();
    Replication* repl = *get_repl();
    if (repl)
        repl->erase_class(key, table->get_name(), prior_num_tables); // Throws

    int64_t ref_64 = m_tables.get(table_ndx);
    REALM_ASSERT(!int_cast_has_overflow<ref_type>(ref_64));
    ref_type ref = ref_type(ref_64);

    // Replace entry in m_tables with next tag to use:
    // the upper bits of the old key are bumped so a future table created in
    // this slot gets a fresh, distinct TableKey.
    RefOrTagged rot = RefOrTagged::make_tagged((1 + (key.value >> 16)) & 0x7FFF);
    // Remove table
    m_tables.set(table_ndx, rot);     // Throws
    m_table_names.set(table_ndx, {}); // Throws
    m_table_accessors[table_ndx] = nullptr;
    --m_num_tables;

    // Detach the accessor before destroying the node structure it refers to.
    table->detach(Table::cookie_removed);
    // Destroy underlying node structure
    Array::destroy_deep(ref, m_alloc);
    recycle_table_accessor(table.unchecked_ptr());
}
873

874

875
// Rename the table currently called `name`. Throws NoSuchTable when absent;
// uniqueness of the new name is enforced by the TableKey overload.
void Group::rename_table(StringData name, StringData new_name, bool require_unique_name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    rename_table(ndx2key(ndx), new_name, require_unique_name); // Throws
}
883

884

885
// Rename the table identified by `key`. When `require_unique_name` is set,
// throws TableNameInUse if another table already carries `new_name`.
void Group::rename_table(TableKey key, StringData new_name, bool require_unique_name)
{
    check_attached();
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    if (require_unique_name && has_table(new_name))
        throw TableNameInUse();
    const size_t ndx = key2ndx_checked(key);
    m_table_names.set(ndx, new_name);
    Replication* repl = *get_repl();
    if (repl)
        repl->rename_class(key, new_name); // Throws
}
898

899
// Resolve an ObjLink to its object. Unresolved keys are looked up in the
// target table's tombstone tree, all others in its ordinary cluster tree.
Obj Group::get_object(ObjLink link)
{
    auto table = get_table(link.get_table_key());
    ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return table->m_tombstones->get(obj_key);
    return table->m_clusters.get(obj_key);
}
906

907
// Non-throwing variant of get_object(): returns a null Obj when the target
// does not exist instead of throwing.
Obj Group::try_get_object(ObjLink link) noexcept
{
    auto table = get_table(link.get_table_key());
    ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return table->m_tombstones->try_get_obj(obj_key);
    return table->m_clusters.try_get_obj(obj_key);
}
914

915
// Verify that `link` may legally be stored: its target must exist and must
// not live in an embedded or asymmetric (ephemeral) table. A null link
// (no table key) is trivially valid.
void Group::validate(ObjLink link) const
{
    auto tk = link.get_table_key();
    if (!tk)
        return;

    auto target_key = link.get_obj_key();
    auto target_table = get_table(tk);
    // Unresolved keys live in the tombstone tree, not the cluster tree.
    const ClusterTree* tree = target_key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
    if (!tree->is_valid(target_key)) {
        throw InvalidArgument(ErrorCodes::KeyNotFound, "Target object not found");
    }
    if (target_table->is_embedded()) {
        throw IllegalOperation("Cannot link to embedded object");
    }
    if (target_table->is_asymmetric()) {
        throw IllegalOperation("Cannot link to ephemeral object");
    }
}
933

934
// Serialize the table-name array (deep, unconditionally) and return the ref
// of the written copy.
ref_type Group::DefaultTableWriter::write_names(_impl::OutputStream& out)
{
    return m_group->m_table_names.write(out, true /* deep */, false /* only_if_modified */); // Throws
}
940
// Serialize the table array (deep, unconditionally) and return the ref of
// the written copy.
ref_type Group::DefaultTableWriter::write_tables(_impl::OutputStream& out)
{
    return m_group->m_tables.write(out, true /* deep */, false /* only_if_modified */); // Throws
}
946

947
// Serialize the group's history to `out`, returning its ref together with
// history type/schema-version metadata. Only sync histories (client or
// server) are preserved; for any other history type, or when history
// writing is disabled, the returned info carries no ref — only the sync
// file ident.
auto Group::DefaultTableWriter::write_history(_impl::OutputStream& out) -> HistoryInfo
{
    bool deep = true;              // Deep
    bool only_if_modified = false; // Always
    ref_type history_ref = _impl::GroupFriend::get_history_ref(*m_group);
    HistoryInfo info;
    if (history_ref) {
        _impl::History::version_type version;
        int history_type, history_schema_version;
        _impl::GroupFriend::get_version_and_history_info(_impl::GroupFriend::get_alloc(*m_group),
                                                         m_group->m_top.get_ref(), version, history_type,
                                                         history_schema_version);
        // A non-zero history ref implies some history type is recorded.
        REALM_ASSERT(history_type != Replication::hist_None);
        if (!m_should_write_history ||
            (history_type != Replication::hist_SyncClient && history_type != Replication::hist_SyncServer)) {
            return info; // Only sync history should be preserved when writing to a new file
        }
        info.type = history_type;
        info.version = history_schema_version;
        Array history{const_cast<Allocator&>(_impl::GroupFriend::get_alloc(*m_group))};
        history.init_from_ref(history_ref);
        info.ref = history.write(out, deep, only_if_modified); // Throws
    }
    info.sync_file_id = m_group->get_sync_file_id();
    return info;
}
973

974
void Group::write(std::ostream& out, bool pad) const
975
{
42✔
976
    DefaultTableWriter table_writer;
42✔
977
    write(out, pad, 0, table_writer);
42✔
978
}
42✔
979

980
void Group::write(std::ostream& out, bool pad_for_encryption, uint_fast64_t version_number, TableWriter& writer) const
981
{
474✔
982
    REALM_ASSERT(is_attached());
474✔
983
    writer.set_group(this);
474✔
984
    bool no_top_array = !m_top.is_attached();
474✔
985
    write(out, m_file_format_version, writer, no_top_array, pad_for_encryption, version_number); // Throws
474✔
986
}
474✔
987

988
// Stream the group into an empty file, optionally encrypted. The stream
// buffer is sized between 4 KiB and 64 MiB, aiming for at least 1/256 of
// the space the image is expected to need.
void Group::write(File& file, const char* encryption_key, uint_fast64_t version_number, TableWriter& writer) const
{
    REALM_ASSERT(file.get_size() == 0);

    file.set_encryption_key(encryption_key);

    // The aim is that the buffer size should be at least 1/256 of needed size but less than 64 Mb
    constexpr size_t upper_bound = 64 * 1024 * 1024;
    size_t min_space = std::min(get_used_space() >> 8, upper_bound);
    size_t buffer_size = 4096;
    while (buffer_size < min_space) {
        buffer_size <<= 1;
    }
    File::Streambuf streambuf(&file, buffer_size);

    std::ostream out(&streambuf);
    // Turn stream errors into exceptions so a failed write cannot pass
    // silently.
    out.exceptions(std::ios_base::failbit | std::ios_base::badbit);
    write(out, encryption_key != 0, version_number, writer);
    int sync_status = streambuf.pubsync();
    REALM_ASSERT(sync_status == 0);
}
1009

1010
void Group::write(const std::string& path, const char* encryption_key, uint64_t version_number,
1011
                  bool write_history) const
1012
{
234✔
1013
    File file;
234✔
1014
    int flags = 0;
234✔
1015
    file.open(path, File::access_ReadWrite, File::create_Must, flags);
234✔
1016
    DefaultTableWriter table_writer(write_history);
234✔
1017
    write(file, encryption_key, version_number, table_writer);
234✔
1018
}
234✔
1019

1020

1021
// Serialize the group into a newly allocated in-memory buffer. Ownership of
// the buffer passes to the caller via the returned BinaryData.
BinaryData Group::write_to_mem() const
{
    REALM_ASSERT(is_attached());

    // The streamed image can never exceed the total size of the attached
    // file, so that is a safe upper bound for the buffer.
    size_t max_size = m_alloc.get_total_size();

    std::unique_ptr<char[]> buffer(new (std::nothrow) char[max_size]);
    if (!buffer)
        throw Exception(ErrorCodes::OutOfMemory, "Could not allocate memory while dumping to memory");

    MemoryOutputStream out; // Throws
    out.set_buffer(buffer.get(), buffer.get() + max_size);
    write(out); // Throws
    return BinaryData(buffer.release(), out.size());
}
1037

1038

1039
// Produce a complete streaming-format Realm image: header, table names,
// tables, optional free-space/version/history info, top array, optional
// encryption padding, and the streaming footer carrying the top ref.
void Group::write(std::ostream& out, int file_format_version, TableWriter& table_writer, bool no_top_array,
                  bool pad_for_encryption, uint_fast64_t version_number)
{
    _impl::OutputStream out_2(out);

    // Write the file header
    SlabAlloc::Header streaming_header;
    if (no_top_array) {
        // File format 0 marks the empty initial state of a Realm file.
        file_format_version = 0;
    }
    else if (file_format_version == 0) {
        // Use current file format version
        file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    }
    SlabAlloc::init_streaming_header(&streaming_header, file_format_version);
    out_2.write(reinterpret_cast<const char*>(&streaming_header), sizeof streaming_header);

    ref_type top_ref = 0;
    size_t final_file_size = sizeof streaming_header;
    if (no_top_array) {
        // Accept version number 1 as that number is (unfortunately) also used
        // to denote the empty initial state of a Realm file.
        REALM_ASSERT(version_number == 0 || version_number == 1);
    }
    else {
        // Because we need to include the total logical file size in the
        // top-array, we have to start by writing everything except the
        // top-array, and then finally compute and write a correct version of
        // the top-array. The free-space information of the group will only be
        // included if a non-zero version number is given as parameter,
        // indicating that versioning info is to be saved. This is used from
        // DB to compact the database by writing only the live data
        // into a separate file.
        ref_type names_ref = table_writer.write_names(out_2);   // Throws
        ref_type tables_ref = table_writer.write_tables(out_2); // Throws
        // Build the new top array in a scratch allocator; it is written out
        // last, once the final file size is known.
        SlabAlloc new_alloc;
        new_alloc.attach_empty(); // Throws
        Array top(new_alloc);
        top.create(Array::type_HasRefs); // Throws
        _impl::ShallowArrayDestroyGuard dg_top(&top);
        int_fast64_t value_1 = from_ref(names_ref);
        int_fast64_t value_2 = from_ref(tables_ref);
        top.add(value_1); // Throws
        top.add(value_2); // Throws
        top.add(0);       // Throws  (slot 2: logical file size, patched below)

        int top_size = 3;
        if (version_number) {
            TableWriter::HistoryInfo history_info = table_writer.write_history(out_2); // Throws

            // The compacted file starts with empty free-space tracking.
            Array free_list(new_alloc);
            Array size_list(new_alloc);
            Array version_list(new_alloc);
            free_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_1(&free_list);
            size_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_2(&size_list);
            version_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_3(&version_list);
            bool deep = true;              // Deep
            bool only_if_modified = false; // Always
            ref_type free_list_ref = free_list.write(out_2, deep, only_if_modified);
            ref_type size_list_ref = size_list.write(out_2, deep, only_if_modified);
            ref_type version_list_ref = version_list.write(out_2, deep, only_if_modified);
            top.add(RefOrTagged::make_ref(free_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(size_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(version_list_ref));  // Throws
            top.add(RefOrTagged::make_tagged(version_number)); // Throws
            top_size = 7;

            if (history_info.type != Replication::hist_None) {
                top.add(RefOrTagged::make_tagged(history_info.type));
                top.add(RefOrTagged::make_ref(history_info.ref));
                top.add(RefOrTagged::make_tagged(history_info.version));
                top.add(RefOrTagged::make_tagged(history_info.sync_file_id));
                top_size = s_group_max_size;
                // ^ this is too large, since the evacuation point entry is not there:
                // (but the code below is self correcting)
            }
        }
        top_ref = out_2.get_ref_of_next_array();

        // Produce a preliminary version of the top array whose
        // representation is guaranteed to be able to hold the final file
        // size
        size_t max_top_byte_size = Array::get_max_byte_size(top_size);
        size_t max_final_file_size = size_t(top_ref) + max_top_byte_size;
        top.ensure_minimum_width(RefOrTagged::make_tagged(max_final_file_size)); // Throws

        // Finalize the top array by adding the projected final file size
        // to it
        size_t top_byte_size = top.get_byte_size();
        final_file_size = size_t(top_ref) + top_byte_size;
        top.set(2, RefOrTagged::make_tagged(final_file_size)); // Throws

        // Write the top array
        bool deep = false;                        // Shallow
        bool only_if_modified = false;            // Always
        top.write(out_2, deep, only_if_modified); // Throws
        REALM_ASSERT_3(size_t(out_2.get_ref_of_next_array()), ==, final_file_size);

        dg_top.reset(nullptr); // Destroy now
    }

    // encryption will pad the file to a multiple of the page, so ensure the
    // footer is aligned to the end of a page
    if (pad_for_encryption) {
#if REALM_ENABLE_ENCRYPTION
        size_t unrounded_size = final_file_size + sizeof(SlabAlloc::StreamingFooter);
        size_t rounded_size = round_up_to_page_size(unrounded_size);
        if (rounded_size != unrounded_size) {
            std::unique_ptr<char[]> buffer(new char[rounded_size - unrounded_size]());
            out_2.write(buffer.get(), rounded_size - unrounded_size);
        }
#endif
    }

    // Write streaming footer
    SlabAlloc::StreamingFooter footer;
    footer.m_top_ref = top_ref;
    footer.m_magic_cookie = SlabAlloc::footer_magic_cookie;
    out_2.write(reinterpret_cast<const char*>(&footer), sizeof footer);
}
1162

1163

1164
// Re-seat the group accessor hierarchy on a new top ref after a commit,
// without structural changes to the accessors themselves.
void Group::update_refs(ref_type top_ref) noexcept
{
    // After Group::commit() the top array always carries free-space
    // tracking info, hence at least 5 entries.
    REALM_ASSERT_3(m_top.size(), >=, 5);

    m_top.init_from_ref(top_ref);

    // Refresh the direct children of the top array...
    m_table_names.update_from_parent();
    m_tables.update_from_parent();

    // ...and every materialized table accessor.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        if (auto acc = m_table_accessors[i])
            acc->update_from_parent();
    }
}
1183

1184
// Deep equality: every table in this group must exist (by name) in `g`,
// with matching primary-key column type, matching embeddedness, and equal
// contents. NOTE: asymmetric — tables present only in `g` are not checked.
bool Group::operator==(const Group& g) const
{
    for (auto tk : get_table_keys()) {
        const StringData& table_name = get_table_name(tk);

        ConstTableRef table_1 = get_table(tk);
        ConstTableRef table_2 = g.get_table(table_name);
        if (!table_2)
            return false;
        if (table_1->get_primary_key_column().get_type() != table_2->get_primary_key_column().get_type()) {
            return false;
        }
        if (table_1->is_embedded() != table_2->is_embedded())
            return false;
        // Embedded objects are compared through their parent objects, so an
        // embedded table is skipped here.
        if (table_1->is_embedded())
            continue;

        if (*table_1 != *table_2)
            return false;
    }
    return true;
}
1206
size_t Group::get_used_space() const noexcept
1207
{
450✔
1208
    if (!m_top.is_attached())
450✔
1209
        return 0;
12✔
1210

219✔
1211
    size_t used_space = (size_t(m_top.get(2)) >> 1);
438✔
1212

219✔
1213
    if (m_top.size() > 4) {
438✔
1214
        Array free_lengths(const_cast<SlabAlloc&>(m_alloc));
312✔
1215
        free_lengths.init_from_ref(ref_type(m_top.get(4)));
312✔
1216
        used_space -= size_t(free_lengths.get_sum());
312✔
1217
    }
312✔
1218

219✔
1219
    return used_space;
438✔
1220
}
438✔
1221

1222

1223
namespace {
1224
class TransactAdvancer : public _impl::NullInstructionObserver {
1225
public:
1226
    TransactAdvancer(Group&, bool& schema_changed)
1227
        : m_schema_changed(schema_changed)
1228
    {
41,028✔
1229
    }
41,028✔
1230

1231
    bool insert_group_level_table(TableKey) noexcept
1232
    {
11,256✔
1233
        m_schema_changed = true;
11,256✔
1234
        return true;
11,256✔
1235
    }
11,256✔
1236

1237
    bool erase_class(TableKey) noexcept
1238
    {
×
1239
        m_schema_changed = true;
×
1240
        return true;
×
1241
    }
×
1242

1243
    bool rename_class(TableKey) noexcept
1244
    {
×
1245
        m_schema_changed = true;
×
1246
        return true;
×
1247
    }
×
1248

1249
    bool insert_column(ColKey)
1250
    {
35,556✔
1251
        m_schema_changed = true;
35,556✔
1252
        return true;
35,556✔
1253
    }
35,556✔
1254

1255
    bool erase_column(ColKey)
1256
    {
×
1257
        m_schema_changed = true;
×
1258
        return true;
×
1259
    }
×
1260

1261
    bool rename_column(ColKey) noexcept
1262
    {
×
1263
        m_schema_changed = true;
×
1264
        return true; // No-op
×
1265
    }
×
1266

1267
private:
1268
    bool& m_schema_changed;
1269
};
1270
} // anonymous namespace
1271

1272

1273
void Group::update_allocator_wrappers(bool writable)
1274
{
9,232,395✔
1275
    m_is_writable = writable;
9,232,395✔
1276
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
17,737,809✔
1277
        auto table_accessor = m_table_accessors[i];
8,505,414✔
1278
        if (table_accessor) {
8,505,414✔
1279
            table_accessor->update_allocator_wrapper(writable);
6,879,681✔
1280
        }
6,879,681✔
1281
    }
8,505,414✔
1282
}
9,232,395✔
1283

1284
void Group::flush_accessors_for_commit()
1285
{
1,362,273✔
1286
    for (auto& acc : m_table_accessors)
1,362,273✔
1287
        if (acc)
5,477,343✔
1288
            acc->flush_for_commit();
3,951,627✔
1289
}
1,362,273✔
1290

1291
// Bring every materialized table accessor back in sync with the underlying
// node structure after it has changed, discarding accessors whose table was
// replaced or removed.
void Group::refresh_dirty_accessors()
{
    // A detached m_tables means the group itself is gone; drop everything.
    if (!m_tables.is_attached()) {
        m_table_accessors.clear();
        return;
    }

    // The array of Tables cannot have shrunk:
    REALM_ASSERT(m_tables.size() >= m_table_accessors.size());

    // but it may have grown - and if so, we must resize the accessor array to match
    if (m_tables.size() > m_table_accessors.size()) {
        m_table_accessors.resize(m_tables.size());
    }

    // Update all attached table accessors.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        auto& table_accessor = m_table_accessors[i];
        if (table_accessor) {
            // If the table has changed it's key in the file, it's a
            // new table. This will detach the old accessor and remove it.
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(i);
            bool same_table = false;
            if (rot.is_ref()) {
                // A tagged entry marks a removed table, so only a ref can
                // still identify the same table.
                auto ref = rot.get_as_ref();
                TableKey new_key = Table::get_key_direct(m_alloc, ref);
                if (new_key == table_accessor->get_key())
                    same_table = true;
            }
            if (same_table) {
                table_accessor->refresh_accessor_tree();
            }
            else {
                table_accessor->detach(Table::cookie_removed);
                recycle_table_accessor(table_accessor);
                m_table_accessors[i] = nullptr;
            }
        }
    }
}
1331

1332

1333
// Advance the group accessor (and all subordinate accessors) to the
// snapshot identified by `new_top_ref`, optionally scanning the transaction
// log `in` to detect schema changes for ObjectStore notification.
void Group::advance_transact(ref_type new_top_ref, util::InputStream* in, bool writable)
{
    REALM_ASSERT(is_attached());
    // Exception safety: If this function throws, the group accessor and all of
    // its subordinate accessors are left in a state that may not be fully
    // consistent. Only minimal consistency is guaranteed (see
    // AccessorConsistencyLevels). In this case, the application is required to
    // either destroy the Group object, forcing all subordinate accessors to
    // become detached, or take some other equivalent action that involves a
    // call to Group::detach(), such as terminating the transaction in progress.
    // such actions will also lead to the detachment of all subordinate
    // accessors. Until then it is an error, and unsafe if the application
    // attempts to access the group one of its subordinate accessors.
    //
    // The purpose of this function is to refresh all attached accessors after
    // the underlying node structure has undergone arbitrary change, such as
    // when a read transaction has been advanced to a later snapshot of the
    // database.
    //
    // Initially, when this function is invoked, we cannot assume any
    // correspondence between the accessor state and the underlying node
    // structure. We can assume that the hierarchy is in a state of minimal
    // consistency, and that it can be brought to a state of structural
    // correspondence using information in the transaction logs. When structural
    // correspondence is achieved, we can reliably refresh the accessor hierarchy
    // (Table::refresh_accessor_tree()) to bring it back to a fully consistent
    // state. See AccessorConsistencyLevels.
    //
    // Much of the information in the transaction logs is not used in this
    // process, because the changes have already been applied to the underlying
    // node structure. All we need to do here is to bring the accessors back
    // into a state where they correctly reflect the underlying structure (or
    // detach them if the underlying object has been removed.)
    //
    // This is no longer needed in Core, but we need to compute "schema_changed",
    // for the benefit of ObjectStore.
    bool schema_changed = false;
    if (in && has_schema_change_notification_handler()) {
        TransactAdvancer advancer(*this, schema_changed);
        _impl::TransactLogParser parser; // Throws
        parser.parse(*in, advancer);     // Throws
    }

    m_top.detach();                                           // Soft detach
    bool create_group_when_missing = false;                   // See Group::attach_shared().
    attach(new_top_ref, writable, create_group_when_missing); // Throws
    refresh_dirty_accessors();                                // Throws

    if (schema_changed)
        send_schema_change_notification();
}
1384

1385
// Ensure the top array has the history-type, history-ref, history-schema-
// version and sync-file-id slots, then record the given values. If history
// slots already exist with a non-None type, the stored type/version must
// match the requested ones.
void Group::prepare_top_for_history(int history_type, int history_schema_version, uint64_t file_ident)
{
    REALM_ASSERT(m_file_format_version >= 7);
    // Grow the top array until the history-type slot index is reachable.
    while (m_top.size() < s_hist_type_ndx) {
        m_top.add(0); // Throws
    }

    if (m_top.size() > s_hist_version_ndx) {
        // History slots already present: verify agreement before overwriting.
        int stored_history_type = int(m_top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        int stored_history_schema_version = int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        if (stored_history_type != Replication::hist_None) {
            REALM_ASSERT(stored_history_type == history_type);
            REALM_ASSERT(stored_history_schema_version == history_schema_version);
        }
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(history_type));              // Throws
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(history_schema_version)); // Throws
    }
    else {
        // No history yet
        REALM_ASSERT(m_top.size() == s_hist_type_ndx);
        ref_type history_ref = 0;                                    // No history yet
        m_top.add(RefOrTagged::make_tagged(history_type));           // Throws
        m_top.add(RefOrTagged::make_ref(history_ref));               // Throws
        m_top.add(RefOrTagged::make_tagged(history_schema_version)); // Throws
    }

    if (m_top.size() > s_sync_file_id_ndx) {
        m_top.set(s_sync_file_id_ndx, RefOrTagged::make_tagged(file_ident));
    }
    else {
        m_top.add(RefOrTagged::make_tagged(file_ident)); // Throws
    }
}
1418

1419
void Group::clear_history()
1420
{
36✔
1421
    bool has_history = (m_top.is_attached() && m_top.size() > s_hist_type_ndx);
36✔
1422
    if (has_history) {
36✔
1423
        auto hist_ref = m_top.get_as_ref(s_hist_ref_ndx);
36✔
1424
        Array::destroy_deep(hist_ref, m_top.get_alloc());
36✔
1425
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(Replication::hist_None)); // Throws
36✔
1426
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(0));                   // Throws
36✔
1427
        m_top.set(s_hist_ref_ndx, 0);                                                 // Throws
36✔
1428
    }
36✔
1429
}
36✔
1430

1431
#ifdef REALM_DEBUG // LCOV_EXCL_START ignore debug functions
1432

1433
class MemUsageVerifier : public Array::MemUsageHandler {
1434
public:
1435
    MemUsageVerifier(ref_type ref_begin, ref_type immutable_ref_end, ref_type mutable_ref_end, ref_type baseline)
1436
        : m_ref_begin(ref_begin)
1437
        , m_immutable_ref_end(immutable_ref_end)
1438
        , m_mutable_ref_end(mutable_ref_end)
1439
        , m_baseline(baseline)
1440
    {
1,279,527✔
1441
    }
1,279,527✔
1442
    void add_immutable(ref_type ref, size_t size)
1443
    {
12,632,265✔
1444
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
12,632,265✔
1445
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
12,632,265✔
1446
        REALM_ASSERT_3(size, >, 0);
12,632,265✔
1447
        REALM_ASSERT_3(ref, >=, m_ref_begin);
12,632,265✔
1448
        REALM_ASSERT_3(size, <=, m_immutable_ref_end - ref);
12,632,265✔
1449
        Chunk chunk;
12,632,265✔
1450
        chunk.ref = ref;
12,632,265✔
1451
        chunk.size = size;
12,632,265✔
1452
        m_chunks.push_back(chunk);
12,632,265✔
1453
    }
12,632,265✔
1454
    void add_mutable(ref_type ref, size_t size)
1455
    {
5,437,833✔
1456
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
5,437,833✔
1457
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
5,437,833✔
1458
        REALM_ASSERT_3(size, >, 0);
5,437,833✔
1459
        REALM_ASSERT_3(ref, >=, m_immutable_ref_end);
5,437,833✔
1460
        REALM_ASSERT_3(size, <=, m_mutable_ref_end - ref);
5,437,833✔
1461
        Chunk chunk;
5,437,833✔
1462
        chunk.ref = ref;
5,437,833✔
1463
        chunk.size = size;
5,437,833✔
1464
        m_chunks.push_back(chunk);
5,437,833✔
1465
    }
5,437,833✔
1466
    void add(ref_type ref, size_t size)
1467
    {
52,366,422✔
1468
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
52,366,422✔
1469
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
52,366,422✔
1470
        REALM_ASSERT_3(size, >, 0);
52,366,422✔
1471
        REALM_ASSERT_3(ref, >=, m_ref_begin);
52,366,422✔
1472
        REALM_ASSERT(size <= (ref < m_baseline ? m_immutable_ref_end : m_mutable_ref_end) - ref);
52,366,422✔
1473
        Chunk chunk;
52,366,422✔
1474
        chunk.ref = ref;
52,366,422✔
1475
        chunk.size = size;
52,366,422✔
1476
        m_chunks.push_back(chunk);
52,366,422✔
1477
    }
52,366,422✔
1478
    void add(const MemUsageVerifier& verifier)
1479
    {
1,916,877✔
1480
        m_chunks.insert(m_chunks.end(), verifier.m_chunks.begin(), verifier.m_chunks.end());
1,916,877✔
1481
    }
1,916,877✔
1482
    void handle(ref_type ref, size_t allocated, size_t) override
1483
    {
52,366,407✔
1484
        add(ref, allocated);
52,366,407✔
1485
    }
52,366,407✔
1486
    void canonicalize()
1487
    {
5,113,281✔
1488
        // Sort the chunks in order of increasing ref, then merge adjacent
2,556,564✔
1489
        // chunks while checking that there is no overlap
2,556,564✔
1490
        typedef std::vector<Chunk>::iterator iter;
5,113,281✔
1491
        iter i_1 = m_chunks.begin(), end = m_chunks.end();
5,113,281✔
1492
        iter i_2 = i_1;
5,113,281✔
1493
        sort(i_1, end);
5,113,281✔
1494
        if (i_1 != end) {
5,113,281✔
1495
            while (++i_2 != end) {
107,811,288✔
1496
                ref_type prev_ref_end = i_1->ref + i_1->size;
103,036,530✔
1497
                REALM_ASSERT_3(prev_ref_end, <=, i_2->ref);
103,036,530✔
1498
                if (i_2->ref == prev_ref_end) { // in-file
103,036,530✔
1499
                    i_1->size += i_2->size;     // Merge
69,796,860✔
1500
                }
69,796,860✔
1501
                else {
33,239,670✔
1502
                    *++i_1 = *i_2;
33,239,670✔
1503
                }
33,239,670✔
1504
            }
103,036,530✔
1505
            m_chunks.erase(i_1 + 1, end);
4,774,758✔
1506
        }
4,774,758✔
1507
    }
5,113,281✔
1508
    void clear()
1509
    {
1,916,877✔
1510
        m_chunks.clear();
1,916,877✔
1511
    }
1,916,877✔
1512
    void check_total_coverage()
1513
    {
639,765✔
1514
        REALM_ASSERT_3(m_chunks.size(), ==, 1);
639,765✔
1515
        REALM_ASSERT_3(m_chunks.front().ref, ==, m_ref_begin);
639,765✔
1516
        REALM_ASSERT_3(m_chunks.front().size, ==, m_mutable_ref_end - m_ref_begin);
639,765✔
1517
    }
639,765✔
1518

1519
private:
1520
    struct Chunk {
1521
        ref_type ref;
1522
        size_t size;
1523
        bool operator<(const Chunk& c) const
1524
        {
655,594,497✔
1525
            return ref < c.ref;
655,594,497✔
1526
        }
655,594,497✔
1527
    };
1528
    std::vector<Chunk> m_chunks;
1529
    ref_type m_ref_begin, m_immutable_ref_end, m_mutable_ref_end, m_baseline;
1530
};
1531

1532
#endif
1533

1534
// Check the internal consistency of the group: the allocator state, every
// table, the history (if present), and - unless this is a read transaction -
// that the used and free memory chunks together tile the entire file and
// slab address space with no overlaps and no holes.
//
// Compiles to a no-op unless REALM_DEBUG is defined.
void Group::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    m_alloc.verify();

    // A group without a top array holds no data - nothing more to check.
    if (!m_top.is_attached()) {
        return;
    }

    // Verify each table, and that it is registered under its own key
    {
        auto keys = get_table_keys();
        for (auto key : keys) {
            ConstTableRef table = get_table(key);
            REALM_ASSERT_3(table->get_key().value, ==, key.value);
            table->verify();
        }
    }

    // Verify history if present
    if (Replication* repl = *get_repl()) {
        if (auto hist = repl->_create_history_read()) {
            hist->set_group(const_cast<Group*>(this), false);
            _impl::History::version_type version = 0;
            int history_type = 0;
            int history_schema_version = 0;
            get_version_and_history_info(m_top, version, history_type, history_schema_version);
            // A schema version without a history type would be inconsistent
            REALM_ASSERT(history_type != Replication::hist_None || history_schema_version == 0);
            ref_type hist_ref = get_history_ref(m_top);
            hist->update_from_ref_and_version(hist_ref, version);
            hist->verify();
        }
    }

    if (auto tr = dynamic_cast<const Transaction*>(this)) {
        // This is a transaction
        if (tr->get_transact_stage() == DB::TransactStage::transact_Reading) {
            // Verifying the memory cannot be done from a read transaction
            // There might be a write transaction running that has freed some
            // memory that is seen as being in use in this transaction
            return;
        }
    }
    // Slot 2 of the top array holds the logical file size
    size_t logical_file_size = to_size_t(m_top.get_as_ref_or_tagged(2).get_as_int());
    size_t ref_begin = sizeof(SlabAlloc::Header);
    ref_type real_immutable_ref_end = logical_file_size;
    ref_type real_mutable_ref_end = m_alloc.get_total_size();
    ref_type real_baseline = m_alloc.get_baseline();
    // Fake that any empty area between the file and slab is part of the file (immutable):
    ref_type immutable_ref_end = m_alloc.align_size_to_section_boundary(real_immutable_ref_end);
    ref_type mutable_ref_end = m_alloc.align_size_to_section_boundary(real_mutable_ref_end);
    ref_type baseline = m_alloc.align_size_to_section_boundary(real_baseline);

    // Check the consistency of the allocation of used memory.
    // mem_usage_1 accumulates all chunks; mem_usage_2 is reused as a scratch
    // verifier for each category of free memory added below.
    MemUsageVerifier mem_usage_1(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    m_top.report_memory_usage(mem_usage_1);
    mem_usage_1.canonicalize();

    // Check consistency of the allocation of the immutable memory that was
    // marked as free before the file was opened.
    MemUsageVerifier mem_usage_2(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    {
        // The top array only ever has one of these sizes, depending on which
        // optional trailing slots (free-lists, history, file ident) exist
        REALM_ASSERT_EX(m_top.size() == 3 || m_top.size() == 5 || m_top.size() == 7 || m_top.size() >= 10,
                        m_top.size());
        Allocator& alloc = m_top.get_alloc();
        Array pos(alloc), len(alloc), ver(alloc);
        pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
        len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
        ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
        if (m_top.size() > s_free_pos_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
                pos.init_from_ref(ref);
        }
        if (m_top.size() > s_free_size_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
                len.init_from_ref(ref);
        }
        if (m_top.size() > s_free_version_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
                ver.init_from_ref(ref);
        }
        // Positions and sizes must exist together; versions only with both
        REALM_ASSERT(pos.is_attached() == len.is_attached());
        REALM_ASSERT(pos.is_attached() || !ver.is_attached()); // pos.is_attached() <== ver.is_attached()
        if (pos.is_attached()) {
            size_t n = pos.size();
            REALM_ASSERT_3(n, ==, len.size());
            if (ver.is_attached())
                REALM_ASSERT_3(n, ==, ver.size());
            for (size_t i = 0; i != n; ++i) {
                ref_type ref = to_ref(pos.get(i));
                size_t size_of_i = to_size_t(len.get(i));
                mem_usage_2.add_immutable(ref, size_of_i);
            }
            mem_usage_2.canonicalize();
            mem_usage_1.add(mem_usage_2);
            mem_usage_1.canonicalize();
            mem_usage_2.clear();
        }
    }

    // Check the consistency of the allocation of the immutable memory that has
    // been marked as free after the file was opened
    for (const auto& free_block : m_alloc.m_free_read_only) {
        mem_usage_2.add_immutable(free_block.first, free_block.second);
    }
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // Check the consistency of the allocation of the mutable memory that has
    // been marked as free
    m_alloc.for_all_free_entries([&](ref_type ref, size_t sz) {
        mem_usage_2.add_mutable(ref, sz);
    });
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // There may be a hole between the end of file and the beginning of the slab area.
    // We need to take that into account here.
    REALM_ASSERT_3(real_immutable_ref_end, <=, real_baseline);
    auto slab_start = immutable_ref_end;
    if (real_immutable_ref_end < slab_start) {
        ref_type ref = real_immutable_ref_end;
        size_t corrected_size = slab_start - real_immutable_ref_end;
        mem_usage_1.add_immutable(ref, corrected_size);
        mem_usage_1.canonicalize();
    }

    // At this point we have accounted for all memory managed by the slab
    // allocator
    mem_usage_1.check_total_coverage();
#endif
}
639,759✔
1672

1673
void Group::validate_primary_columns()
1674
{
480✔
1675
    auto table_keys = this->get_table_keys();
480✔
1676
    for (auto tk : table_keys) {
1,800✔
1677
        auto table = get_table(tk);
1,800✔
1678
        table->validate_primary_column();
1,800✔
1679
    }
1,800✔
1680
}
480✔
1681

1682
#ifdef REALM_DEBUG
1683

1684
MemStats Group::get_stats()
{
    // Collect memory statistics for the tree rooted at the top array.
    MemStats stats;
    m_top.stats(stats);
    return stats;
}
×
1691

1692

1693
// Debug-only helper: forwards to the slab allocator's own print routine.
void Group::print() const
{
    m_alloc.print();
}
×
1697

1698

1699
void Group::print_free() const
1700
{
×
1701
    Allocator& alloc = m_top.get_alloc();
×
1702
    Array pos(alloc), len(alloc), ver(alloc);
×
1703
    pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
×
1704
    len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
×
1705
    ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
×
1706
    if (m_top.size() > s_free_pos_ndx) {
×
1707
        if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
×
1708
            pos.init_from_ref(ref);
×
1709
    }
×
1710
    if (m_top.size() > s_free_size_ndx) {
×
1711
        if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
×
1712
            len.init_from_ref(ref);
×
1713
    }
×
1714
    if (m_top.size() > s_free_version_ndx) {
×
1715
        if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
×
1716
            ver.init_from_ref(ref);
×
1717
    }
×
1718

1719
    if (!pos.is_attached()) {
×
1720
        std::cout << "none\n";
×
1721
        return;
×
1722
    }
×
1723
    bool has_versions = ver.is_attached();
×
1724

1725
    size_t n = pos.size();
×
1726
    for (size_t i = 0; i != n; ++i) {
×
1727
        size_t offset = to_size_t(pos.get(i));
×
1728
        size_t size_of_i = to_size_t(len.get(i));
×
1729
        std::cout << i << ": " << offset << " " << size_of_i;
×
1730

1731
        if (has_versions) {
×
1732
            size_t version = to_size_t(ver.get(i));
×
1733
            std::cout << " " << version;
×
1734
        }
×
1735
        std::cout << "\n";
×
1736
    }
×
1737
    std::cout << "\n";
×
1738
}
×
1739
#endif
1740

1741
// LCOV_EXCL_STOP ignore debug functions
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc