• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / jorgen.edelbo_337

03 Jul 2024 01:04PM UTC coverage: 90.864% (-0.1%) from 90.984%
jorgen.edelbo_337

Pull #7826

Evergreen

nicola-cab
Merge branch 'master' of github.com:realm/realm-core into next-major
Pull Request #7826: Merge Next major

102968 of 181176 branches covered (56.83%)

3131 of 3738 new or added lines in 54 files covered. (83.76%)

106 existing lines in 23 files now uncovered.

217725 of 239616 relevant lines covered (90.86%)

6844960.2 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.1
/src/realm/group.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <new>
20
#include <algorithm>
21
#include <fstream>
22

23
#ifdef REALM_DEBUG
24
#include <iostream>
25
#include <iomanip>
26
#endif
27

28
#include <realm/util/file_mapper.hpp>
29
#include <realm/util/memory_stream.hpp>
30
#include <realm/util/thread.hpp>
31
#include <realm/impl/destroy_guard.hpp>
32
#include <realm/utilities.hpp>
33
#include <realm/exceptions.hpp>
34
#include <realm/group_writer.hpp>
35
#include <realm/transaction.hpp>
36
#include <realm/replication.hpp>
37

38
using namespace realm;
39
using namespace realm::util;
40

41
namespace {
42

43
class Initialization {
44
public:
45
    Initialization()
46
    {
24✔
47
        realm::cpuid_init();
24✔
48
    }
24✔
49
};
50

51
Initialization initialization;
52

53
} // anonymous namespace
54

55
Group::Group()
56
    : m_local_alloc(new SlabAlloc)
2,184✔
57
    , m_alloc(*m_local_alloc) // Throws
2,184✔
58
    , m_top(m_alloc)
2,184✔
59
    , m_tables(m_alloc)
2,184✔
60
    , m_table_names(m_alloc)
2,184✔
61
{
4,368✔
62
    init_array_parents();
4,368✔
63
    m_alloc.attach_empty(); // Throws
4,368✔
64
    m_file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
4,368✔
65
    ref_type top_ref = 0; // Instantiate a new empty group
4,368✔
66
    bool create_group_when_missing = true;
4,368✔
67
    bool writable = create_group_when_missing;
4,368✔
68
    attach(top_ref, writable, create_group_when_missing); // Throws
4,368✔
69
}
4,368✔
70

71

72
Group::Group(const std::string& file_path, const char* encryption_key)
73
    : m_local_alloc(new SlabAlloc) // Throws
339✔
74
    , m_alloc(*m_local_alloc)
339✔
75
    , m_top(m_alloc)
339✔
76
    , m_tables(m_alloc)
339✔
77
    , m_table_names(m_alloc)
339✔
78
{
870✔
79
    init_array_parents();
870✔
80

81
    SlabAlloc::Config cfg;
870✔
82
    cfg.read_only = true;
870✔
83
    cfg.no_create = true;
870✔
84
    cfg.encryption_key = encryption_key;
870✔
85
    ref_type top_ref = m_alloc.attach_file(file_path, cfg); // Throws
870✔
86
    // Non-Transaction Groups always allow writing and simply don't allow
87
    // committing when opened in read-only mode
88
    m_alloc.set_read_only(false);
870✔
89

90
    open(top_ref, file_path);
870✔
91
}
870✔
92

93

94
Group::Group(BinaryData buffer, bool take_ownership)
95
    : m_local_alloc(new SlabAlloc) // Throws
24✔
96
    , m_alloc(*m_local_alloc)
24✔
97
    , m_top(m_alloc)
24✔
98
    , m_tables(m_alloc)
24✔
99
    , m_table_names(m_alloc)
24✔
100
{
48✔
101
    REALM_ASSERT(buffer.data());
48✔
102

103
    init_array_parents();
48✔
104
    ref_type top_ref = m_alloc.attach_buffer(buffer.data(), buffer.size()); // Throws
48✔
105

106
    open(top_ref, {});
48✔
107

108
    if (take_ownership)
48✔
109
        m_alloc.own_buffer();
36✔
110
}
48✔
111

112
Group::Group(SlabAlloc* alloc) noexcept
113
    : m_alloc(*alloc)
1,180,785✔
114
    , // Throws
115
    m_top(m_alloc)
1,180,785✔
116
    , m_tables(m_alloc)
1,180,785✔
117
    , m_table_names(m_alloc)
1,180,785✔
118
{
1,869,102✔
119
    init_array_parents();
1,869,102✔
120
}
1,869,102✔
121

122
namespace {
123

124
class TableRecycler : public std::vector<Table*> {
125
public:
126
    ~TableRecycler()
127
    {
×
128
        REALM_UNREACHABLE();
129
        // if ever enabled, remember to release Tables:
×
130
        // for (auto t : *this) {
×
131
        //    delete t;
×
132
        //}
×
133
    }
×
134
};
135

136
// We use the classic approach to construct a FIFO from two LIFO's,
137
// insertion is done into recycler_1, removal is done from recycler_2,
138
// and when recycler_2 is empty, recycler_1 is reversed into recycler_2.
139
// this is O(1) for each entry.
140
auto& g_table_recycler_1 = *new TableRecycler;
141
auto& g_table_recycler_2 = *new TableRecycler;
142
// number of tables held back before being recycled. We hold back recycling
143
// the latest to increase the probability of detecting race conditions
144
// without crashing.
145
const static int g_table_recycling_delay = 100;
146
auto& g_table_recycler_mutex = *new std::mutex;
147

148
} // namespace
149

150
TableKeyIterator& TableKeyIterator::operator++()
151
{
1,203,132✔
152
    m_pos++;
1,203,132✔
153
    m_index_in_group++;
1,203,132✔
154
    load_key();
1,203,132✔
155
    return *this;
1,203,132✔
156
}
1,203,132✔
157

158
TableKey TableKeyIterator::operator*()
159
{
1,209,123✔
160
    if (!bool(m_table_key)) {
1,209,123✔
161
        load_key();
386,958✔
162
    }
386,958✔
163
    return m_table_key;
1,209,123✔
164
}
1,209,123✔
165

166
void TableKeyIterator::load_key()
167
{
1,590,093✔
168
    const Group& g = *m_group;
1,590,093✔
169
    size_t max_index_in_group = g.m_table_names.size();
1,590,093✔
170
    while (m_index_in_group < max_index_in_group) {
1,602,081✔
171
        RefOrTagged rot = g.m_tables.get_as_ref_or_tagged(m_index_in_group);
1,221,117✔
172
        if (rot.is_ref()) {
1,221,117✔
173
            Table* t;
1,209,129✔
174
            if (m_index_in_group < g.m_table_accessors.size() &&
1,209,129✔
175
                (t = load_atomic(g.m_table_accessors[m_index_in_group], std::memory_order_acquire))) {
1,209,129✔
176
                m_table_key = t->get_key();
823,299✔
177
            }
823,299✔
178
            else {
385,830✔
179
                m_table_key = Table::get_key_direct(g.m_tables.get_alloc(), rot.get_as_ref());
385,830✔
180
            }
385,830✔
181
            return;
1,209,129✔
182
        }
1,209,129✔
183
        m_index_in_group++;
11,988✔
184
    }
11,988✔
185
    m_table_key = TableKey();
380,964✔
186
}
380,964✔
187

188
TableKey TableKeys::operator[](size_t p) const
189
{
669✔
190
    if (p < m_iter.m_pos) {
669✔
191
        m_iter = TableKeyIterator(m_iter.m_group, 0);
×
192
    }
×
193
    while (m_iter.m_pos < p) {
798✔
194
        ++m_iter;
129✔
195
    }
129✔
196
    return *m_iter;
669✔
197
}
669✔
198

199
size_t Group::size() const noexcept
200
{
772,812✔
201
    return m_num_tables;
772,812✔
202
}
772,812✔
203

204

205
void Group::set_size() const noexcept
206
{
2,092,653✔
207
    int retval = 0;
2,092,653✔
208
    if (is_attached() && m_table_names.is_attached()) {
2,094,051✔
209
        size_t max_index = m_tables.size();
1,960,818✔
210
        REALM_ASSERT_EX(max_index < (1 << 16), max_index);
1,960,818✔
211
        for (size_t j = 0; j < max_index; ++j) {
6,640,584✔
212
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(j);
4,679,766✔
213
            if (rot.is_ref() && rot.get_as_ref()) {
4,679,766✔
214
                ++retval;
4,648,794✔
215
            }
4,648,794✔
216
        }
4,679,766✔
217
    }
1,960,818✔
218
    m_num_tables = retval;
2,092,653✔
219
}
2,092,653✔
220

221
std::map<TableRef, ColKey> Group::get_primary_key_columns_from_pk_table(TableRef pk_table)
222
{
×
223
    std::map<TableRef, ColKey> ret;
×
224
    REALM_ASSERT(pk_table);
×
225
    ColKey col_table = pk_table->get_column_key("pk_table");
×
226
    ColKey col_prop = pk_table->get_column_key("pk_property");
×
227
    for (auto pk_obj : *pk_table) {
×
228
        auto object_type = pk_obj.get<String>(col_table);
×
229
        auto name = std::string(g_class_name_prefix) + std::string(object_type);
×
230
        auto table = get_table(name);
×
231
        auto pk_col_name = pk_obj.get<String>(col_prop);
×
232
        auto pk_col = table->get_column_key(pk_col_name);
×
233
        ret.emplace(table, pk_col);
×
234
    }
×
235

236
    return ret;
×
237
}
×
238

239
TableKey Group::ndx2key(size_t ndx) const
240
{
420✔
241
    REALM_ASSERT(is_attached());
420✔
242
    Table* accessor = load_atomic(m_table_accessors[ndx], std::memory_order_acquire);
420✔
243
    if (accessor)
420✔
244
        return accessor->get_key(); // fast path
216✔
245

246
    // slow path:
247
    RefOrTagged rot = m_tables.get_as_ref_or_tagged(ndx);
204✔
248
    if (rot.is_tagged())
204✔
249
        throw NoSuchTable();
×
250
    ref_type ref = rot.get_as_ref();
204✔
251
    REALM_ASSERT(ref);
204✔
252
    return Table::get_key_direct(m_tables.get_alloc(), ref);
204✔
253
}
204✔
254

255
size_t Group::key2ndx_checked(TableKey key) const
256
{
28,490,106✔
257
    size_t idx = key2ndx(key);
28,490,106✔
258
    // early out
259
    // note: don't lock when accessing m_table_accessors, because if we miss a concurrently introduced table
260
    // accessor, we'll just fall through to the slow path. Table accessors can be introduced concurrently,
261
    // but never removed. The following is only safe because 'm_table_accessors' will not be relocated
262
    // concurrently. (We aim to be safe in face of concurrent access to a frozen transaction, where tables
263
    // cannot be added or removed. All other races are undefined behaviour)
264
    if (idx < m_table_accessors.size()) {
28,688,148✔
265
        Table* tbl = load_atomic(m_table_accessors[idx], std::memory_order_acquire);
28,633,470✔
266
        if (tbl && tbl->get_key() == key)
28,633,470✔
267
            return idx;
27,570,024✔
268
    }
28,633,470✔
269
    // The notion of a const group as it is now, is not really
270
    // useful. It is linked to a distinction between a read
271
    // and a write transaction. This distinction is no longer
272
    // a compile time aspect (it's not const anymore)
273
    Allocator* alloc = const_cast<SlabAlloc*>(&m_alloc);
920,082✔
274
    if (m_tables.is_attached() && idx < m_tables.size()) {
1,319,445✔
275
        RefOrTagged rot = m_tables.get_as_ref_or_tagged(idx);
1,318,782✔
276
        if (rot.is_ref() && rot.get_as_ref() && (Table::get_key_direct(*alloc, rot.get_as_ref()) == key)) {
1,318,794✔
277

278
            return idx;
1,317,477✔
279
        }
1,317,477✔
280
    }
1,318,782✔
281
    throw NoSuchTable();
4,294,967,294✔
282
}
920,082✔
283

284

285
int Group::get_file_format_version() const noexcept
286
{
414,849✔
287
    return m_file_format_version;
414,849✔
288
}
414,849✔
289

290

291
void Group::set_file_format_version(int file_format) noexcept
292
{
1,867,872✔
293
    m_file_format_version = file_format;
1,867,872✔
294
}
1,867,872✔
295

296

297
int Group::get_committed_file_format_version() const noexcept
298
{
×
299
    return m_alloc.get_committed_file_format_version();
×
300
}
×
301

302
std::optional<int> Group::fake_target_file_format;
303

304
void _impl::GroupFriend::fake_target_file_format(const std::optional<int> format) noexcept
305
{
72✔
306
    Group::fake_target_file_format = format;
72✔
307
}
72✔
308

309
int Group::get_target_file_format_version_for_session(int current_file_format_version,
310
                                                      int requested_history_type) noexcept
311
{
104,145✔
312
    if (Group::fake_target_file_format) {
104,145✔
313
        return *Group::fake_target_file_format;
72✔
314
    }
72✔
315
    // Note: This function is responsible for choosing the target file format
316
    // for a sessions. If it selects a file format that is different from
317
    // `current_file_format_version`, it will trigger a file format upgrade
318
    // process.
319

320
    // Note: `current_file_format_version` may be zero at this time, which means
321
    // that the file format it is not yet decided (only possible for empty
322
    // Realms where top-ref is zero).
323

324
    // Please see Group::get_file_format_version() for information about the
325
    // individual file format versions.
326

327
    if (requested_history_type == Replication::hist_None) {
104,073✔
328
        if (current_file_format_version == 24) {
35,214✔
329
            // We are able to open these file formats in RO mode
330
            return current_file_format_version;
24,972✔
331
        }
24,972✔
332
    }
35,214✔
333

334
    return g_current_file_format_version;
79,101✔
335
}
104,073✔
336

337
void Group::get_version_and_history_info(const Array& top, _impl::History::version_type& version, int& history_type,
338
                                         int& history_schema_version) noexcept
339
{
117,528✔
340
    using version_type = _impl::History::version_type;
117,528✔
341
    version_type version_2 = 0;
117,528✔
342
    int history_type_2 = 0;
117,528✔
343
    int history_schema_version_2 = 0;
117,528✔
344
    if (top.is_attached()) {
117,528✔
345
        if (top.size() > s_version_ndx) {
77,310✔
346
            version_2 = version_type(top.get_as_ref_or_tagged(s_version_ndx).get_as_int());
76,986✔
347
        }
76,986✔
348
        if (top.size() > s_hist_type_ndx) {
77,310✔
349
            history_type_2 = int(top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
73,968✔
350
        }
73,968✔
351
        if (top.size() > s_hist_version_ndx) {
77,310✔
352
            history_schema_version_2 = int(top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
73,968✔
353
        }
73,968✔
354
    }
77,310✔
355
    // Version 0 is not a legal initial version, so it has to be set to 1
356
    // instead.
357
    if (version_2 == 0)
117,528✔
358
        version_2 = 1;
42,318✔
359
    version = version_2;
117,528✔
360
    history_type = history_type_2;
117,528✔
361
    history_schema_version = history_schema_version_2;
117,528✔
362
}
117,528✔
363

364
int Group::get_history_schema_version() noexcept
365
{
26,100✔
366
    bool history_schema_version = (m_top.is_attached() && m_top.size() > s_hist_version_ndx);
26,100✔
367
    if (history_schema_version) {
26,100✔
368
        return int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
942✔
369
    }
942✔
370
    return 0;
25,158✔
371
}
26,100✔
372

373
uint64_t Group::get_sync_file_id() const noexcept
374
{
13,649,721✔
375
    if (m_top.is_attached() && m_top.size() > s_sync_file_id_ndx) {
13,649,721✔
376
        return uint64_t(m_top.get_as_ref_or_tagged(s_sync_file_id_ndx).get_as_int());
6,061,095✔
377
    }
6,061,095✔
378
    auto repl = get_replication();
7,588,626✔
379
    if (repl && repl->get_history_type() == Replication::hist_SyncServer) {
7,588,626✔
380
        return 1;
2,826✔
381
    }
2,826✔
382
    return 0;
7,585,800✔
383
}
7,588,626✔
384

385
size_t Group::get_free_space_size(const Array& top) noexcept
386
{
24,186✔
387
    if (top.is_attached() && top.size() > s_free_size_ndx) {
24,186✔
388
        auto ref = top.get_as_ref(s_free_size_ndx);
24,156✔
389
        Array free_list_sizes(top.get_alloc());
24,156✔
390
        free_list_sizes.init_from_ref(ref);
24,156✔
391
        return size_t(free_list_sizes.get_sum());
24,156✔
392
    }
24,156✔
393
    return 0;
30✔
394
}
24,186✔
395

396
size_t Group::get_history_size(const Array& top) noexcept
397
{
24,186✔
398
    if (top.is_attached() && top.size() > s_hist_ref_ndx) {
24,186✔
399
        auto ref = top.get_as_ref(s_hist_ref_ndx);
156✔
400
        Array hist(top.get_alloc());
156✔
401
        hist.init_from_ref(ref);
156✔
402
        return hist.get_byte_size_deep();
156✔
403
    }
156✔
404
    return 0;
24,030✔
405
}
24,186✔
406

407
int Group::read_only_version_check(SlabAlloc& alloc, ref_type top_ref, const std::string& path)
408
{
1,056✔
409
    // Select file format if it is still undecided.
410
    auto file_format_version = alloc.get_committed_file_format_version();
1,056✔
411

412
    bool file_format_ok = false;
1,056✔
413
    // It is not possible to open prior file format versions without an upgrade.
414
    // Since a Realm file cannot be upgraded when opened in this mode
415
    // (we may be unable to write to the file), no earlier versions can be opened.
416
    // Please see Group::get_file_format_version() for information about the
417
    // individual file format versions.
418
    switch (file_format_version) {
1,056✔
419
        case 0:
6✔
420
            file_format_ok = (top_ref == 0);
6✔
421
            break;
6✔
422
        case g_current_file_format_version:
1,014✔
423
            file_format_ok = true;
1,014✔
424
            break;
1,014✔
425
    }
1,056✔
426
    if (REALM_UNLIKELY(!file_format_ok))
1,056✔
427
        throw FileAccessError(ErrorCodes::FileFormatUpgradeRequired,
36✔
428
                              util::format("Realm file at path '%1' cannot be opened in read-only mode because it "
36✔
429
                                           "has a file format version (%2) which requires an upgrade",
36✔
430
                                           path, file_format_version),
36✔
431
                              path);
36✔
432
    return file_format_version;
1,020✔
433
}
1,056✔
434

435
void Group::open(ref_type top_ref, const std::string& file_path)
436
{
870✔
437
    SlabAlloc::DetachGuard dg(m_alloc);
870✔
438
    m_file_format_version = read_only_version_check(m_alloc, top_ref, file_path);
870✔
439

440
    Replication::HistoryType history_type = Replication::hist_None;
870✔
441
    int target_file_format_version = get_target_file_format_version_for_session(m_file_format_version, history_type);
870✔
442
    if (m_file_format_version == 0) {
870✔
443
        set_file_format_version(target_file_format_version);
6✔
444
    }
6✔
445
    else {
864✔
446
        // From a technical point of view, we could upgrade the Realm file
447
        // format in memory here, but since upgrading can be expensive, it is
448
        // currently disallowed.
449
        REALM_ASSERT(target_file_format_version == m_file_format_version);
864✔
450
    }
864✔
451

452
    // Make all dynamically allocated memory (space beyond the attached file) as
453
    // available free-space.
454
    reset_free_space_tracking(); // Throws
870✔
455

456
    bool create_group_when_missing = true;
870✔
457
    bool writable = create_group_when_missing;
870✔
458
    attach(top_ref, writable, create_group_when_missing); // Throws
870✔
459
    dg.release();                                         // Do not detach after all
870✔
460
}
870✔
461

462
Group::~Group() noexcept
463
{
1,874,232✔
464
    // If this group accessor is detached at this point in time, it is either
465
    // because it is DB::m_group (m_is_shared), or it is a free-standing
466
    // group accessor that was never successfully opened.
467
    if (!m_top.is_attached())
1,874,232✔
468
        return;
1,868,865✔
469

470
    // Free-standing group accessor
471
    detach();
5,367✔
472

473
    // if a local allocator is set in m_local_alloc, then the destruction
474
    // of m_local_alloc will trigger destruction of the allocator, which will
475
    // verify that the allocator has been detached, so....
476
    if (m_local_alloc)
5,367✔
477
        m_local_alloc->detach();
5,202✔
478
}
5,367✔
479

480
void Group::remap_and_update_refs(ref_type new_top_ref, size_t new_file_size, bool writable)
481
{
364,974✔
482
    m_alloc.update_reader_view(new_file_size); // Throws
364,974✔
483
    update_allocator_wrappers(writable);
364,974✔
484

485
    // force update of all ref->ptr translations if the mapping has changed
486
    auto mapping_version = m_alloc.get_mapping_version();
364,974✔
487
    if (mapping_version != m_last_seen_mapping_version) {
364,974✔
488
        m_last_seen_mapping_version = mapping_version;
177,468✔
489
    }
177,468✔
490
    update_refs(new_top_ref);
364,974✔
491
}
364,974✔
492

493
void Group::update_table_accessors()
494
{
5,352✔
495
    for (unsigned j = 0; j < m_table_accessors.size(); ++j) {
15,984✔
496
        Table* table = m_table_accessors[j];
10,632✔
497
        // this should be filtered further as an optimization
498
        if (table) {
10,632✔
499
            table->refresh_allocator_wrapper();
10,608✔
500
            table->update_from_parent();
10,608✔
501
        }
10,608✔
502
    }
10,632✔
503
}
5,352✔
504

505

506
void Group::validate_top_array(const Array& arr, const SlabAlloc& alloc, std::optional<size_t> read_lock_file_size,
507
                               std::optional<uint_fast64_t> read_lock_version)
508
{
1,980,210✔
509
    size_t top_size = arr.size();
1,980,210✔
510
    ref_type top_ref = arr.get_ref();
1,980,210✔
511

512
    switch (top_size) {
1,980,210✔
513
        // These are the valid sizes
514
        case 3:
582✔
515
        case 5:
582✔
516
        case 7:
112,401✔
517
        case 9:
112,401✔
518
        case 10:
112,401✔
519
        case 11:
1,968,618✔
520
        case 12: {
1,979,886✔
521
            ref_type table_names_ref = arr.get_as_ref_or_tagged(s_table_name_ndx).get_as_ref();
1,979,886✔
522
            ref_type tables_ref = arr.get_as_ref_or_tagged(s_table_refs_ndx).get_as_ref();
1,979,886✔
523
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
1,979,886✔
524

525
            // Logical file size must never exceed actual file size.
526
            auto file_size = alloc.get_baseline();
1,979,886✔
527
            if (logical_file_size > file_size) {
1,979,886✔
528
                std::string err = util::format("Invalid logical file size: %1, actual file size: %2, read lock file "
×
529
                                               "size: %3, read lock version: %4",
×
530
                                               logical_file_size, file_size, read_lock_file_size, read_lock_version);
×
531
                throw InvalidDatabase(err, "");
×
532
            }
×
533
            // First two entries must be valid refs pointing inside the file
534
            auto invalid_ref = [logical_file_size](ref_type ref) {
3,955,821✔
535
                return ref == 0 || (ref & 7) || ref > logical_file_size;
3,956,103✔
536
            };
3,955,821✔
537
            if (invalid_ref(table_names_ref) || invalid_ref(tables_ref)) {
1,979,886✔
538
                std::string err = util::format(
×
539
                    "Invalid top array (top_ref, [0], [1]): %1, %2, %3, read lock size: %4, read lock version: %5",
×
540
                    top_ref, table_names_ref, tables_ref, read_lock_file_size, read_lock_version);
×
541
                throw InvalidDatabase(err, "");
×
542
            }
×
543
            break;
1,979,886✔
544
        }
1,979,886✔
545
        default: {
1,979,886✔
546
            auto logical_file_size = arr.get_as_ref_or_tagged(s_file_size_ndx).get_as_int();
×
547
            std::string err =
×
548
                util::format("Invalid top array size (ref: %1, array size: %2) file size: %3, read "
×
549
                             "lock size: %4, read lock version: %5",
×
550
                             top_ref, top_size, logical_file_size, read_lock_file_size, read_lock_version);
×
551
            throw InvalidDatabase(err, "");
×
552
            break;
×
553
        }
1,979,886✔
554
    }
1,980,210✔
555
}
1,980,210✔
556

557
void Group::attach(ref_type top_ref, bool writable, bool create_group_when_missing, size_t file_size,
558
                   uint_fast64_t version)
559
{
2,099,550✔
560
    REALM_ASSERT(!m_top.is_attached());
2,099,550✔
561
    if (create_group_when_missing)
2,099,550✔
562
        REALM_ASSERT(writable);
2,099,550✔
563

564
    // If this function throws, it must leave the group accessor in the
565
    // unattached state.
566

567
    m_tables.detach();
2,099,550✔
568
    m_table_names.detach();
2,099,550✔
569
    m_is_writable = writable;
2,099,550✔
570

571
    if (top_ref != 0) {
2,099,550✔
572
        m_top.init_from_ref(top_ref);
1,952,292✔
573
        validate_top_array(m_top, m_alloc, file_size, version);
1,952,292✔
574
        m_table_names.init_from_parent();
1,952,292✔
575
        m_tables.init_from_parent();
1,952,292✔
576
    }
1,952,292✔
577
    else if (create_group_when_missing) {
147,258✔
578
        create_empty_group(); // Throws
13,527✔
579
    }
13,527✔
580
    m_attached = true;
2,099,550✔
581
    set_size();
2,099,550✔
582

583
    size_t sz = m_tables.is_attached() ? m_tables.size() : 0;
2,099,550✔
584
    while (m_table_accessors.size() > sz) {
2,099,655✔
585
        if (Table* t = m_table_accessors.back()) {
105✔
586
            t->detach(Table::cookie_void);
96✔
587
            recycle_table_accessor(t);
96✔
588
        }
96✔
589
        m_table_accessors.pop_back();
105✔
590
    }
105✔
591
    while (m_table_accessors.size() < sz) {
6,329,352✔
592
        m_table_accessors.emplace_back();
4,229,802✔
593
    }
4,229,802✔
594
}
2,099,550✔
595

596

597
void Group::detach() noexcept
598
{
1,872,183✔
599
    detach_table_accessors();
1,872,183✔
600
    m_table_accessors.clear();
1,872,183✔
601

602
    m_table_names.detach();
1,872,183✔
603
    m_tables.detach();
1,872,183✔
604
    m_top.detach();
1,872,183✔
605

606
    m_attached = false;
1,872,183✔
607
}
1,872,183✔
608

609
void Group::attach_shared(ref_type new_top_ref, size_t new_file_size, bool writable, VersionID version)
610
{
1,868,634✔
611
    REALM_ASSERT_3(new_top_ref, <, new_file_size);
1,868,634✔
612
    REALM_ASSERT(!is_attached());
1,868,634✔
613

614
    // update readers view of memory
615
    m_alloc.update_reader_view(new_file_size); // Throws
1,868,634✔
616
    update_allocator_wrappers(writable);
1,868,634✔
617

618
    // When `new_top_ref` is null, ask attach() to create a new node structure
619
    // for an empty group, but only during the initiation of write
620
    // transactions. When the transaction being initiated is a read transaction,
621
    // we instead have to leave array accessors m_top, m_tables, and
622
    // m_table_names in their detached state, as there are no underlying array
623
    // nodes to attached them to. In the case of write transactions, the nodes
624
    // have to be created, as they have to be ready for being modified.
625
    bool create_group_when_missing = writable;
1,868,634✔
626
    attach(new_top_ref, writable, create_group_when_missing, new_file_size, version.version); // Throws
1,868,634✔
627
}
1,868,634✔
628

629

630
void Group::detach_table_accessors() noexcept
631
{
1,872,336✔
632
    for (auto& table_accessor : m_table_accessors) {
4,512,618✔
633
        if (Table* t = table_accessor) {
4,512,618✔
634
            t->detach(Table::cookie_transaction_ended);
2,012,109✔
635
            recycle_table_accessor(t);
2,012,109✔
636
            table_accessor = nullptr;
2,012,109✔
637
        }
2,012,109✔
638
    }
4,512,618✔
639
}
1,872,336✔
640

641

642
void Group::create_empty_group()
643
{
70,272✔
644
    m_top.create(Array::type_HasRefs); // Throws
70,272✔
645
    _impl::DeepArrayDestroyGuard dg_top(&m_top);
70,272✔
646
    {
70,272✔
647
        m_table_names.create(); // Throws
70,272✔
648
        _impl::DestroyGuard<ArrayStringShort> dg(&m_table_names);
70,272✔
649
        m_top.add(m_table_names.get_ref()); // Throws
70,272✔
650
        dg.release();
70,272✔
651
    }
70,272✔
652
    {
70,272✔
653
        m_tables.create(Array::type_HasRefs); // Throws
70,272✔
654
        _impl::DestroyGuard<Array> dg(&m_tables);
70,272✔
655
        m_top.add(m_tables.get_ref()); // Throws
70,272✔
656
        dg.release();
70,272✔
657
    }
70,272✔
658
    size_t initial_logical_file_size = sizeof(SlabAlloc::Header);
70,272✔
659
    m_top.add(RefOrTagged::make_tagged(initial_logical_file_size)); // Throws
70,272✔
660
    dg_top.release();
70,272✔
661
}
70,272✔
662

663

664
Table* Group::do_get_table(size_t table_ndx)
665
{
32,014,284✔
666
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());
32,014,284✔
667
    // Get table accessor from cache if it exists, else create
668
    Table* table = load_atomic(m_table_accessors[table_ndx], std::memory_order_acquire);
32,014,284✔
669
    if (!table) {
32,014,284✔
670
        // double-checked locking idiom
671
        std::lock_guard<std::mutex> lock(m_accessor_mutex);
1,740,861✔
672
        table = m_table_accessors[table_ndx];
1,740,861✔
673
        if (!table)
1,740,861✔
674
            table = create_table_accessor(table_ndx); // Throws
1,734,816✔
675
    }
1,740,861✔
676
    return table;
32,014,284✔
677
}
32,014,284✔
678

679

680
Table* Group::do_get_table(StringData name)
681
{
9,313,950✔
682
    if (!m_table_names.is_attached())
9,313,950✔
683
        return 0;
588,744✔
684
    size_t table_ndx = m_table_names.find_first(name);
8,725,206✔
685
    if (table_ndx == not_found)
8,725,206✔
686
        return 0;
1,096,908✔
687

688
    Table* table = do_get_table(table_ndx); // Throws
7,628,298✔
689
    return table;
7,628,298✔
690
}
8,725,206✔
691

692
TableRef Group::add_table_with_primary_key(StringData name, DataType pk_type, StringData pk_name, bool nullable,
693
                                           Table::Type table_type)
694
{
109,143✔
695
    check_attached();
109,143✔
696
    check_table_name_uniqueness(name);
109,143✔
697

698
    auto table = do_add_table(name, table_type, false);
109,143✔
699

700
    // Add pk column - without replication
701
    ColumnAttrMask attr;
109,143✔
702
    if (nullable)
109,143✔
703
        attr.set(col_attr_Nullable);
15,807✔
704
    ColKey pk_col = table->generate_col_key(ColumnType(pk_type), attr);
109,143✔
705
    table->do_insert_root_column(pk_col, ColumnType(pk_type), pk_name);
109,143✔
706
    table->do_set_primary_key_column(pk_col);
109,143✔
707

708
    if (Replication* repl = *get_repl())
109,143✔
709
        repl->add_class_with_primary_key(table->get_key(), name, pk_type, pk_name, nullable, table_type);
108,186✔
710

711
    return TableRef(table, table->m_alloc.get_instance_version());
109,143✔
712
}
109,143✔
713

714
Table* Group::do_add_table(StringData name, Table::Type table_type, bool do_repl)
715
{
279,579✔
716
    if (!m_is_writable)
279,579✔
717
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
6✔
718

719
    // get new key and index
720
    // find first empty spot:
721
    uint32_t j;
279,573✔
722
    RefOrTagged rot = RefOrTagged::make_tagged(0);
279,573✔
723
    for (j = 0; j < m_tables.size(); ++j) {
54,289,101✔
724
        rot = m_tables.get_as_ref_or_tagged(j);
54,010,545✔
725
        if (!rot.is_ref())
54,010,545✔
726
            break;
1,017✔
727
    }
54,010,545✔
728
    bool gen_null_tag = (j == m_tables.size()); // new tags start at zero
279,573✔
729
    uint32_t tag = gen_null_tag ? 0 : uint32_t(rot.get_as_int());
279,573✔
730
    TableKey key = TableKey((tag << 16) | j);
279,573✔
731

732
    if (REALM_UNLIKELY(name.size() > max_table_name_length))
279,573✔
733
        throw InvalidArgument(ErrorCodes::InvalidName, util::format("Name too long: %1", name));
6✔
734

735
    using namespace _impl;
279,567✔
736
    size_t table_ndx = key2ndx(key);
279,567✔
737
    ref_type ref = Table::create_empty_table(m_alloc, key); // Throws
279,567✔
738
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
279,567✔
739

740
    rot = RefOrTagged::make_ref(ref);
279,567✔
741
    REALM_ASSERT(m_table_accessors.size() == m_tables.size());
279,567✔
742

743
    if (table_ndx == m_tables.size()) {
279,567✔
744
        m_tables.add(rot);
278,550✔
745
        m_table_names.add(name);
278,550✔
746
        // Need new slot for table accessor
747
        m_table_accessors.push_back(nullptr);
278,550✔
748
    }
278,550✔
749
    else {
1,017✔
750
        m_tables.set(table_ndx, rot);       // Throws
1,017✔
751
        m_table_names.set(table_ndx, name); // Throws
1,017✔
752
    }
1,017✔
753

754
    Replication* repl = *get_repl();
279,567✔
755
    if (do_repl && repl)
279,567✔
756
        repl->add_class(key, name, table_type);
164,397✔
757

758
    ++m_num_tables;
279,567✔
759

760
    Table* table = create_table_accessor(j);
279,567✔
761
    table->do_set_table_type(table_type);
279,567✔
762

763
    return table;
279,567✔
764
}
279,573✔
765

766
Table* Group::create_table_accessor(size_t table_ndx)
767
{
2,014,572✔
768
    REALM_ASSERT(m_tables.size() == m_table_accessors.size());
2,014,572✔
769
    REALM_ASSERT(table_ndx < m_table_accessors.size());
2,014,572✔
770

771
    RefOrTagged rot = m_tables.get_as_ref_or_tagged(table_ndx);
2,014,572✔
772
    ref_type ref = rot.get_as_ref();
2,014,572✔
773
    if (ref == 0) {
2,014,572✔
774
        throw NoSuchTable();
×
775
    }
×
776
    Table* table = 0;
2,014,572✔
777
    {
2,014,572✔
778
        std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
2,014,572✔
779
        if (g_table_recycler_2.empty()) {
2,014,572✔
780
            while (!g_table_recycler_1.empty()) {
2,004,345✔
781
                auto t = g_table_recycler_1.back();
1,994,175✔
782
                g_table_recycler_1.pop_back();
1,994,175✔
783
                g_table_recycler_2.push_back(t);
1,994,175✔
784
            }
1,994,175✔
785
        }
10,170✔
786
        if (g_table_recycler_2.size() + g_table_recycler_1.size() > g_table_recycling_delay) {
2,014,572✔
787
            table = g_table_recycler_2.back();
1,984,782✔
788
            table->fully_detach();
1,984,782✔
789
            g_table_recycler_2.pop_back();
1,984,782✔
790
        }
1,984,782✔
791
    }
2,014,572✔
792
    if (table) {
2,014,572✔
793
        table->revive(get_repl(), m_alloc, m_is_writable);
1,984,656✔
794
        table->init(ref, this, table_ndx, m_is_writable, is_frozen());
1,984,656✔
795
    }
1,984,656✔
796
    else {
29,916✔
797
        std::unique_ptr<Table> new_table(new Table(get_repl(), m_alloc));  // Throws
29,916✔
798
        new_table->init(ref, this, table_ndx, m_is_writable, is_frozen()); // Throws
29,916✔
799
        table = new_table.release();
29,916✔
800
    }
29,916✔
801
    table->refresh_index_accessors();
2,014,572✔
802
    // must be atomic to allow concurrent probing of the m_table_accessors vector.
803
    store_atomic(m_table_accessors[table_ndx], table, std::memory_order_release);
2,014,572✔
804
    return table;
2,014,572✔
805
}
2,014,572✔
806

807

808
// Park a detached table accessor in the recycler (stage 1) so that a later
// table-accessor creation can revive it instead of allocating a new Table.
// Thread-safe: the recycler lists are global and guarded by
// g_table_recycler_mutex.
void Group::recycle_table_accessor(Table* to_be_recycled)
{
    std::lock_guard<std::mutex> lg(g_table_recycler_mutex);
    g_table_recycler_1.push_back(to_be_recycled);
}
2,014,398✔
813

814
// Remove the table with the given name. Throws NoSuchTable if no table
// with that name exists.
void Group::remove_table(StringData name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    remove_table(ndx, ndx2key(ndx)); // Throws
}
396✔
823

824

825
// Remove the table identified by `key`. Throws if the key does not
// refer to an existing table.
void Group::remove_table(TableKey key)
{
    check_attached();
    remove_table(key2ndx_checked(key), key); // Throws
}
1,074✔
832

833

834
// Remove the table at `table_ndx` (identified by `key`) from the group.
//
// Throws LogicError if the group is not writable, and
// CrossTableLinkTarget if other tables have link columns targeting this
// table. The removal is replicated, the underlying node structure is
// freed, and the table accessor is detached and recycled.
void Group::remove_table(size_t table_ndx, TableKey key)
{
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    REALM_ASSERT(table_ndx < m_tables.size());
    TableRef table = get_table(key);

    // In principle we could remove a table even if it is the target of link
    // columns of other tables, however, to do that, we would have to
    // automatically remove the "offending" link columns from those other
    // tables. Such a behaviour is deemed too obscure, and we shall therefore
    // require that a removed table does not contain foreign origin backlink
    // columns.
    if (table->is_cross_table_link_target())
        throw CrossTableLinkTarget(table->get_name());

    {
        // We don't want to replicate the individual column removals along the
        // way as they're covered by the table removal
        Table::DisableReplication dr(*table);
        table->remove_columns();
    }

    size_t prior_num_tables = m_tables.size();
    Replication* repl = *get_repl();
    if (repl)
        repl->erase_class(key, table->get_name(), prior_num_tables); // Throws

    int64_t ref_64 = m_tables.get(table_ndx);
    REALM_ASSERT(!int_cast_has_overflow<ref_type>(ref_64));
    ref_type ref = ref_type(ref_64);

    // Replace entry in m_tables with next tag to use:
    // (bump the 15-bit tag part of the key so a future table created in this
    // slot gets a fresh key)
    RefOrTagged rot = RefOrTagged::make_tagged((1 + (key.value >> 16)) & 0x7FFF);
    // Remove table
    m_tables.set(table_ndx, rot);     // Throws
    m_table_names.set(table_ndx, {}); // Throws
    m_table_accessors[table_ndx] = nullptr;
    --m_num_tables;

    table->detach(Table::cookie_removed);
    // Destroy underlying node structure
    Array::destroy_deep(ref, m_alloc);
    recycle_table_accessor(table.unchecked_ptr());
}
1,452✔
880

881

882
// Rename the table currently called `name` to `new_name`. Throws
// NoSuchTable if no table with that name exists.
void Group::rename_table(StringData name, StringData new_name, bool require_unique_name)
{
    check_attached();
    const size_t ndx = m_table_names.find_first(name);
    if (ndx == not_found)
        throw NoSuchTable();
    rename_table(ndx2key(ndx), new_name, require_unique_name); // Throws
}
18✔
890

891

892
// Rename the table identified by `key` to `new_name`. When
// `require_unique_name` is set, throws TableNameInUse if a table with
// the new name already exists. The rename is replicated.
void Group::rename_table(TableKey key, StringData new_name, bool require_unique_name)
{
    check_attached();
    if (!m_is_writable)
        throw LogicError(ErrorCodes::ReadOnlyDB, "Database not writable");
    REALM_ASSERT_3(m_tables.size(), ==, m_table_names.size());
    if (require_unique_name && has_table(new_name))
        throw TableNameInUse();
    m_table_names.set(key2ndx_checked(key), new_name);
    if (Replication* repl = *get_repl())
        repl->rename_class(key, new_name); // Throws
}
18✔
905

906
// Resolve `link` to an object. Unresolved keys are looked up in the
// target table's tombstones, live keys in its cluster tree.
Obj Group::get_object(ObjLink link)
{
    auto table = get_table(link.get_table_key());
    const ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return table->m_tombstones->get(obj_key);
    return table->m_clusters.get(obj_key);
}
430,524✔
913

914
// Like get_object(), but does not throw when the object does not exist.
Obj Group::try_get_object(ObjLink link) noexcept
{
    auto table = get_table(link.get_table_key());
    const ObjKey obj_key = link.get_obj_key();
    if (obj_key.is_unresolved())
        return table->m_tombstones->try_get_obj(obj_key);
    return table->m_clusters.try_get_obj(obj_key);
}
×
921

922
// Validate that `link` may be stored: the target object must exist and
// the target table must be neither embedded nor asymmetric. A link with
// a null table key is trivially valid.
void Group::validate(ObjLink link) const
{
    const auto tk = link.get_table_key();
    if (!tk)
        return;

    const auto target_key = link.get_obj_key();
    const auto target_table = get_table(tk);
    const ClusterTree* tree =
        target_key.is_unresolved() ? target_table->m_tombstones.get() : &target_table->m_clusters;
    if (!tree->is_valid(target_key)) {
        throw InvalidArgument(ErrorCodes::KeyNotFound, "Target object not found");
    }
    if (target_table->is_embedded()) {
        throw IllegalOperation("Cannot link to embedded object");
    }
    if (target_table->is_asymmetric()) {
        throw IllegalOperation("Cannot link to ephemeral object");
    }
}
309,738✔
940

941
// Write the m_tables array through the typed writer. Tagged slots
// (free table slots) are copied as-is; ref slots are written via the
// corresponding table's typed_write(). If only modified data is
// requested and the array is read-only, the existing ref is reused.
ref_type Group::typed_write_tables(_impl::ArrayWriterBase& out) const
{
    const ref_type tables_ref = m_top.get_as_ref(1);
    if (out.only_modified && m_alloc.is_read_only(tables_ref))
        return tables_ref;

    Array tables(m_alloc);
    tables.init_from_ref(tables_ref);
    REALM_ASSERT_DEBUG(tables.has_refs());

    TempArray dest(tables.size());
    for (unsigned i = 0; i < tables.size(); ++i) {
        RefOrTagged entry = tables.get_as_ref_or_tagged(i);
        if (entry.is_tagged()) {
            dest.set(i, entry);
            continue;
        }
        auto table = do_get_table(i);
        REALM_ASSERT_DEBUG(table);
        dest.set_as_ref(i, table->typed_write(entry.get_as_ref(), out));
    }
    return dest.write(out);
}
622,755✔
963
void Group::table_typed_print(std::string prefix, ref_type ref) const
NEW
964
{
×
NEW
965
    REALM_ASSERT(m_top.get_as_ref(1) == ref);
×
NEW
966
    Array a(m_alloc);
×
NEW
967
    a.init_from_ref(ref);
×
NEW
968
    REALM_ASSERT(a.has_refs());
×
NEW
969
    for (unsigned j = 0; j < a.size(); ++j) {
×
NEW
970
        auto pref = prefix + "  " + to_string(j) + ":\t";
×
NEW
971
        RefOrTagged rot = a.get_as_ref_or_tagged(j);
×
NEW
972
        if (rot.is_tagged() || rot.get_as_ref() == 0)
×
NEW
973
            continue;
×
NEW
974
        auto table_accessor = do_get_table(j);
×
NEW
975
        REALM_ASSERT(table_accessor);
×
NEW
976
        table_accessor->typed_print(pref, rot.get_as_ref());
×
NEW
977
    }
×
NEW
978
}
×
979
void Group::typed_print(std::string prefix) const
NEW
980
{
×
NEW
981
    std::cout << "Group top array" << std::endl;
×
NEW
982
    for (unsigned j = 0; j < m_top.size(); ++j) {
×
NEW
983
        auto pref = prefix + "  " + to_string(j) + ":\t";
×
NEW
984
        RefOrTagged rot = m_top.get_as_ref_or_tagged(j);
×
NEW
985
        if (rot.is_ref() && rot.get_as_ref()) {
×
NEW
986
            if (j == 1) {
×
987
                // Tables
NEW
988
                std::cout << pref << "All Tables" << std::endl;
×
NEW
989
                table_typed_print(pref, rot.get_as_ref());
×
NEW
990
            }
×
NEW
991
            else {
×
NEW
992
                Array a(m_alloc);
×
NEW
993
                a.init_from_ref(rot.get_as_ref());
×
NEW
994
                std::cout << pref;
×
NEW
995
                a.typed_print(pref);
×
NEW
996
            }
×
NEW
997
        }
×
NEW
998
        else {
×
NEW
999
            std::cout << pref << rot.get_as_int() << std::endl;
×
NEW
1000
        }
×
NEW
1001
    }
×
NEW
1002
    std::cout << "}" << std::endl;
×
NEW
1003
}
×
1004

1005

1006
// Write the array of table names unconditionally (deep, uncompressed)
// and return the ref of the written array.
ref_type Group::DefaultTableWriter::write_names(_impl::OutputStream& out)
{
    constexpr bool deep = true;             // Deep
    constexpr bool only_if_modified = false; // Always
    constexpr bool compress = false;
    return m_group->m_table_names.write(out, deep, only_if_modified, compress); // Throws
}
660✔
1013
// Write all tables of the group via Group::typed_write_tables() and
// return the ref of the written tables array.
ref_type Group::DefaultTableWriter::write_tables(_impl::OutputStream& out)
{
    return m_group->typed_write_tables(out);
}
660✔
1017

1018
// Write the group's history (if any) to `out` and return a HistoryInfo
// describing it (type, schema version, ref of the written history, sync
// file id). Returns a default-constructed HistoryInfo (type hist_None)
// when there is no history or history preservation is disabled.
auto Group::DefaultTableWriter::write_history(_impl::OutputStream& out) -> HistoryInfo
{
    bool deep = true;              // Deep
    bool only_if_modified = false; // Always
    bool compress = false;
    ref_type history_ref = _impl::GroupFriend::get_history_ref(*m_group);
    HistoryInfo info;
    if (history_ref) {
        _impl::History::version_type version;
        int history_type, history_schema_version;
        _impl::GroupFriend::get_version_and_history_info(_impl::GroupFriend::get_alloc(*m_group),
                                                         m_group->m_top.get_ref(), version, history_type,
                                                         history_schema_version);
        // A non-null history ref implies a real history type
        REALM_ASSERT(history_type != Replication::hist_None);
        // NOTE: the hist_None check below is redundant given the assert above,
        // but kept as a defensive guard for release builds (asserts disabled).
        if (!m_should_write_history || history_type == Replication::hist_None) {
            return info; // Only sync history should be preserved when writing to a new file
        }
        info.type = history_type;
        info.version = history_schema_version;
        Array history{const_cast<Allocator&>(_impl::GroupFriend::get_alloc(*m_group))};
        history.init_from_ref(history_ref);
        info.ref = history.write(out, deep, only_if_modified, compress); // Throws
    }
    info.sync_file_id = m_group->get_sync_file_id();
    return info;
}
402✔
1044

1045
void Group::write(std::ostream& out, bool pad) const
1046
{
42✔
1047
    DefaultTableWriter table_writer;
42✔
1048
    write(out, pad, 0, table_writer);
42✔
1049
}
42✔
1050

1051
void Group::write(std::ostream& out, bool pad_for_encryption, uint_fast64_t version_number, TableWriter& writer) const
1052
{
672✔
1053
    REALM_ASSERT(is_attached());
672✔
1054
    writer.set_group(this);
672✔
1055
    bool no_top_array = !m_top.is_attached();
672✔
1056
    write(out, m_file_format_version, writer, no_top_array, pad_for_encryption, version_number); // Throws
672✔
1057
}
672✔
1058

1059
// Write the group into the (empty) `file`, optionally encrypted.
//
// The stream buffer is sized to at least 1/256 of the estimated needed
// space, capped at 64 MB, and rounded up to a power-of-two multiple of
// the page size.
void Group::write(File& file, const char* encryption_key, uint_fast64_t version_number, TableWriter& writer) const
{
    REALM_ASSERT(file.get_size() == 0);

    file.set_encryption_key(encryption_key);

    // The aim is that the buffer size should be at least 1/256 of needed size but less than 64 Mb
    constexpr size_t upper_bound = 64 * 1024 * 1024;
    size_t min_space = std::min(get_used_space() >> 8, upper_bound);
    size_t buffer_size = page_size();
    while (buffer_size < min_space) {
        buffer_size <<= 1;
    }
    File::Streambuf streambuf(&file, buffer_size);

    std::ostream out(&streambuf);
    out.exceptions(std::ios_base::failbit | std::ios_base::badbit);
    // Fix: compare the pointer against nullptr rather than 0
    write(out, encryption_key != nullptr, version_number, writer);
    int sync_status = streambuf.pubsync();
    REALM_ASSERT(sync_status == 0);
}
630✔
1080

1081
void Group::write(const std::string& path, const char* encryption_key, uint64_t version_number,
1082
                  bool write_history) const
1083
{
258✔
1084
    File file;
258✔
1085
    int flags = 0;
258✔
1086
    file.open(path, File::access_ReadWrite, File::create_Must, flags);
258✔
1087
    DefaultTableWriter table_writer(write_history);
258✔
1088
    write(file, encryption_key, version_number, table_writer);
258✔
1089
}
258✔
1090

1091

1092
// Serialize the group into a newly allocated memory buffer and return
// it as BinaryData (ownership passes to the caller). Throws
// OutOfMemory if the buffer cannot be allocated.
BinaryData Group::write_to_mem() const
{
    REALM_ASSERT(is_attached());

    // The total allocator size is an upper bound on the serialized size
    const size_t max_size = m_alloc.get_total_size();

    std::unique_ptr<char[]> buffer(new (std::nothrow) char[max_size]);
    if (!buffer)
        throw Exception(ErrorCodes::OutOfMemory, "Could not allocate memory while dumping to memory");

    MemoryOutputStream out; // Throws
    out.set_buffer(buffer.get(), buffer.get() + max_size);
    write(out); // Throws
    const size_t written = out.size();
    return BinaryData(buffer.release(), written);
}
42✔
1108

1109

1110
// Write a complete streaming-format Realm file image to `out`:
// header, table names, tables, optional history/free-list/version info,
// the top array, optional encryption padding, and the streaming footer.
//
// With `no_top_array` an empty file (header + footer only, top ref 0)
// is produced. A non-zero `version_number` causes versioning info
// (empty free-list arrays, version and history) to be included — this
// is used by DB to compact a database into a separate file.
void Group::write(std::ostream& out, int file_format_version, TableWriter& table_writer, bool no_top_array,
                  bool pad_for_encryption, uint_fast64_t version_number)
{
    _impl::OutputStream out_2(out);
    out_2.only_modified = false;

    // Write the file header
    SlabAlloc::Header streaming_header;
    if (no_top_array) {
        // File format version 0 marks the empty-file state
        file_format_version = 0;
    }
    else if (file_format_version == 0) {
        // Use current file format version
        file_format_version = get_target_file_format_version_for_session(0, Replication::hist_None);
    }
    SlabAlloc::init_streaming_header(&streaming_header, file_format_version);
    out_2.write(reinterpret_cast<const char*>(&streaming_header), sizeof streaming_header);

    ref_type top_ref = 0;
    size_t final_file_size = sizeof streaming_header;
    if (no_top_array) {
        // Accept version number 1 as that number is (unfortunately) also used
        // to denote the empty initial state of a Realm file.
        REALM_ASSERT(version_number == 0 || version_number == 1);
    }
    else {
        // Because we need to include the total logical file size in the
        // top-array, we have to start by writing everything except the
        // top-array, and then finally compute and write a correct version of
        // the top-array. The free-space information of the group will only be
        // included if a non-zero version number is given as parameter,
        // indicating that versioning info is to be saved. This is used from
        // DB to compact the database by writing only the live data
        // into a separate file.
        ref_type names_ref = table_writer.write_names(out_2);   // Throws
        ref_type tables_ref = table_writer.write_tables(out_2);

        // Build the top array in a scratch allocator; it is only needed
        // long enough to be serialized below.
        SlabAlloc new_alloc;
        new_alloc.attach_empty(); // Throws
        Array top(new_alloc);
        top.create(Array::type_HasRefs); // Throws
        _impl::ShallowArrayDestroyGuard dg_top(&top);
        int_fast64_t value_1 = from_ref(names_ref);
        int_fast64_t value_2 = from_ref(tables_ref);
        top.add(value_1); // Throws
        top.add(value_2); // Throws
        top.add(0);       // Throws (placeholder for the logical file size)

        int top_size = 3;
        if (version_number) {
            TableWriter::HistoryInfo history_info = table_writer.write_history(out_2); // Throws

            // The compacted file starts out with empty free lists
            Array free_list(new_alloc);
            Array size_list(new_alloc);
            Array version_list(new_alloc);
            free_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_1(&free_list);
            size_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_2(&size_list);
            version_list.create(Array::type_Normal); // Throws
            _impl::DeepArrayDestroyGuard dg_3(&version_list);
            bool deep = true;              // Deep
            bool only_if_modified = false; // Always
            bool compress = false;
            ref_type free_list_ref = free_list.write(out_2, deep, only_if_modified, compress);
            ref_type size_list_ref = size_list.write(out_2, deep, only_if_modified, compress);
            ref_type version_list_ref = version_list.write(out_2, deep, only_if_modified, compress);
            top.add(RefOrTagged::make_ref(free_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(size_list_ref));     // Throws
            top.add(RefOrTagged::make_ref(version_list_ref));  // Throws
            top.add(RefOrTagged::make_tagged(version_number)); // Throws
            top_size = 7;

            if (history_info.type != Replication::hist_None) {
                top.add(RefOrTagged::make_tagged(history_info.type));
                top.add(RefOrTagged::make_ref(history_info.ref));
                top.add(RefOrTagged::make_tagged(history_info.version));
                top.add(RefOrTagged::make_tagged(history_info.sync_file_id));
                top_size = s_group_max_size;
                // ^ this is too large, since the evacuation point entry is not there:
                // (but the code below is self correcting)
            }
        }
        top_ref = out_2.get_ref_of_next_array();

        // Produce a preliminary version of the top array whose
        // representation is guaranteed to be able to hold the final file
        // size
        size_t max_top_byte_size = Array::get_max_byte_size(top_size);
        size_t max_final_file_size = size_t(top_ref) + max_top_byte_size;
        top.ensure_minimum_width(RefOrTagged::make_tagged(max_final_file_size)); // Throws

        // Finalize the top array by adding the projected final file size
        // to it
        size_t top_byte_size = top.get_byte_size();
        final_file_size = size_t(top_ref) + top_byte_size;
        top.set(2, RefOrTagged::make_tagged(final_file_size)); // Throws

        // Write the top array
        bool deep = false;             // Shallow
        bool only_if_modified = false; // Always
        bool compress = false;
        top.write(out_2, deep, only_if_modified, compress); // Throws
        REALM_ASSERT_3(size_t(out_2.get_ref_of_next_array()), ==, final_file_size);

        dg_top.reset(nullptr); // Destroy now
    }

    // encryption will pad the file to a multiple of the page, so ensure the
    // footer is aligned to the end of a page
    if (pad_for_encryption) {
#if REALM_ENABLE_ENCRYPTION
        size_t unrounded_size = final_file_size + sizeof(SlabAlloc::StreamingFooter);
        size_t rounded_size = round_up_to_page_size(unrounded_size);
        if (rounded_size != unrounded_size) {
            std::unique_ptr<char[]> buffer(new char[rounded_size - unrounded_size]());
            out_2.write(buffer.get(), rounded_size - unrounded_size);
        }
#endif
    }

    // Write streaming footer
    SlabAlloc::StreamingFooter footer;
    footer.m_top_ref = top_ref;
    footer.m_magic_cookie = SlabAlloc::footer_magic_cookie;
    out_2.write(reinterpret_cast<const char*>(&footer), sizeof footer);
}
672✔
1237

1238

1239
// Re-initialize the group from a new top ref after a commit, and
// propagate the change to child arrays and all attached table
// accessors.
void Group::update_refs(ref_type top_ref) noexcept
{
    // After Group::commit() we will always have free space tracking
    // info.
    REALM_ASSERT_3(m_top.size(), >=, 5);

    m_top.init_from_ref(top_ref);

    // Refresh the child arrays from the new top
    m_table_names.update_from_parent();
    m_tables.update_from_parent();

    // ... and every attached table accessor
    for (auto& acc : m_table_accessors) {
        if (acc)
            acc->update_from_parent();
    }
}
364,965✔
1258

1259
bool Group::operator==(const Group& g) const
1260
{
66✔
1261
    for (auto tk : get_table_keys()) {
138✔
1262
        const StringData& table_name = get_table_name(tk);
138✔
1263

1264
        ConstTableRef table_1 = get_table(tk);
138✔
1265
        ConstTableRef table_2 = g.get_table(table_name);
138✔
1266
        if (!table_2)
138✔
1267
            return false;
12✔
1268
        if (table_1->get_primary_key_column().get_type() != table_2->get_primary_key_column().get_type()) {
126✔
1269
            return false;
×
1270
        }
×
1271
        if (table_1->is_embedded() != table_2->is_embedded())
126✔
1272
            return false;
×
1273
        if (table_1->is_embedded())
126✔
1274
            continue;
60✔
1275

1276
        if (*table_1 != *table_2)
66✔
1277
            return false;
18✔
1278
    }
66✔
1279
    return true;
36✔
1280
}
66✔
1281
// Return the number of bytes of live data in the file: the logical file
// size (top slot 2, tagged) minus the total free-space (summed from the
// free-lengths array in top slot 4, when present).
size_t Group::get_used_space() const noexcept
{
    if (!m_top.is_attached())
        return 0;

    size_t used = size_t(m_top.get(2)) >> 1; // strip the tag bit

    if (m_top.size() > 4) {
        Array free_lengths(const_cast<SlabAlloc&>(m_alloc));
        free_lengths.init_from_ref(ref_type(m_top.get(4)));
        used -= size_t(free_lengths.get_sum());
    }

    return used;
}
648✔
1296

1297

1298
namespace {
1299
class TransactAdvancer : public _impl::NullInstructionObserver {
1300
public:
1301
    TransactAdvancer(Group&, bool& schema_changed)
1302
        : m_schema_changed(schema_changed)
19,689✔
1303
    {
43,440✔
1304
    }
43,440✔
1305

1306
    bool insert_group_level_table(TableKey) noexcept
1307
    {
23,847✔
1308
        m_schema_changed = true;
23,847✔
1309
        return true;
23,847✔
1310
    }
23,847✔
1311

1312
    bool erase_class(TableKey) noexcept
1313
    {
×
1314
        m_schema_changed = true;
×
1315
        return true;
×
1316
    }
×
1317

1318
    bool rename_class(TableKey) noexcept
1319
    {
×
1320
        m_schema_changed = true;
×
1321
        return true;
×
1322
    }
×
1323

1324
    bool insert_column(ColKey)
1325
    {
73,986✔
1326
        m_schema_changed = true;
73,986✔
1327
        return true;
73,986✔
1328
    }
73,986✔
1329

1330
    bool erase_column(ColKey)
1331
    {
×
1332
        m_schema_changed = true;
×
1333
        return true;
×
1334
    }
×
1335

1336
    bool rename_column(ColKey) noexcept
1337
    {
×
1338
        m_schema_changed = true;
×
1339
        return true; // No-op
×
1340
    }
×
1341

1342
private:
1343
    bool& m_schema_changed;
1344
};
1345
} // anonymous namespace
1346

1347

1348
void Group::update_allocator_wrappers(bool writable)
1349
{
5,374,560✔
1350
    m_is_writable = writable;
5,374,560✔
1351
    // This is tempting:
1352
    // m_alloc.set_read_only(!writable);
1353
    // - but m_alloc may refer to the "global" allocator in the DB object.
1354
    // Setting it here would cause different transactions to raze for
1355
    // changing the shared allocator setting. This is somewhat of a mess.
1356
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
11,090,319✔
1357
        auto table_accessor = m_table_accessors[i];
5,715,759✔
1358
        if (table_accessor) {
5,715,759✔
1359
            table_accessor->update_allocator_wrapper(writable);
4,370,187✔
1360
        }
4,370,187✔
1361
    }
5,715,759✔
1362
}
5,374,560✔
1363

1364
void Group::flush_accessors_for_commit()
1365
{
622,074✔
1366
    for (auto& acc : m_table_accessors)
622,074✔
1367
        if (acc)
1,677,999✔
1368
            acc->flush_for_commit();
1,200,105✔
1369
}
622,074✔
1370

1371
// Bring every attached table accessor back in sync with the underlying
// m_tables array after it has changed (e.g. when advancing a
// transaction). Accessors whose table was removed (key changed in the
// file) are detached and recycled.
void Group::refresh_dirty_accessors()
{
    // No tables array at all => drop all accessors
    if (!m_tables.is_attached()) {
        m_table_accessors.clear();
        return;
    }

    // The array of Tables cannot have shrunk:
    REALM_ASSERT(m_tables.size() >= m_table_accessors.size());

    // but it may have grown - and if so, we must resize the accessor array to match
    if (m_tables.size() > m_table_accessors.size()) {
        m_table_accessors.resize(m_tables.size());
    }

    // Update all attached table accessors.
    for (size_t i = 0; i < m_table_accessors.size(); ++i) {
        auto& table_accessor = m_table_accessors[i];
        if (table_accessor) {
            // If the table has changed its key in the file, it's a
            // new table. This will detach the old accessor and remove it.
            RefOrTagged rot = m_tables.get_as_ref_or_tagged(i);
            bool same_table = false;
            if (rot.is_ref()) {
                auto ref = rot.get_as_ref();
                TableKey new_key = Table::get_key_direct(m_alloc, ref);
                if (new_key == table_accessor->get_key())
                    same_table = true;
            }
            if (same_table) {
                table_accessor->refresh_accessor_tree();
            }
            else {
                // Slot now holds a different table (or a tag): retire the accessor
                table_accessor->detach(Table::cookie_removed);
                recycle_table_accessor(table_accessor);
                m_table_accessors[i] = nullptr;
            }
        }
    }
}
224,868✔
1411

1412

1413
// Advance this group to the snapshot identified by `new_top_ref`,
// re-attaching and refreshing all accessors. `in`, when non-null, is
// the transaction log for the change; it is only parsed to detect
// schema changes (for ObjectStore notification).
void Group::advance_transact(ref_type new_top_ref, util::InputStream* in, bool writable)
{
    REALM_ASSERT(is_attached());
    // Exception safety: If this function throws, the group accessor and all of
    // its subordinate accessors are left in a state that may not be fully
    // consistent. Only minimal consistency is guaranteed (see
    // AccessorConsistencyLevels). In this case, the application is required to
    // either destroy the Group object, forcing all subordinate accessors to
    // become detached, or take some other equivalent action that involves a
    // call to Group::detach(), such as terminating the transaction in progress.
    // such actions will also lead to the detachment of all subordinate
    // accessors. Until then it is an error, and unsafe if the application
    // attempts to access the group one of its subordinate accessors.
    //
    // The purpose of this function is to refresh all attached accessors after
    // the underlying node structure has undergone arbitrary change, such as
    // when a read transaction has been advanced to a later snapshot of the
    // database.
    //
    // Initially, when this function is invoked, we cannot assume any
    // correspondence between the accessor state and the underlying node
    // structure. We can assume that the hierarchy is in a state of minimal
    // consistency, and that it can be brought to a state of structural
    // correspondence using information in the transaction logs. When structural
    // correspondence is achieved, we can reliably refresh the accessor hierarchy
    // (Table::refresh_accessor_tree()) to bring it back to a fully consistent
    // state. See AccessorConsistencyLevels.
    //
    // Much of the information in the transaction logs is not used in this
    // process, because the changes have already been applied to the underlying
    // node structure. All we need to do here is to bring the accessors back
    // into a state where they correctly reflect the underlying structure (or
    // detach them if the underlying object has been removed.)
    //
    // This is no longer needed in Core, but we need to compute "schema_changed",
    // for the benefit of ObjectStore.
    bool schema_changed = false;
    if (in && has_schema_change_notification_handler()) {
        // Only parse the log when someone is listening for schema changes
        TransactAdvancer advancer(*this, schema_changed);
        _impl::TransactLogParser parser; // Throws
        parser.parse(*in, advancer);     // Throws
    }

    m_top.detach();                                           // Soft detach
    bool create_group_when_missing = false;                   // See Group::attach_shared().
    attach(new_top_ref, writable, create_group_when_missing); // Throws
    refresh_dirty_accessors();                                // Throws

    if (schema_changed)
        send_schema_change_notification();
}
225,276✔
1464

1465
// Extend m_top with history information: history type
// (s_hist_type_ndx), history ref (s_hist_ref_ndx), history schema
// version (s_hist_version_ndx) and sync file ident (s_sync_file_id_ndx),
// creating the slots when absent. Requires file format version >= 7.
// If history slots already exist with a real history type, the stored
// type and schema version must match the requested ones.
void Group::prepare_top_for_history(int history_type, int history_schema_version, uint64_t file_ident)
{
    REALM_ASSERT(m_file_format_version >= 7);
    // Pad the top array with zeros up to the history-type slot
    while (m_top.size() < s_hist_type_ndx) {
        m_top.add(0); // Throws
    }

    if (m_top.size() > s_hist_version_ndx) {
        // History slots already present — verify compatibility, then overwrite
        int stored_history_type = int(m_top.get_as_ref_or_tagged(s_hist_type_ndx).get_as_int());
        int stored_history_schema_version = int(m_top.get_as_ref_or_tagged(s_hist_version_ndx).get_as_int());
        if (stored_history_type != Replication::hist_None) {
            REALM_ASSERT(stored_history_type == history_type);
            REALM_ASSERT(stored_history_schema_version == history_schema_version);
        }
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(history_type));              // Throws
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(history_schema_version)); // Throws
    }
    else {
        // No history yet
        REALM_ASSERT(m_top.size() == s_hist_type_ndx);
        ref_type history_ref = 0;                                    // No history yet
        m_top.add(RefOrTagged::make_tagged(history_type));           // Throws
        m_top.add(RefOrTagged::make_ref(history_ref));               // Throws
        m_top.add(RefOrTagged::make_tagged(history_schema_version)); // Throws
    }

    if (m_top.size() > s_sync_file_id_ndx) {
        m_top.set(s_sync_file_id_ndx, RefOrTagged::make_tagged(file_ident));
    }
    else {
        m_top.add(RefOrTagged::make_tagged(file_ident)); // Throws
    }
}
61,617✔
1498

1499
void Group::clear_history()
1500
{
36✔
1501
    bool has_history = (m_top.is_attached() && m_top.size() > s_hist_type_ndx);
36✔
1502
    if (has_history) {
36✔
1503
        auto hist_ref = m_top.get_as_ref(s_hist_ref_ndx);
36✔
1504
        Array::destroy_deep(hist_ref, m_top.get_alloc());
36✔
1505
        m_top.set(s_hist_type_ndx, RefOrTagged::make_tagged(Replication::hist_None)); // Throws
36✔
1506
        m_top.set(s_hist_version_ndx, RefOrTagged::make_tagged(0));                   // Throws
36✔
1507
        m_top.set(s_hist_ref_ndx, 0);                                                 // Throws
36✔
1508
    }
36✔
1509
}
36✔
1510

1511
#ifdef REALM_DEBUG // LCOV_EXCL_START ignore debug functions
1512

1513
class MemUsageVerifier : public Array::MemUsageHandler {
1514
public:
1515
    MemUsageVerifier(ref_type ref_begin, ref_type immutable_ref_end, ref_type mutable_ref_end, ref_type baseline)
1516
        : m_ref_begin(ref_begin)
59,688✔
1517
        , m_immutable_ref_end(immutable_ref_end)
59,688✔
1518
        , m_mutable_ref_end(mutable_ref_end)
59,688✔
1519
        , m_baseline(baseline)
59,688✔
1520
    {
119,358✔
1521
    }
119,358✔
1522
    void add_immutable(ref_type ref, size_t size)
1523
    {
2,544,396✔
1524
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
2,544,396✔
1525
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
2,544,396✔
1526
        REALM_ASSERT_3(size, >, 0);
2,544,396✔
1527
        REALM_ASSERT_3(ref, >=, m_ref_begin);
2,544,396✔
1528
        REALM_ASSERT_3(size, <=, m_immutable_ref_end - ref);
2,544,396✔
1529
        Chunk chunk;
2,544,396✔
1530
        chunk.ref = ref;
2,544,396✔
1531
        chunk.size = size;
2,544,396✔
1532
        m_chunks.push_back(chunk);
2,544,396✔
1533
    }
2,544,396✔
1534
    void add_mutable(ref_type ref, size_t size)
1535
    {
440,901✔
1536
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
440,901✔
1537
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
440,901✔
1538
        REALM_ASSERT_3(size, >, 0);
440,901✔
1539
        REALM_ASSERT_3(ref, >=, m_immutable_ref_end);
440,901✔
1540
        REALM_ASSERT_3(size, <=, m_mutable_ref_end - ref);
440,901✔
1541
        Chunk chunk;
440,901✔
1542
        chunk.ref = ref;
440,901✔
1543
        chunk.size = size;
440,901✔
1544
        m_chunks.push_back(chunk);
440,901✔
1545
    }
440,901✔
1546
    void add(ref_type ref, size_t size)
1547
    {
9,376,527✔
1548
        REALM_ASSERT_3(ref % 8, ==, 0);  // 8-byte alignment
9,376,527✔
1549
        REALM_ASSERT_3(size % 8, ==, 0); // 8-byte alignment
9,376,527✔
1550
        REALM_ASSERT_3(size, >, 0);
9,376,527✔
1551
        REALM_ASSERT_3(ref, >=, m_ref_begin);
9,376,527✔
1552
        REALM_ASSERT(size <= (ref < m_baseline ? m_immutable_ref_end : m_mutable_ref_end) - ref);
9,376,527✔
1553
        Chunk chunk;
9,376,527✔
1554
        chunk.ref = ref;
9,376,527✔
1555
        chunk.size = size;
9,376,527✔
1556
        m_chunks.push_back(chunk);
9,376,527✔
1557
    }
9,376,527✔
1558
    void add(const MemUsageVerifier& verifier)
1559
    {
176,751✔
1560
        m_chunks.insert(m_chunks.end(), verifier.m_chunks.begin(), verifier.m_chunks.end());
176,751✔
1561
    }
176,751✔
1562
    void handle(ref_type ref, size_t allocated, size_t) override
1563
    {
9,376,512✔
1564
        add(ref, allocated);
9,376,512✔
1565
    }
9,376,512✔
1566
    void canonicalize()
1567
    {
472,857✔
1568
        // Sort the chunks in order of increasing ref, then merge adjacent
1569
        // chunks while checking that there is no overlap
1570
        typedef std::vector<Chunk>::iterator iter;
472,857✔
1571
        iter i_1 = m_chunks.begin(), end = m_chunks.end();
472,857✔
1572
        iter i_2 = i_1;
472,857✔
1573
        sort(i_1, end);
472,857✔
1574
        if (i_1 != end) {
472,857✔
1575
            while (++i_2 != end) {
16,079,748✔
1576
                ref_type prev_ref_end = i_1->ref + i_1->size;
15,650,703✔
1577
                REALM_ASSERT_3(prev_ref_end, <=, i_2->ref);
15,650,703✔
1578
                if (i_2->ref == prev_ref_end) { // in-file
15,650,703✔
1579
                    i_1->size += i_2->size;     // Merge
12,302,256✔
1580
                }
12,302,256✔
1581
                else {
3,348,447✔
1582
                    *++i_1 = *i_2;
3,348,447✔
1583
                }
3,348,447✔
1584
            }
15,650,703✔
1585
            m_chunks.erase(i_1 + 1, end);
429,045✔
1586
        }
429,045✔
1587
    }
472,857✔
1588
    void clear()
1589
    {
176,751✔
1590
        m_chunks.clear();
176,751✔
1591
    }
176,751✔
1592
    void check_total_coverage()
1593
    {
59,679✔
1594
        REALM_ASSERT_3(m_chunks.size(), ==, 1);
59,679✔
1595
        REALM_ASSERT_3(m_chunks.front().ref, ==, m_ref_begin);
59,679✔
1596
        REALM_ASSERT_3(m_chunks.front().size, ==, m_mutable_ref_end - m_ref_begin);
59,679✔
1597
    }
59,679✔
1598

1599
private:
1600
    struct Chunk {
1601
        ref_type ref;
1602
        size_t size;
1603
        bool operator<(const Chunk& c) const
1604
        {
136,304,208✔
1605
            return ref < c.ref;
136,304,208✔
1606
        }
136,304,208✔
1607
    };
1608
    std::vector<Chunk> m_chunks;
1609
    ref_type m_ref_begin, m_immutable_ref_end, m_mutable_ref_end, m_baseline;
1610
};
1611

1612
#endif
1613

1614
// Full consistency check of the group (debug builds only). Verifies every
// table, the replication history (if any), and - unless this is a live read
// transaction - that all memory managed by the slab allocator is accounted
// for exactly once.
void Group::verify() const
{
#ifdef REALM_DEBUG
    REALM_ASSERT(is_attached());

    m_alloc.verify();

    if (!m_top.is_attached()) {
        // Empty group; nothing more to check.
        return;
    }

    // Verify tables
    {
        auto keys = get_table_keys();
        for (auto key : keys) {
            ConstTableRef table = get_table(key);
            REALM_ASSERT_3(table->get_key().value, ==, key.value);
            table->verify();
        }
    }

    // Verify history if present
    if (Replication* repl = *get_repl()) {
        if (auto hist = repl->_create_history_read()) {
            hist->set_group(const_cast<Group*>(this), false);
            _impl::History::version_type version = 0;
            int history_type = 0;
            int history_schema_version = 0;
            get_version_and_history_info(m_top, version, history_type, history_schema_version);
            // A schema version may only be present when a history type is.
            REALM_ASSERT(history_type != Replication::hist_None || history_schema_version == 0);
            ref_type hist_ref = get_history_ref(m_top);
            hist->update_from_ref_and_version(hist_ref, version);
            hist->verify();
        }
    }

    if (auto tr = dynamic_cast<const Transaction*>(this)) {
        // This is a transaction
        if (tr->get_transact_stage() == DB::TransactStage::transact_Reading) {
            // Verifying the memory cannot be done from a read transaction
            // There might be a write transaction running that has freed some
            // memory that is seen as being in use in this transaction
            return;
        }
    }
    // Slot 2 of the top array holds the logical file size.
    size_t logical_file_size = to_size_t(m_top.get_as_ref_or_tagged(2).get_as_int());
    size_t ref_begin = sizeof(SlabAlloc::Header);
    ref_type real_immutable_ref_end = logical_file_size;
    ref_type real_mutable_ref_end = m_alloc.get_total_size();
    ref_type real_baseline = m_alloc.get_baseline();
    // Fake that any empty area between the file and slab is part of the file (immutable):
    ref_type immutable_ref_end = m_alloc.align_size_to_section_boundary(real_immutable_ref_end);
    ref_type mutable_ref_end = m_alloc.align_size_to_section_boundary(real_mutable_ref_end);
    ref_type baseline = m_alloc.align_size_to_section_boundary(real_baseline);

    // Check the consistency of the allocation of used memory
    MemUsageVerifier mem_usage_1(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    m_top.report_memory_usage(mem_usage_1);
    mem_usage_1.canonicalize();

    // Check consistency of the allocation of the immutable memory that was
    // marked as free before the file was opened.
    MemUsageVerifier mem_usage_2(ref_begin, immutable_ref_end, mutable_ref_end, baseline);
    {
        // Valid top-array sizes for the supported file formats.
        REALM_ASSERT_EX(m_top.size() == 3 || m_top.size() == 5 || m_top.size() == 7 || m_top.size() >= 10,
                        m_top.size());
        Allocator& alloc = m_top.get_alloc();
        // Attach the free-list arrays (positions, sizes, versions) if their
        // slots in the top array hold refs.
        Array pos(alloc), len(alloc), ver(alloc);
        pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
        len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
        ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
        if (m_top.size() > s_free_pos_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
                pos.init_from_ref(ref);
        }
        if (m_top.size() > s_free_size_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
                len.init_from_ref(ref);
        }
        if (m_top.size() > s_free_version_ndx) {
            if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
                ver.init_from_ref(ref);
        }
        REALM_ASSERT(pos.is_attached() == len.is_attached());
        REALM_ASSERT(pos.is_attached() || !ver.is_attached()); // pos.is_attached() <== ver.is_attached()
        if (pos.is_attached()) {
            size_t n = pos.size();
            REALM_ASSERT_3(n, ==, len.size());
            if (ver.is_attached())
                REALM_ASSERT_3(n, ==, ver.size());
            for (size_t i = 0; i != n; ++i) {
                ref_type ref = to_ref(pos.get(i));
                size_t size_of_i = to_size_t(len.get(i));
                mem_usage_2.add_immutable(ref, size_of_i);
            }
            mem_usage_2.canonicalize();
            mem_usage_1.add(mem_usage_2);
            mem_usage_1.canonicalize();
            mem_usage_2.clear();
        }
    }

    // Check the consistency of the allocation of the immutable memory that has
    // been marked as free after the file was opened
    for (const auto& free_block : m_alloc.m_free_read_only) {
        mem_usage_2.add_immutable(free_block.first, free_block.second);
    }
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // Check the consistency of the allocation of the mutable memory that has
    // been marked as free
    m_alloc.for_all_free_entries([&](ref_type ref, size_t sz) {
        mem_usage_2.add_mutable(ref, sz);
    });
    mem_usage_2.canonicalize();
    mem_usage_1.add(mem_usage_2);
    mem_usage_1.canonicalize();
    mem_usage_2.clear();

    // There may be a hole between the end of file and the beginning of the slab area.
    // We need to take that into account here.
    REALM_ASSERT_3(real_immutable_ref_end, <=, real_baseline);
    auto slab_start = immutable_ref_end;
    if (real_immutable_ref_end < slab_start) {
        ref_type ref = real_immutable_ref_end;
        size_t corrected_size = slab_start - real_immutable_ref_end;
        mem_usage_1.add_immutable(ref, corrected_size);
        mem_usage_1.canonicalize();
    }

    // At this point we have accounted for all memory managed by the slab
    // allocator
    mem_usage_1.check_total_coverage();
#endif
}
1752

1753
void Group::validate_primary_columns()
1754
{
480✔
1755
    auto table_keys = this->get_table_keys();
480✔
1756
    for (auto tk : table_keys) {
1,800✔
1757
        auto table = get_table(tk);
1,800✔
1758
        table->validate_primary_column();
1,800✔
1759
    }
1,800✔
1760
}
480✔
1761

1762
#ifdef REALM_DEBUG
1763

1764
// Collect memory statistics for the whole tree rooted at the top array
// (debug builds only).
MemStats Group::get_stats()
{
    MemStats stats;
    m_top.stats(stats);
    return stats;
}
1771

1772
// Dump the slab allocator state to stdout (debug builds only).
void Group::print() const
{
    m_alloc.print();
}
1776

1777

1778
void Group::print_free() const
1779
{
×
1780
    Allocator& alloc = m_top.get_alloc();
×
1781
    Array pos(alloc), len(alloc), ver(alloc);
×
1782
    pos.set_parent(const_cast<Array*>(&m_top), s_free_pos_ndx);
×
1783
    len.set_parent(const_cast<Array*>(&m_top), s_free_size_ndx);
×
1784
    ver.set_parent(const_cast<Array*>(&m_top), s_free_version_ndx);
×
1785
    if (m_top.size() > s_free_pos_ndx) {
×
1786
        if (ref_type ref = m_top.get_as_ref(s_free_pos_ndx))
×
1787
            pos.init_from_ref(ref);
×
1788
    }
×
1789
    if (m_top.size() > s_free_size_ndx) {
×
1790
        if (ref_type ref = m_top.get_as_ref(s_free_size_ndx))
×
1791
            len.init_from_ref(ref);
×
1792
    }
×
1793
    if (m_top.size() > s_free_version_ndx) {
×
1794
        if (ref_type ref = m_top.get_as_ref(s_free_version_ndx))
×
1795
            ver.init_from_ref(ref);
×
1796
    }
×
1797

1798
    if (!pos.is_attached()) {
×
1799
        std::cout << "none\n";
×
1800
        return;
×
1801
    }
×
1802
    bool has_versions = ver.is_attached();
×
1803

1804
    size_t n = pos.size();
×
1805
    for (size_t i = 0; i != n; ++i) {
×
1806
        size_t offset = to_size_t(pos.get(i));
×
1807
        size_t size_of_i = to_size_t(len.get(i));
×
1808
        std::cout << i << ": " << offset << " " << size_of_i;
×
1809

1810
        if (has_versions) {
×
1811
            size_t version = to_size_t(ver.get(i));
×
1812
            std::cout << " " << version;
×
1813
        }
×
1814
        std::cout << "\n";
×
1815
    }
×
1816
    std::cout << "\n";
×
1817
}
×
1818
#endif
1819

1820
// LCOV_EXCL_STOP ignore debug functions
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc