
realm / realm-core, build 2469 (push, Evergreen, web-flow)
03 Jul 2024 10:12PM UTC. Coverage: 90.958% (-0.03%) from 90.984%
RCORE-2185 Sync client should steal file ident of fresh realm when performing client reset (#7850)

* Initial changes to use the file ident from the fresh realm during client reset

* Fixed failing realm_sync_test tests

* Don't send UPLOAD Messages while downloading fresh realm

* Allow sending QUERY bootstrap for fresh download sessions

* Added SHARED_GROUP_FRESH_PATH to generate path for fresh realm

* Removed SHARED_GROUP_FRESH_PATH and used session_reason setting instead

* Some cleanup after tests passing

* Added test to verify no UPLOAD messages are sent during fresh realm download

* Use is_fresh_path to determine if hook event called by client reset fresh realm download session

* Fixed tsan failure around REQUIRE() within hook event callback in flx_migration test

* Updates from review and streamlined changes based on recommendations

* Reverted some test changes that are no longer needed

* Updated logic for when to perform a client reset diff

* Updated fresh realm download to update upload progress but not send upload messages

* Removed has_client_reset_config flag in favor of get_client_reset_config()

* Updates from review - renamed m_allow_uploads to m_delay_uploads

* Updated assert

* Updated test to start with file ident, added comment about client reset and no file ident

* Updated comment for m_delay_uploads

102284 of 180462 branches covered (56.68%)

140 of 147 new or added lines in 10 files covered. (95.24%)

90 existing lines in 15 files now uncovered.

215145 of 236531 relevant lines covered (90.96%)

6144068.37 hits per line

Source File: /src/realm/alloc_slab.cpp (90.56% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <algorithm>
#include <atomic>
#include <cinttypes>
#include <cstring>
#include <exception>
#include <memory>
#include <type_traits>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/errno.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/util/terminate.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/disable_sync_to_disk.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = section_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


inline constexpr SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};


void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}

inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr.exchange(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations,"
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined,"
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}


MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeListEntry)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}
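
The size fixup in do_alloc combines a minimum-size clamp (a freed block must later be able to hold a FreeBlock) with the usual round-up-to-a-multiple-of-8 bit trick. A minimal standalone sketch of the same arithmetic, with a hypothetical helper name:

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <cstddef>

// (size + 7) & ~0x7 rounds up to the next multiple of 8: adding 7 carries
// past any nonzero low bits, and the mask clears the remainder.
constexpr std::size_t round_up_to_8(std::size_t size)
{
    return (size + 7) & ~std::size_t(0x7);
}

int main()
{
    assert(round_up_to_8(1) == 8);
    assert(round_up_to_8(8) == 8);   // already-aligned sizes are unchanged
    assert(round_up_to_8(13) == 16);
    return 0;
}
// --- end sketch ---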

SlabAlloc::FreeBlock* SlabAlloc::get_prev_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_before(entry);
    if (bb->block_before_size <= 0)
        return nullptr; // no prev block, or it is in use
    return block_before(bb);
}

SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_after(entry);
    if (bb->block_after_size <= 0)
        return nullptr; // no next block, or it is in use
    return block_after(bb);
}

SlabAlloc::FreeList SlabAlloc::find(int size)
{
    FreeList retval;
    retval.it = m_block_map.lower_bound(size);
    if (retval.it != m_block_map.end()) {
        retval.size = retval.it->first;
    }
    else {
        retval.size = 0;
    }
    return retval;
}

SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
{
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
        ++hint.it;
    if (hint.it == m_block_map.end())
        hint.size = 0; // indicate "not found"
    return hint;
}

SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
{
    FreeBlock* retval = list.it->second;
    FreeBlock* header = retval->next;
    if (header == retval)
        m_block_map.erase(list.it);
    else
        list.it->second = header;
    retval->unlink();
    return retval;
}

void SlabAlloc::FreeBlock::unlink()
{
    auto _next = next;
    auto _prev = prev;
    _next->prev = prev;
    _prev->next = next;
    clear_links();
}

void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    auto it = m_block_map.find(size);
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
    auto header = it->second;
    if (header == entry) {
        header = entry->next;
        if (header == entry)
            m_block_map.erase(it);
        else
            it->second = header;
    }
    entry->unlink();
}

void SlabAlloc::push_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    FreeBlock* header;
    auto it = m_block_map.find(size);
    if (it != m_block_map.end()) {
        header = it->second;
        it->second = entry;
        entry->next = header;
        entry->prev = header->prev;
        entry->prev->next = entry;
        entry->next->prev = entry;
    }
    else {
        header = nullptr;
        m_block_map[size] = entry;
        entry->next = entry->prev = entry;
    }
}
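
Each entry in m_block_map is the head of a circular, doubly linked, intrusive list of free blocks of one size; push_freelist_entry above either starts a singleton list or splices the new block in ahead of the current head. The list discipline in isolation (hypothetical Node type, not Realm's FreeBlock):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <map>

struct Node {
    Node* prev = nullptr;
    Node* next = nullptr;
};

std::map<int, Node*> buckets; // block size -> head of a circular list

void push(int size, Node* n)
{
    auto it = buckets.find(size);
    if (it == buckets.end()) {
        buckets[size] = n;
        n->next = n->prev = n; // a singleton list points at itself
    }
    else {
        Node* head = it->second;
        it->second = n;        // the new node becomes the head
        n->next = head;
        n->prev = head->prev;
        n->prev->next = n;
        n->next->prev = n;
    }
}

int main()
{
    Node a, b;
    push(16, &a);
    push(16, &b);
    assert(buckets[16] == &b && b.next == &a && a.next == &b);
    return 0;
}
// --- end sketch ---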

void SlabAlloc::mark_freed(FreeBlock* entry, int size)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
    auto alloc_size = -bb->block_after_size;
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
                    get_file_path_for_assertions());
    bb->block_after_size = alloc_size;
    bb = bb_after(entry);
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
    REALM_ASSERT(-bb->block_before_size == alloc_size);
    bb->block_before_size = alloc_size;
}

void SlabAlloc::mark_allocated(FreeBlock* entry)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
    auto bb2 = bb_after(entry);
    bb->block_after_size = 0 - bb->block_after_size;
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
    bb2->block_before_size = 0 - bb2->block_before_size;
}
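
mark_freed and mark_allocated implement classic boundary tags: every block is bracketed by two BetweenBlocks records holding its size, and the sign of the stored size doubles as the allocation bit (positive means free, negative means in use, zero marks a slab edge). That is what lets free_block below probe both neighbors in constant time. The convention reduced to a toy (simplified layout, not the real structs):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>

// Tag sitting between two adjacent blocks.
struct Tag {
    int block_before_size; // > 0: free, < 0: allocated, 0: no block
    int block_after_size;
};

int main()
{
    Tag t{0, 64};                             // a free 64-byte block follows
    t.block_after_size = -t.block_after_size; // mark_allocated: negate
    assert(t.block_after_size == -64);
    t.block_after_size = -t.block_after_size; // mark_freed: negate back
    assert(t.block_after_size == 64);
    return 0;
}
// --- end sketch ---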

SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
{
    FreeList list = find(size);
    if (list.found_exact(size)) {
        return pop_freelist_entry(list);
    }
    // no exact matches.
    list = find_larger(list, size);
    FreeBlock* block;
    if (list.found_something()) {
        block = pop_freelist_entry(list);
    }
    else {
        block = grow_slab(size);
    }
    FreeBlock* remaining = break_block(block, size);
    if (remaining)
        push_freelist_entry(remaining);
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
    return block;
}
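
allocate_block is a best-fit search over size classes: find() uses map::lower_bound for the smallest class at least as large as the request, find_larger() walks on until a block can also absorb the split overhead, and break_block() returns the surplus tail to the free lists. The lookup step can be modeled with a plain ordered map (illustrative only):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <map>

// size class -> count of free blocks (stand-in for the circular lists)
std::map<int, int> free_by_size{{16, 1}, {48, 2}, {128, 1}};

// Best fit: the smallest size class >= the requested size.
int best_fit(int size)
{
    auto it = free_by_size.lower_bound(size);
    return it == free_by_size.end() ? 0 : it->first; // 0 means "grow the slab"
}

int main()
{
    assert(best_fit(16) == 16);  // exact hit
    assert(best_fit(20) == 48);  // next class up
    assert(best_fit(256) == 0);  // nothing fits: the grow_slab path
    return 0;
}
// --- end sketch ---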

SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
{
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
    bb->block_before_size = 0;
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
    bb->block_after_size = block_size;
    auto entry = block_after(bb);
    entry->clear_links();
    entry->ref = ref_start + sizeof(BetweenBlocks);
    bb = bb_after(entry);
    bb->block_before_size = block_size;
    bb->block_after_size = 0;
    return entry;
}

void SlabAlloc::clear_freelists()
{
    m_block_map.clear();
}

void SlabAlloc::rebuild_freelists_from_slab()
{
    clear_freelists();
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
    for (const auto& e : m_slabs) {
        FreeBlock* entry = slab_to_entry(e, ref_start);
        push_freelist_entry(entry);
        ref_start = align_size_to_section_boundary(e.ref_end);
    }
}

SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
{
    int size = size_from_block(block);
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
        return nullptr;
    bb_after(block)->block_before_size = remaining_size;
    bb_before(block)->block_after_size = new_size;
    auto bb_between = bb_after(block);
    bb_between->block_before_size = new_size;
    bb_between->block_after_size = remaining_size;
    FreeBlock* remaining_block = block_after(bb_between);
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
    remaining_block->clear_links();
    block->clear_links();
    return remaining_block;
}

SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
{
    int size_first = size_from_block(first);
    int size_last = size_from_block(last);
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
    bb_before(first)->block_after_size = new_size;
    bb_after(last)->block_before_size = new_size;
    return first;
}

SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
{
    // Allocate new slab.
    // - Always allocate at least 128K. This is also the amount of
    //   memory that we allow the slab allocator to keep between
    //   transactions. Allowing it to keep a small amount between
    //   transactions makes very small transactions faster by avoiding
    //   repeated unmap/mmap system calls.
    // - When allocating, allocate as much as we already have, but
    // - Never allocate more than a full section (64MB). This policy
    //   leads to gradual allocation of larger and larger blocks until
    //   we reach allocation of entire sections.
    size += 2 * sizeof(BetweenBlocks);
    size_t new_size = minimal_alloc;
    while (new_size < uint64_t(size))
        new_size += minimal_alloc;
    size_t already_allocated = get_allocated_size();
    if (new_size < already_allocated)
        new_size = already_allocated;
    if (new_size > maximal_alloc)
        new_size = maximal_alloc;

    ref_type ref;
    if (m_slabs.empty()) {
        ref = m_baseline.load(std::memory_order_relaxed);
    }
    else {
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
        ref = curr_ref_end;
    }
    ref = align_size_to_section_boundary(ref);
    size_t ref_end = ref;
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
                                      util::to_string(new_size));
    }

    REALM_ASSERT(matches_section_boundary(ref));

    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    // Create new slab and add to list of slabs
    m_slabs.emplace_back(ref_end, new_size); // Throws
    const Slab& slab = m_slabs.back();
    extend_fast_mapping_with_slab(slab.addr);

    // build a single block from that entry
    return slab_to_entry(slab, ref);
}
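
The sizing policy in grow_slab rounds the request up to a multiple of the 128K floor, then grows at least as fast as the total already allocated (which yields roughly geometric growth), capped at one 64MB section. Just that computation, extracted, with the constants spelled out (values taken from the comment above; the real definitions live elsewhere in the allocator):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <cstddef>

constexpr std::size_t minimal_alloc = 128 * 1024;       // 128K floor
constexpr std::size_t maximal_alloc = 64 * 1024 * 1024; // one full section

std::size_t next_slab_size(std::size_t request, std::size_t already_allocated)
{
    std::size_t new_size = minimal_alloc;
    while (new_size < request)        // round up to a multiple of the floor
        new_size += minimal_alloc;
    if (new_size < already_allocated) // grow as much as we already have
        new_size = already_allocated;
    if (new_size > maximal_alloc)     // but never more than a section
        new_size = maximal_alloc;
    return new_size;
}

int main()
{
    assert(next_slab_size(1000, 0) == minimal_alloc);
    assert(next_slab_size(200 * 1024, 0) == 2 * minimal_alloc);
    assert(next_slab_size(1000, 1024 * 1024) == 1024 * 1024); // doubling effect
    assert(next_slab_size(1000, 1u << 30) == maximal_alloc);  // capped
    return 0;
}
// --- end sketch ---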

void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}
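
The read-only branch of do_free is interval coalescing: freed file ranges live in an ordered map keyed by start ref, and each newly freed range is merged with an adjacent successor and/or predecessor. The same merge, stripped of the assertions and error handling (illustrative only):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <cstddef>
#include <iterator>
#include <map>

std::map<std::size_t, std::size_t> free_ranges; // start ref -> length

void free_range(std::size_t ref, std::size_t size)
{
    auto next = free_ranges.lower_bound(ref);
    if (next != free_ranges.end() && ref + size == next->first) {
        size += next->second;             // absorb the following range
        next = free_ranges.erase(next);
    }
    if (next != free_ranges.begin()) {
        auto prev = std::prev(next);
        if (prev->first + prev->second == ref) {
            prev->second += size;         // extend the preceding range
            return;
        }
    }
    free_ranges.emplace_hint(next, ref, size);
}

int main()
{
    free_range(0, 8);
    free_range(16, 8);
    free_range(8, 8); // bridges both neighbors into a single range
    assert(free_ranges.size() == 1 && free_ranges[0] == 24);
    return 0;
}
// --- end sketch ---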

void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}

size_t SlabAlloc::consolidate_free_read_only()
{
    CriticalSection cs(changes);
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    return m_free_read_only.size();
}


MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
{
    REALM_ASSERT_DEBUG(translate(ref) == addr);
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8

    // Possible future enhancement: check if we can extend current space instead
    // of unconditionally allocating new space. In that case, remember to
    // check whether m_free_space_state == free_state_Invalid. Also remember to
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.

    // Allocate new space
    MemRef new_mem = do_alloc(new_size); // Throws

    // Copy existing segment
    char* new_addr = new_mem.get_addr();
    realm::safe_copy_n(addr, old_size, new_addr);

    // Add old segment to freelist
    do_free(ref, addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out)) {
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
                  << " new_size: " << new_size << "\n";
    }
#endif // REALM_DEBUG

    return new_mem;
}


char* SlabAlloc::do_translate(ref_type) const noexcept
{
    REALM_ASSERT(false); // never come here
    return nullptr;
}


int SlabAlloc::get_committed_file_format_version() noexcept
{
    {
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
        if (m_mappings.size()) {
            // if we have mapped a file, m_mappings will have at least one mapping and
            // the first will be to the start of the file. Don't come here, if we're
            // just attaching a buffer. They don't have mappings.
            util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
        }
    }
    const Header& header = *reinterpret_cast<const Header*>(m_data);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    int file_format_version = int(header.m_file_format[slot_selector]);
    return file_format_version;
}

bool SlabAlloc::is_file_on_streaming_form(const Header& header)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
}
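
The header encodes double buffering in miniature: two top-ref slots plus a flags byte whose least significant bit selects the live slot, and the streaming (append-only) form is signaled by slot 0 holding the all-ones sentinel, in which case the real top ref sits in a footer at the end of the file. The decoding condensed (field layout simplified, names follow the code above):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <cstdint>

struct Header {
    uint64_t m_top_ref[2];
    uint8_t m_flags; // lsb is the select bit
};

int selected_slot(const Header& h)
{
    return (h.m_flags & 1) != 0 ? 1 : 0;
}

bool is_streaming(const Header& h)
{
    return selected_slot(h) == 0 && h.m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL;
}

int main()
{
    Header fresh{{0xFFFFFFFFFFFFFFFFULL, 0}, 0};
    assert(is_streaming(fresh)); // as written by init_streaming_header
    Header committed{{0xFFFFFFFFFFFFFFFFULL, 4096}, 1};
    assert(!is_streaming(committed)); // select bit now points at slot 1
    assert(committed.m_top_ref[selected_slot(committed)] == 4096);
    return 0;
}
// --- end sketch ---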

ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    const Header& header = reinterpret_cast<const Header&>(*buffer);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    if (is_file_on_streaming_form(header)) {
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
        return ref_type(footer.m_top_ref);
    }
    else {
        return to_ref(header.m_top_ref[slot_selector]);
    }
}

std::string SlabAlloc::get_file_path_for_assertions() const
{
    return m_file.get_path();
}

bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
{
    if (cfg.read_only) {
        // If the file is opened read-only, we cannot change it. This is not a problem,
        // because for a read-only file we assume that it will not change while we use it,
        // hence there will be no need to grow memory mappings.
        // This assumption obviously will not hold, if the file is shared by multiple
        // processes or threads with different opening modes.
        // Currently, there is no way to detect if this assumption is violated.
        return false;
    }
    size_t expected_size = size_t(-1);
    size_t size = static_cast<size_t>(m_file.get_size());

    // It is not safe to change the size of a file on streaming form, since the footer
    // must remain available and remain at the very end of the file.
    REALM_ASSERT(!is_file_on_streaming_form());

    // check if online compaction allows us to shrink the file:
    if (top_ref) {
        // Get the expected file size by looking up logical file size stored in top array
        Array top(*this);
        top.init_from_ref(top_ref);
        size_t logical_size = Group::get_logical_file_size(top);
        // make sure we're page aligned, so the code below doesn't first
        // truncate the file, then expand it again
        expected_size = round_up_to_page_size(logical_size);
    }

    // Check if we can shrink the file
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
        detach(true); // keep m_file open
        m_file.resize(expected_size);
        m_file.close();
        return true;
    }

    // We can only safely mmap the file, if its size matches a page boundary. If not,
    // we must change the size to match before mmaping it.
    if (size != round_up_to_page_size(size)) {
        // The file size did not match a page boundary.
        // We must extend the file to a page boundary (unless already there)
        // The file must be extended to match in size prior to being mmapped,
        // as extending it after mmap has undefined behavior.
        if (cfg.session_initiator || !cfg.is_shared) {
            // We can only safely extend the file if we're the session initiator, or if
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
            // done to ensure well defined behavior for memory mappings. It does not matter,
            // that the free space management isn't informed
            size = round_up_to_page_size(size);
            detach(true); // keep m_file open
            m_file.prealloc(size);
            m_file.close();
            return true;
        }
        else {
            // Getting here, we have a file of a size that will not work, and without being
            // allowed to extend it. This should not be possible. But allowing a retry is
            // arguably better than giving up and crashing...
            throw Retry();
        }
    }
    return false;
}
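
align_filesize_for_mmap leans on round_up_to_page_size throughout, since a file may only be mapped in whole pages. For a power-of-two page size that rounding is a single mask operation (4K pages assumed here; the real helper queries the OS):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <cstddef>

constexpr std::size_t page = 4096; // assumed page size for the example

constexpr std::size_t round_up_to_page(std::size_t n)
{
    return (n + page - 1) & ~(page - 1);
}

int main()
{
    assert(round_up_to_page(1) == 4096);
    assert(round_up_to_page(4096) == 4096); // aligned sizes pass through
    assert(round_up_to_page(4097) == 8192);
    return 0;
}
// --- end sketch ---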

ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
{
    m_cfg = cfg;
    m_write_observer = write_observer;
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    // When 'read_only' is true, this function will throw InvalidDatabase if the
    // file exists already but is empty. This can happen if another process is
    // currently creating it. Note however, that it is only legal for multiple
    // processes to access a database file concurrently if it is done via a
    // DB, and in that case 'read_only' can never be true.
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
    // session_initiator can be set *only* if we're shared.
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
                    get_file_path_for_assertions());
    // clear_file can be set *only* if we're the first session.
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
                    get_file_path_for_assertions());

    using namespace realm::util;
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
    set_read_only(cfg.read_only);
    try {
        m_file.open(path.c_str(), access, create, 0); // Throws
    }
    catch (const FileAccessError& ex) {
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
        if (ex.code() == ErrorCodes::PermissionDenied) {
            msg += util::format(". Please use a path where your app has %1 permissions.",
                                cfg.read_only ? "read" : "read-write");
        }
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
    }
    File::CloseGuard fcg(m_file);
    auto physical_file_size = m_file.get_size();
    // Note that get_size() may (will) return a different size before and after
    // the call below to set_encryption_key.
    m_file.set_encryption_key(cfg.encryption_key);

    size_t size = 0;
    // The size of a database file must not exceed what can be encoded in
    // size_t.
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
        throw InvalidDatabase("Realm file too large", path);
    if (cfg.clear_file_on_error && cfg.session_initiator) {
        if (size == 0 && physical_file_size != 0) {
            cfg.clear_file = true;
        }
        else if (size > 0) {
            try {
                read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
            }
            catch (const InvalidDatabase&) {
                cfg.clear_file = true;
            }
        }
    }
    if (cfg.clear_file) {
        m_file.resize(0);
        size = 0;
        physical_file_size = 0;
    }
    else if (cfg.encryption_key && !cfg.clear_file && size == 0 && physical_file_size != 0) {
        // The opened file holds data, but is so small it cannot have
        // been created with encryption
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
    }
    if (size == 0) {
        if (REALM_UNLIKELY(cfg.read_only))
            throw InvalidDatabase("Read-only access to empty Realm file", path);
        // We want all non-streaming files to be a multiple of the page size
        // to simplify memory mapping, so just pre-reserve the required space now
        m_file.prealloc(page_size()); // Throws
        const char* data = reinterpret_cast<const char*>(&empty_file_header);
        m_file.write(0, data, sizeof empty_file_header); // Throws

        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws

        size = size_t(m_file.get_size());
    }

    ref_type top_ref = read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
    m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
    // m_data not valid at this point!
    m_baseline = 0;
    // make sure that any call to begin_read cause any slab to be placed in free
    // lists correctly
    m_free_space_state = free_space_Invalid;

    // Ensure clean up, if we need to back out:
    DetachGuard dg(*this);

    reset_free_space_tracking();

    // the file could have been produced on a device with a different
    // page size than our own so don't expect the size to be aligned
    if (cfg.encryption_key && size != 0 && size != round_up_to_page_size(size)) {
        size = round_up_to_page_size(size);
    }
    update_reader_view(size);
    REALM_ASSERT(m_mappings.size());
    m_data = m_mappings[0].primary_mapping.get_addr();
    util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    dg.release();  // Do not detach
    fcg.release(); // Do not close
    return top_ref;
}

void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
{
    auto header = reinterpret_cast<const Header*>(m_data);
    if (!is_file_on_streaming_form(*header))
        return;

    // Make sure the database is not on streaming format. If we did not do this,
    // a later commit would have to do it. That would require coordination with
    // anybody concurrently joining the session, so it seems easier to do it at
    // session initialization, even if it means writing the database during open.
    {
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
        Header& writable_header = *writable_map.get_addr();
        util::encryption_read_barrier(writable_map, 0);
        writable_header.m_top_ref[1] = top_ref;
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();
        util::encryption_read_barrier(writable_map, 0);
        writable_header.m_flags |= flags_SelectBit;
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();

        util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    }
}
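
convert_from_streaming_form is a two-phase header commit: the new top ref is first written into the unused slot and synced, and only then is the select bit flipped and synced again, so a crash between the two syncs still leaves a consistent header. The ordering, reduced to memory operations (an in-memory stand-in; the real code syncs a mapping of the file header):

// --- illustrative sketch, not part of alloc_slab.cpp ---
#include <cassert>
#include <cstdint>

struct Header {
    uint64_t m_top_ref[2];
    uint8_t m_flags;
};

void commit_top_ref(Header& h, uint64_t top_ref)
{
    int unused_slot = (h.m_flags & 1) ^ 1;
    h.m_top_ref[unused_slot] = top_ref; // phase 1, followed by a durable sync
    h.m_flags ^= 1;                     // phase 2: the flip is the commit point
}

int main()
{
    Header h{{0xFFFFFFFFFFFFFFFFULL, 0}, 0}; // streaming form, slot 0 holds the sentinel
    commit_top_ref(h, 4096);
    assert((h.m_flags & 1) == 1 && h.m_top_ref[1] == 4096);
    return 0;
}
// --- end sketch ---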

900
ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
901
{
90✔
902
    // ExceptionSafety: If this function throws, it must leave the allocator in
903
    // the detached state.
904

905
    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
90✔
906
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());
90✔
907

908
    // Verify the data structures
909
    std::string path;                                     // No path
90✔
910
    ref_type top_ref = validate_header(data, size, path); // Throws
90✔
911

912
    m_data = data;
90✔
913
    size = align_size_to_section_boundary(size);
90✔
914
    m_baseline = size;
90✔
915
    m_attach_mode = attach_UsersBuffer;
90✔
916

917
    m_translation_table_size = 1;
90✔
918
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
90✔
919
    return top_ref;
90✔
920
}
90✔
921

922
void SlabAlloc::init_in_memory_buffer()
923
{
25,494✔
924
    m_attach_mode = attach_Heap;
25,494✔
925
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
25,494✔
926
    m_data = m_virtual_file_buffer.back().addr;
25,494✔
927
    m_virtual_file_size = sizeof(empty_file_header);
25,494✔
928
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);
25,494✔
929

930
    m_baseline = m_virtual_file_size;
25,494✔
931
    m_translation_table_size = 1;
25,494✔
932
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
25,494✔
933
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
25,494✔
934
    m_ref_translation_ptr = ref_translation_ptr;
25,494✔
935
}
25,494✔
936

937
char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
938
{
5,355,570✔
939
    auto idx = get_section_index(ref);
5,355,570✔
940
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
5,355,570✔
941
    auto& buf = m_virtual_file_buffer[idx];
5,355,570✔
942
    return buf.addr + (ref - buf.start_ref);
5,355,570✔
943
}
5,355,570✔
944

945
void SlabAlloc::attach_empty()
946
{
4,992✔
947
    // ExceptionSafety: If this function throws, it must leave the allocator in
948
    // the detached state.
949

950
    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
4,992✔
951

952
    m_attach_mode = attach_OwnedBuffer;
4,992✔
953
    m_data = nullptr; // Empty buffer
4,992✔
954

955
    // Below this point (assignment to `m_attach_mode`), nothing must throw.
956

957
    // No ref must ever be less than the header size, so we will use that as the
958
    // baseline here.
959
    size_t size = align_size_to_section_boundary(sizeof(Header));
4,992✔
960
    m_baseline = size;
4,992✔
961
    m_translation_table_size = 1;
4,992✔
962
    m_ref_translation_ptr = new RefTranslation[1];
4,992✔
963
}
4,992✔
964

965
ref_type SlabAlloc::read_and_validate_header(util::File& file, const std::string& path, size_t size,
966
                                             bool session_initiator, util::WriteObserver* write_observer)
967
{
75,018✔
968
    try {
75,018✔
969
        // we'll read header and (potentially) footer
970
        File::Map<char> map_header(file, File::access_ReadOnly, sizeof(Header), write_observer);
75,018✔
971
        util::encryption_read_barrier(map_header, 0, sizeof(Header));
75,018✔
972
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());
75,018✔
973

974
        File::Map<char> map_footer;
75,018✔
975
        const StreamingFooter* footer = nullptr;
75,018✔
976
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
75,018✔
977
            size_t footer_ref = size - sizeof(StreamingFooter);
654✔
978
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
654✔
979
            size_t footer_offset = footer_ref - footer_page_base;
654✔
980
            map_footer = File::Map<char>(file, footer_page_base, File::access_ReadOnly,
654✔
981
                                         sizeof(StreamingFooter) + footer_offset, write_observer);
654✔
982
            util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
654✔
983
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
654✔
984
        }
654✔
985

986
        auto top_ref = validate_header(header, footer, size, path, file.get_encryption() != nullptr); // Throws
75,018✔
987

988
        if (session_initiator && is_file_on_streaming_form(*header)) {
75,018✔
989
            // Don't compare file format version fields as they are allowed to differ.
990
            // Also don't compare reserved fields.
991
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, path);
570✔
992
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0], path);
570✔
993
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1], path);
570✔
994
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2], path);
570✔
995
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3], path);
570✔
996
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0], path);
570✔
997
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], path);
570✔
998
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie, path);
570✔
999
        }
570✔
1000
        return top_ref;
75,018✔
1001
    }
75,018✔
1002
    catch (const InvalidDatabase&) {
75,018✔
1003
        throw;
84✔
1004
    }
84✔
1005
    catch (const DecryptionFailed& e) {
75,018✔
1006
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
66✔
1007
    }
66✔
1008
    catch (const std::exception& e) {
75,018✔
1009
        throw InvalidDatabase(e.what(), path);
12✔
1010
    }
12✔
1011
    catch (...) {
75,018✔
1012
        throw InvalidDatabase("unknown error", path);
×
1013
    }
×
1014
}
75,018✔
1015

1016
void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
1017
{
54✔
1018
    char buf[256];
54✔
1019
    snprintf(buf, sizeof(buf),
54✔
1020
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
54✔
1021
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
54✔
1022
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
54✔
1023
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
54✔
1024
             header.m_flags);
54✔
1025
    msg += buf;
54✔
1026
    throw InvalidDatabase(msg, path);
54✔
1027
}
54✔
1028

1029
// Note: This relies on proper mappings having been established by the caller
1030
// for both the header and the streaming footer
1031
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
1032
{
90✔
1033
    auto header = reinterpret_cast<const Header*>(data);
90✔
1034
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
90✔
1035
    return validate_header(header, footer, size, path);
90✔
1036
}
90✔
1037

1038
ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
1039
                                    const std::string& path, bool is_encrypted)
1040
{
75,030✔
1041
    // Verify that size is sane and 8-byte aligned
1042
    if (REALM_UNLIKELY(size < sizeof(Header)))
75,030✔
1043
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
54✔
1044
                              path);
54✔
1045
    if (REALM_UNLIKELY(size % 8 != 0))
74,976✔
1046
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);
×
1047

1048
    // First four bytes of info block is file format id
1049
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
74,976✔
1050
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
74,976✔
1051
        if (is_encrypted) {
54✔
1052
            // Encrypted files check the hmac on read, so there's a lot less
1053
            // which could go wrong and have us still reach this point
1054
            throw_header_exception("header has invalid mnemonic. The file does not appear to be Realm file.", *header,
18✔
1055
                                   path);
18✔
1056
        }
18✔
1057
        else {
36✔
1058
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
36✔
1059
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
36✔
1060
                                   *header, path);
36✔
1061
        }
36✔
1062
    }
54✔
1063

1064
    // Last bit in info block indicates which top_ref block is valid
1065
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
74,976✔
1066

1067
    // Top-ref must always point within buffer
1068
    auto top_ref = header->m_top_ref[slot_selector];
74,976✔
1069
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
74,976✔
1070
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
696✔
1071
            throw InvalidDatabase(
×
1072
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
×
1073
                path);
×
1074
        }
×
1075
        REALM_ASSERT(footer);
696✔
1076
        top_ref = footer->m_top_ref;
696✔
1077
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
696✔
1078
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
×
1079
                                               "The file is probably truncated.",
×
1080
                                               footer->m_magic_cookie),
×
1081
                                  path);
×
1082
        }
×
1083
    }
696✔
1084
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
74,976✔
1085
        throw_header_exception("top ref is not aligned", *header, path);
×
1086
    }
×
1087
    if (REALM_UNLIKELY(top_ref >= size)) {
74,976✔
1088
        throw_header_exception(
×
1089
            util::format(
×
1090
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
×
1091
                top_ref),
×
1092
            *header, path);
×
1093
    }
×
1094
    return ref_type(top_ref);
74,976✔
1095
}
74,976✔
1096

1097

1098
size_t SlabAlloc::get_total_size() const noexcept
1099
{
1,100,283✔
1100
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
1,100,283✔
1101
}
1,100,283✔
1102

1103

1104
void SlabAlloc::reset_free_space_tracking()
1105
{
725,511✔
1106
    CriticalSection cs(changes);
725,511✔
1107
    if (is_free_space_clean())
725,511✔
1108
        return;
9,249✔
1109

1110
    // Free all scratch space (done after all data has
1111
    // been commited to persistent space)
1112
    m_free_read_only.clear();
716,262✔
1113

1114
    // release slabs.. keep the initial allocation if it's a minimal allocation,
1115
    // otherwise release it as well. This saves map/unmap for small transactions.
1116
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
725,877✔
1117
        auto& last_slab = m_slabs.back();
9,615✔
1118
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
9,615✔
1119
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
9,615✔
1120
        --m_translation_table_size;
9,615✔
1121
        m_slabs.pop_back();
9,615✔
1122
    }
9,615✔
1123
    rebuild_freelists_from_slab();
716,262✔
1124
    m_free_space_state = free_space_Clean;
716,262✔
1125
    m_commit_size = 0;
716,262✔
1126
}
716,262✔
1127

1128
inline bool randomly_false_in_debug(bool x)
1129
{
×
1130
#ifdef REALM_DEBUG
×
1131
    if (x)
×
1132
        return (std::rand() & 1);
×
1133
#endif
×
1134
    return x;
×
1135
}
×
1136

1137

1138


/*
  Memory mapping

  To make ref->ptr translation fast while also avoiding having to memory map the entire file
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
  essential to map the file in evenly sized sections.

  These sections must be large enough to hold one or more of the largest arrays, which can be up
  to 16MB. You can only mmap file space which has been allocated to a file. If you mmap a range
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
  We don't want to extend the file in increments as large as the chunk size.

  As the file grows, we grow the mapping by creating a new, larger one, which replaces the
  old one in the mapping table. However, we must keep the old mapping open, because older
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
  and only cleaned out once we know that no transaction can refer to them anymore.

  Interaction with encryption

  When encryption is enabled, the memory mapping is to temporary memory, not the file.
  The binding to the file is done by software. This allows us to "cheat" and allocate
  entire sections. With encryption, it doesn't matter if the mapped memory logically
  extends beyond the end of the file, because it will not be accessed.

  Growing/changing the mapping table

  There are two mapping tables:

  * m_mappings: This is the "source of truth" about what the current mapping is.
    It is only accessed under lock.
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
    mostly lock-free fashion from the translate function. Because of the lock-free operation,
    this table can only be extended. Only selected members in each entry can be changed.
    See RefTranslation in alloc.hpp for more details.
    The fast mapping also maps the slab area used for allocations - as mappings are added,
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
    to memory allocation and release, which is already serialized (since write transactions are
    single threaded).

  When m_mappings is changed due to an extend operation changing a mapping, or when
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:

  * A new fast mapping table is created. The old one is not modified.
  * The old one is held in a waiting area until it is no longer relevant because no
    live transaction can refer to it any more.
 */
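
// Editorial note: the read-copy-update scheme described above can be hard to
// picture from the comment alone, so here is a minimal, self-contained sketch
// of the same pattern, deliberately independent of the Realm types. Every name
// in it (Table, g_table, g_writer_mutex, g_retired, reader_lookup, writer_grow)
// is hypothetical and for illustration only; the block is kept out of the
// build with #if 0.
#if 0
#include <atomic>
#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>

struct Table {
    std::vector<char*> entries; // plays the role of the RefTranslation array
};

std::atomic<Table*> g_table{new Table};        // cf. m_ref_translation_ptr
std::mutex g_writer_mutex;                     // cf. m_mapping_mutex
std::vector<std::unique_ptr<Table>> g_retired; // cf. m_old_translations

// Readers never take the lock; whatever table they load stays alive until a
// later purge proves that no reader can still hold it.
char* reader_lookup(std::size_t i)
{
    return g_table.load(std::memory_order_acquire)->entries[i];
}

// Writers are serialized. They never mutate the published table in place;
// they copy it, extend the copy, publish it, and retire the old one.
void writer_grow(char* new_entry)
{
    std::lock_guard<std::mutex> lock(g_writer_mutex);
    Table* old_table = g_table.load(std::memory_order_relaxed);
    auto new_table = std::make_unique<Table>(*old_table);
    new_table->entries.push_back(new_entry);
    g_table.store(new_table.release(), std::memory_order_release);
    g_retired.emplace_back(old_table); // freed later, cf. purge_old_mappings()
}
#endif
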
void SlabAlloc::update_reader_view(size_t file_size)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
    if (file_size <= old_baseline) {
        schedule_refresh_of_outdated_encrypted_pages();
        return;
    }

    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
    bool replace_last_mapping = false;
    size_t old_num_mappings = get_section_index(old_slab_base);

    if (!is_in_memory()) {
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
                        get_file_path_for_assertions());
        REALM_ASSERT_DEBUG(is_free_space_clean());

        // Create the new mappings we need to cover the new size. We don't mutate
        // any of the member variables until we've successfully created all of the
        // mappings, so that we leave things in a consistent state if one of them
        // hits an allocation failure.

        std::vector<MapEntry> new_mappings;
        REALM_ASSERT(m_mappings.size() == old_num_mappings);

        {
            // If the old slab base was greater than the old baseline, then the final
            // mapping was a partial section and we need to replace it with a larger
            // mapping.
            if (old_baseline < old_slab_base) {
                // old_slab_base should be 0 if we had no mappings previously
                REALM_ASSERT(old_num_mappings > 0);
                // Try to extend the old mapping in-place instead of replacing it.
                MapEntry& cur_entry = m_mappings.back();
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
                    replace_last_mapping = true;
                    --old_num_mappings;
                }
            }

            // Create new mappings covering from the end of the last complete
            // section to the end of the new file size.
            const auto new_slab_base = align_size_to_section_boundary(file_size);
            const size_t num_mappings = get_section_index(new_slab_base);
            new_mappings.reserve(num_mappings - old_num_mappings);
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
                const size_t section_start_offset = get_section_base(k);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (section_size == (1 << section_shift)) {
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
                                                                  section_size, m_write_observer)});
                }
                else {
                    new_mappings.emplace_back();
                    auto& mapping = new_mappings.back().primary_mapping;
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
                                                        section_start_offset, m_write_observer);
                    if (reserved) {
                        // If reservation is supported, the first attempt at extending must succeed.
                        if (!mapping.try_extend_to(section_size))
                            throw std::bad_alloc();
                    }
                    else {
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size,
                                                                section_start_offset, m_write_observer);
                    }
                }
            }
        }

        // Now that we've successfully created our mappings, update our member
        // variables (and assume that resizing a simple vector won't produce memory
        // allocation failures, unlike 64 MB mmaps).
        if (replace_last_mapping) {
            MapEntry& cur_entry = m_mappings.back();
            // We should not have a xover mapping here, because that would mean
            // that there was already something mapped after the last section.
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
            // Save the old mapping and keep it open.
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
            m_mappings.pop_back();
            m_mapping_version++;
        }

        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
    }

    m_baseline.store(file_size, std::memory_order_relaxed);

    const size_t ref_start = align_size_to_section_boundary(file_size);
    const size_t ref_displacement = ref_start - old_slab_base;
    if (ref_displacement > 0) {
        // Rebase slabs, as m_baseline is now bigger than old_slab_base.
        for (auto& e : m_slabs) {
            e.ref_end += ref_displacement;
        }
    }

    rebuild_freelists_from_slab();

    // Build the fast path mapping.

    // The fast path mapping is an array which is used from multiple threads
    // without locking - see translate().

    // Addition of a new mapping may require a completely new fast mapping table.
    //
    // Being used in a multithreaded scenario, the old mappings must be kept open
    // until the realm version for which they were established has been closed/detached.
    //
    // This assumes that only write transactions call do_alloc() or do_free() or need to
    // translate refs in the slab area, and that all these uses are serialized, whether
    // that is achieved by being single threaded, interlocked or run from a sequential
    // scheduling queue.
    //
    rebuild_translations(replace_last_mapping, old_num_mappings);

    schedule_refresh_of_outdated_encrypted_pages();
}


void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
{
#if REALM_ENABLE_ENCRYPTION
    if (auto encryption = m_file.get_encryption()) {
        encryption->mark_data_as_possibly_stale();
    }
#endif // REALM_ENABLE_ENCRYPTION
}

size_t SlabAlloc::get_allocated_size() const noexcept
{
    size_t sz = 0;
    for (const auto& s : m_slabs)
        sz += s.size;
    return sz;
}

void SlabAlloc::extend_fast_mapping_with_slab(char* address)
{
    ++m_translation_table_size;
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
        new_fast_mapping[i] = m_ref_translation_ptr[i];
    }
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
                                    m_ref_translation_ptr.load());
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
    // Memory ranges with slab (working memory) can never have arrays that straddle a boundary,
    // so optimize by clamping the lowest possible xover offset to the end of the section.
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
    m_ref_translation_ptr = new_fast_mapping.release();
}

void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
{
    size_t free_space_size = m_slabs.size();
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
    if (m_translation_table_size < num_mappings + free_space_size) {
        requires_new_translation = true;
    }
    RefTranslation* new_translation_table = m_ref_translation_ptr;
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
    if (requires_new_translation) {
        // We need a new translation table, but must preserve the old one, as
        // translations using it may be in progress concurrently.
        if (m_translation_table_size)
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
                                            m_ref_translation_ptr.load());
        m_translation_table_size = num_mappings + free_space_size;
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
        new_translation_table = new_translation_table_owner.get();
        old_num_sections = 0;
    }
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
        if (is_in_memory()) {
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
        }
        else {
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
#if REALM_ENABLE_ENCRYPTION
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
#endif
        }
        REALM_ASSERT(new_translation_table[i].mapping_addr);
        // We don't copy over data for the cross over mapping. If the mapping is needed,
        // copying will happen on demand (in get_or_add_xover_mapping).
        // Note: that may never be needed, because if the array that needed the original cross over
        // mapping is freed, any new array allocated at the same position will NOT need a cross
        // over mapping, but will just use the primary mapping.
    }
    for (size_t k = 0; k < free_space_size; ++k) {
        char* base = m_slabs[k].addr;
        REALM_ASSERT(base);
        new_translation_table[num_mappings + k].mapping_addr = base;
    }

    // This will either be null or the same as new_translation_table, which is about to become owned by
    // m_ref_translation_ptr.
    (void)new_translation_table_owner.release();

    m_ref_translation_ptr = new_translation_table;
}

void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
{
    auto _page_size = page_size();
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
        // Some other thread already added a mapping;
        // it MUST have been for the exact same address:
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
        return;
    }
    MapEntry* map_entry = &m_mappings[index];
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
    if (!map_entry->xover_mapping.is_attached()) {
        // Create a xover mapping
        auto file_offset = get_section_base(index) + offset;
        auto end_offset = file_offset + size;
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
        auto minimal_mapping_size = end_offset - mapping_file_offset;
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size,
                                      m_write_observer);
        map_entry->xover_mapping = std::move(mapping);
    }
    txl.xover_mapping_base = offset & ~(_page_size - 1);
#if REALM_ENABLE_ENCRYPTION
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
#endif
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
}
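
// Editorial note: the masking with ~(_page_size - 1) above rounds the mapping
// start down to a page boundary, so the xover mapping covers the whole array
// even when it starts mid-page. A quick sanity check of that arithmetic,
// assuming a hypothetical 4096-byte page (kept out of the build with #if 0):
#if 0
static_assert((10000 & ~size_t(4095)) == 8192,
              "file_offset 10000 maps from the page starting at 8192");
static_assert((10000 + 500) - (10000 & ~size_t(4095)) == 2308,
              "the mapping spans from the page start to the end of the array");
#endif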

void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
{
    // Verify that each old ref translation pointer still points to a valid
    // thing that we haven't released yet.
#if REALM_DEBUG
    std::unordered_set<const char*> mappings;
    for (auto& m : m_old_mappings) {
        REALM_ASSERT(m.mapping.is_attached());
        mappings.insert(m.mapping.get_addr());
    }
    for (auto& m : m_mappings) {
        REALM_ASSERT(m.primary_mapping.is_attached());
        mappings.insert(m.primary_mapping.get_addr());
        if (m.xover_mapping.is_attached())
            mappings.insert(m.xover_mapping.get_addr());
    }
    for (auto& m : m_virtual_file_buffer) {
        mappings.insert(m.addr);
    }
    if (m_data)
        mappings.insert(m_data);
    for (auto& t : m_old_translations) {
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
                        youngest_live_version, t.replaced_at_version);
        if (nonempty_attachment()) {
            for (size_t i = 0; i < t.translation_count; ++i)
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
        }
    }
#else
    static_cast<void>(youngest_live_version);
#endif
}


void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    verify_old_translations(youngest_live_version);

    auto pred = [=](auto& oldie) {
        return oldie.replaced_at_version < oldest_live_version;
    };
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
                             m_old_translations.end());
    m_youngest_live_version = youngest_live_version;
    verify_old_translations(youngest_live_version);
}
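
// Editorial note: a minimal sketch of the version-gated reclamation performed
// above, with hypothetical types (Retired, purge) standing in for the real
// Realm ones. A retired resource records the version at which it was replaced
// and may be dropped once the oldest live transaction is newer than that.
// Kept out of the build with #if 0.
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>

struct Retired {
    std::uint64_t replaced_at_version; // youngest live version when it was replaced
    // ... the owned mapping or translation table would live here ...
};

void purge(std::vector<Retired>& retired, std::uint64_t oldest_live_version)
{
    // Anything replaced before the oldest live version can no longer be
    // observed by any reader, so it is safe to free.
    auto pred = [=](const Retired& r) {
        return r.replaced_at_version < oldest_live_version;
    };
    retired.erase(std::remove_if(retired.begin(), retired.end(), pred), retired.end());
}
#endif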

void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
{
    m_youngest_live_version = currently_live_version;
}

const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
{
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();
    return m_free_read_only;
}


size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
{
    size_t end_of_block = start_pos + free_chunk_size;
    size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
        if (alloc_pos + request_size <= next_section_boundary) {
            return alloc_pos;
        }
        alloc_pos = next_section_boundary;
    }
    return 0;
}
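
// Editorial note: the search above places a request inside a free chunk
// without letting it straddle a section boundary. Below is a self-contained
// sketch of the same walk, with the section size as an explicit parameter
// (the real code derives it from section_shift; the 64 MB figure in the usage
// comment is assumed only for the example). All names are illustrative and
// the block is kept out of the build with #if 0.
#if 0
#include <cstddef>

inline std::size_t find_in_range_sketch(std::size_t start_pos, std::size_t free_chunk_size,
                                        std::size_t request_size, std::size_t section_size)
{
    std::size_t end_of_block = start_pos + free_chunk_size;
    std::size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        // The strictly-next section boundary after alloc_pos.
        std::size_t next_boundary = ((alloc_pos / section_size) + 1) * section_size;
        if (alloc_pos + request_size <= next_boundary)
            return alloc_pos; // fits before the boundary
        alloc_pos = next_boundary; // otherwise retry from the boundary itself
    }
    return 0; // no position in the chunk can hold the request
}

// Example: with 64 MB sections, a chunk starting 4 KB below the first boundary
// cannot hold a 16 KB request there, so the request is placed at the boundary:
//   find_in_range_sketch(64 MB - 4 KB, 1 MB, 16 KB, 64 MB) == 64 MB.
#endif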


void SlabAlloc::resize_file(size_t new_file_size)
{
    if (m_attach_mode == attach_SharedFile) {
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
        m_file.prealloc(new_file_size); // Throws
        // Resizing is done based on the logical file size. It is ok for the file
        // to actually be bigger, but never smaller.
        REALM_ASSERT_EX(new_file_size <= static_cast<size_t>(m_file.get_size()), new_file_size, m_file.get_size());

        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws
    }
    else {
        size_t current_size = 0;
        for (auto& b : m_virtual_file_buffer) {
            current_size += b.size;
        }
        if (new_file_size > current_size) {
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
        }
        m_virtual_file_size = new_file_size;
    }
}

#ifdef REALM_DEBUG
void SlabAlloc::reserve_disk_space(size_t size)
{
    if (size != round_up_to_page_size(size))
        size = round_up_to_page_size(size);
    m_file.prealloc(size); // Throws

    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
    if (!disable_sync)
        m_file.sync(); // Throws
}
#endif

void SlabAlloc::verify() const
{
#ifdef REALM_DEBUG
    if (!m_slabs.empty()) {
        // Make sure that all free blocks are within a slab. This is done
        // implicitly by using for_all_free_entries().
        size_t first_possible_ref = m_baseline;
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
        for_all_free_entries([&](size_t ref, size_t size) {
            REALM_ASSERT(ref >= first_possible_ref);
            REALM_ASSERT(ref + size <= first_impossible_ref);
            first_possible_ref = ref;
        });
    }
#endif
}

#ifdef REALM_DEBUG

bool SlabAlloc::is_all_free() const
{
    // Verify that slabs contain only free space.
    // This is equivalent to each slab holding BetweenBlocks only at the ends.
    for (const auto& e : m_slabs) {
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
        REALM_ASSERT(first->block_before_size == 0);
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
        REALM_ASSERT(last->block_after_size == 0);
        if (first->block_after_size != last->block_before_size)
            return false;
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
        range -= sizeof(BetweenBlocks);
        // The size of the free area must match the distance between the two BetweenBlocks:
        if (range != first->block_after_size)
            return false;
    }
    return true;
}


// LCOV_EXCL_START
void SlabAlloc::print() const
{
    /* TODO
     *

    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;

    size_t free = 0;
    for (const auto& free_block : m_free_space) {
        free += free_block.size;
    }

    size_t allocated = allocated_for_slabs - free;
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";

    if (!m_slabs.empty()) {
        std::cout << "Slabs: ";
        ref_type first_ref = m_baseline;

        for (const auto& slab : m_slabs) {
            if (&slab != &m_slabs.front())
                std::cout << ", ";

            ref_type last_ref = slab.ref_end - 1;
            size_t size = slab.ref_end - first_ref;
            void* addr = slab.addr;
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
            first_ref = slab.ref_end;
        }
        std::cout << "\n";
    }

    if (!m_free_space.empty()) {
        std::cout << "FreeSpace: ";
        for (const auto& free_block : m_free_space) {
            if (&free_block != &m_free_space.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    if (!m_free_read_only.empty()) {
        std::cout << "FreeSpace (ro): ";
        for (const auto& free_block : m_free_read_only) {
            if (&free_block != &m_free_read_only.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    std::cout << std::flush;
    */
}
// LCOV_EXCL_STOP

#endif // REALM_DEBUG