
realm / realm-core / build 1743 (push, run by realm-ci on Evergreen: "Updated release notes")

06 Oct 2023 04:08PM UTC. Coverage: 91.621% (+0.01% from 91.61%)

94318 of 173524 branches covered (54.35%)
230654 of 251749 relevant lines covered (91.62%)
6530623.67 hits per line

Source file: /src/realm/alloc_slab.cpp (92.75% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <cinttypes>
#include <type_traits>
#include <exception>
#include <algorithm>
#include <memory>
#include <mutex>
#include <map>
#include <atomic>
#include <cstring>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/errno.hpp>
#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/miscellaneous.hpp>
#include <realm/util/terminate.hpp>
#include <realm/util/thread.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = 1UL << section_shift; // page_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


const SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};
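
// For illustration: the header keeps two top-ref/file-format slots plus a
// select bit in the flags byte. A writer publishes a new state by filling the
// inactive slot and then flipping the select bit (convert_from_streaming_form()
// below follows exactly this sequence), so a crash at any point leaves one
// valid slot. A minimal sketch of how a reader picks the active slot:
//
//     int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
//     ref_type top_ref = to_ref(header.m_top_ref[slot_selector]);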

void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}

inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr;
    m_ref_translation_ptr.store(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = nullptr;
#endif

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations, "
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined, "
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}


MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeBlock)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;
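    // For illustration: the two adjustments above mean a tiny request first
    // grows to sizeof(FreeBlock), and any size is then rounded up to the next
    // multiple of 8 via (size + 7) & ~0x7, e.g. 13 -> 16, while 24 stays 24.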

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}

SlabAlloc::FreeBlock* SlabAlloc::get_prev_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_before(entry);
    if (bb->block_before_size <= 0)
        return nullptr; // no prev block, or it is in use
    return block_before(bb);
}

SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_after(entry);
    if (bb->block_after_size <= 0)
        return nullptr; // no next block, or it is in use
    return block_after(bb);
}
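
// A rough sketch of the slab layout assumed by the helpers above (FreeBlock
// and BetweenBlocks are declared as part of SlabAlloc): blocks alternate with
// BetweenBlocks separators, and each separator records the size of the block
// on either side of it, positive for free blocks and negative for allocated
// ones (see mark_allocated()/mark_freed() below):
//
//     [BB] block [BB] block [BB] ... block [BB]
//
// A size of zero marks the boundary of the slab (see slab_to_entry()), so the
// two helpers can test "<= 0" to mean "no neighbor, or neighbor is in use".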

SlabAlloc::FreeList SlabAlloc::find(int size)
{
    FreeList retval;
    retval.it = m_block_map.lower_bound(size);
    if (retval.it != m_block_map.end()) {
        retval.size = retval.it->first;
    }
    else {
        retval.size = 0;
    }
    return retval;
}

SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
{
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
        ++hint.it;
    if (hint.it == m_block_map.end())
        hint.size = 0; // indicate "not found"
    return hint;
}

SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
{
    FreeBlock* retval = list.it->second;
    FreeBlock* header = retval->next;
    if (header == retval)
        m_block_map.erase(list.it);
    else
        list.it->second = header;
    retval->unlink();
    return retval;
}

void SlabAlloc::FreeBlock::unlink()
{
    auto _next = next;
    auto _prev = prev;
    _next->prev = prev;
    _prev->next = next;
    clear_links();
}

void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    auto it = m_block_map.find(size);
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
    auto header = it->second;
    if (header == entry) {
        header = entry->next;
        if (header == entry)
            m_block_map.erase(it);
        else
            it->second = header;
    }
    entry->unlink();
}

void SlabAlloc::push_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    FreeBlock* header;
    auto it = m_block_map.find(size);
    if (it != m_block_map.end()) {
        header = it->second;
        it->second = entry;
        entry->next = header;
        entry->prev = header->prev;
        entry->prev->next = entry;
        entry->next->prev = entry;
    }
    else {
        header = nullptr;
        m_block_map[size] = entry;
        entry->next = entry->prev = entry;
    }
}
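
// For illustration: m_block_map is an ordered map from block size to a
// circular doubly-linked list of free blocks of exactly that size.
// allocate_block() below therefore probes it in two steps: find() does a
// lower_bound() looking for an exact match, and find_larger() then walks
// forward to a size that can also absorb the split overhead (one
// BetweenBlocks plus one FreeBlock) left over when break_block() splits the
// chosen block.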

void SlabAlloc::mark_freed(FreeBlock* entry, int size)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
    auto alloc_size = -bb->block_after_size;
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
                    get_file_path_for_assertions());
    bb->block_after_size = alloc_size;
    bb = bb_after(entry);
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
    REALM_ASSERT(-bb->block_before_size == alloc_size);
    bb->block_before_size = alloc_size;
}

void SlabAlloc::mark_allocated(FreeBlock* entry)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
    auto bb2 = bb_after(entry);
    bb->block_after_size = 0 - bb->block_after_size;
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
    bb2->block_before_size = 0 - bb2->block_before_size;
}

SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
{
    FreeList list = find(size);
    if (list.found_exact(size)) {
        return pop_freelist_entry(list);
    }
    // no exact matches.
    list = find_larger(list, size);
    FreeBlock* block;
    if (list.found_something()) {
        block = pop_freelist_entry(list);
    }
    else {
        block = grow_slab(size);
    }
    FreeBlock* remaining = break_block(block, size);
    if (remaining)
        push_freelist_entry(remaining);
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
    return block;
}

SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
{
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
    bb->block_before_size = 0;
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
    bb->block_after_size = block_size;
    auto entry = block_after(bb);
    entry->clear_links();
    entry->ref = ref_start + sizeof(BetweenBlocks);
    bb = bb_after(entry);
    bb->block_before_size = block_size;
    bb->block_after_size = 0;
    return entry;
}

void SlabAlloc::clear_freelists()
{
    m_block_map.clear();
}

void SlabAlloc::rebuild_freelists_from_slab()
{
    clear_freelists();
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
    for (const auto& e : m_slabs) {
        FreeBlock* entry = slab_to_entry(e, ref_start);
        push_freelist_entry(entry);
        ref_start = align_size_to_section_boundary(e.ref_end);
    }
}

SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
{
    int size = size_from_block(block);
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
        return nullptr;
    bb_after(block)->block_before_size = remaining_size;
    bb_before(block)->block_after_size = new_size;
    auto bb_between = bb_after(block);
    bb_between->block_before_size = new_size;
    bb_between->block_after_size = remaining_size;
    FreeBlock* remaining_block = block_after(bb_between);
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
    remaining_block->clear_links();
    block->clear_links();
    return remaining_block;
}
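
// For illustration: break_block() splits one free block in place. Note the
// ordering trick above: bb_after(block) is computed from the size recorded in
// bb_before(block), so the first statement still addresses the *old* trailing
// BetweenBlocks, while after block_after_size is shrunk to new_size the same
// call yields the position of the *new* separator in the middle.
// Schematically:
//
//     before: [BB] <---------- block (size) ----------> [BB]
//     after:  [BB] <- block (new_size) -> [BB] <- remaining -> [BB]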

SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
{
    int size_first = size_from_block(first);
    int size_last = size_from_block(last);
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
    bb_before(first)->block_after_size = new_size;
    bb_after(last)->block_before_size = new_size;
    return first;
}

SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
{
    // Allocate new slab.
    // - Always allocate at least 128K. This is also the amount of
    //   memory that we allow the slab allocator to keep between
    //   transactions. Allowing it to keep a small amount between
    //   transactions makes very small transactions faster by avoiding
    //   repeated unmap/mmap system calls.
    // - When allocating, allocate as much as we already have, but
    // - Never allocate more than a full section (64MB). This policy
    //   leads to gradual allocation of larger and larger blocks until
    //   we reach allocation of entire sections.
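    //
    // For illustration, assuming minimal_alloc is the 128K mentioned above:
    // the loop below first rounds the request up to a multiple of 128K, and
    // the comparison with get_allocated_size() then raises the allocation to
    // the amount already allocated, so the total slab space roughly doubles
    // on each growth step until the 64MB (maximal_alloc) per-slab cap.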
    size += 2 * sizeof(BetweenBlocks);
    size_t new_size = minimal_alloc;
    while (new_size < uint64_t(size))
        new_size += minimal_alloc;
    size_t already_allocated = get_allocated_size();
    if (new_size < already_allocated)
        new_size = already_allocated;
    if (new_size > maximal_alloc)
        new_size = maximal_alloc;

    ref_type ref;
    if (m_slabs.empty()) {
        ref = m_baseline.load(std::memory_order_relaxed);
    }
    else {
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
        ref = curr_ref_end;
    }
    ref = align_size_to_section_boundary(ref);
    size_t ref_end = ref;
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
                                      util::to_string(new_size));
    }

    REALM_ASSERT(matches_section_boundary(ref));

    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    // Create new slab and add to list of slabs
    m_slabs.emplace_back(ref_end, new_size); // Throws
    const Slab& slab = m_slabs.back();
    extend_fast_mapping_with_slab(slab.addr);

    // build a single block from that entry
    return slab_to_entry(slab, ref);
}


void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}
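
// A worked example of the read-only coalescing above, with m_free_read_only
// holding {ref: size} = {100: 24}: freeing ref 124 with size 16 satisfies
// prev->first + prev->second == ref (100 + 24 == 124), so the chunks merge in
// place to {100: 40}; freeing ref 84 with size 16 instead satisfies
// ref + size == next->first (84 + 16 == 100), so the next chunk is absorbed
// and re-inserted as {84: 40}.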

void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}

size_t SlabAlloc::consolidate_free_read_only()
{
    CriticalSection cs(changes);
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    return m_free_read_only.size();
}


MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
{
    REALM_ASSERT_DEBUG(translate(ref) == addr);
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8

    // Possible future enhancement: check if we can extend current space instead
    // of unconditionally allocating new space. In that case, remember to
    // check whether m_free_space_state == free_space_Invalid. Also remember to
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.

    // Allocate new space
    MemRef new_mem = do_alloc(new_size); // Throws

    // Copy existing segment
    char* new_addr = new_mem.get_addr();
    realm::safe_copy_n(addr, old_size, new_addr);

    // Add old segment to freelist
    do_free(ref, addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out)) {
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
                  << " new_size: " << new_size << "\n";
    }
#endif // REALM_DEBUG

    return new_mem;
}


char* SlabAlloc::do_translate(ref_type) const noexcept
{
    REALM_ASSERT(false); // never come here
    return nullptr;
}


int SlabAlloc::get_committed_file_format_version() noexcept
{
    {
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
        if (m_mappings.size()) {
            // if we have mapped a file, m_mappings will have at least one mapping and
            // the first will be to the start of the file. Don't come here, if we're
            // just attaching a buffer. They don't have mappings.
            realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
        }
    }
    const Header& header = *reinterpret_cast<const Header*>(m_data);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    int file_format_version = int(header.m_file_format[slot_selector]);
    return file_format_version;
}

bool SlabAlloc::is_file_on_streaming_form(const Header& header)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
}
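
// For illustration: a file produced by init_streaming_header() above has
// slot 0 selected and m_top_ref[0] == 0xFFFFFFFFFFFFFFFF, which is exactly
// what this predicate tests for. The real top ref of such a file lives in the
// StreamingFooter at the very end of the file, as get_top_ref() below shows;
// once convert_from_streaming_form() has flipped the select bit, the header
// carries a valid in-place top ref and the file is no longer on streaming
// form.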

ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    const Header& header = reinterpret_cast<const Header&>(*buffer);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    if (is_file_on_streaming_form(header)) {
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
        return ref_type(footer.m_top_ref);
    }
    else {
        return to_ref(header.m_top_ref[slot_selector]);
    }
}

std::string SlabAlloc::get_file_path_for_assertions() const
{
    return m_file.get_path();
}

bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
{
    if (cfg.read_only) {
        // If the file is opened read-only, we cannot change it. This is not a problem,
        // because for a read-only file we assume that it will not change while we use it,
        // hence there will be no need to grow memory mappings.
        // This assumption obviously will not hold, if the file is shared by multiple
        // processes or threads with different opening modes.
        // Currently, there is no way to detect if this assumption is violated.
        return false;
    }
    size_t expected_size = size_t(-1);
    size_t size = static_cast<size_t>(m_file.get_size());

    // It is not safe to change the size of a file on streaming form, since the footer
    // must remain available and remain at the very end of the file.
    REALM_ASSERT(!is_file_on_streaming_form());

    // check if online compaction allows us to shrink the file:
    if (top_ref) {
        // Get the expected file size by looking up logical file size stored in top array
        constexpr size_t max_top_size = (Group::s_file_size_ndx + 1) * 8 + sizeof(Header);
        size_t top_page_base = top_ref & ~(page_size() - 1);
        size_t top_offset = top_ref - top_page_base;
        size_t map_size = std::min(max_top_size + top_offset, size - top_page_base);
        File::Map<char> map_top(m_file, top_page_base, File::access_ReadOnly, map_size, 0, m_write_observer);
        realm::util::encryption_read_barrier(map_top, top_offset, max_top_size);
        auto top_header = map_top.get_addr() + top_offset;
        auto top_data = NodeHeader::get_data_from_header(top_header);
        auto w = NodeHeader::get_width_from_header(top_header);
        auto logical_size = size_t(get_direct(top_data, w, Group::s_file_size_ndx)) >> 1;
        // make sure we're page aligned, so the code below doesn't first
        // truncate the file, then expand it again
        expected_size = round_up_to_page_size(logical_size);
    }

    // Check if we can shrink the file
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
        detach(true); // keep m_file open
        m_file.resize(expected_size);
        m_file.close();
        size = expected_size;
        return true;
    }

    // We can only safely mmap the file, if its size matches a page boundary. If not,
    // we must change the size to match before mmapping it.
    if (size != round_up_to_page_size(size)) {
        // The file size did not match a page boundary.
        // We must extend the file to a page boundary (unless already there)
        // The file must be extended to match in size prior to being mmapped,
        // as extending it after mmap has undefined behavior.
        if (cfg.session_initiator || !cfg.is_shared) {
            // We can only safely extend the file if we're the session initiator, or if
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
            // done to ensure well defined behavior for memory mappings. It does not matter,
            // that the free space management isn't informed
            size = round_up_to_page_size(size);
            detach(true); // keep m_file open
            m_file.prealloc(size);
            m_file.close();
            return true;
        }
        else {
            // Getting here, we have a file of a size that will not work, and without being
            // allowed to extend it. This should not be possible. But allowing a retry is
            // arguably better than giving up and crashing...
            throw Retry();
        }
    }
    return false;
}

ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
{
    m_cfg = cfg;
    m_write_observer = write_observer;
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    // When 'read_only' is true, this function will throw InvalidDatabase if the
    // file exists already but is empty. This can happen if another process is
    // currently creating it. Note however, that it is only legal for multiple
    // processes to access a database file concurrently if it is done via a
    // DB, and in that case 'read_only' can never be true.
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
    // session_initiator can be set *only* if we're shared.
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
                    get_file_path_for_assertions());
    // clear_file can be set *only* if we're the first session.
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
                    get_file_path_for_assertions());

    using namespace realm::util;
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
    set_read_only(cfg.read_only);
    try {
        m_file.open(path.c_str(), access, create, 0); // Throws
    }
    catch (const FileAccessError& ex) {
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
        if (ex.code() == ErrorCodes::PermissionDenied) {
            msg += util::format(". Please use a path where your app has %1 permissions.",
                                cfg.read_only ? "read" : "read-write");
        }
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
    }
    File::CloseGuard fcg(m_file);
    auto physical_file_size = m_file.get_size();
    // Note that get_size() may (will) return a different size before and after
    // the call below to set_encryption_key.
    m_file.set_encryption_key(cfg.encryption_key);

    size_t size = 0;
    // The size of a database file must not exceed what can be encoded in
    // size_t.
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
        throw InvalidDatabase("Realm file too large", path);
    if (cfg.encryption_key && size == 0 && physical_file_size != 0) {
        // The opened file holds data, but is so small it cannot have
        // been created with encryption
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
    }
    if (size == 0 || cfg.clear_file) {
        if (REALM_UNLIKELY(cfg.read_only))
            throw InvalidDatabase("Read-only access to empty Realm file", path);

        const char* data = reinterpret_cast<const char*>(&empty_file_header);
        m_file.write(data, sizeof empty_file_header); // Throws

        // Pre-alloc initial space
        size_t initial_size = page_size(); // m_initial_section_size;
        m_file.prealloc(initial_size);     // Throws

        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws

        size = initial_size;
    }
    ref_type top_ref;
    note_reader_start(this);
    util::ScopeExit reader_end_guard([this]() noexcept {
        note_reader_end(this);
    });

    try {
        // we'll read header and (potentially) footer
        File::Map<char> map_header(m_file, File::access_ReadOnly, sizeof(Header), 0, m_write_observer);
        realm::util::encryption_read_barrier(map_header, 0, sizeof(Header));
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());

        File::Map<char> map_footer;
        const StreamingFooter* footer = nullptr;
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
            size_t footer_ref = size - sizeof(StreamingFooter);
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
            size_t footer_offset = footer_ref - footer_page_base;
            map_footer = File::Map<char>(m_file, footer_page_base, File::access_ReadOnly,
                                         sizeof(StreamingFooter) + footer_offset, 0, m_write_observer);
            realm::util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
        }

        top_ref = validate_header(header, footer, size, path, cfg.encryption_key != nullptr); // Throws
        m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
        m_data = map_header.get_addr(); // <-- needed below

        if (cfg.session_initiator && is_file_on_streaming_form(*header)) {
            // Don't compare file format version fields as they are allowed to differ.
            // Also don't compare reserved fields.
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], get_file_path_for_assertions());
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie,
                            get_file_path_for_assertions());
        }
    }
    catch (const InvalidDatabase&) {
        throw;
    }
    catch (const DecryptionFailed& e) {
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
    }
    catch (const std::exception& e) {
        throw InvalidDatabase(e.what(), path);
    }
    catch (...) {
        throw InvalidDatabase("unknown error", path);
    }
    // m_data not valid at this point!
    m_baseline = 0;
    // make sure that any call to begin_read causes any slab to be placed in free
    // lists correctly
    m_free_space_state = free_space_Invalid;

    // Ensure clean up, if we need to back out:
    DetachGuard dg(*this);

    reset_free_space_tracking();
    update_reader_view(size);
    REALM_ASSERT(m_mappings.size());
    m_data = m_mappings[0].primary_mapping.get_addr();
    realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    dg.release();  // Do not detach
    fcg.release(); // Do not close
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = util::get_file_info_for_file(m_file);
#endif
    return top_ref;
}
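
// A minimal usage sketch (illustrative only; real callers are the DB/Group
// layers, and the standalone setup shown here is an assumption, though the
// Config fields are the ones consulted above):
//
//     SlabAlloc alloc;
//     SlabAlloc::Config cfg;
//     cfg.read_only = true;
//     cfg.no_create = true;
//     ref_type top_ref = alloc.attach_file("/path/to/file.realm", cfg, nullptr);
//
// On success the allocator is attached and translate(top_ref) yields the root
// node; on failure it throws and stays detached, per the ExceptionSafety note
// at the top of the function.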
139,434✔
924

925
void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
926
{
114,375✔
927
    auto header = reinterpret_cast<const Header*>(m_data);
114,375✔
928
    if (!is_file_on_streaming_form(*header))
114,375✔
929
        return;
113,979✔
930

198✔
931
    // Make sure the database is not on streaming format. If we did not do this,
198✔
932
    // a later commit would have to do it. That would require coordination with
198✔
933
    // anybody concurrently joining the session, so it seems easier to do it at
198✔
934
    // session initialization, even if it means writing the database during open.
198✔
935
    {
396✔
936
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
396✔
937
        Header& writable_header = *writable_map.get_addr();
396✔
938
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
396✔
939
        writable_header.m_top_ref[1] = top_ref;
396✔
940
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
396✔
941
        realm::util::encryption_write_barrier(writable_map, 0);
396✔
942
        writable_map.sync();
396✔
943
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
396✔
944
        writable_header.m_flags |= flags_SelectBit;
396✔
945
        realm::util::encryption_write_barrier(writable_map, 0);
396✔
946
        writable_map.sync();
396✔
947

198✔
948
        realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
396✔
949
    }
396✔
950
}
396✔
951

952
void SlabAlloc::note_reader_start(const void* reader_id)
953
{
2,982,705✔
954
#if REALM_ENABLE_ENCRYPTION
2,982,705✔
955
    if (m_realm_file_info)
2,982,705✔
956
        util::encryption_note_reader_start(*m_realm_file_info, reader_id);
1,191✔
957
#else
958
    static_cast<void>(reader_id);
959
#endif
960
}
2,982,705✔
961

962
void SlabAlloc::note_reader_end(const void* reader_id) noexcept
963
{
2,983,596✔
964
#if REALM_ENABLE_ENCRYPTION
2,983,596✔
965
    if (m_realm_file_info)
2,983,596✔
966
        util::encryption_note_reader_end(*m_realm_file_info, reader_id);
1,443✔
967
#else
968
    static_cast<void>(reader_id);
969
#endif
970
}
2,983,596✔
971

972
ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
973
{
90✔
974
    // ExceptionSafety: If this function throws, it must leave the allocator in
45✔
975
    // the detached state.
45✔
976

45✔
977
    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
90✔
978
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());
90✔
979

45✔
980
    // Verify the data structures
45✔
981
    std::string path;                                     // No path
90✔
982
    ref_type top_ref = validate_header(data, size, path); // Throws
90✔
983

45✔
984
    m_data = data;
90✔
985
    size = align_size_to_section_boundary(size);
90✔
986
    m_baseline = size;
90✔
987
    m_attach_mode = attach_UsersBuffer;
90✔
988

45✔
989
    m_translation_table_size = 1;
90✔
990
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
90✔
991
    return top_ref;
90✔
992
}
90✔
993

994
void SlabAlloc::init_in_memory_buffer()
995
{
25,284✔
996
    m_attach_mode = attach_Heap;
25,284✔
997
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
25,284✔
998
    m_data = m_virtual_file_buffer.back().addr;
25,284✔
999
    m_virtual_file_size = sizeof(empty_file_header);
25,284✔
1000
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);
25,284✔
1001

12,642✔
1002
    m_baseline = m_virtual_file_size;
25,284✔
1003
    m_translation_table_size = 1;
25,284✔
1004
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
25,284✔
1005
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
25,284✔
1006
    m_ref_translation_ptr = ref_translation_ptr;
25,284✔
1007
}
25,284✔
1008

1009
char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
1010
{
5,332,251✔
1011
    auto idx = get_section_index(ref);
5,332,251✔
1012
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
5,332,251✔
1013
    auto& buf = m_virtual_file_buffer[idx];
5,332,251✔
1014
    return buf.addr + (ref - buf.start_ref);
5,332,251✔
1015
}
5,332,251✔
1016

1017
void SlabAlloc::attach_empty()
1018
{
4,464✔
1019
    // ExceptionSafety: If this function throws, it must leave the allocator in
2,232✔
1020
    // the detached state.
2,232✔
1021

2,232✔
1022
    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
4,464✔
1023

2,232✔
1024
    m_attach_mode = attach_OwnedBuffer;
4,464✔
1025
    m_data = nullptr; // Empty buffer
4,464✔
1026

2,232✔
1027
    // Below this point (assignment to `m_attach_mode`), nothing must throw.
2,232✔
1028

2,232✔
1029
    // No ref must ever be less than the header size, so we will use that as the
2,232✔
1030
    // baseline here.
2,232✔
1031
    size_t size = align_size_to_section_boundary(sizeof(Header));
4,464✔
1032
    m_baseline = size;
4,464✔
1033
    m_translation_table_size = 1;
4,464✔
1034
    m_ref_translation_ptr = new RefTranslation[1];
4,464✔
1035
}
4,464✔
1036

1037
void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
1038
{
30✔
1039
    char buf[256];
30✔
1040
    snprintf(buf, sizeof(buf),
30✔
1041
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
30✔
1042
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
30✔
1043
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
30✔
1044
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
30✔
1045
             header.m_flags);
30✔
1046
    msg += buf;
30✔
1047
    throw InvalidDatabase(msg, path);
30✔
1048
}
30✔
1049

1050
// Note: This relies on proper mappings having been established by the caller
1051
// for both the header and the streaming footer
1052
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
1053
{
90✔
1054
    auto header = reinterpret_cast<const Header*>(data);
90✔
1055
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
90✔
1056
    return validate_header(header, footer, size, path);
90✔
1057
}
90✔
1058

1059
ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
1060
                                    const std::string& path, bool is_encrypted)
1061
{
139,584✔
1062
    // Verify that size is sane and 8-byte aligned
68,493✔
1063
    if (REALM_UNLIKELY(size < sizeof(Header)))
139,584✔
1064
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
68,520✔
1065
                              path);
54✔
1066
    if (REALM_UNLIKELY(size % 8 != 0))
139,530✔
1067
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);
68,466✔
1068

68,466✔
1069
    // First four bytes of info block is file format id
68,466✔
1070
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
139,530✔
1071
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
68,481✔
1072
        if (is_encrypted) {
30✔
1073
            // Encrypted files check the hmac on read, so there's a lot less
1074
            // which could go wrong and have us still reach this point
1075
            throw_header_exception("header has invalid mnemonic. The file does not appear to be Realm file.", *header,
×
1076
                                   path);
×
1077
        }
×
1078
        else {
30✔
1079
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
30✔
1080
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
30✔
1081
                                   *header, path);
30✔
1082
        }
30✔
1083
    }
30✔
1084

68,466✔
1085
    // Last bit in info block indicates which top_ref block is valid
68,466✔
1086
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
104,124✔
1087

68,466✔
1088
    // Top-ref must always point within buffer
68,466✔
1089
    auto top_ref = header->m_top_ref[slot_selector];
139,530✔
1090
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
139,530✔
1091
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
528✔
1092
            throw InvalidDatabase(
×
1093
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
×
1094
                path);
×
1095
        }
×
1096
        REALM_ASSERT(footer);
528✔
1097
        top_ref = footer->m_top_ref;
528✔
1098
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
528✔
1099
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
×
1100
                                               "The file is probably truncated.",
×
1101
                                               footer->m_magic_cookie),
×
1102
                                  path);
×
1103
        }
×
1104
    }
139,530✔
1105
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
139,530✔
1106
        throw_header_exception("top ref is not aligned", *header, path);
×
1107
    }
×
1108
    if (REALM_UNLIKELY(top_ref >= size)) {
139,530✔
1109
        throw_header_exception(
×
1110
            util::format(
×
1111
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
×
1112
                top_ref),
×
1113
            *header, path);
×
1114
    }
×
1115
    return ref_type(top_ref);
139,530✔
1116
}
139,530✔
1117

1118

1119
size_t SlabAlloc::get_total_size() const noexcept
1120
{
3,204,357✔
1121
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
3,204,318✔
1122
}
3,204,357✔
1123

1124

1125
void SlabAlloc::reset_free_space_tracking()
1126
{
1,536,129✔
1127
    CriticalSection cs(changes);
1,536,129✔
1128
    if (is_free_space_clean())
1,536,129✔
1129
        return;
13,863✔
1130

764,373✔
1131
    // Free all scratch space (done after all data has
764,373✔
1132
    // been commited to persistent space)
764,373✔
1133
    m_free_read_only.clear();
1,522,266✔
1134

764,373✔
1135
    // release slabs.. keep the initial allocation if it's a minimal allocation,
764,373✔
1136
    // otherwise release it as well. This saves map/unmap for small transactions.
764,373✔
1137
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
1,531,077✔
1138
        auto& last_slab = m_slabs.back();
8,811✔
1139
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
8,811✔
1140
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
8,811✔
1141
        --m_translation_table_size;
8,811✔
1142
        m_slabs.pop_back();
8,811✔
1143
    }
8,811✔
1144
    rebuild_freelists_from_slab();
1,522,266✔
1145
    m_free_space_state = free_space_Clean;
1,522,266✔
1146
    m_commit_size = 0;
1,522,266✔
1147
}
1,522,266✔
1148

1149
inline bool randomly_false_in_debug(bool x)
1150
{
×
1151
#ifdef REALM_DEBUG
×
1152
    if (x)
×
1153
        return (std::rand() & 1);
×
1154
#endif
×
1155
    return x;
×
1156
}
×


/*
  Memory mapping

  To make ref->ptr translation fast while also avoiding having to memory map the entire file
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
  essential to map the file in evenly sized sections.

  These sections must be large enough to hold one or more of the largest arrays, which can be up
  to 16MB. You can only mmap file space which has been allocated to a file. If you mmap a range
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
  We don't want to extend the file in increments as large as the section size.

  As the file grows, we grow the mapping by creating a new, larger one, which replaces the
  old one in the mapping table. However, we must keep the old mapping open, because older
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
  and only cleaned out once we know that no transaction can refer to them anymore.

  Interaction with encryption

  When encryption is enabled, the memory mapping is to temporary memory, not the file.
  The binding to the file is done by software. This allows us to "cheat" and allocate
  entire sections. With encryption, it doesn't matter if the mapped memory logically
  extends beyond the end of the file, because it will not be accessed.

  Growing/changing the mapping table

  There are two mapping tables:

  * m_mappings: This is the "source of truth" about what the current mapping is.
    It is only accessed under lock.
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
    mostly lock-free fashion from the translate function. Because of the lock-free operation this
    table can only be extended. Only selected members in each entry can be changed.
    See RefTranslation in alloc.hpp for more details.
    The fast mapping also maps the slab area used for allocations - as mappings are added,
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
    to memory allocation and release, which is already serialized (since write transactions are
    single threaded).

  When m_mappings is changed due to an extend operation changing a mapping, or when
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:

  * A new fast mapping table is created. The old one is not modified.
  * The old one is held in a waiting area until it is no longer relevant, because no
    live transaction can refer to it anymore.
 */
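
// The fixed power-of-two section size described above makes ref->section translation
// a shift and a mask. A self-contained sketch of the idea; the 26-bit shift and all
// names are illustrative assumptions, not the allocator's actual constants, and the
// block is kept out of the build:
#if 0
#include <cstddef>

constexpr int example_section_shift = 26; // hypothetical: 64 MB sections
constexpr size_t example_section_size = size_t(1) << example_section_shift;

// Which mapping covers this ref:
size_t example_section_index(size_t ref)
{
    return ref >> example_section_shift;
}

// One base address per section; the offset within the section is the low bits:
char* example_translate(char* const* mapping_table, size_t ref)
{
    return mapping_table[example_section_index(ref)] + (ref & (example_section_size - 1));
}
#endif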
void SlabAlloc::update_reader_view(size_t file_size)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
    if (file_size <= old_baseline) {
        schedule_refresh_of_outdated_encrypted_pages();
        return;
    }

    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
    bool replace_last_mapping = false;
    size_t old_num_mappings = get_section_index(old_slab_base);

    if (!is_in_memory()) {
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
                        get_file_path_for_assertions());
        REALM_ASSERT_DEBUG(is_free_space_clean());

        // Create the new mappings we need to cover the new size. We don't mutate
        // any of the member variables until we've successfully created all of the
        // mappings, so that we leave things in a consistent state if one of them
        // hits an allocation failure.

        std::vector<MapEntry> new_mappings;
        REALM_ASSERT(m_mappings.size() == old_num_mappings);

        {
            // If the old slab base was greater than the old baseline then the final
            // mapping was a partial section and we need to replace it with a larger
            // mapping.
            if (old_baseline < old_slab_base) {
                // old_slab_base should be 0 if we had no mappings previously
                REALM_ASSERT(old_num_mappings > 0);
                // try to extend the old mapping in-place instead of replacing it.
                MapEntry& cur_entry = m_mappings.back();
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
                    replace_last_mapping = true;
                    --old_num_mappings;
                }
            }

            // Create new mappings covering from the end of the last complete
            // section to the end of the new file size.
            const auto new_slab_base = align_size_to_section_boundary(file_size);
            const size_t num_mappings = get_section_index(new_slab_base);
            new_mappings.reserve(num_mappings - old_num_mappings);
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
                const size_t section_start_offset = get_section_base(k);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (section_size == (1 << section_shift)) {
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
                                                                  section_size, 0, m_write_observer)});
                }
                else {
                    new_mappings.push_back({util::File::Map<char>()});
                    auto& mapping = new_mappings.back().primary_mapping;
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
                                                        section_start_offset, m_write_observer);
                    if (reserved) {
                        // if reservation is supported, the first attempt at extending must succeed
                        if (!mapping.try_extend_to(section_size))
                            throw std::bad_alloc();
                    }
                    else {
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size, 0,
                                                                section_start_offset, m_write_observer);
                    }
                }
            }
        }

        // Now that we've successfully created our mappings, update our member
        // variables (and assume that resizing a simple vector won't produce memory
        // allocation failures, unlike 64 MB mmaps).
        if (replace_last_mapping) {
            MapEntry& cur_entry = m_mappings.back();
            // We should not have a xover mapping here, because that would mean
            // that there was already something mapped after the last section.
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
            // save the old mapping/keep it open
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
            m_mappings.pop_back();
            m_mapping_version++;
        }

        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
    }

    m_baseline.store(file_size, std::memory_order_relaxed);

    const size_t ref_start = align_size_to_section_boundary(file_size);
    const size_t ref_displacement = ref_start - old_slab_base;
    if (ref_displacement > 0) {
        // Rebase slabs, as m_baseline is now bigger than old_slab_base
        for (auto& e : m_slabs) {
            e.ref_end += ref_displacement;
        }
    }

    rebuild_freelists_from_slab();

    // Build the fast path mapping

    // The fast path mapping is an array which is used from multiple threads
    // without locking - see translate().

    // Addition of a new mapping may require a completely new fast mapping table.
    //
    // Because it is used in a multithreaded scenario, the old mappings must be kept
    // open until the realm version for which they were established has been closed/detached.
    //
    // This assumes that only write transactions call do_alloc() or do_free() or need to
    // translate refs in the slab area, and that all these uses are serialized, whether
    // that is achieved by being single threaded, interlocked or run from a sequential
    // scheduling queue.
    //
    rebuild_translations(replace_last_mapping, old_num_mappings);

    schedule_refresh_of_outdated_encrypted_pages();
}
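
// update_reader_view() publishes a new fast mapping table through
// rebuild_translations(), and purge_old_mappings() below retires the old ones.
// A minimal, self-contained sketch of that read-copy-update discipline; all names
// (ExampleTable, g_current, ...) are illustrative, not the real members, and the
// block is kept out of the build:
#if 0
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <vector>

struct ExampleTable {
    std::vector<char*> entries;
};

std::atomic<ExampleTable*> g_current{nullptr}; // readers load this without locking

struct RetiredTable {
    uint64_t replaced_at_version;
    std::unique_ptr<ExampleTable> table;
};
std::vector<RetiredTable> g_retired; // writer-side only, protected by the writer's lock

// Writer: copy the current table, extend the copy, publish it atomically, and
// retire the old table tagged with the youngest live version.
void example_publish(uint64_t youngest_live_version, char* new_entry)
{
    ExampleTable* old_table = g_current.load(std::memory_order_relaxed);
    auto new_table = std::make_unique<ExampleTable>();
    if (old_table)
        new_table->entries = old_table->entries; // copy: the old table is never mutated
    new_table->entries.push_back(new_entry);
    g_current.store(new_table.release(), std::memory_order_release);
    if (old_table)
        g_retired.push_back({youngest_live_version, std::unique_ptr<ExampleTable>(old_table)});
}

// Cleanup: drop retired tables once no live transaction can still read them.
void example_purge(uint64_t oldest_live_version)
{
    auto pred = [&](const RetiredTable& r) {
        return r.replaced_at_version < oldest_live_version;
    };
    g_retired.erase(std::remove_if(g_retired.begin(), g_retired.end(), pred), g_retired.end());
}
#endif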


void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
{
#if REALM_ENABLE_ENCRYPTION
    // callers must already hold m_mapping_mutex
    for (auto& e : m_mappings) {
        if (auto m = e.primary_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
        if (auto m = e.xover_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
    }
    // unsafe to do outside writing thread: verify();
#endif // REALM_ENABLE_ENCRYPTION
}

size_t SlabAlloc::get_allocated_size() const noexcept
{
    size_t sz = 0;
    for (const auto& s : m_slabs)
        sz += s.size;
    return sz;
}

void SlabAlloc::extend_fast_mapping_with_slab(char* address)
{
    ++m_translation_table_size;
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
        new_fast_mapping[i] = m_ref_translation_ptr[i];
    }
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
                                    m_ref_translation_ptr.load());
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
    // Memory ranges with slab (working memory) can never have arrays that straddle a boundary,
    // so optimize by clamping the lowest possible xover offset to the end of the section.
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
    m_ref_translation_ptr = new_fast_mapping.release();
}

void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
{
    size_t free_space_size = m_slabs.size();
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
    if (m_translation_table_size < num_mappings + free_space_size) {
        requires_new_translation = true;
    }
    RefTranslation* new_translation_table = m_ref_translation_ptr;
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
    if (requires_new_translation) {
        // we need a new translation table, but must preserve the old one, as
        // translations using it may be in progress concurrently
        if (m_translation_table_size)
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
                                            m_ref_translation_ptr.load());
        m_translation_table_size = num_mappings + free_space_size;
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
        new_translation_table = new_translation_table_owner.get();
        old_num_sections = 0;
    }
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
        if (is_in_memory()) {
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
        }
        else {
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
#if REALM_ENABLE_ENCRYPTION
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
#endif
        }
        REALM_ASSERT(new_translation_table[i].mapping_addr);
        // We don't copy over data for the cross over mapping. If the mapping is needed,
        // copying will happen on demand (in get_or_add_xover_mapping).
        // Note: that may never be needed, because if the array that needed the original cross over
        // mapping is freed, any new array allocated at the same position will NOT need a cross
        // over mapping, but will just use the primary mapping.
    }
    for (size_t k = 0; k < free_space_size; ++k) {
        char* base = m_slabs[k].addr;
        REALM_ASSERT(base);
        new_translation_table[num_mappings + k].mapping_addr = base;
    }

    // This will either be null or the same as new_translation_table, which is about to become owned by
    // m_ref_translation_ptr.
    (void)new_translation_table_owner.release();

    m_ref_translation_ptr = new_translation_table;
}

void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
{
    auto _page_size = page_size();
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
        // some other thread already added a mapping.
        // it MUST have been for the exact same address:
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
        return;
    }
    MapEntry* map_entry = &m_mappings[index];
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
    if (!map_entry->xover_mapping.is_attached()) {
        // Create a xover mapping
        auto file_offset = get_section_base(index) + offset;
        auto end_offset = file_offset + size;
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
        auto minimal_mapping_size = end_offset - mapping_file_offset;
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size, 0,
                                      m_write_observer);
        map_entry->xover_mapping = std::move(mapping);
    }
    txl.xover_mapping_base = offset & ~(_page_size - 1);
#if REALM_ENABLE_ENCRYPTION
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
#endif
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
}
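
// The xover mapping maps only the pages containing an array that straddles a section
// boundary. A sketch of the alignment arithmetic used above, with an illustrative
// struct; the example values assume 4096-byte pages. Kept out of the build:
#if 0
#include <cstddef>

struct ExampleXoverRange {
    size_t mapping_file_offset;  // page-aligned start of the mapping
    size_t minimal_mapping_size; // bytes needed to reach the end of the array
};

ExampleXoverRange example_xover_range(size_t file_offset, size_t size, size_t page_size)
{
    // round the start down to a page boundary, then map through the end of the array
    size_t mapping_file_offset = file_offset & ~(page_size - 1);
    return {mapping_file_offset, file_offset + size - mapping_file_offset};
}
// e.g. example_xover_range(10000, 500, 4096) maps from offset 8192 for 2308 bytes.
#endif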

void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
{
    // Verify that each old ref translation pointer still points to a valid
    // thing that we haven't released yet.
#if REALM_DEBUG
    std::unordered_set<const char*> mappings;
    for (auto& m : m_old_mappings) {
        REALM_ASSERT(m.mapping.is_attached());
        mappings.insert(m.mapping.get_addr());
    }
    for (auto& m : m_mappings) {
        REALM_ASSERT(m.primary_mapping.is_attached());
        mappings.insert(m.primary_mapping.get_addr());
        if (m.xover_mapping.is_attached())
            mappings.insert(m.xover_mapping.get_addr());
    }
    for (auto& m : m_virtual_file_buffer) {
        mappings.insert(m.addr);
    }
    if (m_data)
        mappings.insert(m_data);
    for (auto& t : m_old_translations) {
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
                        youngest_live_version, t.replaced_at_version);
        if (nonempty_attachment()) {
            for (size_t i = 0; i < t.translation_count; ++i)
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
        }
    }
#else
    static_cast<void>(youngest_live_version);
#endif
}


void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    verify_old_translations(youngest_live_version);

    auto pred = [=](auto& oldie) {
        return oldie.replaced_at_version < oldest_live_version;
    };
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
                             m_old_translations.end());
    m_youngest_live_version = youngest_live_version;
    verify_old_translations(youngest_live_version);
}

void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
{
    m_youngest_live_version = currently_live_version;
}

const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
{
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();
    return m_free_read_only;
}


size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
{
    size_t end_of_block = start_pos + free_chunk_size;
    size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
        if (alloc_pos + request_size <= next_section_boundary) {
            return alloc_pos;
        }
        alloc_pos = next_section_boundary;
    }
    return 0;
}
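
// Standalone illustration of the boundary-avoiding search above: a request that
// would straddle a section boundary is pushed to the start of the next section and
// retried. The 64 MB section size is a hypothetical stand-in for the real constant,
// and the block is kept out of the build:
#if 0
#include <cstddef>

constexpr size_t example_section = size_t(1) << 26;

size_t example_find(size_t start_pos, size_t free_chunk_size, size_t request_size)
{
    size_t end_of_block = start_pos + free_chunk_size;
    size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        // base of the section following alloc_pos
        size_t next_boundary = (alloc_pos / example_section + 1) * example_section;
        if (alloc_pos + request_size <= next_boundary)
            return alloc_pos; // fits without straddling a boundary
        alloc_pos = next_boundary; // skip ahead and retry
    }
    return 0; // no fit within this free chunk
}
#endif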


void SlabAlloc::resize_file(size_t new_file_size)
{
    if (m_attach_mode == attach_SharedFile) {
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
        m_file.prealloc(new_file_size); // Throws
        // resizing is done based on the logical file size. It is ok for the file
        // to actually be bigger, but never smaller.
        REALM_ASSERT(new_file_size <= static_cast<size_t>(m_file.get_size()));

        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws
    }
    else {
        size_t current_size = 0;
        for (auto& b : m_virtual_file_buffer) {
            current_size += b.size;
        }
        if (new_file_size > current_size) {
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
        }
        m_virtual_file_size = new_file_size;
    }
}

#ifdef REALM_DEBUG
void SlabAlloc::reserve_disk_space(size_t size)
{
    if (size != round_up_to_page_size(size))
        size = round_up_to_page_size(size);
    m_file.prealloc(size); // Throws

    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
    if (!disable_sync)
        m_file.sync(); // Throws
}
#endif

void SlabAlloc::verify() const
{
#ifdef REALM_DEBUG
    if (!m_slabs.empty()) {
        // Make sure that all free blocks are within a slab. This is done
        // implicitly by using for_all_free_entries()
        size_t first_possible_ref = m_baseline;
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
        for_all_free_entries([&](size_t ref, size_t size) {
            REALM_ASSERT(ref >= first_possible_ref);
            REALM_ASSERT(ref + size <= first_impossible_ref);
            first_possible_ref = ref;
        });
    }
#endif
}

#ifdef REALM_DEBUG

bool SlabAlloc::is_all_free() const
{
    // Verify that slabs contain only free space.
    // This is equivalent to each slab holding BetweenBlocks only at the ends.
    for (const auto& e : m_slabs) {
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
        REALM_ASSERT(first->block_before_size == 0);
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
        REALM_ASSERT(last->block_after_size == 0);
        if (first->block_after_size != last->block_before_size)
            return false;
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
        range -= sizeof(BetweenBlocks);
        // the size of the free area must match the distance between the two BetweenBlocks:
        if (range != first->block_after_size)
            return false;
    }
    return true;
}

1605

1606
// LCOV_EXCL_START
1607
void SlabAlloc::print() const
1608
{
×
1609
    /* TODO
1610
     *
1611

1612
    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;
1613

1614
    size_t free = 0;
1615
    for (const auto& free_block : m_free_space) {
1616
        free += free_block.size;
1617
    }
1618

1619
    size_t allocated = allocated_for_slabs - free;
1620
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";
1621

1622
    if (!m_slabs.empty()) {
1623
        std::cout << "Slabs: ";
1624
        ref_type first_ref = m_baseline;
1625

1626
        for (const auto& slab : m_slabs) {
1627
            if (&slab != &m_slabs.front())
1628
                std::cout << ", ";
1629

1630
            ref_type last_ref = slab.ref_end - 1;
1631
            size_t size = slab.ref_end - first_ref;
1632
            void* addr = slab.addr;
1633
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
1634
            first_ref = slab.ref_end;
1635
        }
1636
        std::cout << "\n";
1637
    }
1638

1639
    if (!m_free_space.empty()) {
1640
        std::cout << "FreeSpace: ";
1641
        for (const auto& free_block : m_free_space) {
1642
            if (&free_block != &m_free_space.front())
1643
                std::cout << ", ";
1644

1645
            ref_type last_ref = free_block.ref + free_block.size - 1;
1646
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
1647
        }
1648
        std::cout << "\n";
1649
    }
1650
    if (!m_free_read_only.empty()) {
1651
        std::cout << "FreeSpace (ro): ";
1652
        for (const auto& free_block : m_free_read_only) {
1653
            if (&free_block != &m_free_read_only.front())
1654
                std::cout << ", ";
1655

1656
            ref_type last_ref = free_block.ref + free_block.size - 1;
1657
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
1658
        }
1659
        std::cout << "\n";
1660
    }
1661
    std::cout << std::flush;
1662
    */
1663
}
×
1664
// LCOV_EXCL_STOP
1665

1666
#endif // REALM_DEBUG