
realm / realm-core / jorgen.edelbo_337

03 Jul 2024 01:04PM UTC coverage: 90.864% (-0.1%) from 90.984%

Pull Request #7826: Merge Next major
Evergreen build jorgen.edelbo_337
nicola-cab: Merge branch 'master' of github.com:realm/realm-core into next-major

102968 of 181176 branches covered (56.83%)
3131 of 3738 new or added lines in 54 files covered (83.76%)
106 existing lines in 23 files now uncovered
217725 of 239616 relevant lines covered (90.86%)
6844960.2 hits per line

Source File: /src/realm/alloc_slab.cpp (90.61% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <algorithm>
#include <atomic>
#include <cinttypes>
#include <cstring>
#include <exception>
#include <memory>
#include <type_traits>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/errno.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/util/terminate.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/disable_sync_to_disk.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = section_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


inline constexpr SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};


void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}

inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

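// Editor's note (not part of the original source): a Slab is heap-backed
// scratch memory for data created or modified in the current write
// transaction. As the comments in detach() and rebuild_freelists_from_slab()
// below indicate, slabs are logically placed after the end of the mapped
// file, so refs into slab space are always >= m_baseline and never collide
// with refs into the file itself.
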
void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr.exchange(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations,"
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined,"
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}


MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeListEntry)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate_in_slab(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}

245
{
4,432,737✔
246
    auto bb = bb_before(entry);
4,432,737✔
247
    if (bb->block_before_size <= 0)
4,432,737✔
248
        return nullptr; // no prev block, or it is in use
3,487,710✔
249
    return block_before(bb);
945,027✔
250
}
4,432,737✔
251

252
SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
253
{
4,433,241✔
254
    auto bb = bb_after(entry);
4,433,241✔
255
    if (bb->block_after_size <= 0)
4,433,241✔
256
        return nullptr; // no next block, or it is in use
1,543,503✔
257
    return block_after(bb);
2,889,738✔
258
}
4,433,241✔
259

260
SlabAlloc::FreeList SlabAlloc::find(int size)
261
{
28,942,917✔
262
    FreeList retval;
28,942,917✔
263
    retval.it = m_block_map.lower_bound(size);
28,942,917✔
264
    if (retval.it != m_block_map.end()) {
28,942,917✔
265
        retval.size = retval.it->first;
28,856,928✔
266
    }
28,856,928✔
267
    else {
85,989✔
268
        retval.size = 0;
85,989✔
269
    }
85,989✔
270
    return retval;
28,942,917✔
271
}
28,942,917✔
272

273
SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
274
{
28,499,376✔
275
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
28,499,376✔
276
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
39,692,589✔
277
        ++hint.it;
11,193,213✔
278
    if (hint.it == m_block_map.end())
28,499,376✔
279
        hint.size = 0; // indicate "not found"
83,400✔
280
    return hint;
28,499,376✔
281
}
28,499,376✔
282

283
SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
284
{
28,862,421✔
285
    FreeBlock* retval = list.it->second;
28,862,421✔
286
    FreeBlock* header = retval->next;
28,862,421✔
287
    if (header == retval)
28,862,421✔
288
        m_block_map.erase(list.it);
28,688,355✔
289
    else
174,066✔
290
        list.it->second = header;
174,066✔
291
    retval->unlink();
28,862,421✔
292
    return retval;
28,862,421✔
293
}
28,862,421✔
294

295
void SlabAlloc::FreeBlock::unlink()
296
{
32,690,418✔
297
    REALM_ASSERT_DEBUG(next != nullptr && prev != nullptr);
32,690,418✔
298
    auto _next = next;
32,690,418✔
299
    auto _prev = prev;
32,690,418✔
300
    _next->prev = prev;
32,690,418✔
301
    _prev->next = next;
32,690,418✔
302
    clear_links();
32,690,418✔
303
}
32,690,418✔
304

305
void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
306
{
3,835,662✔
307
    int size = bb_before(entry)->block_after_size;
3,835,662✔
308
    auto it = m_block_map.find(size);
3,835,662✔
309
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
3,835,662✔
310
    auto header = it->second;
3,835,662✔
311
    if (header == entry) {
3,835,662✔
312
        header = entry->next;
3,556,779✔
313
        if (header == entry)
3,556,779✔
314
            m_block_map.erase(it);
2,036,715✔
315
        else
1,520,064✔
316
            it->second = header;
1,520,064✔
317
    }
3,556,779✔
318
    entry->unlink();
3,835,662✔
319
}
3,835,662✔
320

321
void SlabAlloc::push_freelist_entry(FreeBlock* entry)
322
{
33,598,299✔
323
    int size = bb_before(entry)->block_after_size;
33,598,299✔
324
    FreeBlock* header;
33,598,299✔
325
    auto it = m_block_map.find(size);
33,598,299✔
326
    if (it != m_block_map.end()) {
33,598,299✔
327
        header = it->second;
1,998,885✔
328
        it->second = entry;
1,998,885✔
329
        entry->next = header;
1,998,885✔
330
        entry->prev = header->prev;
1,998,885✔
331
        entry->prev->next = entry;
1,998,885✔
332
        entry->next->prev = entry;
1,998,885✔
333
    }
1,998,885✔
334
    else {
31,599,414✔
335
        header = nullptr;
31,599,414✔
336
        m_block_map[size] = entry;
31,599,414✔
337
        entry->next = entry->prev = entry;
31,599,414✔
338
    }
31,599,414✔
339
}
33,598,299✔
340

341
void SlabAlloc::mark_freed(FreeBlock* entry, int size)
342
{
4,430,385✔
343
    auto bb = bb_before(entry);
4,430,385✔
344
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
4,430,385✔
345
    auto alloc_size = -bb->block_after_size;
4,430,385✔
346
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
4,430,385✔
347
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
4,430,385✔
348
                    get_file_path_for_assertions());
4,430,385✔
349
    bb->block_after_size = alloc_size;
4,430,385✔
350
    bb = bb_after(entry);
4,430,385✔
351
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
4,430,385✔
352
    REALM_ASSERT(-bb->block_before_size == alloc_size);
4,430,385✔
353
    bb->block_before_size = alloc_size;
4,430,385✔
354
}
4,430,385✔
355

356
void SlabAlloc::mark_allocated(FreeBlock* entry)
357
{
28,940,220✔
358
    auto bb = bb_before(entry);
28,940,220✔
359
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
28,940,220✔
360
    auto bb2 = bb_after(entry);
28,940,220✔
361
    bb->block_after_size = 0 - bb->block_after_size;
28,940,220✔
362
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
28,940,220✔
363
    bb2->block_before_size = 0 - bb2->block_before_size;
28,940,220✔
364
}
28,940,220✔
365

366
SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
367
{
28,942,095✔
368
    FreeList list = find(size);
28,942,095✔
369
    if (list.found_exact(size)) {
28,942,095✔
370
        return pop_freelist_entry(list);
448,350✔
371
    }
448,350✔
372
    // no exact matches.
373
    list = find_larger(list, size);
28,493,745✔
374
    FreeBlock* block;
28,493,745✔
375
    if (list.found_something()) {
28,493,745✔
376
        block = pop_freelist_entry(list);
28,413,924✔
377
    }
28,413,924✔
378
    else {
79,821✔
379
        block = grow_slab(size);
79,821✔
380
    }
79,821✔
381
    FreeBlock* remaining = break_block(block, size);
28,493,745✔
382
    if (remaining)
28,493,745✔
383
        push_freelist_entry(remaining);
28,491,282✔
384
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
28,493,745✔
385
    const auto block_before = bb_before(block);
28,493,745✔
386
    REALM_ASSERT_DEBUG(block_before && block_before->block_after_size >= size);
28,493,745✔
387
    const auto after_block_size = size_from_block(block);
28,493,745✔
388
    REALM_ASSERT_DEBUG(after_block_size >= size);
28,493,745✔
389
    return block;
28,493,745✔
390
}
28,942,095✔
391

392
SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
393
{
764,436✔
394
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
764,436✔
395
    bb->block_before_size = 0;
764,436✔
396
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
764,436✔
397
    bb->block_after_size = block_size;
764,436✔
398
    auto entry = block_after(bb);
764,436✔
399
    entry->clear_links();
764,436✔
400
    entry->ref = ref_start + sizeof(BetweenBlocks);
764,436✔
401
    bb = bb_after(entry);
764,436✔
402
    bb->block_before_size = block_size;
764,436✔
403
    bb->block_after_size = 0;
764,436✔
404
    return entry;
764,436✔
405
}
764,436✔
406

407
void SlabAlloc::clear_freelists()
408
{
941,058✔
409
    m_block_map.clear();
941,058✔
410
}
941,058✔
411

412
void SlabAlloc::rebuild_freelists_from_slab()
413
{
835,296✔
414
    clear_freelists();
835,296✔
415
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
835,296✔
416
    for (const auto& e : m_slabs) {
835,296✔
417
        FreeBlock* entry = slab_to_entry(e, ref_start);
681,036✔
418
        push_freelist_entry(entry);
681,036✔
419
        ref_start = align_size_to_section_boundary(e.ref_end);
681,036✔
420
    }
681,036✔
421
}
835,296✔
422

423
SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
424
{
28,494,237✔
425
    int size = size_from_block(block);
28,494,237✔
426
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
28,494,237✔
427
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
28,494,237✔
428
        return nullptr;
6✔
429
    bb_after(block)->block_before_size = remaining_size;
28,494,231✔
430
    bb_before(block)->block_after_size = new_size;
28,494,231✔
431
    auto bb_between = bb_after(block);
28,494,231✔
432
    bb_between->block_before_size = new_size;
28,494,231✔
433
    bb_between->block_after_size = remaining_size;
28,494,231✔
434
    FreeBlock* remaining_block = block_after(bb_between);
28,494,231✔
435
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
28,494,231✔
436
    remaining_block->clear_links();
28,494,231✔
437
    block->clear_links();
28,494,231✔
438
    return remaining_block;
28,494,231✔
439
}
28,494,237✔
440

441
SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
442
{
3,836,004✔
443
    int size_first = size_from_block(first);
3,836,004✔
444
    int size_last = size_from_block(last);
3,836,004✔
445
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
3,836,004✔
446
    bb_before(first)->block_after_size = new_size;
3,836,004✔
447
    bb_after(last)->block_before_size = new_size;
3,836,004✔
448
    return first;
3,836,004✔
449
}
3,836,004✔
450

451
SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
452
{
83,400✔
453
    // Allocate new slab.
454
    // - Always allocate at least 128K. This is also the amount of
455
    //   memory that we allow the slab allocator to keep between
456
    //   transactions. Allowing it to keep a small amount between
457
    //   transactions makes very small transactions faster by avoiding
458
    //   repeated unmap/mmap system calls.
459
    // - When allocating, allocate as much as we already have, but
460
    // - Never allocate more than a full section (64MB). This policy
461
    //   leads to gradual allocation of larger and larger blocks until
462
    //   we reach allocation of entire sections.
463
    size += 2 * sizeof(BetweenBlocks);
83,400✔
464
    size_t new_size = minimal_alloc;
83,400✔
465
    while (new_size < uint64_t(size))
118,458✔
466
        new_size += minimal_alloc;
35,058✔
467
    size_t already_allocated = get_allocated_size();
83,400✔
468
    if (new_size < already_allocated)
83,400✔
469
        new_size = already_allocated;
3,372✔
470
    if (new_size > maximal_alloc)
83,400✔
471
        new_size = maximal_alloc;
18✔
472

473
    ref_type ref;
83,400✔
474
    if (m_slabs.empty()) {
83,400✔
475
        ref = m_baseline.load(std::memory_order_relaxed);
73,029✔
476
    }
73,029✔
477
    else {
10,371✔
478
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
479
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
10,371✔
480
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
10,371✔
481
        ref = curr_ref_end;
10,371✔
482
    }
10,371✔
483
    ref = align_size_to_section_boundary(ref);
83,400✔
484
    size_t ref_end = ref;
83,400✔
485
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
83,400✔
486
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
×
487
                                      util::to_string(new_size));
×
488
    }
×
489

490
    REALM_ASSERT(matches_section_boundary(ref));
83,400✔
491

492
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
83,400✔
493
    // Create new slab and add to list of slabs
494
    m_slabs.emplace_back(ref_end, new_size); // Throws
83,400✔
495
    const Slab& slab = m_slabs.back();
83,400✔
496
    extend_fast_mapping_with_slab(slab.addr);
83,400✔
497

498
    // build a single block from that entry
499
    return slab_to_entry(slab, ref);
83,400✔
500
}
83,400✔
501

502

503
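// Editor's note (not part of the original source): with the policy above,
// and assuming minimal_alloc is the 128K mentioned in the comment and
// maximal_alloc one full 64MB section, a long write transaction would grow
// its scratch space roughly as 128K, 256K, 512K, ... doubling via the
// "allocate as much as we already have" rule until the per-slab cap of one
// section is reached. The concrete constants live elsewhere (alloc_slab.hpp),
// so treat the numbers here as an illustration, not a specification.
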
void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}

void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}

size_t SlabAlloc::consolidate_free_read_only()
{
    CriticalSection cs(changes);
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    return m_free_read_only.size();
}


MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
{
    REALM_ASSERT_DEBUG(translate(ref) == addr);
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8

    // Possible future enhancement: check if we can extend current space instead
    // of unconditionally allocating new space. In that case, remember to
    // check whether m_free_space_state == free_state_Invalid. Also remember to
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.

    // Allocate new space
    MemRef new_mem = do_alloc(new_size); // Throws

    // Copy existing segment
    char* new_addr = new_mem.get_addr();
    realm::safe_copy_n(addr, old_size, new_addr);

    // Add old segment to freelist
    do_free(ref, addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out)) {
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
                  << " new_size: " << new_size << "\n";
    }
#endif // REALM_DEBUG

    return new_mem;
}


char* SlabAlloc::do_translate(ref_type) const noexcept
{
    REALM_ASSERT(false); // never come here
    return nullptr;
}


int SlabAlloc::get_committed_file_format_version() noexcept
{
    {
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
        if (m_mappings.size()) {
            // if we have mapped a file, m_mappings will have at least one mapping and
            // the first will be to the start of the file. Don't come here, if we're
            // just attaching a buffer. They don't have mappings.
            util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
        }
    }
    const Header& header = *reinterpret_cast<const Header*>(m_data);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    int file_format_version = int(header.m_file_format[slot_selector]);
    return file_format_version;
}

bool SlabAlloc::is_file_on_streaming_form(const Header& header)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
}

ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    const Header& header = reinterpret_cast<const Header&>(*buffer);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    if (is_file_on_streaming_form(header)) {
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
        return ref_type(footer.m_top_ref);
    }
    else {
        return to_ref(header.m_top_ref[slot_selector]);
    }
}

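// Editor's note (not part of the original source): the header handling above,
// and validate_header() further below, imply the following on-disk layout. A
// Realm file starts with a Header holding two top_ref slots, the
// 'T','-','D','B' mnemonic, two file-format slots and a flags field whose
// least significant bit (flags_SelectBit) selects which top_ref/file-format
// slot is current. A file in "streaming form" (produced by a one-shot writer)
// marks top_ref slot 0 with the sentinel 0xFFFFFFFFFFFFFFFF and stores the
// real top ref in a StreamingFooter at the very end of the file, guarded by a
// magic cookie; convert_from_streaming_form() later copies that ref into slot
// 1 and flips the select bit. This summary is inferred from the code in this
// file.
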
std::string SlabAlloc::get_file_path_for_assertions() const
{
    return m_file.get_path();
}

bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
{
    if (cfg.read_only) {
        // If the file is opened read-only, we cannot change it. This is not a problem,
        // because for a read-only file we assume that it will not change while we use it,
        // hence there will be no need to grow memory mappings.
        // This assumption obviously will not hold if the file is shared by multiple
        // processes or threads with different opening modes.
        // Currently, there is no way to detect if this assumption is violated.
        return false;
    }
    size_t expected_size = size_t(-1);
    size_t size = static_cast<size_t>(m_file.get_size());

    // It is not safe to change the size of a file on streaming form, since the footer
    // must remain available and remain at the very end of the file.
    REALM_ASSERT(!is_file_on_streaming_form());

    // check if online compaction allows us to shrink the file:
    if (top_ref) {
        // Get the expected file size by looking up logical file size stored in top array
        Array top(*this);
        top.init_from_ref(top_ref);
        size_t logical_size = Group::get_logical_file_size(top);
        // make sure we're page aligned, so the code below doesn't first
        // truncate the file, then expand it again
        expected_size = round_up_to_page_size(logical_size);
    }

    // Check if we can shrink the file
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
        detach(true); // keep m_file open
        m_file.resize(expected_size);
        m_file.close();
        return true;
    }

    // We can only safely mmap the file, if its size matches a page boundary. If not,
    // we must change the size to match before mmaping it.
    if (size != round_up_to_page_size(size)) {
        // The file size did not match a page boundary.
        // We must extend the file to a page boundary (unless already there)
        // The file must be extended to match in size prior to being mmapped,
        // as extending it after mmap has undefined behavior.
        if (cfg.session_initiator || !cfg.is_shared) {
            // We can only safely extend the file if we're the session initiator, or if
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
            // done to ensure well defined behavior for memory mappings. It does not matter
            // that the free space management isn't informed
            size = round_up_to_page_size(size);
            detach(true); // keep m_file open
            m_file.prealloc(size);
            m_file.close();
            return true;
        }
        else {
            // Getting here, we have a file of a size that will not work, and without being
            // allowed to extend it. This should not be possible. But allowing a retry is
            // arguably better than giving up and crashing...
            throw Retry();
        }
    }
    return false;
}

ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
{
    m_cfg = cfg;
    m_write_observer = write_observer;
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    // When 'read_only' is true, this function will throw InvalidDatabase if the
    // file exists already but is empty. This can happen if another process is
    // currently creating it. Note however, that it is only legal for multiple
    // processes to access a database file concurrently if it is done via a
    // DB, and in that case 'read_only' can never be true.
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
    // session_initiator can be set *only* if we're shared.
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
                    get_file_path_for_assertions());
    // clear_file can be set *only* if we're the first session.
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
                    get_file_path_for_assertions());

    using namespace realm::util;
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
    set_read_only(cfg.read_only);
    try {
        m_file.open(path.c_str(), access, create, 0); // Throws
    }
    catch (const FileAccessError& ex) {
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
        if (ex.code() == ErrorCodes::PermissionDenied) {
            msg += util::format(". Please use a path where your app has %1 permissions.",
                                cfg.read_only ? "read" : "read-write");
        }
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
    }
    File::CloseGuard fcg(m_file);
    auto physical_file_size = m_file.get_size();
    // Note that get_size() may (will) return a different size before and after
    // the call below to set_encryption_key.
    m_file.set_encryption_key(cfg.encryption_key);

    size_t size = 0;
    // The size of a database file must not exceed what can be encoded in
    // size_t.
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
        throw InvalidDatabase("Realm file too large", path);
    if (cfg.clear_file_on_error && cfg.session_initiator) {
        if (size == 0 && physical_file_size != 0) {
            cfg.clear_file = true;
        }
        else if (size > 0) {
            try {
                read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
            }
            catch (const InvalidDatabase&) {
                cfg.clear_file = true;
            }
        }
    }
    if (cfg.clear_file) {
        m_file.resize(0);
        size = 0;
        physical_file_size = 0;
    }
    else if (cfg.encryption_key && !cfg.clear_file && size == 0 && physical_file_size != 0) {
        // The opened file holds data, but is so small it cannot have
        // been created with encryption
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
    }
    if (size == 0) {
        if (REALM_UNLIKELY(cfg.read_only))
            throw InvalidDatabase("Read-only access to empty Realm file", path);
        // We want all non-streaming files to be a multiple of the page size
        // to simplify memory mapping, so just pre-reserve the required space now
        m_file.prealloc(page_size()); // Throws
        const char* data = reinterpret_cast<const char*>(&empty_file_header);
        m_file.write(0, data, sizeof empty_file_header); // Throws

        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws

        size = size_t(m_file.get_size());
    }

    ref_type top_ref = read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
    m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
    // m_data not valid at this point!
    m_baseline = 0;
    // make sure that any call to begin_read cause any slab to be placed in free
    // lists correctly
    m_free_space_state = free_space_Invalid;

    // Ensure clean up, if we need to back out:
    DetachGuard dg(*this);

    reset_free_space_tracking();

    // the file could have been produced on a device with a different
    // page size than our own so don't expect the size to be aligned
    if (cfg.encryption_key && size != 0 && size != round_up_to_page_size(size)) {
        size = round_up_to_page_size(size);
    }
    update_reader_view(size);
    REALM_ASSERT(m_mappings.size());
    m_data = m_mappings[0].primary_mapping.get_addr();
    util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    dg.release();  // Do not detach
    fcg.release(); // Do not close
    return top_ref;
}

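// Editor's note (not part of the original source): pieced together from
// attach_file() above, a typical open of an on-disk Realm goes roughly: fill
// in a Config (read_only/no_create/is_shared/session_initiator/encryption_key/
// ...), call attach_file(path, cfg, observer) to open, validate and map the
// file and obtain the top ref, have the session initiator run
// convert_from_streaming_form() if needed, and rely on update_reader_view()
// to grow the mappings as the file grows. This is a reading of the code in
// this file, not a documented public API contract.
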
void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
{
    auto header = reinterpret_cast<const Header*>(m_data);
    if (!is_file_on_streaming_form(*header))
        return;

    // Make sure the database is not on streaming format. If we did not do this,
    // a later commit would have to do it. That would require coordination with
    // anybody concurrently joining the session, so it seems easier to do it at
    // session initialization, even if it means writing the database during open.
    {
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
        Header& writable_header = *writable_map.get_addr();
        util::encryption_read_barrier(writable_map, 0);
        writable_header.m_top_ref[1] = top_ref;
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();
        util::encryption_read_barrier(writable_map, 0);
        writable_header.m_flags |= flags_SelectBit;
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();

        util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    }
}

ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());

    // Verify the data structures
    std::string path;                                     // No path
    ref_type top_ref = validate_header(data, size, path); // Throws

    m_data = data;
    size = align_size_to_section_boundary(size);
    m_baseline = size;
    m_attach_mode = attach_UsersBuffer;

    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    return top_ref;
}

void SlabAlloc::init_in_memory_buffer()
{
    m_attach_mode = attach_Heap;
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
    m_data = m_virtual_file_buffer.back().addr;
    m_virtual_file_size = sizeof(empty_file_header);
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);

    m_baseline = m_virtual_file_size;
    m_translation_table_size = 1;
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
    m_ref_translation_ptr = ref_translation_ptr;
}

char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
{
    auto idx = get_section_index(ref);
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
    auto& buf = m_virtual_file_buffer[idx];
    return buf.addr + (ref - buf.start_ref);
}

void SlabAlloc::attach_empty()
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    m_attach_mode = attach_OwnedBuffer;
    m_data = nullptr; // Empty buffer

    // Below this point (assignment to `m_attach_mode`), nothing must throw.

    // No ref must ever be less than the header size, so we will use that as the
    // baseline here.
    size_t size = align_size_to_section_boundary(sizeof(Header));
    m_baseline = size;
    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1];
}

ref_type SlabAlloc::read_and_validate_header(util::File& file, const std::string& path, size_t size,
                                             bool session_initiator, util::WriteObserver* write_observer)
{
    try {
        // we'll read header and (potentially) footer
        File::Map<char> map_header(file, File::access_ReadOnly, sizeof(Header), write_observer);
        util::encryption_read_barrier(map_header, 0, sizeof(Header));
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());

        File::Map<char> map_footer;
        const StreamingFooter* footer = nullptr;
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
            size_t footer_ref = size - sizeof(StreamingFooter);
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
            size_t footer_offset = footer_ref - footer_page_base;
            map_footer = File::Map<char>(file, footer_page_base, File::access_ReadOnly,
                                         sizeof(StreamingFooter) + footer_offset, write_observer);
            util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
        }

        auto top_ref = validate_header(header, footer, size, path, file.get_encryption() != nullptr); // Throws

        if (session_initiator && is_file_on_streaming_form(*header)) {
            // Don't compare file format version fields as they are allowed to differ.
            // Also don't compare reserved fields.
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, path);
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0], path);
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1], path);
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2], path);
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3], path);
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0], path);
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], path);
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie, path);
        }
        return top_ref;
    }
    catch (const InvalidDatabase&) {
        throw;
    }
    catch (const DecryptionFailed& e) {
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
    }
    catch (const std::exception& e) {
        throw InvalidDatabase(e.what(), path);
    }
    catch (...) {
        throw InvalidDatabase("unknown error", path);
    }
}

void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
1022
{
54✔
1023
    char buf[256];
54✔
1024
    snprintf(buf, sizeof(buf),
54✔
1025
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
54✔
1026
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
54✔
1027
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
54✔
1028
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
54✔
1029
             header.m_flags);
54✔
1030
    msg += buf;
54✔
1031
    throw InvalidDatabase(msg, path);
54✔
1032
}
54✔
1033

1034
// Note: This relies on proper mappings having been established by the caller
1035
// for both the header and the streaming footer
1036
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
1037
{
90✔
1038
    auto header = reinterpret_cast<const Header*>(data);
90✔
1039
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
90✔
1040
    return validate_header(header, footer, size, path);
90✔
1041
}
90✔
1042

1043
ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
1044
                                    const std::string& path, bool is_encrypted)
1045
{
75,057✔
1046
    // Verify that size is sane and 8-byte aligned
1047
    if (REALM_UNLIKELY(size < sizeof(Header)))
75,057✔
1048
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
54✔
1049
                              path);
54✔
1050
    if (REALM_UNLIKELY(size % 8 != 0))
75,003✔
1051
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);
×
1052

1053
    // First four bytes of info block is file format id
1054
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
75,003✔
1055
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
75,003✔
1056
        if (is_encrypted) {
54✔
1057
            // Encrypted files check the hmac on read, so there's a lot less
1058
            // which could go wrong and have us still reach this point
1059
            throw_header_exception("header has invalid mnemonic. The file does not appear to be Realm file.", *header,
18✔
1060
                                   path);
18✔
1061
        }
18✔
1062
        else {
36✔
1063
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
36✔
1064
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
36✔
1065
                                   *header, path);
36✔
1066
        }
36✔
1067
    }
54✔
1068

1069
    // Last bit in info block indicates which top_ref block is valid
1070
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
75,003✔
1071

1072
    // Top-ref must always point within buffer
1073
    auto top_ref = header->m_top_ref[slot_selector];
75,003✔
1074
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
75,003✔
1075
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
726✔
1076
            throw InvalidDatabase(
×
1077
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
×
1078
                path);
×
1079
        }
×
1080
        REALM_ASSERT(footer);
726✔
1081
        top_ref = footer->m_top_ref;
726✔
1082
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
726✔
1083
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
×
1084
                                               "The file is probably truncated.",
×
1085
                                               footer->m_magic_cookie),
×
1086
                                  path);
×
1087
        }
×
1088
    }
726✔
1089
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
75,003✔
1090
        throw_header_exception("top ref is not aligned", *header, path);
×
1091
    }
×
1092
    if (REALM_UNLIKELY(top_ref >= size)) {
75,003✔
1093
        throw_header_exception(
×
1094
            util::format(
×
1095
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
×
1096
                top_ref),
×
1097
            *header, path);
×
1098
    }
×
1099
    return ref_type(top_ref);
75,003✔
1100
}
75,003✔
1101

1102

1103
size_t SlabAlloc::get_total_size() const noexcept
1104
{
1,096,434✔
1105
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
1,096,434✔
1106
}
1,096,434✔
1107

1108

1109
void SlabAlloc::reset_free_space_tracking()
1110
{
723,900✔
1111
    CriticalSection cs(changes);
723,900✔
1112
    if (is_free_space_clean())
723,900✔
1113
        return;
9,585✔
1114

1115
    // Free all scratch space (done after all data has
1116
    // been commited to persistent space)
1117
    m_free_read_only.clear();
714,315✔
1118

1119
    // release slabs.. keep the initial allocation if it's a minimal allocation,
1120
    // otherwise release it as well. This saves map/unmap for small transactions.
1121
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
723,987✔
1122
        auto& last_slab = m_slabs.back();
9,672✔
1123
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
9,672✔
1124
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
9,672✔
1125
        --m_translation_table_size;
9,672✔
1126
        m_slabs.pop_back();
9,672✔
1127
    }
9,672✔
1128
    rebuild_freelists_from_slab();
714,315✔
1129
    m_free_space_state = free_space_Clean;
714,315✔
1130
    m_commit_size = 0;
714,315✔
1131
}
714,315✔
1132

1133
inline bool randomly_false_in_debug(bool x)
1134
{
×
1135
#ifdef REALM_DEBUG
×
1136
    if (x)
×
1137
        return (std::rand() & 1);
×
1138
#endif
×
1139
    return x;
×
1140
}
×
1141

1142

1143
/*
1144
  Memory mapping
1145

1146
  To make ref->ptr translation fast while also avoiding to have to memory map the entire file
1147
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
1148
  essential to map the file in even sized sections.
1149

1150
  These sections must be large enough to hold one or more of the largest arrays, which can be up
1151
  to 16MB. You can only mmap file space which has been allocated to a file. If you mmap a range
1152
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
1153
  We don't want to extend the file in increments as large as the chunk size.
1154

1155
  As the file grows, we grow the mapping by creating a new larger one, which replaces the
1156
  old one in the mapping table. However, we must keep the old mapping open, because older
1157
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
1158
  and only cleaned out once we know that no transaction can refer to them anymore.
1159

1160
  Interaction with encryption
1161

1162
  When encryption is enabled, the memory mapping is to temporary memory, not the file.
1163
  The binding to the file is done by software. This allows us to "cheat" and allocate
1164
  entire sections. With encryption, it doesn't matter if the mapped memory logically
1165
  extends beyond the end of file, because it will not be accessed.
1166

1167
  Growing/Changing the mapping table.
1168

1169
  There are two mapping tables:
1170

1171
  * m_mappings: This is the "source of truth" about what the current mapping is.
1172
    It is only accessed under lock.
1173
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
1174
    mostly lock-free fashion from the translate function. Because of the lock free operation this
1175
    table can only be extended. Only selected members in each entry can be changed.
1176
    See RefTranslation in alloc.hpp for more details.
1177
    The fast mapping also maps the slab area used for allocations - as mappings are added,
1178
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
1179
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
1180
    to memory allocation and release, which is already serialized (since write transactions are
1181
    single threaded).
1182

1183
  When m_mappings is changed due to an extend operation changing a mapping, or when
1184
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:
1185

1186
  * A new fast mapping table is created. The old one is not modified.
1187
  * The old one is held in a waiting area until it is no longer relevant because no
1188
    live transaction can refer to it any more.
1189
 */
1190
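/* Illustrative sketch (added for clarity; not part of alloc_slab.cpp or the realm-core API):
   a minimal, self-contained model of the read-copy-update scheme described above. Readers
   follow an atomic pointer without locking; the single (serialized) writer publishes a copied,
   larger table and parks the old one, tagged with the youngest live version, until no live
   transaction can still observe it. All names below (rcu_sketch, Entry, RcuTable, grow_to,
   purge) are invented for this sketch. */

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

namespace rcu_sketch {

struct Entry {
    char* mapping_addr = nullptr; // stand-in for a translation entry
};

class RcuTable {
public:
    // Reader side: lock-free, analogous to loading m_ref_translation_ptr in translate().
    const Entry* snapshot() const noexcept
    {
        return m_current.load(std::memory_order_acquire);
    }

    // Writer side (serialized, like a write transaction): copy into a larger table,
    // publish it atomically, and retire the old table instead of freeing it.
    void grow_to(size_t new_size, uint64_t youngest_live_version)
    {
        auto bigger = std::make_unique<Entry[]>(new_size);
        for (size_t i = 0; i < m_size; ++i)
            bigger[i] = m_owned[i]; // copy; the published table is never mutated
        m_current.store(bigger.get(), std::memory_order_release);
        if (m_owned)
            m_retired.push_back({youngest_live_version, std::move(m_owned)});
        m_owned = std::move(bigger);
        m_size = new_size;
    }

    // Cleanup: drop retired tables that no live transaction can still be reading,
    // mirroring the role of the purge step for old mappings/translations.
    void purge(uint64_t oldest_live_version)
    {
        auto is_dead = [&](const Retired& r) {
            return r.replaced_at_version < oldest_live_version;
        };
        m_retired.erase(std::remove_if(m_retired.begin(), m_retired.end(), is_dead), m_retired.end());
    }

private:
    struct Retired {
        uint64_t replaced_at_version;
        std::unique_ptr<Entry[]> table;
    };
    std::atomic<const Entry*> m_current{nullptr};
    std::unique_ptr<Entry[]> m_owned; // owns the currently published table
    size_t m_size = 0;
    std::vector<Retired> m_retired; // the "waiting area" for superseded tables
};

} // namespace rcu_sketch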
void SlabAlloc::update_reader_view(size_t file_size)
1191
{
2,533,629✔
1192
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
2,533,629✔
1193
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
2,533,629✔
1194
    if (file_size <= old_baseline) {
2,533,629✔
1195
        schedule_refresh_of_outdated_encrypted_pages();
2,413,332✔
1196
        return;
2,413,332✔
1197
    }
2,413,332✔
1198

1199
    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
120,297✔
1200
    bool replace_last_mapping = false;
120,297✔
1201
    size_t old_num_mappings = get_section_index(old_slab_base);
120,297✔
1202

1203
    if (!is_in_memory()) {
120,297✔
1204
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
95,157✔
1205
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
95,157✔
1206
                        get_file_path_for_assertions());
95,157✔
1207
        REALM_ASSERT_DEBUG(is_free_space_clean());
95,157✔
1208

1209
        // Create the new mappings needed to cover the new size. We don't mutate
1210
        // any of the member variables until we've successfully created all of the
1211
        // mappings so that we leave things in a consistent state if one of them
1212
        // hits an allocation failure.
1213

1214
        std::vector<MapEntry> new_mappings;
95,157✔
1215
        REALM_ASSERT(m_mappings.size() == old_num_mappings);
95,157✔
1216

1217
        {
95,157✔
1218
            // If the old slab base was greater than the old baseline then the final
1219
            // mapping was a partial section and we need to replace it with a larger
1220
            // mapping.
1221
            if (old_baseline < old_slab_base) {
95,157✔
1222
                // old_slab_base should be 0 if we had no mappings previously
1223
                REALM_ASSERT(old_num_mappings > 0);
20,406✔
1224
                // try to extend the old mapping in-place instead of replacing it.
1225
                MapEntry& cur_entry = m_mappings.back();
20,406✔
1226
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
20,406✔
1227
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
20,406✔
1228
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
20,406✔
1229
                    replace_last_mapping = true;
66✔
1230
                    --old_num_mappings;
66✔
1231
                }
66✔
1232
            }
20,406✔
1233

1234
            // Create new mappings covering from the end of the last complete
1235
            // section to the end of the new file size.
1236
            const auto new_slab_base = align_size_to_section_boundary(file_size);
95,157✔
1237
            const size_t num_mappings = get_section_index(new_slab_base);
95,157✔
1238
            new_mappings.reserve(num_mappings - old_num_mappings);
95,157✔
1239
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
170,106✔
1240
                const size_t section_start_offset = get_section_base(k);
74,961✔
1241
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
74,961✔
1242
                if (section_size == (1 << section_shift)) {
74,961✔
1243
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
66✔
1244
                                                                  section_size, m_write_observer)});
66✔
1245
                }
66✔
1246
                else {
74,895✔
1247
                    new_mappings.emplace_back();
74,895✔
1248
                    auto& mapping = new_mappings.back().primary_mapping;
74,895✔
1249
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
74,895✔
1250
                                                        section_start_offset, m_write_observer);
74,895✔
1251
                    if (reserved) {
74,895✔
1252
                        // if reservation is supported, first attempt at extending must succeed
1253
                        if (!mapping.try_extend_to(section_size))
74,895✔
1254
                            throw std::bad_alloc();
12✔
1255
                    }
74,895✔
UNCOV
1256
                    else {
×
UNCOV
1257
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size,
×
UNCOV
1258
                                                                section_start_offset, m_write_observer);
×
UNCOV
1259
                    }
×
1260
                }
74,895✔
1261
            }
74,961✔
1262
        }
95,157✔
1263

1264
        // Now that we've successfully created our mappings, update our member
1265
        // variables (and assume that resizing a simple vector won't produce memory
1266
        // allocation failures, unlike 64 MB mmaps).
1267
        if (replace_last_mapping) {
95,145✔
1268
            MapEntry& cur_entry = m_mappings.back();
60✔
1269
            // We should not have a xover mapping here because that would mean
1270
            // that there was already something mapped after the last section
1271
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
60✔
1272
            // save the old mapping/keep it open
1273
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
60✔
1274
            m_mappings.pop_back();
60✔
1275
            m_mapping_version++;
60✔
1276
        }
60✔
1277

1278
        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
95,145✔
1279
    }
95,145✔
1280

1281
    m_baseline.store(file_size, std::memory_order_relaxed);
120,285✔
1282

1283
    const size_t ref_start = align_size_to_section_boundary(file_size);
120,285✔
1284
    const size_t ref_displacement = ref_start - old_slab_base;
120,285✔
1285
    if (ref_displacement > 0) {
120,285✔
1286
        // Rebase slabs as m_baseline is now bigger than old_slab_base
1287
        for (auto& e : m_slabs) {
74,835✔
1288
            e.ref_end += ref_displacement;
72✔
1289
        }
72✔
1290
    }
74,835✔
1291

1292
    rebuild_freelists_from_slab();
120,285✔
1293

1294
    // Build the fast path mapping
1295

1296
    // The fast path mapping is an array which is used from multiple threads
1297
    // without locking - see translate().
1298

1299
    // Addition of a new mapping may require a completely new fast mapping table.
1300
    //
1301
    // Being used in a multithreaded scenario, the old mappings must be retained open,
1302
    // until the realm version for which they were established has been closed/detached.
1303
    //
1304
    // This assumes that only write transactions call do_alloc() or do_free() or needs to
1305
    // translate refs in the slab area, and that all these uses are serialized, whether
1306
    // that is achieved by being single threaded, interlocked or run from a sequential
1307
    // scheduling queue.
1308
    //
1309
    rebuild_translations(replace_last_mapping, old_num_mappings);
120,285✔
1310

1311
    schedule_refresh_of_outdated_encrypted_pages();
120,285✔
1312
}
120,285✔
1313

1314

1315
void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
1316
{
2,534,292✔
1317
#if REALM_ENABLE_ENCRYPTION
2,534,292✔
1318
    if (auto encryption = m_file.get_encryption()) {
2,534,292✔
1319
        encryption->mark_data_as_possibly_stale();
1,731✔
1320
    }
1,731✔
1321
#endif // REALM_ENABLE_ENCRYPTION
2,534,292✔
1322
}
2,534,292✔
1323

1324
size_t SlabAlloc::get_allocated_size() const noexcept
1325
{
83,406✔
1326
    size_t sz = 0;
83,406✔
1327
    for (const auto& s : m_slabs)
83,406✔
1328
        sz += s.size;
18,861✔
1329
    return sz;
83,406✔
1330
}
83,406✔
1331

1332
void SlabAlloc::extend_fast_mapping_with_slab(char* address)
1333
{
83,400✔
1334
    ++m_translation_table_size;
83,400✔
1335
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
83,400✔
1336
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
191,205✔
1337
        new_fast_mapping[i] = m_ref_translation_ptr[i];
107,805✔
1338
    }
107,805✔
1339
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
83,400✔
1340
                                    m_ref_translation_ptr.load());
83,400✔
1341
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
83,400✔
1342
    // Memory ranges with slab (working memory) can never have arrays that straddle a boundary,
1343
    // so optimize by clamping the lowest possible xover offset to the end of the section.
1344
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
83,400✔
1345
    m_ref_translation_ptr = new_fast_mapping.release();
83,400✔
1346
}
83,400✔
1347

1348
void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
1349
{
120,981✔
1350
    size_t free_space_size = m_slabs.size();
120,981✔
1351
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
120,981✔
1352
    if (m_translation_table_size < num_mappings + free_space_size) {
120,981✔
1353
        requires_new_translation = true;
74,835✔
1354
    }
74,835✔
1355
    RefTranslation* new_translation_table = m_ref_translation_ptr;
120,981✔
1356
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
120,981✔
1357
    if (requires_new_translation) {
120,981✔
1358
        // we need a new translation table, but must preserve old, as translations using it
1359
        // may be in progress concurrently
1360
        if (m_translation_table_size)
74,895✔
1361
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
168✔
1362
                                            m_ref_translation_ptr.load());
168✔
1363
        m_translation_table_size = num_mappings + free_space_size;
74,895✔
1364
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
74,895✔
1365
        new_translation_table = new_translation_table_owner.get();
74,895✔
1366
        old_num_sections = 0;
74,895✔
1367
    }
74,895✔
1368
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
196,050✔
1369
        if (is_in_memory()) {
75,069✔
1370
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
12✔
1371
        }
12✔
1372
        else {
75,057✔
1373
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
75,057✔
1374
#if REALM_ENABLE_ENCRYPTION
75,057✔
1375
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
75,057✔
1376
#endif
75,057✔
1377
        }
75,057✔
1378
        REALM_ASSERT(new_translation_table[i].mapping_addr);
75,069✔
1379
        // We don't copy over data for the cross over mapping. If the mapping is needed,
1380
        // copying will happen on demand (in get_or_add_xover_mapping).
1381
        // Note: that may never be needed, because if the array that needed the original cross over
1382
        // mapping is freed, any new array allocated at the same position will NOT need a cross
1383
        // over mapping, but just use the primary mapping.
1384
    }
75,069✔
1385
    for (size_t k = 0; k < free_space_size; ++k) {
162,435✔
1386
        char* base = m_slabs[k].addr;
41,454✔
1387
        REALM_ASSERT(base);
41,454✔
1388
        new_translation_table[num_mappings + k].mapping_addr = base;
41,454✔
1389
    }
41,454✔
1390

1391
    // This will either be null or the same as new_translation_table, which is about to become owned by
1392
    // m_ref_translation_ptr.
1393
    (void)new_translation_table_owner.release();
120,981✔
1394

1395
    m_ref_translation_ptr = new_translation_table;
120,981✔
1396
}
120,981✔
1397

1398
void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
1399
{
6✔
1400
    auto _page_size = page_size();
6✔
1401
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
6✔
1402
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
6✔
1403
        // some other thread already added a mapping
1404
        // it MUST have been for the exact same address:
1405
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
×
1406
        return;
×
1407
    }
×
1408
    MapEntry* map_entry = &m_mappings[index];
6✔
1409
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
6✔
1410
    if (!map_entry->xover_mapping.is_attached()) {
6✔
1411
        // Create a xover mapping
1412
        auto file_offset = get_section_base(index) + offset;
6✔
1413
        auto end_offset = file_offset + size;
6✔
1414
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
6✔
1415
        auto minimal_mapping_size = end_offset - mapping_file_offset;
6✔
1416
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size,
6✔
1417
                                      m_write_observer);
6✔
1418
        map_entry->xover_mapping = std::move(mapping);
6✔
1419
    }
6✔
1420
    txl.xover_mapping_base = offset & ~(_page_size - 1);
6✔
1421
#if REALM_ENABLE_ENCRYPTION
6✔
1422
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
6✔
1423
#endif
6✔
1424
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
6✔
1425
}
6✔
1426

1427
void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
1428
{
1,455,699✔
1429
    // Verify that each old ref translation pointer still points to a valid
1430
    // thing that we haven't released yet.
1431
#if REALM_DEBUG
1,455,699✔
1432
    std::unordered_set<const char*> mappings;
1,455,699✔
1433
    for (auto& m : m_old_mappings) {
1,455,699✔
1434
        REALM_ASSERT(m.mapping.is_attached());
156✔
1435
        mappings.insert(m.mapping.get_addr());
156✔
1436
    }
156✔
1437
    for (auto& m : m_mappings) {
1,455,699✔
1438
        REALM_ASSERT(m.primary_mapping.is_attached());
1,235,184✔
1439
        mappings.insert(m.primary_mapping.get_addr());
1,235,184✔
1440
        if (m.xover_mapping.is_attached())
1,235,184✔
1441
            mappings.insert(m.xover_mapping.get_addr());
12✔
1442
    }
1,235,184✔
1443
    for (auto& m : m_virtual_file_buffer) {
1,455,699✔
1444
        mappings.insert(m.addr);
220,740✔
1445
    }
220,740✔
1446
    if (m_data)
1,455,699✔
1447
        mappings.insert(m_data);
1,444,683✔
1448
    for (auto& t : m_old_translations) {
1,455,699✔
1449
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
247,311✔
1450
                        youngest_live_version, t.replaced_at_version);
247,311✔
1451
        if (nonempty_attachment()) {
247,311✔
1452
            for (size_t i = 0; i < t.translation_count; ++i)
499,842✔
1453
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
258,279✔
1454
        }
241,563✔
1455
    }
247,311✔
1456
#else
1457
    static_cast<void>(youngest_live_version);
1458
#endif
1459
}
1,455,699✔
1460

1461

1462
void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
1463
{
727,866✔
1464
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
727,866✔
1465
    verify_old_translations(youngest_live_version);
727,866✔
1466

1467
    auto pred = [=](auto& oldie) {
727,866✔
1468
        return oldie.replaced_at_version < oldest_live_version;
165,546✔
1469
    };
165,546✔
1470
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
727,866✔
1471
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
727,866✔
1472
                             m_old_translations.end());
727,866✔
1473
    m_youngest_live_version = youngest_live_version;
727,866✔
1474
    verify_old_translations(youngest_live_version);
727,866✔
1475
}
727,866✔
1476

1477
void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
1478
{
696,360✔
1479
    m_youngest_live_version = currently_live_version;
696,360✔
1480
}
696,360✔
1481

1482
const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
1483
{
622,095✔
1484
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
622,095✔
1485
        throw InvalidFreeSpace();
×
1486
    return m_free_read_only;
622,095✔
1487
}
622,095✔
1488

1489

1490
size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
1491
{
20,855,196✔
1492
    size_t end_of_block = start_pos + free_chunk_size;
20,855,196✔
1493
    size_t alloc_pos = start_pos;
20,855,196✔
1494
    while (alloc_pos + request_size <= end_of_block) {
20,855,877✔
1495
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
20,855,586✔
1496
        if (alloc_pos + request_size <= next_section_boundary) {
20,855,613✔
1497
            return alloc_pos;
20,855,013✔
1498
        }
20,855,013✔
1499
        alloc_pos = next_section_boundary;
2,147,484,247✔
1500
    }
2,147,484,247✔
1501
    return 0;
2,147,483,938✔
1502
}
20,855,196✔
1503

1504

1505
void SlabAlloc::resize_file(size_t new_file_size)
1506
{
88,548✔
1507
    if (m_attach_mode == attach_SharedFile) {
88,548✔
1508
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
61,647✔
1509
        m_file.prealloc(new_file_size); // Throws
61,647✔
1510
        // resizing is done based on the logical file size. It is ok for the file
1511
        // to actually be bigger, but never smaller.
1512
        REALM_ASSERT_EX(new_file_size <= static_cast<size_t>(m_file.get_size()), new_file_size, m_file.get_size());
61,647✔
1513

1514
        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
61,647✔
1515
        if (!disable_sync)
61,647✔
1516
            m_file.sync(); // Throws
555✔
1517
    }
61,647✔
1518
    else {
26,901✔
1519
        size_t current_size = 0;
26,901✔
1520
        for (auto& b : m_virtual_file_buffer) {
27,114✔
1521
            current_size += b.size;
27,114✔
1522
        }
27,114✔
1523
        if (new_file_size > current_size) {
26,901✔
1524
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
6✔
1525
        }
6✔
1526
        m_virtual_file_size = new_file_size;
26,901✔
1527
    }
26,901✔
1528
}
88,548✔
1529

1530
#ifdef REALM_DEBUG
1531
void SlabAlloc::reserve_disk_space(size_t size)
1532
{
36✔
1533
    if (size != round_up_to_page_size(size))
36✔
1534
        size = round_up_to_page_size(size);
30✔
1535
    m_file.prealloc(size); // Throws
36✔
1536

1537
    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
36!
1538
    if (!disable_sync)
36✔
1539
        m_file.sync(); // Throws
×
1540
}
36✔
1541
#endif
1542

1543
void SlabAlloc::verify() const
1544
{
127,071✔
1545
#ifdef REALM_DEBUG
127,071✔
1546
    if (!m_slabs.empty()) {
127,071✔
1547
        // Make sure that all free blocks are within a slab. This is done
1548
        // implicitly by using for_all_free_entries()
1549
        size_t first_possible_ref = m_baseline;
97,812✔
1550
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
97,812✔
1551
        for_all_free_entries([&](size_t ref, size_t size) {
594,456✔
1552
            REALM_ASSERT(ref >= first_possible_ref);
594,456✔
1553
            REALM_ASSERT(ref + size <= first_impossible_ref);
594,456✔
1554
            first_possible_ref = ref;
594,456✔
1555
        });
594,456✔
1556
    }
97,812✔
1557
#endif
127,071✔
1558
}
127,071✔
1559

1560
#ifdef REALM_DEBUG
1561

1562
bool SlabAlloc::is_all_free() const
1563
{
738✔
1564
    // verify that slabs contain only free space.
1565
    // this is equivalent to each slab holding BetweenBlocks only at the ends.
1566
    for (const auto& e : m_slabs) {
738✔
1567
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
684✔
1568
        REALM_ASSERT(first->block_before_size == 0);
684✔
1569
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
684✔
1570
        REALM_ASSERT(last->block_after_size == 0);
684✔
1571
        if (first->block_after_size != last->block_before_size)
684✔
1572
            return false;
×
1573
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
684✔
1574
        range -= sizeof(BetweenBlocks);
684✔
1575
        // the size of the free area must match the distance between the two BetweenBlocks:
1576
        if (range != first->block_after_size)
684✔
1577
            return false;
×
1578
    }
684✔
1579
    return true;
738✔
1580
}
738✔
1581

1582

1583
// LCOV_EXCL_START
1584
void SlabAlloc::print() const
1585
{
×
1586
    /* TODO
1587
     *
1588

1589
    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;
1590

1591
    size_t free = 0;
1592
    for (const auto& free_block : m_free_space) {
1593
        free += free_block.size;
1594
    }
1595

1596
    size_t allocated = allocated_for_slabs - free;
1597
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";
1598

1599
    if (!m_slabs.empty()) {
1600
        std::cout << "Slabs: ";
1601
        ref_type first_ref = m_baseline;
1602

1603
        for (const auto& slab : m_slabs) {
1604
            if (&slab != &m_slabs.front())
1605
                std::cout << ", ";
1606

1607
            ref_type last_ref = slab.ref_end - 1;
1608
            size_t size = slab.ref_end - first_ref;
1609
            void* addr = slab.addr;
1610
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
1611
            first_ref = slab.ref_end;
1612
        }
1613
        std::cout << "\n";
1614
    }
1615

1616
    if (!m_free_space.empty()) {
1617
        std::cout << "FreeSpace: ";
1618
        for (const auto& free_block : m_free_space) {
1619
            if (&free_block != &m_free_space.front())
1620
                std::cout << ", ";
1621

1622
            ref_type last_ref = free_block.ref + free_block.size - 1;
1623
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
1624
        }
1625
        std::cout << "\n";
1626
    }
1627
    if (!m_free_read_only.empty()) {
1628
        std::cout << "FreeSpace (ro): ";
1629
        for (const auto& free_block : m_free_read_only) {
1630
            if (&free_block != &m_free_read_only.front())
1631
                std::cout << ", ";
1632

1633
            ref_type last_ref = free_block.ref + free_block.size - 1;
1634
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
1635
        }
1636
        std::cout << "\n";
1637
    }
1638
    std::cout << std::flush;
1639
    */
1640
}
×
1641
// LCOV_EXCL_STOP
1642

1643
#endif // REALM_DEBUG