realm / realm-core / build 1862 (push, built on Evergreen, committed by web-flow)

23 Nov 2023 10:15PM UTC. Coverage: 91.685% (-0.007%) from 91.692%.
Commit: Update baas with the fix for "Test client migration and rollback with recovery" test (#7164)

92396 of 169288 branches covered (54.58%)
231698 of 252710 relevant lines covered (91.69%)
6285679.64 hits per line

Source File: /src/realm/alloc_slab.cpp (92.89% of lines covered)

/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <cinttypes>
#include <type_traits>
#include <exception>
#include <algorithm>
#include <memory>
#include <mutex>
#include <map>
#include <atomic>
#include <cstring>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/errno.hpp>
#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/terminate.hpp>
#include <realm/util/thread.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = 1UL << section_shift; // page_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


const SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};


void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}
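
// Illustrative sketch (not from the original file): the header carries two
// top-ref/file-format slots plus a select bit in m_flags, so a reader picks
// the active slot along these lines, given the field names used above:
//
//     int slot = (header.m_flags & flags_SelectBit) ? 1 : 0;
//     ref_type top = ref_type(header.m_top_ref[slot]);
//
// The streaming form instead stores 0xFFFFFFFFFFFFFFFF in slot 0 and keeps
// the real top ref in a StreamingFooter at the very end of the file; see
// is_file_on_streaming_form() and validate_header() further down.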

inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr;
    m_ref_translation_ptr.store(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = nullptr;
#endif

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations, "
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined, "
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}


MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeListEntry)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}
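
// Worked example (illustrative): a request for 20 bytes is first raised to
// sizeof(FreeBlock) if smaller, then rounded up to the next multiple of 8,
// e.g. 20 -> 24. Every block therefore stays 8-byte aligned and is large
// enough to hold the free-list links once it is freed again.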

SlabAlloc::FreeBlock* SlabAlloc::get_prev_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_before(entry);
    if (bb->block_before_size <= 0)
        return nullptr; // no prev block, or it is in use
    return block_before(bb);
}

SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_after(entry);
    if (bb->block_after_size <= 0)
        return nullptr; // no next block, or it is in use
    return block_after(bb);
}

SlabAlloc::FreeList SlabAlloc::find(int size)
{
    FreeList retval;
    retval.it = m_block_map.lower_bound(size);
    if (retval.it != m_block_map.end()) {
        retval.size = retval.it->first;
    }
    else {
        retval.size = 0;
    }
    return retval;
}

SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
{
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
        ++hint.it;
    if (hint.it == m_block_map.end())
        hint.size = 0; // indicate "not found"
    return hint;
}

SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
{
    FreeBlock* retval = list.it->second;
    FreeBlock* header = retval->next;
    if (header == retval)
        m_block_map.erase(list.it);
    else
        list.it->second = header;
    retval->unlink();
    return retval;
}

void SlabAlloc::FreeBlock::unlink()
{
    auto _next = next;
    auto _prev = prev;
    _next->prev = prev;
    _prev->next = next;
    clear_links();
}

void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    auto it = m_block_map.find(size);
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
    auto header = it->second;
    if (header == entry) {
        header = entry->next;
        if (header == entry)
            m_block_map.erase(it);
        else
            it->second = header;
    }
    entry->unlink();
}

void SlabAlloc::push_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    FreeBlock* header;
    auto it = m_block_map.find(size);
    if (it != m_block_map.end()) {
        header = it->second;
        it->second = entry;
        entry->next = header;
        entry->prev = header->prev;
        entry->prev->next = entry;
        entry->next->prev = entry;
    }
    else {
        header = nullptr;
        m_block_map[size] = entry;
        entry->next = entry->prev = entry;
    }
}
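
// Free-list shape (illustrative summary): m_block_map is an ordered map from
// exact block size to one entry of a circular, doubly-linked list of
// FreeBlocks of that size. find() uses lower_bound() to locate the smallest
// size class that fits, and pop_freelist_entry() erases the map key when it
// unlinks the last block of a class, so every key always denotes a non-empty
// list.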

void SlabAlloc::mark_freed(FreeBlock* entry, int size)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
    auto alloc_size = -bb->block_after_size;
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
                    get_file_path_for_assertions());
    bb->block_after_size = alloc_size;
    bb = bb_after(entry);
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
    REALM_ASSERT(-bb->block_before_size == alloc_size);
    bb->block_before_size = alloc_size;
}

void SlabAlloc::mark_allocated(FreeBlock* entry)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
    auto bb2 = bb_after(entry);
    bb->block_after_size = 0 - bb->block_after_size;
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
    bb2->block_before_size = 0 - bb2->block_before_size;
}

SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
{
    FreeList list = find(size);
    if (list.found_exact(size)) {
        return pop_freelist_entry(list);
    }
    // no exact matches.
    list = find_larger(list, size);
    FreeBlock* block;
    if (list.found_something()) {
        block = pop_freelist_entry(list);
    }
    else {
        block = grow_slab(size);
    }
    FreeBlock* remaining = break_block(block, size);
    if (remaining)
        push_freelist_entry(remaining);
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
    return block;
}

SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
{
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
    bb->block_before_size = 0;
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
    bb->block_after_size = block_size;
    auto entry = block_after(bb);
    entry->clear_links();
    entry->ref = ref_start + sizeof(BetweenBlocks);
    bb = bb_after(entry);
    bb->block_before_size = block_size;
    bb->block_after_size = 0;
    return entry;
}

void SlabAlloc::clear_freelists()
{
    m_block_map.clear();
}

void SlabAlloc::rebuild_freelists_from_slab()
{
    clear_freelists();
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
    for (const auto& e : m_slabs) {
        FreeBlock* entry = slab_to_entry(e, ref_start);
        push_freelist_entry(entry);
        ref_start = align_size_to_section_boundary(e.ref_end);
    }
}

SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
{
    int size = size_from_block(block);
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
        return nullptr;
    bb_after(block)->block_before_size = remaining_size;
    bb_before(block)->block_after_size = new_size;
    auto bb_between = bb_after(block);
    bb_between->block_before_size = new_size;
    bb_between->block_after_size = remaining_size;
    FreeBlock* remaining_block = block_after(bb_between);
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
    remaining_block->clear_links();
    block->clear_links();
    return remaining_block;
}
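
// Block layout sketch (illustrative): every block in a slab is bracketed by
// BetweenBlocks records holding signed sizes, negative while a block is in
// use:
//
//     | BB | block A | BB | block B | BB |
//
// break_block() above carves new_size bytes off the front of a free block
// and writes a fresh BetweenBlocks between the two halves. Note that the
// second bb_after(block) call yields a different address than the first,
// because the recorded size of the block has been shrunk in between.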

SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
{
    int size_first = size_from_block(first);
    int size_last = size_from_block(last);
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
    bb_before(first)->block_after_size = new_size;
    bb_after(last)->block_before_size = new_size;
    return first;
}

SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
{
    // Allocate new slab.
    // - Always allocate at least 128K. This is also the amount of
    //   memory that we allow the slab allocator to keep between
    //   transactions. Allowing it to keep a small amount between
    //   transactions makes very small transactions faster by avoiding
    //   repeated unmap/mmap system calls.
    // - When allocating, allocate as much as we already have, but
    // - Never allocate more than a full section (64MB). This policy
    //   leads to gradual allocation of larger and larger blocks until
    //   we reach allocation of entire sections.
    size += 2 * sizeof(BetweenBlocks);
    size_t new_size = minimal_alloc;
    while (new_size < uint64_t(size))
        new_size += minimal_alloc;
    size_t already_allocated = get_allocated_size();
    if (new_size < already_allocated)
        new_size = already_allocated;
    if (new_size > maximal_alloc)
        new_size = maximal_alloc;

    ref_type ref;
    if (m_slabs.empty()) {
        ref = m_baseline.load(std::memory_order_relaxed);
    }
    else {
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
        ref = curr_ref_end;
    }
    ref = align_size_to_section_boundary(ref);
    size_t ref_end = ref;
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
                                      util::to_string(new_size));
    }

    REALM_ASSERT(matches_section_boundary(ref));

    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    // Create new slab and add to list of slabs
    m_slabs.emplace_back(ref_end, new_size); // Throws
    const Slab& slab = m_slabs.back();
    extend_fast_mapping_with_slab(slab.addr);

    // build a single block from that entry
    return slab_to_entry(slab, ref);
}
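
// Growth example (illustrative, assuming minimal_alloc is the 128K floor and
// maximal_alloc is one 64MB section, as the policy comment above states): the
// first grow_slab() yields a 128K slab; once total slab space has reached,
// say, 1MB, the next request yields at least 1MB more, so the slab footprint
// roughly doubles per grow until the per-slab cap of a full section is hit.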

void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}
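
// Coalescing example for the read-only map (illustrative): with
// m_free_read_only = { {100, 50}, {200, 50} }, freeing ref=150 with size=50
// first absorbs {200, 50} (since 150 + 50 == 200) and erases that entry,
// then extends {100, 50} in place (since 100 + 50 == 150), leaving the
// single run { {100, 150} } and returning early.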

void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}

size_t SlabAlloc::consolidate_free_read_only()
{
    CriticalSection cs(changes);
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    return m_free_read_only.size();
}


MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
{
    REALM_ASSERT_DEBUG(translate(ref) == addr);
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8

    // Possible future enhancement: check if we can extend current space instead
    // of unconditionally allocating new space. In that case, remember to
    // check whether m_free_space_state == free_state_Invalid. Also remember to
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.

    // Allocate new space
    MemRef new_mem = do_alloc(new_size); // Throws

    // Copy existing segment
    char* new_addr = new_mem.get_addr();
    realm::safe_copy_n(addr, old_size, new_addr);

    // Add old segment to freelist
    do_free(ref, addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out)) {
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
                  << " new_size: " << new_size << "\n";
    }
#endif // REALM_DEBUG

    return new_mem;
}


char* SlabAlloc::do_translate(ref_type) const noexcept
{
    REALM_ASSERT(false); // never come here
    return nullptr;
}


int SlabAlloc::get_committed_file_format_version() noexcept
{
    {
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
        if (m_mappings.size()) {
            // if we have mapped a file, m_mappings will have at least one mapping and
            // the first will be to the start of the file. Don't come here, if we're
            // just attaching a buffer. They don't have mappings.
            realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
        }
    }
    const Header& header = *reinterpret_cast<const Header*>(m_data);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    int file_format_version = int(header.m_file_format[slot_selector]);
    return file_format_version;
}

bool SlabAlloc::is_file_on_streaming_form(const Header& header)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
}
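
// Streaming-form lifecycle (illustrative summary): a freshly written file in
// streaming form has the select bit clear and 0xFFFFFFFFFFFFFFFF in top_ref
// slot 0, with the real top ref in the footer. When a session initiator
// later calls convert_from_streaming_form(), slot 1 is filled in and the
// select bit is set, after which this predicate returns false.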

ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    const Header& header = reinterpret_cast<const Header&>(*buffer);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    if (is_file_on_streaming_form(header)) {
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
        return ref_type(footer.m_top_ref);
    }
    else {
        return to_ref(header.m_top_ref[slot_selector]);
    }
}

std::string SlabAlloc::get_file_path_for_assertions() const
{
    return m_file.get_path();
}

bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
{
    if (cfg.read_only) {
        // If the file is opened read-only, we cannot change it. This is not a problem,
        // because for a read-only file we assume that it will not change while we use it,
        // hence there will be no need to grow memory mappings.
        // This assumption obviously will not hold, if the file is shared by multiple
        // processes or threads with different opening modes.
        // Currently, there is no way to detect if this assumption is violated.
        return false;
    }
    size_t expected_size = size_t(-1);
    size_t size = static_cast<size_t>(m_file.get_size());

    // It is not safe to change the size of a file on streaming form, since the footer
    // must remain available and remain at the very end of the file.
    REALM_ASSERT(!is_file_on_streaming_form());

    // check if online compaction allows us to shrink the file:
    if (top_ref) {
        // Get the expected file size by looking up logical file size stored in top array
        constexpr size_t max_top_size = (Group::s_file_size_ndx + 1) * 8 + sizeof(Header);
        size_t top_page_base = top_ref & ~(page_size() - 1);
        size_t top_offset = top_ref - top_page_base;
        size_t map_size = std::min(max_top_size + top_offset, size - top_page_base);
        File::Map<char> map_top(m_file, top_page_base, File::access_ReadOnly, map_size, 0, m_write_observer);
        realm::util::encryption_read_barrier(map_top, top_offset, max_top_size);
        auto top_header = map_top.get_addr() + top_offset;
        auto top_data = NodeHeader::get_data_from_header(top_header);
        auto w = NodeHeader::get_width_from_header(top_header);
        auto logical_size = size_t(get_direct(top_data, w, Group::s_file_size_ndx)) >> 1;
        // make sure we're page aligned, so the code below doesn't first
        // truncate the file, then expand it again
        expected_size = round_up_to_page_size(logical_size);
    }

    // Check if we can shrink the file
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
        detach(true); // keep m_file open
        m_file.resize(expected_size);
        m_file.close();
        size = expected_size;
        return true;
    }

    // We can only safely mmap the file, if its size matches a page boundary. If not,
    // we must change the size to match before mmaping it.
    if (size != round_up_to_page_size(size)) {
        // The file size did not match a page boundary.
        // We must extend the file to a page boundary (unless already there)
        // The file must be extended to match in size prior to being mmapped,
        // as extending it after mmap has undefined behavior.
        if (cfg.session_initiator || !cfg.is_shared) {
            // We can only safely extend the file if we're the session initiator, or if
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
            // done to ensure well defined behavior for memory mappings. It does not matter,
            // that the free space management isn't informed
            size = round_up_to_page_size(size);
            detach(true); // keep m_file open
            m_file.prealloc(size);
            m_file.close();
            return true;
        }
        else {
            // Getting here, we have a file of a size that will not work, and without being
            // allowed to extend it. This should not be possible. But allowing a retry is
            // arguably better than giving up and crashing...
            throw Retry();
        }
    }
    return false;
}
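
// Page rounding example (illustrative, assuming 4K pages): top_ref = 0x3010
// gives top_page_base = 0x3000 and top_offset = 0x10, and a 10,000-byte file
// is preallocated up to round_up_to_page_size(10000) = 12,288 bytes before
// it may be mmapped.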

ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
{
    m_cfg = cfg;
    m_write_observer = write_observer;
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    // When 'read_only' is true, this function will throw InvalidDatabase if the
    // file exists already but is empty. This can happen if another process is
    // currently creating it. Note however, that it is only legal for multiple
    // processes to access a database file concurrently if it is done via a
    // DB, and in that case 'read_only' can never be true.
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
    // session_initiator can be set *only* if we're shared.
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
                    get_file_path_for_assertions());
    // clear_file can be set *only* if we're the first session.
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
                    get_file_path_for_assertions());

    using namespace realm::util;
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
    set_read_only(cfg.read_only);
    try {
        m_file.open(path.c_str(), access, create, 0); // Throws
    }
    catch (const FileAccessError& ex) {
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
        if (ex.code() == ErrorCodes::PermissionDenied) {
            msg += util::format(". Please use a path where your app has %1 permissions.",
                                cfg.read_only ? "read" : "read-write");
        }
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
    }
    File::CloseGuard fcg(m_file);
    auto physical_file_size = m_file.get_size();
    // Note that get_size() may (will) return a different size before and after
    // the call below to set_encryption_key.
    m_file.set_encryption_key(cfg.encryption_key);

    size_t size = 0;
    // The size of a database file must not exceed what can be encoded in
    // size_t.
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
        throw InvalidDatabase("Realm file too large", path);
    if (cfg.encryption_key && size == 0 && physical_file_size != 0) {
        // The opened file holds data, but is so small it cannot have
        // been created with encryption
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
    }
    if (size == 0 || cfg.clear_file) {
        if (REALM_UNLIKELY(cfg.read_only))
            throw InvalidDatabase("Read-only access to empty Realm file", path);

        const char* data = reinterpret_cast<const char*>(&empty_file_header);
        m_file.write(data, sizeof empty_file_header); // Throws

        // Pre-alloc initial space
        size_t initial_size = page_size(); // m_initial_section_size;
        m_file.prealloc(initial_size);     // Throws

        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws

        size = initial_size;
    }
    ref_type top_ref;
    note_reader_start(this);
    util::ScopeExit reader_end_guard([this]() noexcept {
        note_reader_end(this);
    });

    try {
        // we'll read header and (potentially) footer
        File::Map<char> map_header(m_file, File::access_ReadOnly, sizeof(Header), 0, m_write_observer);
        realm::util::encryption_read_barrier(map_header, 0, sizeof(Header));
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());

        File::Map<char> map_footer;
        const StreamingFooter* footer = nullptr;
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
            size_t footer_ref = size - sizeof(StreamingFooter);
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
            size_t footer_offset = footer_ref - footer_page_base;
            map_footer = File::Map<char>(m_file, footer_page_base, File::access_ReadOnly,
                                         sizeof(StreamingFooter) + footer_offset, 0, m_write_observer);
            realm::util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
        }

        top_ref = validate_header(header, footer, size, path, cfg.encryption_key != nullptr); // Throws
        m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
        m_data = map_header.get_addr(); // <-- needed below

        if (cfg.session_initiator && is_file_on_streaming_form(*header)) {
            // Don't compare file format version fields as they are allowed to differ.
            // Also don't compare reserved fields.
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], get_file_path_for_assertions());
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie,
                            get_file_path_for_assertions());
        }
    }
    catch (const InvalidDatabase&) {
        throw;
    }
    catch (const DecryptionFailed& e) {
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
    }
    catch (const std::exception& e) {
        throw InvalidDatabase(e.what(), path);
    }
    catch (...) {
        throw InvalidDatabase("unknown error", path);
    }
    // m_data not valid at this point!
    m_baseline = 0;
    // make sure that any call to begin_read causes any slab to be placed in free
    // lists correctly
    m_free_space_state = free_space_Invalid;

    // Ensure clean up, if we need to back out:
    DetachGuard dg(*this);

    reset_free_space_tracking();
    update_reader_view(size);
    REALM_ASSERT(m_mappings.size());
    m_data = m_mappings[0].primary_mapping.get_addr();
    realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    dg.release();  // Do not detach
    fcg.release(); // Do not close
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = util::get_file_info_for_file(m_file);
#endif
    return top_ref;
}
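
// Cleanup-guard pattern (illustrative summary): attach_file() stacks three
// guards so that any throw unwinds back to a fully detached allocator:
// File::CloseGuard closes m_file, util::ScopeExit balances the
// note_reader_start() call, and DetachGuard detaches the allocator. Only on
// the success path are dg and fcg released, which is why the happy path ends
// with the two release() calls just before returning top_ref.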

void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
{
    auto header = reinterpret_cast<const Header*>(m_data);
    if (!is_file_on_streaming_form(*header))
        return;

    // Make sure the database is not on streaming format. If we did not do this,
    // a later commit would have to do it. That would require coordination with
    // anybody concurrently joining the session, so it seems easier to do it at
    // session initialization, even if it means writing the database during open.
    {
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
        Header& writable_header = *writable_map.get_addr();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_top_ref[1] = top_ref;
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_flags |= flags_SelectBit;
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();

        realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    }
}
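
// Crash-safety rationale (as implied by the ordering above): the two sync()
// calls are deliberate. The first makes slot 1 and its file-format field
// durable while the select bit still points at the streaming slot; the
// second publishes the flipped select bit. A crash between the syncs leaves
// the file readable in its old streaming form, and a crash after the second
// leaves it readable via slot 1, so no torn state mixes the two.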

void SlabAlloc::note_reader_start(const void* reader_id)
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_start(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

void SlabAlloc::note_reader_end(const void* reader_id) noexcept
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_end(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());

    // Verify the data structures
    std::string path;                                     // No path
    ref_type top_ref = validate_header(data, size, path); // Throws

    m_data = data;
    size = align_size_to_section_boundary(size);
    m_baseline = size;
    m_attach_mode = attach_UsersBuffer;

    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    return top_ref;
}

void SlabAlloc::init_in_memory_buffer()
{
    m_attach_mode = attach_Heap;
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
    m_data = m_virtual_file_buffer.back().addr;
    m_virtual_file_size = sizeof(empty_file_header);
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);

    m_baseline = m_virtual_file_size;
    m_translation_table_size = 1;
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
    m_ref_translation_ptr = ref_translation_ptr;
}

char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
{
    auto idx = get_section_index(ref);
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
    auto& buf = m_virtual_file_buffer[idx];
    return buf.addr + (ref - buf.start_ref);
}
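
// Translation example (illustrative, assuming the 64MB sections allocated in
// init_in_memory_buffer()): a ref of 70,000,000 exceeds 64MB (67,108,864),
// so get_section_index() selects buffer index 1, and the returned pointer is
// that buffer's base address plus (ref - buf.start_ref).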

void SlabAlloc::attach_empty()
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    m_attach_mode = attach_OwnedBuffer;
    m_data = nullptr; // Empty buffer

    // Below this point (assignment to `m_attach_mode`), nothing must throw.

    // No ref must ever be less than the header size, so we will use that as the
    // baseline here.
    size_t size = align_size_to_section_boundary(sizeof(Header));
    m_baseline = size;
    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1];
}

void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
{
    char buf[256];
    snprintf(buf, sizeof(buf),
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
             header.m_flags);
    msg += buf;
    throw InvalidDatabase(msg, path);
}

// Note: This relies on proper mappings having been established by the caller
// for both the header and the streaming footer
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
{
    auto header = reinterpret_cast<const Header*>(data);
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
    return validate_header(header, footer, size, path);
}

ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
                                    const std::string& path, bool is_encrypted)
{
    // Verify that size is sane and 8-byte aligned
    if (REALM_UNLIKELY(size < sizeof(Header)))
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
                              path);
    if (REALM_UNLIKELY(size % 8 != 0))
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);

    // First four bytes of info block is file format id
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
        if (is_encrypted) {
            // Encrypted files check the hmac on read, so there's a lot less
            // which could go wrong and have us still reach this point
            throw_header_exception("header has invalid mnemonic. The file does not appear to be a Realm file.",
                                   *header, path);
        }
        else {
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
                                   *header, path);
        }
    }

    // Last bit in info block indicates which top_ref block is valid
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);

    // Top-ref must always point within buffer
    auto top_ref = header->m_top_ref[slot_selector];
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
            throw InvalidDatabase(
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
                path);
        }
        REALM_ASSERT(footer);
        top_ref = footer->m_top_ref;
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
                                               "The file is probably truncated.",
                                               footer->m_magic_cookie),
                                  path);
        }
    }
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
        throw_header_exception("top ref is not aligned", *header, path);
    }
    if (REALM_UNLIKELY(top_ref >= size)) {
        throw_header_exception(
            util::format(
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
                top_ref),
            *header, path);
    }
    return ref_type(top_ref);
}


size_t SlabAlloc::get_total_size() const noexcept
{
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
}


void SlabAlloc::reset_free_space_tracking()
{
    CriticalSection cs(changes);
    if (is_free_space_clean())
        return;

    // Free all scratch space (done after all data has
    // been committed to persistent space)
    m_free_read_only.clear();

    // release slabs.. keep the initial allocation if it's a minimal allocation,
    // otherwise release it as well. This saves map/unmap for small transactions.
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
        auto& last_slab = m_slabs.back();
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
        --m_translation_table_size;
        m_slabs.pop_back();
    }
    rebuild_freelists_from_slab();
    m_free_space_state = free_space_Clean;
    m_commit_size = 0;
}

inline bool randomly_false_in_debug(bool x)
{
#ifdef REALM_DEBUG
    if (x)
        return (std::rand() & 1);
#endif
    return x;
}

1157

1158
/*
  Memory mapping

  To make ref->ptr translation fast while also avoiding having to memory-map the entire file
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
  essential to map the file in evenly sized sections.

  These sections must be large enough to hold one or more of the largest arrays, which can be up
  to 16 MB. You can only mmap file space which has been allocated to a file. If you mmap a range
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
  We also don't want to extend the file in increments as large as the section size.

  As the file grows, we grow the mapping by creating a new, larger one, which replaces the
  old one in the mapping table. However, we must keep the old mapping open, because older
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
  and only cleaned out once we know that no transaction can refer to them anymore.

  Interaction with encryption

  When encryption is enabled, the memory mapping is to temporary memory, not the file.
  The binding to the file is done by software. This allows us to "cheat" and allocate
  entire sections. With encryption, it doesn't matter if the mapped memory logically
  extends beyond the end of the file, because it will not be accessed.

  Growing/changing the mapping table

  There are two mapping tables:

  * m_mappings: This is the "source of truth" about what the current mapping is.
    It is only accessed under lock.
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
    mostly lock-free fashion from the translate function. Because of the lock-free operation this
    table can only be extended. Only selected members in each entry can be changed.
    See RefTranslation in alloc.hpp for more details.
    The fast mapping also maps the slab area used for allocations - as mappings are added,
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
    to memory allocation and release, which is already serialized (since write transactions are
    single threaded).

  When m_mappings is changed due to an extend operation changing a mapping, or when
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:

  * A new fast mapping table is created. The old one is not modified.
  * The old one is held in a waiting area until it is no longer relevant, because no
    live transaction can refer to it any more.
 */
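
// Illustrative only -- not part of SlabAlloc. A minimal sketch, under the
// assumptions described above, of the read-copy-update scheme: a grown table
// is published atomically, and the superseded table is parked until no live
// transaction can still hold a pointer to it. All names here (RcuTableSketch,
// publish, m_retired) are hypothetical; the real bookkeeping additionally
// records the version at which each table was replaced (see m_old_translations).
namespace {
struct RcuTableSketch {
    std::atomic<char**> m_live{nullptr};             // loaded lock-free by readers
    std::vector<std::unique_ptr<char*[]>> m_retired; // superseded tables, freed later

    void publish(std::unique_ptr<char*[]> bigger_table)
    {
        // Swap in the new table. Readers that already loaded the old pointer
        // keep using it, so the old allocation must stay alive in m_retired
        // until every such reader is known to be done.
        m_retired.emplace_back(m_live.exchange(bigger_table.release()));
    }
};
} // anonymous namespace
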
void SlabAlloc::update_reader_view(size_t file_size)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
    if (file_size <= old_baseline) {
        schedule_refresh_of_outdated_encrypted_pages();
        return;
    }

    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
    bool replace_last_mapping = false;
    size_t old_num_mappings = get_section_index(old_slab_base);

    if (!is_in_memory()) {
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
                        get_file_path_for_assertions());
        REALM_ASSERT_DEBUG(is_free_space_clean());

        // Create the new mappings we need to cover the new size. We don't mutate
        // any of the member variables until we've successfully created all of the
        // mappings, so that we leave things in a consistent state if one of them
        // hits an allocation failure.

        std::vector<MapEntry> new_mappings;
        REALM_ASSERT(m_mappings.size() == old_num_mappings);

        {
            // If the old slab base was greater than the old baseline then the final
            // mapping was a partial section and we need to replace it with a larger
            // mapping.
            if (old_baseline < old_slab_base) {
                // old_slab_base should be 0 if we had no mappings previously
                REALM_ASSERT(old_num_mappings > 0);
                // try to extend the old mapping in-place instead of replacing it.
                MapEntry& cur_entry = m_mappings.back();
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
                    replace_last_mapping = true;
                    --old_num_mappings;
                }
            }

            // Create new mappings covering from the end of the last complete
            // section to the end of the new file size.
            const auto new_slab_base = align_size_to_section_boundary(file_size);
            const size_t num_mappings = get_section_index(new_slab_base);
            new_mappings.reserve(num_mappings - old_num_mappings);
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
                const size_t section_start_offset = get_section_base(k);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (section_size == (1 << section_shift)) {
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
                                                                  section_size, 0, m_write_observer)});
                }
                else {
                    new_mappings.push_back({util::File::Map<char>()});
                    auto& mapping = new_mappings.back().primary_mapping;
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
                                                        section_start_offset, m_write_observer);
                    if (reserved) {
                        // if reservation is supported, the first attempt at extending must succeed
                        if (!mapping.try_extend_to(section_size))
                            throw std::bad_alloc();
                    }
                    else {
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size, 0,
                                                                section_start_offset, m_write_observer);
                    }
                }
            }
        }

        // Now that we've successfully created our mappings, update our member
        // variables (and assume that resizing a simple vector won't produce memory
        // allocation failures, unlike 64 MB mmaps).
        if (replace_last_mapping) {
            MapEntry& cur_entry = m_mappings.back();
            // We should not have a xover mapping here, because that would mean
            // that there was already something mapped after the last section.
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
            // save the old mapping/keep it open
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
            m_mappings.pop_back();
            m_mapping_version++;
        }

        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
    }

    m_baseline.store(file_size, std::memory_order_relaxed);

    const size_t ref_start = align_size_to_section_boundary(file_size);
    const size_t ref_displacement = ref_start - old_slab_base;
    if (ref_displacement > 0) {
        // Rebase slabs, as m_baseline is now bigger than old_slab_base
        for (auto& e : m_slabs) {
            e.ref_end += ref_displacement;
        }
    }

    rebuild_freelists_from_slab();

    // Build the fast path mapping

    // The fast path mapping is an array which is used from multiple threads
    // without locking - see translate().

    // Addition of a new mapping may require a completely new fast mapping table.
    //
    // Being used in a multithreaded scenario, the old mappings must be retained open,
    // until the realm version for which they were established has been closed/detached.
    //
    // This assumes that only write transactions call do_alloc() or do_free() or need to
    // translate refs in the slab area, and that all these uses are serialized, whether
    // that is achieved by being single threaded, interlocked or run from a sequential
    // scheduling queue.
    //
    rebuild_translations(replace_last_mapping, old_num_mappings);

    schedule_refresh_of_outdated_encrypted_pages();
}


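// Illustrative only: the section arithmetic that update_reader_view() relies
// on, written out as standalone helpers. The 26-bit shift (64 MB sections) is
// an assumption for the example; the real value is the section_shift constant
// used throughout this file.
namespace {
constexpr size_t sketch_shift = 26; // hypothetical: 64 MB sections
constexpr size_t sketch_section = size_t(1) << sketch_shift;

constexpr size_t sketch_get_section_base(size_t index) // file offset where a section starts
{
    return index << sketch_shift;
}
constexpr size_t sketch_get_section_index(size_t pos) // which section an offset falls in
{
    return pos >> sketch_shift;
}
constexpr size_t sketch_align_to_section_boundary(size_t size) // round up to a whole section
{
    return (size + sketch_section - 1) & ~(sketch_section - 1);
}
// A 100 MB file spans two sections: one full 64 MB mapping plus a partial one.
static_assert(sketch_get_section_index(sketch_align_to_section_boundary(100 * 1024 * 1024)) == 2, "");
} // anonymous namespace
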
void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
{
#if REALM_ENABLE_ENCRYPTION
    // callers must already hold m_mapping_mutex
    for (auto& e : m_mappings) {
        if (auto m = e.primary_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
        if (auto m = e.xover_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
    }
    // unsafe to do outside writing thread: verify();
#endif // REALM_ENABLE_ENCRYPTION
}

size_t SlabAlloc::get_allocated_size() const noexcept
{
    size_t sz = 0;
    for (const auto& s : m_slabs)
        sz += s.size;
    return sz;
}

void SlabAlloc::extend_fast_mapping_with_slab(char* address)
{
    ++m_translation_table_size;
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
        new_fast_mapping[i] = m_ref_translation_ptr[i];
    }
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
                                    m_ref_translation_ptr.load());
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
    // Memory ranges with slab (working memory) can never have arrays that straddle a boundary,
    // so optimize by clamping the lowest possible xover offset to the end of the section.
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
    m_ref_translation_ptr = new_fast_mapping.release();
}

void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
{
    size_t free_space_size = m_slabs.size();
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
    if (m_translation_table_size < num_mappings + free_space_size) {
        requires_new_translation = true;
    }
    RefTranslation* new_translation_table = m_ref_translation_ptr;
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
    if (requires_new_translation) {
        // we need a new translation table, but must preserve the old one, as
        // translations using it may be in progress concurrently
        if (m_translation_table_size)
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
                                            m_ref_translation_ptr.load());
        m_translation_table_size = num_mappings + free_space_size;
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
        new_translation_table = new_translation_table_owner.get();
        old_num_sections = 0;
    }
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
        if (is_in_memory()) {
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
        }
        else {
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
#if REALM_ENABLE_ENCRYPTION
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
#endif
        }
        REALM_ASSERT(new_translation_table[i].mapping_addr);
        // We don't copy over data for the cross over mapping. If the mapping is needed,
        // copying will happen on demand (in get_or_add_xover_mapping).
        // Note: that may never be needed, because if the array that needed the original cross over
        // mapping is freed, any new array allocated at the same position will NOT need a cross
        // over mapping, but will just use the primary mapping.
    }
    for (size_t k = 0; k < free_space_size; ++k) {
        char* base = m_slabs[k].addr;
        REALM_ASSERT(base);
        new_translation_table[num_mappings + k].mapping_addr = base;
    }

    // This will either be null or the same as new_translation_table, which is about to become owned by
    // m_ref_translation_ptr.
    (void)new_translation_table_owner.release();

    m_ref_translation_ptr = new_translation_table;
}

void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
{
    auto _page_size = page_size();
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
        // some other thread already added a mapping
        // it MUST have been for the exact same address:
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
        return;
    }
    MapEntry* map_entry = &m_mappings[index];
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
    if (!map_entry->xover_mapping.is_attached()) {
        // Create a xover mapping
        auto file_offset = get_section_base(index) + offset;
        auto end_offset = file_offset + size;
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
        auto minimal_mapping_size = end_offset - mapping_file_offset;
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size, 0,
                                      m_write_observer);
        map_entry->xover_mapping = std::move(mapping);
    }
    txl.xover_mapping_base = offset & ~(_page_size - 1);
#if REALM_ENABLE_ENCRYPTION
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
#endif
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
}

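// Illustrative only: the power-of-two rounding used above. Masking a file
// offset with ~(page_size - 1) rounds it *down* to the start of the page that
// contains it, so the xover mapping begins on a page boundary while still
// covering the requested [offset, offset + size) range. A hypothetical
// 4096-byte page is assumed here; the trick only works because the page size
// is a power of two.
namespace {
constexpr size_t sketch_page_size = 4096;
static_assert((0x12345 & ~(sketch_page_size - 1)) == 0x12000, "rounds down to the containing page");
} // anonymous namespace
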
void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
{
    // Verify that each old ref translation pointer still points to a valid
    // thing that we haven't released yet.
#if REALM_DEBUG
    std::unordered_set<const char*> mappings;
    for (auto& m : m_old_mappings) {
        REALM_ASSERT(m.mapping.is_attached());
        mappings.insert(m.mapping.get_addr());
    }
    for (auto& m : m_mappings) {
        REALM_ASSERT(m.primary_mapping.is_attached());
        mappings.insert(m.primary_mapping.get_addr());
        if (m.xover_mapping.is_attached())
            mappings.insert(m.xover_mapping.get_addr());
    }
    for (auto& m : m_virtual_file_buffer) {
        mappings.insert(m.addr);
    }
    if (m_data)
        mappings.insert(m_data);
    for (auto& t : m_old_translations) {
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
                        youngest_live_version, t.replaced_at_version);
        if (nonempty_attachment()) {
            for (size_t i = 0; i < t.translation_count; ++i)
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
        }
    }
#else
    static_cast<void>(youngest_live_version);
#endif
}


void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    verify_old_translations(youngest_live_version);

    auto pred = [=](auto& oldie) {
        return oldie.replaced_at_version < oldest_live_version;
    };
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
                             m_old_translations.end());
    m_youngest_live_version = youngest_live_version;
    verify_old_translations(youngest_live_version);
}

void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
{
    m_youngest_live_version = currently_live_version;
}

const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
{
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();
    return m_free_read_only;
}


size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
{
    size_t end_of_block = start_pos + free_chunk_size;
    size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
        if (alloc_pos + request_size <= next_section_boundary) {
            return alloc_pos;
        }
        alloc_pos = next_section_boundary;
    }
    return 0;
}


1528
{
103,560✔
1529
    if (m_attach_mode == attach_SharedFile) {
103,560✔
1530
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
76,842✔
1531
        m_file.prealloc(new_file_size); // Throws
76,842✔
1532
        // resizing is done based on the logical file size. It is ok for the file
47,160✔
1533
        // to actually be bigger, but never smaller.
47,160✔
1534
        REALM_ASSERT(new_file_size <= static_cast<size_t>(m_file.get_size()));
76,842✔
1535

47,160✔
1536
        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
76,842✔
1537
        if (!disable_sync)
76,842✔
1538
            m_file.sync(); // Throws
561✔
1539
    }
76,842✔
1540
    else {
26,718✔
1541
        size_t current_size = 0;
26,718✔
1542
        for (auto& b : m_virtual_file_buffer) {
26,934✔
1543
            current_size += b.size;
26,934✔
1544
        }
26,934✔
1545
        if (new_file_size > current_size) {
26,718✔
1546
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
6✔
1547
        }
6✔
1548
        m_virtual_file_size = new_file_size;
26,718✔
1549
    }
26,718✔
1550
}
103,560✔
1551

1552
#ifdef REALM_DEBUG
void SlabAlloc::reserve_disk_space(size_t size)
{
    if (size != round_up_to_page_size(size))
        size = round_up_to_page_size(size);
    m_file.prealloc(size); // Throws

    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
    if (!disable_sync)
        m_file.sync(); // Throws
}
#endif

void SlabAlloc::verify() const
{
#ifdef REALM_DEBUG
    if (!m_slabs.empty()) {
        // Make sure that all free blocks are within a slab. This is done
        // implicitly by using for_all_free_entries()
        size_t first_possible_ref = m_baseline;
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
        for_all_free_entries([&](size_t ref, size_t size) {
            REALM_ASSERT(ref >= first_possible_ref);
            REALM_ASSERT(ref + size <= first_impossible_ref);
            first_possible_ref = ref;
        });
    }
#endif
}

#ifdef REALM_DEBUG

bool SlabAlloc::is_all_free() const
{
    // Verify that slabs contain only free space.
    // This is equivalent to each slab holding BetweenBlocks only at the ends.
    for (const auto& e : m_slabs) {
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
        REALM_ASSERT(first->block_before_size == 0);
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
        REALM_ASSERT(last->block_after_size == 0);
        if (first->block_after_size != last->block_before_size)
            return false;
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
        range -= sizeof(BetweenBlocks);
        // the size of the free area must match the distance between the two BetweenBlocks:
        if (range != first->block_after_size)
            return false;
    }
    return true;
}


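// Illustrative only: the boundary-tag layout that is_all_free() checks. Each
// slab brackets its space with BetweenBlocks records, and a slab that is all
// free therefore looks like this (not to scale):
//
//   [BetweenBlocks: block_before_size = 0, block_after_size = N]
//   [ .................. N bytes of free space ............... ]
//   [BetweenBlocks: block_before_size = N, block_after_size = 0]
//
// so the distance between the two records, minus sizeof(BetweenBlocks),
// must equal the free size N recorded on both sides.
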
// LCOV_EXCL_START
void SlabAlloc::print() const
{
    /* TODO
     *

    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;

    size_t free = 0;
    for (const auto& free_block : m_free_space) {
        free += free_block.size;
    }

    size_t allocated = allocated_for_slabs - free;
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";

    if (!m_slabs.empty()) {
        std::cout << "Slabs: ";
        ref_type first_ref = m_baseline;

        for (const auto& slab : m_slabs) {
            if (&slab != &m_slabs.front())
                std::cout << ", ";

            ref_type last_ref = slab.ref_end - 1;
            size_t size = slab.ref_end - first_ref;
            void* addr = slab.addr;
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
            first_ref = slab.ref_end;
        }
        std::cout << "\n";
    }

    if (!m_free_space.empty()) {
        std::cout << "FreeSpace: ";
        for (const auto& free_block : m_free_space) {
            if (&free_block != &m_free_space.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    if (!m_free_read_only.empty()) {
        std::cout << "FreeSpace (ro): ";
        for (const auto& free_block : m_free_read_only) {
            if (&free_block != &m_free_read_only.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    std::cout << std::flush;
    */
}
// LCOV_EXCL_STOP

#endif // REALM_DEBUG