
realm / realm-core / michael.wilkersonbarker_976

05 Mar 2024 06:32PM UTC coverage: 90.896% (-0.04%) from 90.936%

Pull Request #7416 (Evergreen): RCORE-1987 network::Service does not start waiting on timers if no other events are currently active
michael-wb: Added thread-safe comment to DeadlineTimer

93900 of 173116 branches covered (54.24%)
238313 of 262182 relevant lines covered (90.9%)
5950091.78 hits per line

Source file: /src/realm/alloc_slab.cpp — 92.93% covered
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <cinttypes>
#include <type_traits>
#include <exception>
#include <algorithm>
#include <memory>
#include <mutex>
#include <map>
#include <atomic>
#include <cstring>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/errno.hpp>
#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/terminate.hpp>
#include <realm/util/thread.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = 1UL << section_shift; // page_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


const SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};


void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}

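// Added note: a Slab is a chunk of heap memory that logically extends the
// attached file. `ref_end` is the first ref past the end of the slab and
// `addr` is its in-memory address; the global `total_slab_allocated` counter
// above tracks the combined size of all live slabs.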
inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr;
    m_ref_translation_ptr.store(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = nullptr;
#endif

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations, "
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined, "
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}

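// Added note: allocation entry point. All allocation happens in the slab
// area above the file baseline: first try an exact freelist match, then
// carve the request out of a larger free block, and only grow the slab area
// when neither works (see allocate_block() below).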
MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeListEntry)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}

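// Added note on the block bookkeeping used below: every block in a slab is
// bracketed by BetweenBlocks records, which store the size of the block
// before (`block_before_size`) and after (`block_after_size`) the record. A
// positive size means the neighbouring block is free, a negative size means
// it is allocated, and zero marks the edge of the slab. The helpers below
// use this to decide whether a block can be merged with its neighbours.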
SlabAlloc::FreeBlock* SlabAlloc::get_prev_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_before(entry);
    if (bb->block_before_size <= 0)
        return nullptr; // no prev block, or it is in use
    return block_before(bb);
}

SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_after(entry);
    if (bb->block_after_size <= 0)
        return nullptr; // no next block, or it is in use
    return block_after(bb);
}

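// Added note: the freelists live in m_block_map, which maps a block size to
// a circular, doubly linked list of free blocks of exactly that size.
// find() locates the smallest size class that can hold `size`;
// find_larger() skips forward to a class that can also absorb the
// bookkeeping overhead of splitting off a remainder block.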
SlabAlloc::FreeList SlabAlloc::find(int size)
{
    FreeList retval;
    retval.it = m_block_map.lower_bound(size);
    if (retval.it != m_block_map.end()) {
        retval.size = retval.it->first;
    }
    else {
        retval.size = 0;
    }
    return retval;
}

SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
{
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
        ++hint.it;
    if (hint.it == m_block_map.end())
        hint.size = 0; // indicate "not found"
    return hint;
}

SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
{
    FreeBlock* retval = list.it->second;
    FreeBlock* header = retval->next;
    if (header == retval)
        m_block_map.erase(list.it);
    else
        list.it->second = header;
    retval->unlink();
    return retval;
}

void SlabAlloc::FreeBlock::unlink()
{
    auto _next = next;
    auto _prev = prev;
    _next->prev = prev;
    _prev->next = next;
    clear_links();
}

void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    auto it = m_block_map.find(size);
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
    auto header = it->second;
    if (header == entry) {
        header = entry->next;
        if (header == entry)
            m_block_map.erase(it);
        else
            it->second = header;
    }
    entry->unlink();
}

void SlabAlloc::push_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    FreeBlock* header;
    auto it = m_block_map.find(size);
    if (it != m_block_map.end()) {
        header = it->second;
        it->second = entry;
        entry->next = header;
        entry->prev = header->prev;
        entry->prev->next = entry;
        entry->next->prev = entry;
    }
    else {
        header = nullptr;
        m_block_map[size] = entry;
        entry->next = entry->prev = entry;
    }
}

void SlabAlloc::mark_freed(FreeBlock* entry, int size)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
    auto alloc_size = -bb->block_after_size;
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
                    get_file_path_for_assertions());
    bb->block_after_size = alloc_size;
    bb = bb_after(entry);
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
    REALM_ASSERT(-bb->block_before_size == alloc_size);
    bb->block_before_size = alloc_size;
}

void SlabAlloc::mark_allocated(FreeBlock* entry)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
    auto bb2 = bb_after(entry);
    bb->block_after_size = 0 - bb->block_after_size;
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
    bb2->block_before_size = 0 - bb2->block_before_size;
}

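// Added note: best-fit allocation over the size-keyed freelists. Use an
// exact size class if one exists; otherwise split the smallest sufficiently
// large block and push the remainder back onto the freelists. Only when no
// block fits is the slab area grown.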
SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
{
    FreeList list = find(size);
    if (list.found_exact(size)) {
        return pop_freelist_entry(list);
    }
    // no exact matches.
    list = find_larger(list, size);
    FreeBlock* block;
    if (list.found_something()) {
        block = pop_freelist_entry(list);
    }
    else {
        block = grow_slab(size);
    }
    FreeBlock* remaining = break_block(block, size);
    if (remaining)
        push_freelist_entry(remaining);
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
    return block;
}

SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
{
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
    bb->block_before_size = 0;
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
    bb->block_after_size = block_size;
    auto entry = block_after(bb);
    entry->clear_links();
    entry->ref = ref_start + sizeof(BetweenBlocks);
    bb = bb_after(entry);
    bb->block_before_size = block_size;
    bb->block_after_size = 0;
    return entry;
}

void SlabAlloc::clear_freelists()
{
    m_block_map.clear();
}

void SlabAlloc::rebuild_freelists_from_slab()
{
    clear_freelists();
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
    for (const auto& e : m_slabs) {
        FreeBlock* entry = slab_to_entry(e, ref_start);
        push_freelist_entry(entry);
        ref_start = align_size_to_section_boundary(e.ref_end);
    }
}

SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
{
    int size = size_from_block(block);
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
        return nullptr;
    bb_after(block)->block_before_size = remaining_size;
    bb_before(block)->block_after_size = new_size;
    auto bb_between = bb_after(block);
    bb_between->block_before_size = new_size;
    bb_between->block_after_size = remaining_size;
    FreeBlock* remaining_block = block_after(bb_between);
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
    remaining_block->clear_links();
    block->clear_links();
    return remaining_block;
}

SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
{
    int size_first = size_from_block(first);
    int size_last = size_from_block(last);
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
    bb_before(first)->block_after_size = new_size;
    bb_after(last)->block_before_size = new_size;
    return first;
}

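// Added note, illustrating the growth policy described in the comment below:
// with a 128K minimum and "allocate as much as we already have", a steadily
// growing transaction sees slab sizes of roughly 128K, 128K, 256K, 512K, ...
// (doubling the total each step), capped at one full 64MB section.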
SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
{
    // Allocate new slab.
    // - Always allocate at least 128K. This is also the amount of
    //   memory that we allow the slab allocator to keep between
    //   transactions. Allowing it to keep a small amount between
    //   transactions makes very small transactions faster by avoiding
    //   repeated unmap/mmap system calls.
    // - When allocating, allocate as much as we already have, but
    // - Never allocate more than a full section (64MB). This policy
    //   leads to gradual allocation of larger and larger blocks until
    //   we reach allocation of entire sections.
    size += 2 * sizeof(BetweenBlocks);
    size_t new_size = minimal_alloc;
    while (new_size < uint64_t(size))
        new_size += minimal_alloc;
    size_t already_allocated = get_allocated_size();
    if (new_size < already_allocated)
        new_size = already_allocated;
    if (new_size > maximal_alloc)
        new_size = maximal_alloc;

    ref_type ref;
    if (m_slabs.empty()) {
        ref = m_baseline.load(std::memory_order_relaxed);
    }
    else {
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
        ref = curr_ref_end;
    }
    ref = align_size_to_section_boundary(ref);
    size_t ref_end = ref;
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
                                      util::to_string(new_size));
    }

    REALM_ASSERT(matches_section_boundary(ref));

    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    // Create new slab and add to list of slabs
    m_slabs.emplace_back(ref_end, new_size); // Throws
    const Slab& slab = m_slabs.back();
    extend_fast_mapping_with_slab(slab.addr);

    // build a single block from that entry
    return slab_to_entry(slab, ref);
}

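// Added note: freeing takes one of two paths. Space in the read-only (file)
// area is recorded in the m_free_read_only map so a later commit can reuse
// it, while space in the slab area is returned directly to the freelists,
// merging with adjacent free blocks where possible.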
void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}

void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}

size_t SlabAlloc::consolidate_free_read_only()
{
    CriticalSection cs(changes);
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    return m_free_read_only.size();
}


MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
{
    REALM_ASSERT_DEBUG(translate(ref) == addr);
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8

    // Possible future enhancement: check if we can extend current space instead
    // of unconditionally allocating new space. In that case, remember to
    // check whether m_free_space_state == free_state_Invalid. Also remember to
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.

    // Allocate new space
    MemRef new_mem = do_alloc(new_size); // Throws

    // Copy existing segment
    char* new_addr = new_mem.get_addr();
    realm::safe_copy_n(addr, old_size, new_addr);

    // Add old segment to freelist
    do_free(ref, addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out)) {
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
                  << " new_size: " << new_size << "\n";
    }
#endif // REALM_DEBUG

    return new_mem;
}


char* SlabAlloc::do_translate(ref_type) const noexcept
{
    REALM_ASSERT(false); // never come here
    return nullptr;
}


int SlabAlloc::get_committed_file_format_version() noexcept
{
    {
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
        if (m_mappings.size()) {
            // if we have mapped a file, m_mappings will have at least one mapping and
            // the first will be to the start of the file. Don't come here, if we're
            // just attaching a buffer. They don't have mappings.
            realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
        }
    }
    const Header& header = *reinterpret_cast<const Header*>(m_data);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    int file_format_version = int(header.m_file_format[slot_selector]);
    return file_format_version;
}

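// Added note: the file header holds two top-ref/file-format slots, and the
// lowest bit of m_flags selects the active one. A file in streaming form is
// recognized by slot 0 holding the marker 0xFFFFFFFFFFFFFFFF; its real top
// ref then lives in a StreamingFooter at the very end of the file.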
bool SlabAlloc::is_file_on_streaming_form(const Header& header)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
}

ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    const Header& header = reinterpret_cast<const Header&>(*buffer);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    if (is_file_on_streaming_form(header)) {
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
        return ref_type(footer.m_top_ref);
    }
    else {
        return to_ref(header.m_top_ref[slot_selector]);
    }
}

std::string SlabAlloc::get_file_path_for_assertions() const
{
    return m_file.get_path();
}

bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
{
    if (cfg.read_only) {
        // If the file is opened read-only, we cannot change it. This is not a problem,
        // because for a read-only file we assume that it will not change while we use it,
        // hence there will be no need to grow memory mappings.
        // This assumption obviously will not hold, if the file is shared by multiple
        // processes or threads with different opening modes.
        // Currently, there is no way to detect if this assumption is violated.
        return false;
    }
    size_t expected_size = size_t(-1);
    size_t size = static_cast<size_t>(m_file.get_size());

    // It is not safe to change the size of a file on streaming form, since the footer
    // must remain available and remain at the very end of the file.
    REALM_ASSERT(!is_file_on_streaming_form());

    // check if online compaction allows us to shrink the file:
    if (top_ref) {
        // Get the expected file size by looking up logical file size stored in top array
        constexpr size_t max_top_size = (Group::s_file_size_ndx + 1) * 8 + sizeof(Header);
        size_t top_page_base = top_ref & ~(page_size() - 1);
        size_t top_offset = top_ref - top_page_base;
        size_t map_size = std::min(max_top_size + top_offset, size - top_page_base);
        File::Map<char> map_top(m_file, top_page_base, File::access_ReadOnly, map_size, 0, m_write_observer);
        realm::util::encryption_read_barrier(map_top, top_offset, max_top_size);
        auto top_header = map_top.get_addr() + top_offset;
        auto top_data = NodeHeader::get_data_from_header(top_header);
        auto w = NodeHeader::get_width_from_header(top_header);
        auto logical_size = size_t(get_direct(top_data, w, Group::s_file_size_ndx)) >> 1;
        // make sure we're page aligned, so the code below doesn't first
        // truncate the file, then expand it again
        expected_size = round_up_to_page_size(logical_size);
    }

    // Check if we can shrink the file
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
        detach(true); // keep m_file open
        m_file.resize(expected_size);
        m_file.close();
        size = expected_size;
        return true;
    }

    // We can only safely mmap the file, if its size matches a page boundary. If not,
    // we must change the size to match before mmaping it.
    if (size != round_up_to_page_size(size)) {
        // The file size did not match a page boundary.
        // We must extend the file to a page boundary (unless already there)
        // The file must be extended to match in size prior to being mmapped,
        // as extending it after mmap has undefined behavior.
        if (cfg.session_initiator || !cfg.is_shared) {
            // We can only safely extend the file if we're the session initiator, or if
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
            // done to ensure well defined behavior for memory mappings. It does not matter,
            // that the free space management isn't informed
            size = round_up_to_page_size(size);
            detach(true); // keep m_file open
            m_file.prealloc(size);
            m_file.close();
            return true;
        }
        else {
            // Getting here, we have a file of a size that will not work, and without being
            // allowed to extend it. This should not be possible. But allowing a retry is
            // arguably better than giving up and crashing...
            throw Retry();
        }
    }
    return false;
}

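// Added note: attach this allocator to a Realm file. Open (or create) the
// file, read and validate the header (and, for files in streaming form, the
// footer), then establish the initial memory mappings. On any failure the
// allocator is left in the detached state.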
ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
{
    m_cfg = cfg;
    m_write_observer = write_observer;
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    // When 'read_only' is true, this function will throw InvalidDatabase if the
    // file exists already but is empty. This can happen if another process is
    // currently creating it. Note however, that it is only legal for multiple
    // processes to access a database file concurrently if it is done via a
    // DB, and in that case 'read_only' can never be true.
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
    // session_initiator can be set *only* if we're shared.
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
                    get_file_path_for_assertions());
    // clear_file can be set *only* if we're the first session.
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
                    get_file_path_for_assertions());

    using namespace realm::util;
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
    set_read_only(cfg.read_only);
    try {
        m_file.open(path.c_str(), access, create, 0); // Throws
    }
    catch (const FileAccessError& ex) {
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
        if (ex.code() == ErrorCodes::PermissionDenied) {
            msg += util::format(". Please use a path where your app has %1 permissions.",
                                cfg.read_only ? "read" : "read-write");
        }
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
    }
    File::CloseGuard fcg(m_file);
    auto physical_file_size = m_file.get_size();
    // Note that get_size() may (will) return a different size before and after
    // the call below to set_encryption_key.
    m_file.set_encryption_key(cfg.encryption_key);

    size_t size = 0;
    // The size of a database file must not exceed what can be encoded in
    // size_t.
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
        throw InvalidDatabase("Realm file too large", path);
    if (cfg.encryption_key && size == 0 && physical_file_size != 0) {
        // The opened file holds data, but is so small it cannot have
        // been created with encryption
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
    }
    if (size == 0 || cfg.clear_file) {
        if (REALM_UNLIKELY(cfg.read_only))
            throw InvalidDatabase("Read-only access to empty Realm file", path);

        size_t initial_size = page_size(); // m_initial_section_size;
        // exFAT does not allocate a unique id for the file until it is non-empty. It must be
        // valid at this point because File::get_unique_id() is used to distinguish
        // mappings_for_file in the encryption layer. So the prealloc() is required before
        // interacting with the encryption layer in File::write().
        // Pre-alloc initial space
        m_file.prealloc(initial_size); // Throws
        // seek() back to the start of the file in preparation for writing the header
        // This sequence of File operations is protected from races by
        // DB::m_controlmutex, so we know we are the only ones operating on the file
        m_file.seek(0);
        const char* data = reinterpret_cast<const char*>(&empty_file_header);
        m_file.write(data, sizeof empty_file_header); // Throws

        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws

        size = initial_size;
    }
    ref_type top_ref;
    note_reader_start(this);
    util::ScopeExit reader_end_guard([this]() noexcept {
        note_reader_end(this);
    });

    try {
        // we'll read header and (potentially) footer
        File::Map<char> map_header(m_file, File::access_ReadOnly, sizeof(Header), 0, m_write_observer);
        realm::util::encryption_read_barrier(map_header, 0, sizeof(Header));
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());

        File::Map<char> map_footer;
        const StreamingFooter* footer = nullptr;
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
            size_t footer_ref = size - sizeof(StreamingFooter);
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
            size_t footer_offset = footer_ref - footer_page_base;
            map_footer = File::Map<char>(m_file, footer_page_base, File::access_ReadOnly,
                                         sizeof(StreamingFooter) + footer_offset, 0, m_write_observer);
            realm::util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
        }

        top_ref = validate_header(header, footer, size, path, cfg.encryption_key != nullptr); // Throws
        m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
        m_data = map_header.get_addr(); // <-- needed below

        if (cfg.session_initiator && is_file_on_streaming_form(*header)) {
            // Don't compare file format version fields as they are allowed to differ.
            // Also don't compare reserved fields.
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0],
                            get_file_path_for_assertions());
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], get_file_path_for_assertions());
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie,
                            get_file_path_for_assertions());
        }
    }
    catch (const InvalidDatabase&) {
        throw;
    }
    catch (const DecryptionFailed& e) {
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
    }
    catch (const std::exception& e) {
        throw InvalidDatabase(e.what(), path);
    }
    catch (...) {
        throw InvalidDatabase("unknown error", path);
    }
    // m_data not valid at this point!
    m_baseline = 0;
    // make sure that any call to begin_read cause any slab to be placed in free
    // lists correctly
    m_free_space_state = free_space_Invalid;

    // Ensure clean up, if we need to back out:
    DetachGuard dg(*this);

    reset_free_space_tracking();
    update_reader_view(size);
    REALM_ASSERT(m_mappings.size());
    m_data = m_mappings[0].primary_mapping.get_addr();
    realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    dg.release();  // Do not detach
    fcg.release(); // Do not close
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = util::get_file_info_for_file(m_file);
#endif
    return top_ref;
}

void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
{
    auto header = reinterpret_cast<const Header*>(m_data);
    if (!is_file_on_streaming_form(*header))
        return;

    // Make sure the database is not on streaming format. If we did not do this,
    // a later commit would have to do it. That would require coordination with
    // anybody concurrently joining the session, so it seems easier to do it at
    // session initialization, even if it means writing the database during open.
    {
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
        Header& writable_header = *writable_map.get_addr();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_top_ref[1] = top_ref;
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_flags |= flags_SelectBit;
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();

        realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    }
}

void SlabAlloc::note_reader_start(const void* reader_id)
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_start(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

void SlabAlloc::note_reader_end(const void* reader_id) noexcept
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_end(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());

    // Verify the data structures
    std::string path;                                     // No path
    ref_type top_ref = validate_header(data, size, path); // Throws

    m_data = data;
    size = align_size_to_section_boundary(size);
    m_baseline = size;
    m_attach_mode = attach_UsersBuffer;

    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    return top_ref;
}

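// Added note: in-memory Realms (attach_Heap) are backed by heap buffers that
// emulate the file section by section; translate_memory_pos() below resolves
// a ref to an address within the backing buffer that contains it.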
void SlabAlloc::init_in_memory_buffer()
1001
{
25,422✔
1002
    m_attach_mode = attach_Heap;
25,422✔
1003
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
25,422✔
1004
    m_data = m_virtual_file_buffer.back().addr;
25,422✔
1005
    m_virtual_file_size = sizeof(empty_file_header);
25,422✔
1006
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);
25,422✔
1007

12,711✔
1008
    m_baseline = m_virtual_file_size;
25,422✔
1009
    m_translation_table_size = 1;
25,422✔
1010
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
25,422✔
1011
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
25,422✔
1012
    m_ref_translation_ptr = ref_translation_ptr;
25,422✔
1013
}
25,422✔
1014

1015
char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
1016
{
5,346,537✔
1017
    auto idx = get_section_index(ref);
5,346,537✔
1018
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
5,346,537✔
1019
    auto& buf = m_virtual_file_buffer[idx];
5,346,537✔
1020
    return buf.addr + (ref - buf.start_ref);
5,346,537✔
1021
}
5,346,537✔
1022

1023
void SlabAlloc::attach_empty()
1024
{
4,704✔
1025
    // ExceptionSafety: If this function throws, it must leave the allocator in
2,352✔
1026
    // the detached state.
2,352✔
1027

2,352✔
1028
    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
4,704✔
1029

2,352✔
1030
    m_attach_mode = attach_OwnedBuffer;
4,704✔
1031
    m_data = nullptr; // Empty buffer
4,704✔
1032

2,352✔
1033
    // Below this point (assignment to `m_attach_mode`), nothing must throw.
2,352✔
1034

2,352✔
1035
    // No ref must ever be less than the header size, so we will use that as the
2,352✔
1036
    // baseline here.
2,352✔
1037
    size_t size = align_size_to_section_boundary(sizeof(Header));
4,704✔
1038
    m_baseline = size;
4,704✔
1039
    m_translation_table_size = 1;
4,704✔
1040
    m_ref_translation_ptr = new RefTranslation[1];
4,704✔
1041
}
4,704✔
1042

1043
void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
1044
{
30✔
1045
    char buf[256];
30✔
1046
    snprintf(buf, sizeof(buf),
30✔
1047
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
30✔
1048
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
30✔
1049
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
30✔
1050
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
30✔
1051
             header.m_flags);
30✔
1052
    msg += buf;
30✔
1053
    throw InvalidDatabase(msg, path);
30✔
1054
}
30✔
1055

1056
// Note: This relies on proper mappings having been established by the caller
1057
// for both the header and the streaming footer
1058
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
1059
{
90✔
1060
    auto header = reinterpret_cast<const Header*>(data);
90✔
1061
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
90✔
1062
    return validate_header(header, footer, size, path);
90✔
1063
}
90✔
1064

1065
ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
1066
                                    const std::string& path, bool is_encrypted)
1067
{
87,201✔
1068
    // Verify that size is sane and 8-byte aligned
42,699✔
1069
    if (REALM_UNLIKELY(size < sizeof(Header)))
87,201✔
1070
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
42,726✔
1071
                              path);
54✔
1072
    if (REALM_UNLIKELY(size % 8 != 0))
87,147✔
1073
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);
42,672✔
1074

42,672✔
1075
    // First four bytes of info block is file format id
42,672✔
1076
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
87,147✔
1077
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
42,687✔
1078
        if (is_encrypted) {
30✔
1079
            // Encrypted files check the hmac on read, so there's a lot less
1080
            // which could go wrong and have us still reach this point
1081
            throw_header_exception("header has invalid mnemonic. The file does not appear to be Realm file.", *header,
×
1082
                                   path);
×
1083
        }
×
1084
        else {
30✔
1085
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
30✔
1086
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
30✔
1087
                                   *header, path);
30✔
1088
        }
30✔
1089
    }
30✔
1090

42,672✔
1091
    // Last bit in info block indicates which top_ref block is valid
42,672✔
1092
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
69,312✔
1093

42,672✔
1094
    // Top-ref must always point within buffer
42,672✔
1095
    auto top_ref = header->m_top_ref[slot_selector];
87,147✔
1096
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
87,147✔
1097
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
666✔
1098
            throw InvalidDatabase(
×
1099
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
×
1100
                path);
×
1101
        }
×
1102
        REALM_ASSERT(footer);
666✔
1103
        top_ref = footer->m_top_ref;
666✔
1104
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
666✔
1105
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
×
1106
                                               "The file is probably truncated.",
×
1107
                                               footer->m_magic_cookie),
×
1108
                                  path);
×
1109
        }
×
1110
    }
87,147✔
1111
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
87,147✔
1112
        throw_header_exception("top ref is not aligned", *header, path);
×
1113
    }
×
1114
    if (REALM_UNLIKELY(top_ref >= size)) {
87,147✔
1115
        throw_header_exception(
×
1116
            util::format(
×
1117
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
×
1118
                top_ref),
×
1119
            *header, path);
×
1120
    }
×
1121
    return ref_type(top_ref);
87,147✔
1122
}
87,147✔
1123

1124

1125
size_t SlabAlloc::get_total_size() const noexcept
1126
{
1,073,934✔
1127
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
1,073,895✔
1128
}
1,073,934✔
1129

1130

1131
void SlabAlloc::reset_free_space_tracking()
1132
{
717,432✔
1133
    CriticalSection cs(changes);
717,432✔
1134
    if (is_free_space_clean())
717,432✔
1135
        return;
8,979✔
1136

358,437✔
1137
    // Free all scratch space (done after all data has
358,437✔
1138
    // been commited to persistent space)
358,437✔
1139
    m_free_read_only.clear();
708,453✔
1140

358,437✔
1141
    // release slabs.. keep the initial allocation if it's a minimal allocation,
358,437✔
1142
    // otherwise release it as well. This saves map/unmap for small transactions.
358,437✔
1143
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
717,465✔
1144
        auto& last_slab = m_slabs.back();
9,012✔
1145
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
9,012✔
1146
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
9,012✔
1147
        --m_translation_table_size;
9,012✔
1148
        m_slabs.pop_back();
9,012✔
1149
    }
9,012✔
1150
    rebuild_freelists_from_slab();
708,453✔
1151
    m_free_space_state = free_space_Clean;
708,453✔
1152
    m_commit_size = 0;
708,453✔
1153
}
708,453✔
1154

1155
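// Debug helper: in REALM_DEBUG builds, a `true` input is flipped to `false`
// roughly half the time, so that rarely-taken fallback paths get exercised by
// the test suite. In release builds the input is returned unchanged.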
inline bool randomly_false_in_debug(bool x)
1156
{
×
1157
#ifdef REALM_DEBUG
×
1158
    if (x)
×
1159
        return (std::rand() & 1);
×
1160
#endif
×
1161
    return x;
×
1162
}
×
1163

1164

1165
/*
1166
  Memory mapping
1167

1168
  To make ref->ptr translation fast while also avoiding having to memory-map the entire file
1169
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
1170
  essential to map the file in even-sized sections.
1171

1172
  These sections must be large enough to hold one or more of the largest arrays, which can be up
1173
  to 16MB. You can only mmap space which has actually been allocated in the file. If you mmap a range
1174
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
1175
  We don't want to extend the file in increments as large as the chunk size.
1176

1177
  As the file grows, we grow the mapping by creating a new larger one, which replaces the
1178
  old one in the mapping table. However, we must keep the old mapping open, because older
1179
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
1180
  and only cleaned out once we know that no transaction can refer to them anymore.
1181

1182
  Interaction with encryption
1183

1184
  When encryption is enabled, the memory mapping is to temporary memory, not the file.
1185
  The binding to the file is done by software. This allows us to "cheat" and allocate
1186
  entire sections. With encryption, it doesn't matter if the mapped memory logically
1187
  extends beyond the end of file, because it will not be accessed.
1188

1189
  Growing/Changing the mapping table.
1190

1191
  There are two mapping tables:
1192

1193
  * m_mappings: This is the "source of truth" about what the current mapping is.
1194
    It is only accessed under lock.
1195
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
1196
    mostly lock-free fashion from the translate function. Because of the lock-free operation, this
1197
    table can only be extended. Only selected members in each entry can be changed.
1198
    See RefTranslation in alloc.hpp for more details.
1199
    The fast mapping also maps the slab area used for allocations - as mappings are added,
1200
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
1201
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
1202
    to memory allocation and release, which is already serialized (since write transactions are
1203
    single threaded).
1204

1205
  When m_mappings is changed due to an extend operation changing a mapping, or when
1206
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:
1207

1208
  * A new fast mapping table is created. The old one is not modified.
1209
  * The old one is held in a waiting area until it is no longer relevant because no
1210
    live transaction can refer to it any more.
1211
 */
1212
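// Illustrative sketch (not part of the original source): the read-copy-update
// scheme described above, reduced to its essentials. All names here are
// hypothetical; in the real code the table is a RefTranslation array published
// through the atomic m_ref_translation_ptr, and retired tables are parked in
// m_old_translations until purge_old_mappings() can prove no reader needs them.
#if 0
#include <atomic>
#include <memory>
#include <vector>

struct Table {
    // stand-in for a RefTranslation[] fast mapping table
};

std::atomic<Table*> g_table{nullptr}; // readers access this without locking

// Reader side: one acquire-load yields a table that remains valid for the
// whole transaction, because replaced tables are kept alive, not freed.
Table* reader_snapshot()
{
    return g_table.load(std::memory_order_acquire);
}

// Writer side (serialized by a mutex in the real code): build the larger
// table, park the old one, then publish the new one with a release store so
// readers see fully initialized entries.
void writer_publish(std::unique_ptr<Table> bigger,
                    std::vector<std::unique_ptr<Table>>& parked)
{
    parked.emplace_back(g_table.load(std::memory_order_relaxed)); // keep old alive
    g_table.store(bigger.release(), std::memory_order_release);
}
#endif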
void SlabAlloc::update_reader_view(size_t file_size)
1213
{
2,354,076✔
1214
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
2,354,076✔
1215
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
2,354,076✔
1216
    if (file_size <= old_baseline) {
2,354,076✔
1217
        schedule_refresh_of_outdated_encrypted_pages();
2,223,945✔
1218
        return;
2,223,945✔
1219
    }
2,223,945✔
1220

69,294✔
1221
    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
130,131✔
1222
    bool replace_last_mapping = false;
130,131✔
1223
    size_t old_num_mappings = get_section_index(old_slab_base);
130,131✔
1224

69,294✔
1225
    if (!is_in_memory()) {
130,131✔
1226
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
105,582✔
1227
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
105,582✔
1228
                        get_file_path_for_assertions());
105,582✔
1229
        REALM_ASSERT_DEBUG(is_free_space_clean());
105,582✔
1230

57,429✔
1231
        // Create the new mappings we need to cover the new size. We don't mutate
57,429✔
1232
        // any of the member variables until we've successfully created all of the
57,429✔
1233
        // mappings so that we leave things in a consistent state if one of them
57,429✔
1234
        // hits an allocation failure.
57,429✔
1235

57,429✔
1236
        std::vector<MapEntry> new_mappings;
105,582✔
1237
        REALM_ASSERT(m_mappings.size() == old_num_mappings);
105,582✔
1238

57,429✔
1239
        {
105,582✔
1240
            // If the old slab base was greater than the old baseline then the final
57,429✔
1241
            // mapping was a partial section and we need to replace it with a larger
57,429✔
1242
            // mapping.
57,429✔
1243
            if (old_baseline < old_slab_base) {
105,582✔
1244
                // old_slab_base should be 0 if we had no mappings previously
14,793✔
1245
                REALM_ASSERT(old_num_mappings > 0);
18,507✔
1246
                // try to extend the old mapping in-place instead of replacing it.
14,793✔
1247
                MapEntry& cur_entry = m_mappings.back();
18,507✔
1248
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
18,507✔
1249
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
18,507✔
1250
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
18,507✔
1251
                    replace_last_mapping = true;
72✔
1252
                    --old_num_mappings;
72✔
1253
                }
72✔
1254
            }
18,507✔
1255

57,429✔
1256
            // Create new mappings covering from the end of the last complete
57,429✔
1257
            // section to the end of the new file size.
57,429✔
1258
            const auto new_slab_base = align_size_to_section_boundary(file_size);
105,582✔
1259
            const size_t num_mappings = get_section_index(new_slab_base);
105,582✔
1260
            new_mappings.reserve(num_mappings - old_num_mappings);
105,582✔
1261
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
192,861✔
1262
                const size_t section_start_offset = get_section_base(k);
87,291✔
1263
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
87,291✔
1264
                if (section_size == (1 << section_shift)) {
87,291✔
1265
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
66✔
1266
                                                                  section_size, 0, m_write_observer)});
66✔
1267
                }
66✔
1268
                else {
87,225✔
1269
                    new_mappings.push_back({util::File::Map<char>()});
87,225✔
1270
                    auto& mapping = new_mappings.back().primary_mapping;
87,225✔
1271
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
87,225✔
1272
                                                        section_start_offset, m_write_observer);
87,225✔
1273
                    if (reserved) {
87,225✔
1274
                        // if reservation is supported, first attempt at extending must succeed
42,711✔
1275
                        if (!mapping.try_extend_to(section_size))
87,225✔
1276
                            throw std::bad_alloc();
12✔
1277
                    }
×
1278
                    else {
×
1279
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size, 0,
×
1280
                                                                section_start_offset, m_write_observer);
×
1281
                    }
×
1282
                }
87,225✔
1283
            }
87,291✔
1284
        }
105,582✔
1285

57,429✔
1286
        // Now that we've successfully created our mappings, update our member
57,429✔
1287
        // variables (and assume that resizing a simple vector won't produce memory
57,429✔
1288
        // allocation failures, unlike 64 MB mmaps).
57,429✔
1289
        if (replace_last_mapping) {
105,576✔
1290
            MapEntry& cur_entry = m_mappings.back();
66✔
1291
            // We should not have a xover mapping here because that would mean
33✔
1292
            // that there was already something mapped after the last section
33✔
1293
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
66✔
1294
            // save the old mapping/keep it open
33✔
1295
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
66✔
1296
            m_mappings.pop_back();
66✔
1297
            m_mapping_version++;
66✔
1298
        }
66✔
1299

57,423✔
1300
        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
105,570✔
1301
    }
105,570✔
1302

69,294✔
1303
    m_baseline.store(file_size, std::memory_order_relaxed);
130,125✔
1304

69,288✔
1305
    const size_t ref_start = align_size_to_section_boundary(file_size);
130,119✔
1306
    const size_t ref_displacement = ref_start - old_slab_base;
130,119✔
1307
    if (ref_displacement > 0) {
130,119✔
1308
        // Rebase slabs as m_baseline is now bigger than old_slab_base
42,678✔
1309
        for (auto& e : m_slabs) {
42,714✔
1310
            e.ref_end += ref_displacement;
72✔
1311
        }
72✔
1312
    }
87,159✔
1313

69,288✔
1314
    rebuild_freelists_from_slab();
130,119✔
1315

69,288✔
1316
    // Build the fast path mapping
69,288✔
1317

69,288✔
1318
    // The fast path mapping is an array which is used from multiple threads
69,288✔
1319
    // without locking - see translate().
69,288✔
1320

69,288✔
1321
    // Addition of a new mapping may require a completely new fast mapping table.
69,288✔
1322
    //
69,288✔
1323
    // Being used in a multithreaded scenario, the old mappings must be retained open,
69,288✔
1324
    // until the realm version for which they were established has been closed/detached.
69,288✔
1325
    //
69,288✔
1326
    // This assumes that only write transactions call do_alloc() or do_free() or need to
69,288✔
1327
    // translate refs in the slab area, and that all these uses are serialized, whether
69,288✔
1328
    // that is achieved by being single threaded, interlocked or run from a sequential
69,288✔
1329
    // scheduling queue.
69,288✔
1330
    //
69,288✔
1331
    rebuild_translations(replace_last_mapping, old_num_mappings);
130,119✔
1332

69,288✔
1333
    schedule_refresh_of_outdated_encrypted_pages();
130,119✔
1334
}
130,119✔
1335

1336

1337
void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
1338
{
2,355,150✔
1339
#if REALM_ENABLE_ENCRYPTION
2,355,150✔
1340
    // callers must already hold m_mapping_mutex
1,378,578✔
1341
    for (auto& e : m_mappings) {
2,216,349✔
1342
        if (auto m = e.primary_mapping.get_encrypted_mapping()) {
2,062,146✔
1343
            encryption_mark_pages_for_IV_check(m);
1,434✔
1344
        }
1,434✔
1345
        if (auto m = e.xover_mapping.get_encrypted_mapping()) {
2,062,146✔
1346
            encryption_mark_pages_for_IV_check(m);
×
1347
        }
×
1348
    }
2,062,146✔
1349
    // unsafe to do outside writing thread: verify();
1,378,578✔
1350
#endif // REALM_ENABLE_ENCRYPTION
2,355,150✔
1351
}
2,355,150✔
1352

1353
size_t SlabAlloc::get_allocated_size() const noexcept
1354
{
90,081✔
1355
    size_t sz = 0;
90,081✔
1356
    for (const auto& s : m_slabs)
90,081✔
1357
        sz += s.size;
16,740✔
1358
    return sz;
90,081✔
1359
}
90,081✔
1360

1361
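// Grow the fast mapping table by one slab entry using copy-on-write: build a
// one-slot-larger copy of the table, park the old table in m_old_translations
// (concurrent readers may still be traversing it), then publish the copy via
// m_ref_translation_ptr.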
void SlabAlloc::extend_fast_mapping_with_slab(char* address)
1362
{
90,075✔
1363
    ++m_translation_table_size;
90,075✔
1364
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
90,075✔
1365
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
202,446✔
1366
        new_fast_mapping[i] = m_ref_translation_ptr[i];
112,371✔
1367
    }
112,371✔
1368
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
90,075✔
1369
                                    m_ref_translation_ptr.load());
90,075✔
1370
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
90,075✔
1371
    // Memory ranges in the slab area (working memory) can never have arrays that straddle a boundary,
44,331✔
1372
    // so optimize by clamping the lowest possible xover offset to the end of the section.
44,331✔
1373
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
90,075✔
1374
    m_ref_translation_ptr = new_fast_mapping.release();
90,075✔
1375
}
90,075✔
1376

1377
void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
1378
{
131,346✔
1379
    size_t free_space_size = m_slabs.size();
131,346✔
1380
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
118,644✔
1381
    if (m_translation_table_size < num_mappings + free_space_size) {
131,346✔
1382
        requires_new_translation = true;
87,159✔
1383
    }
87,159✔
1384
    RefTranslation* new_translation_table = m_ref_translation_ptr;
131,346✔
1385
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
131,346✔
1386
    if (requires_new_translation) {
131,346✔
1387
        // we need a new translation table, but must preserve old, as translations using it
42,711✔
1388
        // may be in progress concurrently
42,711✔
1389
        if (m_translation_table_size)
87,225✔
1390
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
174✔
1391
                                            m_ref_translation_ptr.load());
174✔
1392
        m_translation_table_size = num_mappings + free_space_size;
87,225✔
1393
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
87,225✔
1394
        new_translation_table = new_translation_table_owner.get();
87,225✔
1395
        old_num_sections = 0;
87,225✔
1396
    }
87,225✔
1397
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
218,745✔
1398
        if (is_in_memory()) {
87,399✔
1399
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
12✔
1400
        }
12✔
1401
        else {
87,387✔
1402
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
87,387✔
1403
#if REALM_ENABLE_ENCRYPTION
87,387✔
1404
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
87,387✔
1405
#endif
87,387✔
1406
        }
87,387✔
1407
        REALM_ASSERT(new_translation_table[i].mapping_addr);
87,399✔
1408
        // We don't copy over data for the cross over mapping. If the mapping is needed,
42,798✔
1409
        // copying will happen on demand (in get_or_add_xover_mapping).
42,798✔
1410
        // Note: that may never be needed, because if the array that needed the original cross over
42,798✔
1411
        // mapping is freed, any new array allocated at the same position will NOT need a cross
42,798✔
1412
        // over mapping, but just use the primary mapping.
42,798✔
1413
    }
87,399✔
1414
    for (size_t k = 0; k < free_space_size; ++k) {
170,847✔
1415
        char* base = m_slabs[k].addr;
39,501✔
1416
        REALM_ASSERT(base);
39,501✔
1417
        new_translation_table[num_mappings + k].mapping_addr = base;
39,501✔
1418
    }
39,501✔
1419

70,500✔
1420
    // This will either be null or the same as new_translation_table, which is about to become owned by
70,500✔
1421
    // m_ref_translation_ptr.
70,500✔
1422
    (void)new_translation_table_owner.release();
131,346✔
1423

70,500✔
1424
    m_ref_translation_ptr = new_translation_table;
131,346✔
1425
}
131,346✔
1426

1427
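// Establish (or reuse) a secondary "cross-over" mapping for an array that
// straddles a section boundary. The mapping must start on a page boundary,
// so the file offset is rounded down to the enclosing page.
//
// Worked example (illustrative, assuming a 4096-byte page size): for
// file_offset = 10'000 and size = 500, mapping_file_offset = 8'192
// (10'000 rounded down to a page boundary) and minimal_mapping_size =
// 10'500 - 8'192 = 2'308 bytes.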
void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
1428
{
6✔
1429
    auto _page_size = page_size();
6✔
1430
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
6✔
1431
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
6✔
1432
        // some other thread already added a mapping
1433
        // it MUST have been for the exact same address:
1434
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
×
1435
        return;
×
1436
    }
×
1437
    MapEntry* map_entry = &m_mappings[index];
6✔
1438
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
6✔
1439
    if (!map_entry->xover_mapping.is_attached()) {
6✔
1440
        // Create a xover mapping
3✔
1441
        auto file_offset = get_section_base(index) + offset;
6✔
1442
        auto end_offset = file_offset + size;
6✔
1443
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
6✔
1444
        auto minimal_mapping_size = end_offset - mapping_file_offset;
6✔
1445
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size, 0,
6✔
1446
                                      m_write_observer);
6✔
1447
        map_entry->xover_mapping = std::move(mapping);
6✔
1448
    }
6✔
1449
    txl.xover_mapping_base = offset & ~(_page_size - 1);
6✔
1450
#if REALM_ENABLE_ENCRYPTION
6✔
1451
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
6✔
1452
#endif
6✔
1453
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
6✔
1454
}
6✔
1455

1456
void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
1457
{
1,444,089✔
1458
    // Verify that each old ref translation pointer still points to a
730,311✔
1459
    // mapping that we haven't released yet.
730,311✔
1460
#if REALM_DEBUG
1,444,089✔
1461
    std::unordered_set<const char*> mappings;
1,444,089✔
1462
    for (auto& m : m_old_mappings) {
730,392✔
1463
        REALM_ASSERT(m.mapping.is_attached());
162✔
1464
        mappings.insert(m.mapping.get_addr());
162✔
1465
    }
162✔
1466
    for (auto& m : m_mappings) {
1,334,457✔
1467
        REALM_ASSERT(m.primary_mapping.is_attached());
1,224,825✔
1468
        mappings.insert(m.primary_mapping.get_addr());
1,224,825✔
1469
        if (m.xover_mapping.is_attached())
1,224,825✔
1470
            mappings.insert(m.xover_mapping.get_addr());
12✔
1471
    }
1,224,825✔
1472
    for (auto& m : m_virtual_file_buffer) {
840,381✔
1473
        mappings.insert(m.addr);
220,140✔
1474
    }
220,140✔
1475
    if (m_data)
1,444,089✔
1476
        mappings.insert(m_data);
1,433,775✔
1477
    for (auto& t : m_old_translations) {
866,910✔
1478
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
268,875✔
1479
                        youngest_live_version, t.replaced_at_version);
268,875✔
1480
        if (nonempty_attachment()) {
268,875✔
1481
            for (size_t i = 0; i < t.translation_count; ++i)
544,416✔
1482
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
263,832✔
1483
        }
263,832✔
1484
    }
268,875✔
1485
#else
1486
    static_cast<void>(youngest_live_version);
1487
#endif
1488
}
1,444,089✔
1489

1490

1491
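// Drop parked mappings and translation tables once no live transaction can
// still be using them, i.e. once their replaced_at_version is strictly older
// than the oldest version still in use.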
void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
1492
{
722,070✔
1493
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
722,070✔
1494
    verify_old_translations(youngest_live_version);
722,070✔
1495

365,157✔
1496
    auto pred = [=](auto& oldie) {
456,426✔
1497
        return oldie.replaced_at_version < oldest_live_version;
179,673✔
1498
    };
179,673✔
1499
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
722,070✔
1500
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
722,070✔
1501
                             m_old_translations.end());
722,070✔
1502
    m_youngest_live_version = youngest_live_version;
722,070✔
1503
    verify_old_translations(youngest_live_version);
722,070✔
1504
}
722,070✔
1505

1506
void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
1507
{
703,554✔
1508
    m_youngest_live_version = currently_live_version;
703,554✔
1509
}
703,554✔
1510

1511
const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
1512
{
604,380✔
1513
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
604,380✔
1514
        throw InvalidFreeSpace();
307,245✔
1515
    return m_free_read_only;
604,380✔
1516
}
604,380✔
1517

1518

1519
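// Search the free chunk [start_pos, start_pos + free_chunk_size) for a
// position where request_size bytes fit without straddling a section
// boundary; a candidate that would cross a boundary is bumped to the next
// boundary. Returns 0 when nothing fits (position 0 holds the file header,
// so it can double as a "not found" marker).
//
// Worked example (illustrative, assuming 64 MB sections = 67'108'864 bytes):
// start_pos = 67'100'000, free_chunk_size = 100'000, request_size = 16'000.
// The request would cross the boundary at 67'108'864, so alloc_pos is bumped
// to the boundary; 67'108'864 + 16'000 still lies within the chunk, so
// 67'108'864 is returned.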
size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
1520
{
19,897,344✔
1521
    size_t end_of_block = start_pos + free_chunk_size;
19,897,344✔
1522
    size_t alloc_pos = start_pos;
19,897,344✔
1523
    while (alloc_pos + request_size <= end_of_block) {
19,904,721✔
1524
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
19,898,121✔
1525
        if (alloc_pos + request_size <= next_section_boundary) {
19,898,121✔
1526
            return alloc_pos;
19,890,744✔
1527
        }
19,890,744✔
1528
        alloc_pos = next_section_boundary;
7,377✔
1529
    }
7,377✔
1530
    return 0;
9,983,688✔
1531
}
19,897,344✔
1532

1533

1534
void SlabAlloc::resize_file(size_t new_file_size)
1535
{
83,919✔
1536
    if (m_attach_mode == attach_SharedFile) {
83,919✔
1537
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
57,069✔
1538
        m_file.prealloc(new_file_size); // Throws
57,069✔
1539
        // resizing is done based on the logical file size. It is ok for the file
34,074✔
1540
        // to actually be bigger, but never smaller.
34,074✔
1541
        REALM_ASSERT(new_file_size <= static_cast<size_t>(m_file.get_size()));
57,069✔
1542

34,074✔
1543
        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
57,069✔
1544
        if (!disable_sync)
57,069✔
1545
            m_file.sync(); // Throws
558✔
1546
    }
57,069✔
1547
    else {
26,850✔
1548
        size_t current_size = 0;
26,850✔
1549
        for (auto& b : m_virtual_file_buffer) {
27,066✔
1550
            current_size += b.size;
27,066✔
1551
        }
27,066✔
1552
        if (new_file_size > current_size) {
26,850✔
1553
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
6✔
1554
        }
6✔
1555
        m_virtual_file_size = new_file_size;
26,850✔
1556
    }
26,850✔
1557
}
83,919✔
1558

1559
#ifdef REALM_DEBUG
1560
void SlabAlloc::reserve_disk_space(size_t size)
1561
{
36✔
1562
    if (size != round_up_to_page_size(size))
36✔
1563
        size = round_up_to_page_size(size);
30✔
1564
    m_file.prealloc(size); // Throws
36✔
1565

18✔
1566
    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
36!
1567
    if (!disable_sync)
36✔
1568
        m_file.sync(); // Throws
×
1569
}
36✔
1570
#endif
1571

1572
void SlabAlloc::verify() const
1573
{
124,263✔
1574
#ifdef REALM_DEBUG
124,263✔
1575
    if (!m_slabs.empty()) {
124,263✔
1576
        // Make sure that all free blocks are within a slab. This is done
47,499✔
1577
        // implicitly by using for_all_free_entries()
47,499✔
1578
        size_t first_possible_ref = m_baseline;
94,980✔
1579
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
94,980✔
1580
        for_all_free_entries([&](size_t ref, size_t size) {
545,559✔
1581
            REALM_ASSERT(ref >= first_possible_ref);
545,559✔
1582
            REALM_ASSERT(ref + size <= first_impossible_ref);
545,559✔
1583
            first_possible_ref = ref;
545,559✔
1584
        });
545,559✔
1585
    }
94,980✔
1586
#endif
124,263✔
1587
}
124,263✔
1588

1589
#ifdef REALM_DEBUG
1590

1591
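// A fully free slab is laid out as
//
//   [BetweenBlocks][ ...free space... ][BetweenBlocks]
//    block_before_size == 0             block_after_size == 0
//
// so the first sentinel's block_after_size must equal the last sentinel's
// block_before_size, and both must equal the gap between the two sentinels.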
bool SlabAlloc::is_all_free() const
1592
{
702✔
1593
    // verify that slabs contain only free space.
351✔
1594
    // this is equivalent to each slab holding BetweenBlocks only at the ends.
351✔
1595
    for (const auto& e : m_slabs) {
672✔
1596
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
642✔
1597
        REALM_ASSERT(first->block_before_size == 0);
642✔
1598
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
642✔
1599
        REALM_ASSERT(last->block_after_size == 0);
642✔
1600
        if (first->block_after_size != last->block_before_size)
642✔
1601
            return false;
×
1602
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
642✔
1603
        range -= sizeof(BetweenBlocks);
642✔
1604
        // the size of the free area must match the distance between the two BetweenBlocks:
321✔
1605
        if (range != first->block_after_size)
642✔
1606
            return false;
×
1607
    }
642✔
1608
    return true;
702✔
1609
}
702✔
1610

1611

1612
// LCOV_EXCL_START
1613
void SlabAlloc::print() const
1614
{
×
1615
    /* TODO
1616
     *
1617

1618
    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;
1619

1620
    size_t free = 0;
1621
    for (const auto& free_block : m_free_space) {
1622
        free += free_block.size;
1623
    }
1624

1625
    size_t allocated = allocated_for_slabs - free;
1626
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";
1627

1628
    if (!m_slabs.empty()) {
1629
        std::cout << "Slabs: ";
1630
        ref_type first_ref = m_baseline;
1631

1632
        for (const auto& slab : m_slabs) {
1633
            if (&slab != &m_slabs.front())
1634
                std::cout << ", ";
1635

1636
            ref_type last_ref = slab.ref_end - 1;
1637
            size_t size = slab.ref_end - first_ref;
1638
            void* addr = slab.addr;
1639
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
1640
            first_ref = slab.ref_end;
1641
        }
1642
        std::cout << "\n";
1643
    }
1644

1645
    if (!m_free_space.empty()) {
1646
        std::cout << "FreeSpace: ";
1647
        for (const auto& free_block : m_free_space) {
1648
            if (&free_block != &m_free_space.front())
1649
                std::cout << ", ";
1650

1651
            ref_type last_ref = free_block.ref + free_block.size - 1;
1652
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
1653
        }
1654
        std::cout << "\n";
1655
    }
1656
    if (!m_free_read_only.empty()) {
1657
        std::cout << "FreeSpace (ro): ";
1658
        for (const auto& free_block : m_free_read_only) {
1659
            if (&free_block != &m_free_read_only.front())
1660
                std::cout << ", ";
1661

1662
            ref_type last_ref = free_block.ref + free_block.size - 1;
1663
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
1664
        }
1665
        std::cout << "\n";
1666
    }
1667
    std::cout << std::flush;
1668
    */
1669
}
×
1670
// LCOV_EXCL_STOP
1671

1672
#endif // REALM_DEBUG