realm / realm-core, build 2213 (push build, run on Evergreen, committed via web-flow)

10 Apr 2024 11:21PM UTC coverage: 91.792% (-0.8%) from 92.623%
Add missing availability checks for SecCopyErrorMessageString (#7577)

This requires iOS 11.3 and we currently target iOS 11.

94842 of 175770 branches covered (53.96%)

7 of 22 new or added lines in 2 files covered. (31.82%)

1861 existing lines in 82 files now uncovered.

242866 of 264583 relevant lines covered (91.79%)

5593111.45 hits per line

Source File

/src/realm/alloc_slab.cpp (93.24% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <cinttypes>
#include <type_traits>
#include <exception>
#include <algorithm>
#include <memory>
#include <mutex>
#include <map>
#include <atomic>
#include <cstring>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/errno.hpp>
#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/terminate.hpp>
#include <realm/util/thread.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = 1UL << section_shift; // page_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


const SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};


void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}

inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr;
    m_ref_translation_ptr.store(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = nullptr;
#endif

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations, "
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined, "
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}


MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeBlock)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}
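
// [Editor's note, not in the original source: a worked example of the size
// fix-up above. A request is already asserted to be a multiple of 8 on entry,
// but bumping it up to sizeof(FreeBlock) could break that, so the mask repairs
// it: if sizeof(FreeBlock) were 12, an 8-byte request would become 12, and
// (12 + 7) & ~0x7 == 16. Every block handed out is therefore 8-byte aligned
// and large enough to be re-linked into a free list when it is freed.]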

SlabAlloc::FreeBlock* SlabAlloc::get_prev_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_before(entry);
    if (bb->block_before_size <= 0)
        return nullptr; // no prev block, or it is in use
    return block_before(bb);
}

SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_after(entry);
    if (bb->block_after_size <= 0)
        return nullptr; // no next block, or it is in use
    return block_after(bb);
}

SlabAlloc::FreeList SlabAlloc::find(int size)
{
    FreeList retval;
    retval.it = m_block_map.lower_bound(size);
    if (retval.it != m_block_map.end()) {
        retval.size = retval.it->first;
    }
    else {
        retval.size = 0;
    }
    return retval;
}

SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
{
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
        ++hint.it;
    if (hint.it == m_block_map.end())
        hint.size = 0; // indicate "not found"
    return hint;
}

SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
{
    FreeBlock* retval = list.it->second;
    FreeBlock* header = retval->next;
    if (header == retval)
        m_block_map.erase(list.it);
    else
        list.it->second = header;
    retval->unlink();
    return retval;
}

void SlabAlloc::FreeBlock::unlink()
{
    auto _next = next;
    auto _prev = prev;
    _next->prev = prev;
    _prev->next = next;
    clear_links();
}

void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    auto it = m_block_map.find(size);
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
    auto header = it->second;
    if (header == entry) {
        header = entry->next;
        if (header == entry)
            m_block_map.erase(it);
        else
            it->second = header;
    }
    entry->unlink();
}

void SlabAlloc::push_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    FreeBlock* header;
    auto it = m_block_map.find(size);
    if (it != m_block_map.end()) {
        header = it->second;
        it->second = entry;
        entry->next = header;
        entry->prev = header->prev;
        entry->prev->next = entry;
        entry->next->prev = entry;
    }
    else {
        header = nullptr;
        m_block_map[size] = entry;
        entry->next = entry->prev = entry;
    }
}

void SlabAlloc::mark_freed(FreeBlock* entry, int size)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
    auto alloc_size = -bb->block_after_size;
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
                    get_file_path_for_assertions());
    bb->block_after_size = alloc_size;
    bb = bb_after(entry);
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
    REALM_ASSERT(-bb->block_before_size == alloc_size);
    bb->block_before_size = alloc_size;
}

void SlabAlloc::mark_allocated(FreeBlock* entry)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
    auto bb2 = bb_after(entry);
    bb->block_after_size = 0 - bb->block_after_size;
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
    bb2->block_before_size = 0 - bb2->block_before_size;
}
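
// [Editor's note, not in the original source: the functions above implement a
// classic boundary-tag scheme. Every block is bracketed by BetweenBlocks
// records, and the size stored in a tag is kept positive while the adjacent
// block is free and negated while it is allocated, roughly:
//
//   ... | BB{..., after_size: +64} | 64-byte free block | BB{before_size: +64, ...} | ...
//
// mark_allocated() negates both tags around a block, mark_freed() asserts
// they are negative and restores them, and get_prev/next_block_if_mergeable()
// can test a neighbour's free/in-use state in O(1) by looking at the sign.]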

SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
{
    FreeList list = find(size);
    if (list.found_exact(size)) {
        return pop_freelist_entry(list);
    }
    // no exact matches.
    list = find_larger(list, size);
    FreeBlock* block;
    if (list.found_something()) {
        block = pop_freelist_entry(list);
    }
    else {
        block = grow_slab(size);
    }
    FreeBlock* remaining = break_block(block, size);
    if (remaining)
        push_freelist_entry(remaining);
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
    return block;
}

SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
{
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
    bb->block_before_size = 0;
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
    bb->block_after_size = block_size;
    auto entry = block_after(bb);
    entry->clear_links();
    entry->ref = ref_start + sizeof(BetweenBlocks);
    bb = bb_after(entry);
    bb->block_before_size = block_size;
    bb->block_after_size = 0;
    return entry;
}

void SlabAlloc::clear_freelists()
{
    m_block_map.clear();
}

void SlabAlloc::rebuild_freelists_from_slab()
{
    clear_freelists();
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
    for (const auto& e : m_slabs) {
        FreeBlock* entry = slab_to_entry(e, ref_start);
        push_freelist_entry(entry);
        ref_start = align_size_to_section_boundary(e.ref_end);
    }
}

SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
{
    int size = size_from_block(block);
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
        return nullptr;
    bb_after(block)->block_before_size = remaining_size;
    bb_before(block)->block_after_size = new_size;
    auto bb_between = bb_after(block);
    bb_between->block_before_size = new_size;
    bb_between->block_after_size = remaining_size;
    FreeBlock* remaining_block = block_after(bb_between);
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
    remaining_block->clear_links();
    block->clear_links();
    return remaining_block;
}
442
SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
443
{
2,258,097✔
444
    int size_first = size_from_block(first);
2,258,097✔
445
    int size_last = size_from_block(last);
2,258,097✔
446
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
2,258,097✔
447
    bb_before(first)->block_after_size = new_size;
2,258,097✔
448
    bb_after(last)->block_before_size = new_size;
2,258,097✔
449
    return first;
2,258,097✔
450
}
2,258,097✔
451

452
SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
453
{
92,145✔
454
    // Allocate new slab.
45,357✔
455
    // - Always allocate at least 128K. This is also the amount of
45,357✔
456
    //   memory that we allow the slab allocator to keep between
45,357✔
457
    //   transactions. Allowing it to keep a small amount between
45,357✔
458
    //   transactions makes very small transactions faster by avoiding
45,357✔
459
    //   repeated unmap/mmap system calls.
45,357✔
460
    // - When allocating, allocate as much as we already have, but
45,357✔
461
    // - Never allocate more than a full section (64MB). This policy
45,357✔
462
    //   leads to gradual allocation of larger and larger blocks until
45,357✔
463
    //   we reach allocation of entire sections.
45,357✔
464
    size += 2 * sizeof(BetweenBlocks);
92,145✔
465
    size_t new_size = minimal_alloc;
92,145✔
466
    while (new_size < uint64_t(size))
131,157✔
467
        new_size += minimal_alloc;
39,012✔
468
    size_t already_allocated = get_allocated_size();
92,145✔
469
    if (new_size < already_allocated)
92,145✔
470
        new_size = already_allocated;
3,111✔
471
    if (new_size > maximal_alloc)
92,145✔
472
        new_size = maximal_alloc;
18✔
473

45,357✔
474
    ref_type ref;
92,145✔
475
    if (m_slabs.empty()) {
92,145✔
476
        ref = m_baseline.load(std::memory_order_relaxed);
81,630✔
477
    }
81,630✔
478
    else {
10,515✔
479
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
5,109✔
480
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
10,515✔
481
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
10,515✔
482
        ref = curr_ref_end;
10,515✔
483
    }
10,515✔
484
    ref = align_size_to_section_boundary(ref);
92,145✔
485
    size_t ref_end = ref;
92,145✔
486
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
92,145✔
487
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
×
488
                                      util::to_string(new_size));
×
489
    }
×
490

45,357✔
491
    REALM_ASSERT(matches_section_boundary(ref));
92,145✔
492

45,357✔
493
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
92,145✔
494
    // Create new slab and add to list of slabs
45,357✔
495
    m_slabs.emplace_back(ref_end, new_size); // Throws
92,145✔
496
    const Slab& slab = m_slabs.back();
92,145✔
497
    extend_fast_mapping_with_slab(slab.addr);
92,145✔
498

45,357✔
499
    // build a single block from that entry
45,357✔
500
    return slab_to_entry(slab, ref);
92,145✔
501
}
92,145✔
502
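
// [Editor's note, not in the original source: the policy above makes slab
// growth roughly geometric. Assuming minimal_alloc is the 128K mentioned in
// the comment, a workload that keeps exhausting its free lists sees new
// slabs of 128K, then at least "as much as is already allocated"
// (256K, 512K, ...), until the cap of one full section (maximal_alloc,
// 64MB per the comment) is reached.]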


void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}

void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}
606
size_t SlabAlloc::consolidate_free_read_only()
607
{
601,017✔
608
    CriticalSection cs(changes);
601,017✔
609
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
601,017✔
610
        throw InvalidFreeSpace();
307,641✔
611

307,641✔
612
    return m_free_read_only.size();
601,017✔
613
}
601,017✔
614

615

616
MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
617
{
2,376,555✔
618
    REALM_ASSERT_DEBUG(translate(ref) == addr);
2,376,555✔
619
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
2,376,555✔
620
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
2,376,555✔
621
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
2,376,555✔
622

1,184,343✔
623
    // Possible future enhancement: check if we can extend current space instead
1,184,343✔
624
    // of unconditionally allocating new space. In that case, remember to
1,184,343✔
625
    // check whether m_free_space_state == free_state_Invalid. Also remember to
1,184,343✔
626
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.
1,184,343✔
627

1,184,343✔
628
    // Allocate new space
1,184,343✔
629
    MemRef new_mem = do_alloc(new_size); // Throws
2,376,555✔
630

1,184,343✔
631
    // Copy existing segment
1,184,343✔
632
    char* new_addr = new_mem.get_addr();
2,376,555✔
633
    realm::safe_copy_n(addr, old_size, new_addr);
2,376,555✔
634

1,184,343✔
635
    // Add old segment to freelist
1,184,343✔
636
    do_free(ref, addr);
2,376,555✔
637

1,184,343✔
638
#ifdef REALM_DEBUG
2,376,555✔
639
    if (REALM_COVER_NEVER(m_debug_out)) {
2,376,555✔
640
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
×
641
                  << " new_size: " << new_size << "\n";
×
642
    }
×
643
#endif // REALM_DEBUG
2,376,555✔
644

1,184,343✔
645
    return new_mem;
2,376,555✔
646
}
2,376,555✔
647

648

649
char* SlabAlloc::do_translate(ref_type) const noexcept
650
{
×
651
    REALM_ASSERT(false); // never come here
×
652
    return nullptr;
×
653
}
×
654

655

656
int SlabAlloc::get_committed_file_format_version() noexcept
657
{
95,892✔
658
    {
95,892✔
659
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
95,892✔
660
        if (m_mappings.size()) {
95,892✔
661
            // if we have mapped a file, m_mappings will have at least one mapping and
46,929✔
662
            // the first will be to the start of the file. Don't come here, if we're
46,929✔
663
            // just attaching a buffer. They don't have mappings.
46,929✔
664
            realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
95,850✔
665
        }
95,850✔
666
    }
95,892✔
667
    const Header& header = *reinterpret_cast<const Header*>(m_data);
95,892✔
668
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
76,323✔
669
    int file_format_version = int(header.m_file_format[slot_selector]);
95,892✔
670
    return file_format_version;
95,892✔
671
}
95,892✔
672

673
bool SlabAlloc::is_file_on_streaming_form(const Header& header)
674
{
350,538✔
675
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
168,120✔
676
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
289,413✔
677
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
350,538✔
678
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
350,538✔
679
}
350,538✔
680

681
ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
682
{
×
683
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
684
    const Header& header = reinterpret_cast<const Header&>(*buffer);
×
685
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
×
686
    if (is_file_on_streaming_form(header)) {
×
687
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
×
688
        return ref_type(footer.m_top_ref);
×
689
    }
×
690
    else {
×
691
        return to_ref(header.m_top_ref[slot_selector]);
×
692
    }
×
693
}
×
694

695
std::string SlabAlloc::get_file_path_for_assertions() const
696
{
×
697
    return m_file.get_path();
×
698
}
×
699

700
bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
701
{
69,642✔
702
    if (cfg.read_only) {
69,642✔
703
        // If the file is opened read-only, we cannot change it. This is not a problem,
704
        // because for a read-only file we assume that it will not change while we use it,
705
        // hence there will be no need to grow memory mappings.
706
        // This assumption obviously will not hold, if the file is shared by multiple
707
        // processes or threads with different opening modes.
708
        // Currently, there is no way to detect if this assumption is violated.
709
        return false;
×
710
    }
×
711
    size_t expected_size = size_t(-1);
69,642✔
712
    size_t size = static_cast<size_t>(m_file.get_size());
69,642✔
713

32,931✔
714
    // It is not safe to change the size of a file on streaming form, since the footer
32,931✔
715
    // must remain available and remain at the very end of the file.
32,931✔
716
    REALM_ASSERT(!is_file_on_streaming_form());
69,642✔
717

32,931✔
718
    // check if online compaction allows us to shrink the file:
32,931✔
719
    if (top_ref) {
69,642✔
720
        // Get the expected file size by looking up logical file size stored in top array
13,986✔
721
        constexpr size_t max_top_size = (Group::s_file_size_ndx + 1) * 8 + sizeof(Header);
30,873✔
722
        size_t top_page_base = top_ref & ~(page_size() - 1);
30,873✔
723
        size_t top_offset = top_ref - top_page_base;
30,873✔
724
        size_t map_size = std::min(max_top_size + top_offset, size - top_page_base);
30,873✔
725
        File::Map<char> map_top(m_file, top_page_base, File::access_ReadOnly, map_size, 0, m_write_observer);
30,873✔
726
        realm::util::encryption_read_barrier(map_top, top_offset, max_top_size);
30,873✔
727
        auto top_header = map_top.get_addr() + top_offset;
30,873✔
728
        auto top_data = NodeHeader::get_data_from_header(top_header);
30,873✔
729
        auto w = NodeHeader::get_width_from_header(top_header);
30,873✔
730
        auto logical_size = size_t(get_direct(top_data, w, Group::s_file_size_ndx)) >> 1;
30,873✔
731
        // make sure we're page aligned, so the code below doesn't first
13,986✔
732
        // truncate the file, then expand it again
13,986✔
733
        expected_size = round_up_to_page_size(logical_size);
30,873✔
734
    }
30,873✔
735

32,931✔
736
    // Check if we can shrink the file
32,931✔
737
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
69,642✔
738
        detach(true); // keep m_file open
6✔
739
        m_file.resize(expected_size);
6✔
740
        m_file.close();
6✔
741
        size = expected_size;
6✔
742
        return true;
6✔
743
    }
6✔
744

32,928✔
745
    // We can only safely mmap the file, if its size matches a page boundary. If not,
32,928✔
746
    // we must change the size to match before mmaping it.
32,928✔
747
    if (size != round_up_to_page_size(size)) {
69,636✔
748
        // The file size did not match a page boundary.
195✔
749
        // We must extend the file to a page boundary (unless already there)
195✔
750
        // The file must be extended to match in size prior to being mmapped,
195✔
751
        // as extending it after mmap has undefined behavior.
195✔
752
        if (cfg.session_initiator || !cfg.is_shared) {
417✔
753
            // We can only safely extend the file if we're the session initiator, or if
195✔
754
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
195✔
755
            // done to ensure well defined behavior for memory mappings. It does not matter,
195✔
756
            // that the free space management isn't informed
195✔
757
            size = round_up_to_page_size(size);
417✔
758
            detach(true); // keep m_file open
417✔
759
            m_file.prealloc(size);
417✔
760
            m_file.close();
417✔
761
            return true;
417✔
762
        }
417✔
763
        else {
×
764
            // Getting here, we have a file of a size that will not work, and without being
765
            // allowed to extend it. This should not be possible. But allowing a retry is
766
            // arguably better than giving up and crashing...
767
            throw Retry();
×
768
        }
×
769
    }
69,219✔
770
    return false;
69,219✔
771
}
69,219✔
772

773
ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
774
{
95,955✔
775
    m_cfg = cfg;
95,955✔
776
    m_write_observer = write_observer;
95,955✔
777
    // ExceptionSafety: If this function throws, it must leave the allocator in
46,980✔
778
    // the detached state.
46,980✔
779

46,980✔
780
    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
95,955✔
781

46,980✔
782
    // When 'read_only' is true, this function will throw InvalidDatabase if the
46,980✔
783
    // file exists already but is empty. This can happen if another process is
46,980✔
784
    // currently creating it. Note however, that it is only legal for multiple
46,980✔
785
    // processes to access a database file concurrently if it is done via a
46,980✔
786
    // DB, and in that case 'read_only' can never be true.
46,980✔
787
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
95,955✔
788
    // session_initiator can be set *only* if we're shared.
46,980✔
789
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
95,955✔
790
                    get_file_path_for_assertions());
95,955✔
791
    // clear_file can be set *only* if we're the first session.
46,980✔
792
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
95,955✔
793
                    get_file_path_for_assertions());
95,955✔
794

46,980✔
795
    using namespace realm::util;
95,955✔
796
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
95,307✔
797
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
95,955✔
798
    set_read_only(cfg.read_only);
95,955✔
799
    try {
95,955✔
800
        m_file.open(path.c_str(), access, create, 0); // Throws
95,955✔
801
    }
95,955✔
802
    catch (const FileAccessError& ex) {
47,001✔
803
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
42✔
804
        if (ex.code() == ErrorCodes::PermissionDenied) {
42✔
805
            msg += util::format(". Please use a path where your app has %1 permissions.",
6✔
806
                                cfg.read_only ? "read" : "read-write");
6✔
807
        }
6✔
808
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
42✔
809
    }
42✔
810
    File::CloseGuard fcg(m_file);
95,916✔
811
    auto physical_file_size = m_file.get_size();
95,916✔
812
    // Note that get_size() may (will) return a different size before and after
46,962✔
813
    // the call below to set_encryption_key.
46,962✔
814
    m_file.set_encryption_key(cfg.encryption_key);
95,916✔
815

46,962✔
816
    note_reader_start(this);
95,916✔
817
    util::ScopeExit reader_end_guard([this]() noexcept {
95,916✔
818
        note_reader_end(this);
95,916✔
819
    });
95,916✔
820
    size_t size = 0;
95,916✔
821
    // The size of a database file must not exceed what can be encoded in
46,962✔
822
    // size_t.
46,962✔
823
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
95,916✔
824
        throw InvalidDatabase("Realm file too large", path);
46,962✔
825
    if (cfg.clear_file_on_error && cfg.session_initiator) {
95,916✔
826
        if (size == 0 && physical_file_size != 0) {
24,528✔
827
            cfg.clear_file = true;
6✔
828
        }
6✔
829
        else if (size > 0) {
24,522✔
830
            try {
22,749✔
831
                read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
22,749✔
832
            }
22,749✔
833
            catch (const InvalidDatabase&) {
11,148✔
834
                cfg.clear_file = true;
30✔
835
            }
30✔
836
        }
22,749✔
837
    }
24,528✔
838
    if (cfg.clear_file) {
95,916✔
839
        m_file.resize(0);
22,158✔
840
        size = 0;
22,158✔
841
        physical_file_size = 0;
22,158✔
842
    }
22,158✔
843
    else if (cfg.encryption_key && !cfg.clear_file && size == 0 && physical_file_size != 0) {
73,758✔
844
        // The opened file holds data, but is so small it cannot have
12✔
845
        // been created with encryption
12✔
846
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
12✔
847
    }
12✔
848
    if (size == 0) {
95,904✔
849
        if (REALM_UNLIKELY(cfg.read_only))
38,763✔
850
            throw InvalidDatabase("Read-only access to empty Realm file", path);
18,948✔
851

18,948✔
852
        size_t initial_size = page_size(); // m_initial_section_size;
38,763✔
853
        // exFAT does not allocate a unique id for the file until it is non-empty. It must be
18,948✔
854
        // valid at this point because File::get_unique_id() is used to distinguish
18,948✔
855
        // mappings_for_file in the encryption layer. So the prealloc() is required before
18,948✔
856
        // interacting with the encryption layer in File::write().
18,948✔
857
        // Pre-alloc initial space
18,948✔
858
        m_file.prealloc(initial_size); // Throws
38,763✔
859
        // seek() back to the start of the file in preparation for writing the header
18,948✔
860
        // This sequence of File operations is protected from races by
18,948✔
861
        // DB::m_controlmutex, so we know we are the only ones operating on the file
18,948✔
862
        m_file.seek(0);
38,763✔
863
        const char* data = reinterpret_cast<const char*>(&empty_file_header);
38,763✔
864
        m_file.write(data, sizeof empty_file_header); // Throws
38,763✔
865

18,948✔
866
        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
38,763✔
867
        if (!disable_sync)
38,763✔
868
            m_file.sync(); // Throws
12✔
869

18,948✔
870
        size = initial_size;
38,763✔
871
    }
38,763✔
872

46,950✔
873
    ref_type top_ref = read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
95,904✔
874
    m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
95,190✔
875
    // m_data not valid at this point!
46,950✔
876
    m_baseline = 0;
95,904✔
877
    // make sure that any call to begin_read cause any slab to be placed in free
46,950✔
878
    // lists correctly
46,950✔
879
    m_free_space_state = free_space_Invalid;
95,904✔
880

46,950✔
881
    // Ensure clean up, if we need to back out:
46,950✔
882
    DetachGuard dg(*this);
95,904✔
883

46,950✔
884
    reset_free_space_tracking();
95,904✔
885
    update_reader_view(size);
95,904✔
886
    REALM_ASSERT(m_mappings.size());
95,904✔
887
    m_data = m_mappings[0].primary_mapping.get_addr();
95,904✔
888
    realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
95,904✔
889
    dg.release();  // Do not detach
95,904✔
890
    fcg.release(); // Do not close
95,904✔
891
#if REALM_ENABLE_ENCRYPTION
95,904✔
892
    m_realm_file_info = util::get_file_info_for_file(m_file);
95,904✔
893
#endif
95,904✔
894
    return top_ref;
95,904✔
895
}
95,904✔
896

897
void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
898
{
69,786✔
899
    auto header = reinterpret_cast<const Header*>(m_data);
69,786✔
900
    if (!is_file_on_streaming_form(*header))
69,786✔
901
        return;
69,264✔
902

261✔
903
    // Make sure the database is not on streaming format. If we did not do this,
261✔
904
    // a later commit would have to do it. That would require coordination with
261✔
905
    // anybody concurrently joining the session, so it seems easier to do it at
261✔
906
    // session initialization, even if it means writing the database during open.
261✔
907
    {
522✔
908
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
522✔
909
        Header& writable_header = *writable_map.get_addr();
522✔
910
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
522✔
911
        writable_header.m_top_ref[1] = top_ref;
522✔
912
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
522✔
913
        realm::util::encryption_write_barrier(writable_map, 0);
522✔
914
        writable_map.sync();
522✔
915
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
522✔
916
        writable_header.m_flags |= flags_SelectBit;
522✔
917
        realm::util::encryption_write_barrier(writable_map, 0);
522✔
918
        writable_map.sync();
522✔
919

261✔
920
        realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
522✔
921
    }
522✔
922
}
522✔
923
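
// [Editor's note, not in the original source: note the crash-safe ordering in
// convert_from_streaming_form(). The real top ref is written into slot 1 and
// synced while the select bit still points at slot 0 (the streaming
// sentinel); only then is the select bit flipped and synced again. A crash
// between the two syncs leaves a file that still reads as streaming form, so
// the conversion is simply redone on the next session-initiating open.]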

void SlabAlloc::note_reader_start(const void* reader_id)
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_start(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

void SlabAlloc::note_reader_end(const void* reader_id) noexcept
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_end(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());

    // Verify the data structures
    std::string path;                                     // No path
    ref_type top_ref = validate_header(data, size, path); // Throws

    m_data = data;
    size = align_size_to_section_boundary(size);
    m_baseline = size;
    m_attach_mode = attach_UsersBuffer;

    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    return top_ref;
}

void SlabAlloc::init_in_memory_buffer()
{
    m_attach_mode = attach_Heap;
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
    m_data = m_virtual_file_buffer.back().addr;
    m_virtual_file_size = sizeof(empty_file_header);
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);

    m_baseline = m_virtual_file_size;
    m_translation_table_size = 1;
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
    m_ref_translation_ptr = ref_translation_ptr;
}

char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
{
    auto idx = get_section_index(ref);
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
    auto& buf = m_virtual_file_buffer[idx];
    return buf.addr + (ref - buf.start_ref);
}

void SlabAlloc::attach_empty()
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    m_attach_mode = attach_OwnedBuffer;
    m_data = nullptr; // Empty buffer

    // Below this point (assignment to `m_attach_mode`), nothing must throw.

    // No ref must ever be less than the header size, so we will use that as the
    // baseline here.
    size_t size = align_size_to_section_boundary(sizeof(Header));
    m_baseline = size;
    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1];
}

ref_type SlabAlloc::read_and_validate_header(util::File& file, const std::string& path, size_t size,
                                             bool session_initiator, util::WriteObserver* write_observer)
{
    try {
        // we'll read header and (potentially) footer
        File::Map<char> map_header(file, File::access_ReadOnly, sizeof(Header), 0, write_observer);
        realm::util::encryption_read_barrier(map_header, 0, sizeof(Header));
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());

        File::Map<char> map_footer;
        const StreamingFooter* footer = nullptr;
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
            size_t footer_ref = size - sizeof(StreamingFooter);
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
            size_t footer_offset = footer_ref - footer_page_base;
            map_footer = File::Map<char>(file, footer_page_base, File::access_ReadOnly,
                                         sizeof(StreamingFooter) + footer_offset, 0, write_observer);
            realm::util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
        }

        auto top_ref = validate_header(header, footer, size, path, file.get_encryption_key() != nullptr); // Throws

        if (session_initiator && is_file_on_streaming_form(*header)) {
            // Don't compare file format version fields as they are allowed to differ.
            // Also don't compare reserved fields.
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, path);
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0], path);
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1], path);
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2], path);
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3], path);
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0], path);
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], path);
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie, path);
        }
        return top_ref;
    }
    catch (const InvalidDatabase&) {
        throw;
    }
    catch (const DecryptionFailed& e) {
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
    }
    catch (const std::exception& e) {
        throw InvalidDatabase(e.what(), path);
    }
    catch (...) {
        throw InvalidDatabase("unknown error", path);
    }
}

void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
{
    char buf[256];
    snprintf(buf, sizeof(buf),
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
             header.m_flags);
    msg += buf;
    throw InvalidDatabase(msg, path);
}

// Note: This relies on proper mappings having been established by the caller
// for both the header and the streaming footer
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
{
    auto header = reinterpret_cast<const Header*>(data);
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
    return validate_header(header, footer, size, path);
}

ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
                                    const std::string& path, bool is_encrypted)
{
    // Verify that size is sane and 8-byte aligned
    if (REALM_UNLIKELY(size < sizeof(Header)))
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
                              path);
    if (REALM_UNLIKELY(size % 8 != 0))
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);

    // First four bytes of info block is file format id
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
        if (is_encrypted) {
            // Encrypted files check the hmac on read, so there's a lot less
            // which could go wrong and have us still reach this point
            throw_header_exception("header has invalid mnemonic. The file does not appear to be a Realm file.",
                                   *header, path);
        }
        else {
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
                                   *header, path);
        }
    }

    // Last bit in info block indicates which top_ref block is valid
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);

    // Top-ref must always point within buffer
    auto top_ref = header->m_top_ref[slot_selector];
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
            throw InvalidDatabase(
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
                path);
        }
        REALM_ASSERT(footer);
        top_ref = footer->m_top_ref;
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
                                               "The file is probably truncated.",
                                               footer->m_magic_cookie),
                                  path);
        }
    }
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
        throw_header_exception("top ref is not aligned", *header, path);
    }
    if (REALM_UNLIKELY(top_ref >= size)) {
        throw_header_exception(
            util::format(
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
                top_ref),
            *header, path);
    }
    return ref_type(top_ref);
}
1142
size_t SlabAlloc::get_total_size() const noexcept
{
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
}


void SlabAlloc::reset_free_space_tracking()
{
    CriticalSection cs(changes);
    if (is_free_space_clean())
        return;

    // Free all scratch space (done after all data has
    // been committed to persistent space)
    m_free_read_only.clear();

    // Release slabs, but keep the initial allocation if it is a minimal allocation;
    // otherwise release it as well. This saves map/unmap for small transactions.
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
        auto& last_slab = m_slabs.back();
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
        --m_translation_table_size;
        m_slabs.pop_back();
    }
    rebuild_freelists_from_slab();
    m_free_space_state = free_space_Clean;
    m_commit_size = 0;
}

inline bool randomly_false_in_debug(bool x)
{
#ifdef REALM_DEBUG
    if (x)
        return (std::rand() & 1);
#endif
    return x;
}


/*
  Memory mapping

  To make ref->ptr translation fast while also avoiding having to memory-map the entire file
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
  essential to map the file in equal-sized sections.

  These sections must be large enough to hold one or more of the largest arrays, which can be up
  to 16MB. You can only mmap file space which has been allocated to a file. If you mmap a range
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
  We don't want to extend the file in increments as large as the chunk size.

  As the file grows, we grow the mapping by creating a new, larger one, which replaces the
  old one in the mapping table. However, we must keep the old mapping open, because older
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
  and only cleaned out once we know that no transaction can refer to them anymore.

  Interaction with encryption

  When encryption is enabled, the memory mapping is to temporary memory, not the file.
  The binding to the file is done by software. This allows us to "cheat" and allocate
  entire sections. With encryption, it doesn't matter if the mapped memory logically
  extends beyond the end of the file, because it will not be accessed.

  Growing/changing the mapping table

  There are two mapping tables:

  * m_mappings: This is the "source of truth" about what the current mapping is.
    It is only accessed under lock.
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
    mostly lock-free fashion from the translate function. Because of the lock-free operation this
    table can only be extended. Only selected members in each entry can be changed.
    See RefTranslation in alloc.hpp for more details.
    The fast mapping also maps the slab area used for allocations - as mappings are added,
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
    to memory allocation and release, which is already serialized (since write transactions are
    single-threaded).

  When m_mappings is changed because an extend operation replaces a mapping, or when
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:

  * A new fast mapping table is created. The old one is not modified.
  * The old one is held in a waiting area until it is no longer relevant because no
    live transaction can refer to it any more.
 */
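/*
  Illustrative sketch (not part of this file's API): because sections are
  equal-sized (1 << section_shift bytes), the lock-free translate path
  described above can conceptually resolve a ref in two steps:

      size_t index = ref >> section_shift;
      size_t offset = ref & ((size_t(1) << section_shift) - 1);
      char* addr = fast_mapping[index].mapping_addr + offset;

  Since the table is only ever extended or replaced wholesale
  (read-copy-update), a reader racing with update_reader_view() below always
  observes a complete, consistent table.
 */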
void SlabAlloc::update_reader_view(size_t file_size)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
    if (file_size <= old_baseline) {
        schedule_refresh_of_outdated_encrypted_pages();
        return;
    }

    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
    bool replace_last_mapping = false;
    size_t old_num_mappings = get_section_index(old_slab_base);

    if (!is_in_memory()) {
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
                        get_file_path_for_assertions());
        REALM_ASSERT_DEBUG(is_free_space_clean());

        // Create the new mappings we need to cover the new size. We don't mutate
        // any of the member variables until we've successfully created all of the
        // mappings so that we leave things in a consistent state if one of them
        // hits an allocation failure.

        std::vector<MapEntry> new_mappings;
        REALM_ASSERT(m_mappings.size() == old_num_mappings);

        {
            // If the old slab base was greater than the old baseline then the final
            // mapping was a partial section and we need to replace it with a larger
            // mapping.
            if (old_baseline < old_slab_base) {
                // old_slab_base should be 0 if we had no mappings previously
                REALM_ASSERT(old_num_mappings > 0);
                // try to extend the old mapping in-place instead of replacing it.
                MapEntry& cur_entry = m_mappings.back();
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
                    replace_last_mapping = true;
                    --old_num_mappings;
                }
            }

            // Create new mappings covering from the end of the last complete
            // section to the end of the new file size.
            const auto new_slab_base = align_size_to_section_boundary(file_size);
            const size_t num_mappings = get_section_index(new_slab_base);
            new_mappings.reserve(num_mappings - old_num_mappings);
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
                const size_t section_start_offset = get_section_base(k);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (section_size == (1 << section_shift)) {
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
                                                                  section_size, 0, m_write_observer)});
                }
                else {
                    new_mappings.push_back({util::File::Map<char>()});
                    auto& mapping = new_mappings.back().primary_mapping;
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
                                                        section_start_offset, m_write_observer);
                    if (reserved) {
                        // if reservation is supported, the first attempt at extending must succeed
                        if (!mapping.try_extend_to(section_size))
                            throw std::bad_alloc();
                    }
                    else {
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size, 0,
                                                                section_start_offset, m_write_observer);
                    }
                }
            }
        }

        // Now that we've successfully created our mappings, update our member
        // variables (and assume that resizing a simple vector won't produce memory
        // allocation failures, unlike 64 MB mmaps).
        if (replace_last_mapping) {
            MapEntry& cur_entry = m_mappings.back();
            // We should not have a xover mapping here because that would mean
            // that there was already something mapped after the last section
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
            // save the old mapping/keep it open
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
            m_mappings.pop_back();
            m_mapping_version++;
        }

        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
    }

    m_baseline.store(file_size, std::memory_order_relaxed);

    const size_t ref_start = align_size_to_section_boundary(file_size);
    const size_t ref_displacement = ref_start - old_slab_base;
    if (ref_displacement > 0) {
        // Rebase slabs as m_baseline is now bigger than old_slab_base
        for (auto& e : m_slabs) {
            e.ref_end += ref_displacement;
        }
    }

    rebuild_freelists_from_slab();

    // Build the fast path mapping

    // The fast path mapping is an array which is used from multiple threads
    // without locking - see translate().

    // Addition of a new mapping may require a completely new fast mapping table.
    //
    // Being used in a multithreaded scenario, the old mappings must be retained open,
    // until the realm version for which they were established has been closed/detached.
    //
    // This assumes that only write transactions call do_alloc() or do_free() or need to
    // translate refs in the slab area, and that all these uses are serialized, whether
    // that is achieved by being single threaded, interlocked or run from a sequential
    // scheduling queue.
    //
    rebuild_translations(replace_last_mapping, old_num_mappings);

    schedule_refresh_of_outdated_encrypted_pages();
}


void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
{
#if REALM_ENABLE_ENCRYPTION
    // callers must already hold m_mapping_mutex
    for (auto& e : m_mappings) {
        if (auto m = e.primary_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
        if (auto m = e.xover_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
    }
    // unsafe to do outside writing thread: verify();
#endif // REALM_ENABLE_ENCRYPTION
}

1371
{
92,151✔
1372
    size_t sz = 0;
92,151✔
1373
    for (const auto& s : m_slabs)
92,151✔
1374
        sz += s.size;
18,273✔
1375
    return sz;
92,151✔
1376
}
92,151✔
1377

1378
void SlabAlloc::extend_fast_mapping_with_slab(char* address)
1379
{
92,148✔
1380
    ++m_translation_table_size;
92,148✔
1381
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
92,148✔
1382
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
208,122✔
1383
        new_fast_mapping[i] = m_ref_translation_ptr[i];
115,974✔
1384
    }
115,974✔
1385
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
92,148✔
1386
                                    m_ref_translation_ptr.load());
92,148✔
1387
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
92,148✔
1388
    // Memory ranges with slab (working memory) can never have arrays that straddle a boundary,
45,360✔
1389
    // so optimize by clamping the lowest possible xover offset to the end of the section.
45,360✔
1390
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
92,148✔
1391
    m_ref_translation_ptr = new_fast_mapping.release();
92,148✔
1392
}
92,148✔
1393

1394
void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
{
    size_t free_space_size = m_slabs.size();
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
    if (m_translation_table_size < num_mappings + free_space_size) {
        requires_new_translation = true;
    }
    RefTranslation* new_translation_table = m_ref_translation_ptr;
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
    if (requires_new_translation) {
        // we need a new translation table, but must preserve the old one, as
        // translations using it may be in progress concurrently
        if (m_translation_table_size)
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
                                            m_ref_translation_ptr.load());
        m_translation_table_size = num_mappings + free_space_size;
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
        new_translation_table = new_translation_table_owner.get();
        old_num_sections = 0;
    }
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
        if (is_in_memory()) {
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
        }
        else {
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
#if REALM_ENABLE_ENCRYPTION
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
#endif
        }
        REALM_ASSERT(new_translation_table[i].mapping_addr);
        // We don't copy over data for the cross-over mapping. If the mapping is needed,
        // copying will happen on demand (in get_or_add_xover_mapping).
        // Note: that may never be needed, because if the array that needed the original cross-over
        // mapping is freed, any new array allocated at the same position will NOT need a cross-over
        // mapping, but will just use the primary mapping.
    }
    for (size_t k = 0; k < free_space_size; ++k) {
        char* base = m_slabs[k].addr;
        REALM_ASSERT(base);
        new_translation_table[num_mappings + k].mapping_addr = base;
    }

    // This will either be null or the same as new_translation_table, which is about to become owned by
    // m_ref_translation_ptr.
    (void)new_translation_table_owner.release();

    m_ref_translation_ptr = new_translation_table;
}

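/*
  Illustrative lifecycle note (no additional API): a table retired into
  m_old_translations above is tagged with m_youngest_live_version. It can be
  freed once purge_old_mappings() observes an oldest_live_version newer than
  that tag, i.e. once no live transaction can still be reading through it:

      bool reclaimable = oldie.replaced_at_version < oldest_live_version;
 */
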
void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
{
    auto _page_size = page_size();
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
        // some other thread already added a mapping
        // it MUST have been for the exact same address:
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
        return;
    }
    MapEntry* map_entry = &m_mappings[index];
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
    if (!map_entry->xover_mapping.is_attached()) {
        // Create a xover mapping
        auto file_offset = get_section_base(index) + offset;
        auto end_offset = file_offset + size;
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
        auto minimal_mapping_size = end_offset - mapping_file_offset;
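        // Illustrative arithmetic (assuming 4 KiB pages): a file_offset of
        // 70000 rounds down to mapping_file_offset 69632 (17 * 4096), keeping
        // the mapping page-aligned while minimal_mapping_size still covers
        // the whole range [file_offset, end_offset).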
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size, 0,
                                      m_write_observer);
        map_entry->xover_mapping = std::move(mapping);
    }
    txl.xover_mapping_base = offset & ~(_page_size - 1);
#if REALM_ENABLE_ENCRYPTION
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
#endif
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
}

void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
{
    // Verify that each old ref translation pointer still points to a valid
    // thing that we haven't released yet.
#if REALM_DEBUG
    std::unordered_set<const char*> mappings;
    for (auto& m : m_old_mappings) {
        REALM_ASSERT(m.mapping.is_attached());
        mappings.insert(m.mapping.get_addr());
    }
    for (auto& m : m_mappings) {
        REALM_ASSERT(m.primary_mapping.is_attached());
        mappings.insert(m.primary_mapping.get_addr());
        if (m.xover_mapping.is_attached())
            mappings.insert(m.xover_mapping.get_addr());
    }
    for (auto& m : m_virtual_file_buffer) {
        mappings.insert(m.addr);
    }
    if (m_data)
        mappings.insert(m_data);
    for (auto& t : m_old_translations) {
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
                        youngest_live_version, t.replaced_at_version);
        if (nonempty_attachment()) {
            for (size_t i = 0; i < t.translation_count; ++i)
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
        }
    }
#else
    static_cast<void>(youngest_live_version);
#endif
}


void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    verify_old_translations(youngest_live_version);

    auto pred = [=](auto& oldie) {
        return oldie.replaced_at_version < oldest_live_version;
    };
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
                             m_old_translations.end());
    m_youngest_live_version = youngest_live_version;
    verify_old_translations(youngest_live_version);
}

void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
{
    m_youngest_live_version = currently_live_version;
}

const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
{
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();
    return m_free_read_only;
}


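// Worked example for find_section_in_range() below (illustrative numbers,
// assuming 64 MiB sections): a 16 MiB request against a free chunk spanning
// [60 MiB, 96 MiB) cannot be placed at 60 MiB, since it would straddle the
// section boundary at 64 MiB. alloc_pos is therefore advanced to that
// boundary, and since 64 MiB + 16 MiB <= 96 MiB, 64 MiB is returned.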
size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
{
    size_t end_of_block = start_pos + free_chunk_size;
    size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
        if (alloc_pos + request_size <= next_section_boundary) {
            return alloc_pos;
        }
        alloc_pos = next_section_boundary;
    }
    return 0;
}


void SlabAlloc::resize_file(size_t new_file_size)
{
    if (m_attach_mode == attach_SharedFile) {
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
        m_file.prealloc(new_file_size); // Throws
        // resizing is done based on the logical file size. It is ok for the file
        // to actually be bigger, but never smaller.
        REALM_ASSERT(new_file_size <= static_cast<size_t>(m_file.get_size()));

        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws
    }
    else {
        size_t current_size = 0;
        for (auto& b : m_virtual_file_buffer) {
            current_size += b.size;
        }
        if (new_file_size > current_size) {
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
        }
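        // The in-memory backing grows by appending fixed 64 MiB buffers; the
        // logical size below is tracked separately from the buffer capacity.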
        m_virtual_file_size = new_file_size;
    }
}

#ifdef REALM_DEBUG
void SlabAlloc::reserve_disk_space(size_t size)
{
    if (size != round_up_to_page_size(size))
        size = round_up_to_page_size(size);
    m_file.prealloc(size); // Throws

    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
    if (!disable_sync)
        m_file.sync(); // Throws
}
#endif

void SlabAlloc::verify() const
{
#ifdef REALM_DEBUG
    if (!m_slabs.empty()) {
        // Make sure that all free blocks are within a slab. This is done
        // implicitly by using for_all_free_entries()
        size_t first_possible_ref = m_baseline;
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
        for_all_free_entries([&](size_t ref, size_t size) {
            REALM_ASSERT(ref >= first_possible_ref);
            REALM_ASSERT(ref + size <= first_impossible_ref);
            first_possible_ref = ref;
        });
    }
#endif
}

#ifdef REALM_DEBUG

bool SlabAlloc::is_all_free() const
{
    // verify that slabs contain only free space.
    // this is equivalent to each slab holding BetweenBlocks only at the ends.
    for (const auto& e : m_slabs) {
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
        REALM_ASSERT(first->block_before_size == 0);
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
        REALM_ASSERT(last->block_after_size == 0);
        if (first->block_after_size != last->block_before_size)
            return false;
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
        range -= sizeof(BetweenBlocks);
        // the size of the free area must match the distance between the two BetweenBlocks:
        if (range != first->block_after_size)
            return false;
    }
    return true;
}


// LCOV_EXCL_START
void SlabAlloc::print() const
{
    /* TODO
     *

    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;

    size_t free = 0;
    for (const auto& free_block : m_free_space) {
        free += free_block.size;
    }

    size_t allocated = allocated_for_slabs - free;
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";

    if (!m_slabs.empty()) {
        std::cout << "Slabs: ";
        ref_type first_ref = m_baseline;

        for (const auto& slab : m_slabs) {
            if (&slab != &m_slabs.front())
                std::cout << ", ";

            ref_type last_ref = slab.ref_end - 1;
            size_t size = slab.ref_end - first_ref;
            void* addr = slab.addr;
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
            first_ref = slab.ref_end;
        }
        std::cout << "\n";
    }

    if (!m_free_space.empty()) {
        std::cout << "FreeSpace: ";
        for (const auto& free_block : m_free_space) {
            if (&free_block != &m_free_space.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    if (!m_free_read_only.empty()) {
        std::cout << "FreeSpace (ro): ";
        for (const auto& free_block : m_free_read_only) {
            if (&free_block != &m_free_read_only.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    std::cout << std::flush;
    */
}
// LCOV_EXCL_STOP

#endif // REALM_DEBUG