realm / realm-core (branch finn.schiermer-andersen_89)
Pull Request #7654: Fsa/string interning
Evergreen build, 04 Jun 2024 02:04PM UTC
Commit by finnschiermer: "optimized string cache gc"
Coverage: 90.651% (-0.03%) from 90.685%

102644 of 180648 branches covered (56.82%)
1005 of 1125 new or added lines in 15 files covered (89.33%)
154 existing lines in 21 files now uncovered
217953 of 240431 relevant lines covered (90.65%)
7671710.15 hits per line

Source File: /src/realm/alloc_slab.cpp (88.78% of lines covered)

/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <cinttypes>
#include <type_traits>
#include <exception>
#include <algorithm>
#include <memory>
#include <mutex>
#include <map>
#include <atomic>
#include <cstring>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/errno.hpp>
#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/terminate.hpp>
#include <realm/util/thread.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = section_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


const SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};


void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}

inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr;
    m_ref_translation_ptr.store(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = nullptr;
#endif

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations, "
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined, "
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}


MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeListEntry)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate_in_slab(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}
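
// Worked example of the size fix-up in do_alloc above: a request smaller than
// sizeof(FreeBlock) is first raised to that minimum, and (size + 7) & ~0x7
// then rounds the result up to the next multiple of 8. The helper below is a
// minimal sketch restating that inline arithmetic (align8 is illustrative
// only; it is not part of this file):
//
//     constexpr size_t align8(size_t n) { return (n + 7) & ~size_t(7); }
//     static_assert(align8(13) == 16 && align8(16) == 16 && align8(17) == 24);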

SlabAlloc::FreeBlock* SlabAlloc::get_prev_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_before(entry);
    if (bb->block_before_size <= 0)
        return nullptr; // no prev block, or it is in use
    return block_before(bb);
}

SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_after(entry);
    if (bb->block_after_size <= 0)
        return nullptr; // no next block, or it is in use
    return block_after(bb);
}

SlabAlloc::FreeList SlabAlloc::find(int size)
{
    FreeList retval;
    retval.it = m_block_map.lower_bound(size);
    if (retval.it != m_block_map.end()) {
        retval.size = retval.it->first;
    }
    else {
        retval.size = 0;
    }
    return retval;
}

SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
{
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
        ++hint.it;
    if (hint.it == m_block_map.end())
        hint.size = 0; // indicate "not found"
    return hint;
}

SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
{
    FreeBlock* retval = list.it->second;
    FreeBlock* header = retval->next;
    if (header == retval)
        m_block_map.erase(list.it);
    else
        list.it->second = header;
    retval->unlink();
    return retval;
}

void SlabAlloc::FreeBlock::unlink()
{
    REALM_ASSERT_DEBUG(next != nullptr && prev != nullptr);
    auto _next = next;
    auto _prev = prev;
    _next->prev = prev;
    _prev->next = next;
    clear_links();
}

void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    auto it = m_block_map.find(size);
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
    auto header = it->second;
    if (header == entry) {
        header = entry->next;
        if (header == entry)
            m_block_map.erase(it);
        else
            it->second = header;
    }
    entry->unlink();
}

void SlabAlloc::push_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    FreeBlock* header;
    auto it = m_block_map.find(size);
    if (it != m_block_map.end()) {
        header = it->second;
        it->second = entry;
        entry->next = header;
        entry->prev = header->prev;
        entry->prev->next = entry;
        entry->next->prev = entry;
    }
    else {
        header = nullptr;
        m_block_map[size] = entry;
        entry->next = entry->prev = entry;
    }
}
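
// The functions above implement a size-segregated free list: m_block_map is an
// ordered map keyed by exact block size, and each mapped value heads a circular
// doubly-linked ring of FreeBlocks of that size. A singleton ring points at
// itself (entry->next = entry->prev = entry), which is why pop_freelist_entry
// and remove_freelist_entry detect "last element" by comparing the ring header
// with its own next pointer. A minimal sketch of the same lookup discipline
// using standard containers (illustrative only; these names do not exist in
// Realm):
//
//     std::map<int, std::list<char*>> block_map; // size -> blocks of that size
//     char* pop_at_least(int size)
//     {
//         auto it = block_map.lower_bound(size); // smallest size >= request
//         if (it == block_map.end())
//             return nullptr; // caller must grow the slab instead
//         char* block = it->second.front();
//         it->second.pop_front();
//         if (it->second.empty())
//             block_map.erase(it); // keep the map free of empty size classes
//         return block;
//     }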

void SlabAlloc::mark_freed(FreeBlock* entry, int size)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
    auto alloc_size = -bb->block_after_size;
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
                    get_file_path_for_assertions());
    bb->block_after_size = alloc_size;
    bb = bb_after(entry);
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
    REALM_ASSERT(-bb->block_before_size == alloc_size);
    bb->block_before_size = alloc_size;
}

void SlabAlloc::mark_allocated(FreeBlock* entry)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
    auto bb2 = bb_after(entry);
    bb->block_after_size = 0 - bb->block_after_size;
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
    bb2->block_before_size = 0 - bb2->block_before_size;
}
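
// Sign convention used by mark_freed/mark_allocated above: every block is
// bracketed by two BetweenBlocks records, both carrying the block's size. A
// positive size means the block is free, a negative size means it is
// allocated, and 0 marks the two ends of a slab (see slab_to_entry below).
// Flipping the sign (0 - size) is all it takes to change state, and the
// mergeability checks above only need to inspect the sign of the neighboring
// record:
//
//     [BB: 0 | +s] [free block of s bytes]      [BB: +s | ...]
//     [BB: 0 | -s] [allocated block of s bytes] [BB: -s | ...]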

SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
{
    FreeList list = find(size);
    if (list.found_exact(size)) {
        return pop_freelist_entry(list);
    }
    // no exact matches.
    list = find_larger(list, size);
    FreeBlock* block;
    if (list.found_something()) {
        block = pop_freelist_entry(list);
    }
    else {
        block = grow_slab(size);
    }
    FreeBlock* remaining = break_block(block, size);
    if (remaining)
        push_freelist_entry(remaining);
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
    const auto block_before = bb_before(block);
    REALM_ASSERT_DEBUG(block_before && block_before->block_after_size >= size);
    const auto after_block_size = size_from_block(block);
    REALM_ASSERT_DEBUG(after_block_size >= size);
    return block;
}
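
// allocate_block above is a classic segregated-fit allocator: first try the
// ring holding blocks of exactly the requested size, otherwise scan upward
// (find_larger) for the smallest ring that fits the request plus the overhead
// of one more BetweenBlocks and FreeBlock, growing the slab only when nothing
// qualifies. break_block then splits off the unused tail, which is pushed
// back onto the free lists so no space is lost to the oversized pick.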

SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
{
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
    bb->block_before_size = 0;
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
    bb->block_after_size = block_size;
    auto entry = block_after(bb);
    entry->clear_links();
    entry->ref = ref_start + sizeof(BetweenBlocks);
    bb = bb_after(entry);
    bb->block_before_size = block_size;
    bb->block_after_size = 0;
    return entry;
}

void SlabAlloc::clear_freelists()
{
    m_block_map.clear();
}

void SlabAlloc::rebuild_freelists_from_slab()
{
    clear_freelists();
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
    for (const auto& e : m_slabs) {
        FreeBlock* entry = slab_to_entry(e, ref_start);
        push_freelist_entry(entry);
        ref_start = align_size_to_section_boundary(e.ref_end);
    }
}

SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
{
    int size = size_from_block(block);
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
        return nullptr;
    bb_after(block)->block_before_size = remaining_size;
    bb_before(block)->block_after_size = new_size;
    auto bb_between = bb_after(block);
    bb_between->block_before_size = new_size;
    bb_between->block_after_size = remaining_size;
    FreeBlock* remaining_block = block_after(bb_between);
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
    remaining_block->clear_links();
    block->clear_links();
    return remaining_block;
}

SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
{
    int size_first = size_from_block(first);
    int size_last = size_from_block(last);
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
    bb_before(first)->block_after_size = new_size;
    bb_after(last)->block_before_size = new_size;
    return first;
}

SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
{
    // Allocate new slab.
    // - Always allocate at least 128K. This is also the amount of
    //   memory that we allow the slab allocator to keep between
    //   transactions. Allowing it to keep a small amount between
    //   transactions makes very small transactions faster by avoiding
    //   repeated unmap/mmap system calls.
    // - When allocating, allocate at least as much as is already
    //   allocated, so slab sizes grow roughly geometrically, but
    // - Never allocate more than a full section (64MB). This policy
    //   leads to gradual allocation of larger and larger blocks until
    //   we reach allocation of entire sections.
    size += 2 * sizeof(BetweenBlocks);
    size_t new_size = minimal_alloc;
    while (new_size < uint64_t(size))
        new_size += minimal_alloc;
    size_t already_allocated = get_allocated_size();
    if (new_size < already_allocated)
        new_size = already_allocated;
    if (new_size > maximal_alloc)
        new_size = maximal_alloc;

    ref_type ref;
    if (m_slabs.empty()) {
        ref = m_baseline.load(std::memory_order_relaxed);
    }
    else {
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
        ref = curr_ref_end;
    }
    ref = align_size_to_section_boundary(ref);
    size_t ref_end = ref;
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
                                      util::to_string(new_size));
    }

    REALM_ASSERT(matches_section_boundary(ref));

    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    // Create new slab and add to list of slabs
    m_slabs.emplace_back(ref_end, new_size); // Throws
    const Slab& slab = m_slabs.back();
    extend_fast_mapping_with_slab(slab.addr);

    // build a single block from that entry
    return slab_to_entry(slab, ref);
}
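
// Worked example of the growth policy above, assuming minimal_alloc is the
// 128K mentioned in the comment and maximal_alloc is one 64MB section: a
// request of 300,000 bytes (plus two BetweenBlocks) is rounded up in 128K
// steps to 393,216 bytes (3 * 128K). If 4MB of slab space is already
// allocated, new_size is raised to 4MB instead, so slabs roughly double as a
// transaction keeps allocating, until the 64MB cap bounds each slab to a
// single section.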


void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}
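
// The read-only branch above keeps m_free_read_only as an ordered map from
// ref to size and coalesces with both neighbors. Worked example: with free
// entries {100: 50, 200: 50}, freeing ref 150 with size 50 first merges
// forward (150 + 50 == 200), growing the size to 100 and erasing the entry
// at 200, then merges backward into the entry at 100 (100 + 50 == 150),
// leaving the single entry {100: 150}.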

void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}

size_t SlabAlloc::consolidate_free_read_only()
{
    CriticalSection cs(changes);
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    return m_free_read_only.size();
}


MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
{
    REALM_ASSERT_DEBUG(translate(ref) == addr);
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8

    // Possible future enhancement: check if we can extend current space instead
    // of unconditionally allocating new space. In that case, remember to
    // check whether m_free_space_state == free_state_Invalid. Also remember to
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.

    // Allocate new space
    MemRef new_mem = do_alloc(new_size); // Throws

    // Copy existing segment
    char* new_addr = new_mem.get_addr();
    realm::safe_copy_n(addr, old_size, new_addr);

    // Add old segment to freelist
    do_free(ref, addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out)) {
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
                  << " new_size: " << new_size << "\n";
    }
#endif // REALM_DEBUG

    return new_mem;
}


char* SlabAlloc::do_translate(ref_type) const noexcept
{
    REALM_ASSERT(false); // never come here
    return nullptr;
}


int SlabAlloc::get_committed_file_format_version() noexcept
{
    {
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
        if (m_mappings.size()) {
            // if we have mapped a file, m_mappings will have at least one mapping and
            // the first will be to the start of the file. Don't come here if we're
            // just attaching a buffer; buffers don't have mappings.
            realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
        }
    }
    const Header& header = *reinterpret_cast<const Header*>(m_data);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    int file_format_version = int(header.m_file_format[slot_selector]);
    return file_format_version;
}

bool SlabAlloc::is_file_on_streaming_form(const Header& header)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
}
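
// "Streaming form" is the layout produced when a Realm file is written as one
// sequential stream: top_ref slot 0 in the header holds the sentinel
// 0xFFFFFFFFFFFFFFFF and the real top ref lives in a StreamingFooter at the
// very end of the file, as get_top_ref below shows. The first session to open
// such a file rewrites the header in place via convert_from_streaming_form
// (further down in this file).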

ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    const Header& header = reinterpret_cast<const Header&>(*buffer);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    if (is_file_on_streaming_form(header)) {
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
        return ref_type(footer.m_top_ref);
    }
    else {
        return to_ref(header.m_top_ref[slot_selector]);
    }
}

std::string SlabAlloc::get_file_path_for_assertions() const
{
    return m_file.get_path();
}

bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
{
    if (cfg.read_only) {
        // If the file is opened read-only, we cannot change it. This is not a problem,
        // because for a read-only file we assume that it will not change while we use it,
        // hence there will be no need to grow memory mappings.
        // This assumption obviously will not hold if the file is shared by multiple
        // processes or threads with different opening modes.
        // Currently, there is no way to detect if this assumption is violated.
        return false;
    }
    size_t expected_size = size_t(-1);
    size_t size = static_cast<size_t>(m_file.get_size());

    // It is not safe to change the size of a file on streaming form, since the footer
    // must remain available and remain at the very end of the file.
    REALM_ASSERT(!is_file_on_streaming_form());

    // check if online compaction allows us to shrink the file:
    if (top_ref) {
        // Get the expected file size by looking up logical file size stored in top array
        Array top(*this);
        top.init_from_ref(top_ref);
        size_t logical_size = Group::get_logical_file_size(top);
        // make sure we're page aligned, so the code below doesn't first
        // truncate the file, then expand it again
        expected_size = round_up_to_page_size(logical_size);
    }

    // Check if we can shrink the file
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
        detach(true); // keep m_file open
        m_file.resize(expected_size);
        m_file.close();
        return true;
    }

    // We can only safely mmap the file, if its size matches a page boundary. If not,
    // we must change the size to match before mmaping it.
    if (size != round_up_to_page_size(size)) {
        // The file size did not match a page boundary.
        // We must extend the file to a page boundary (unless already there)
        // The file must be extended to match in size prior to being mmapped,
        // as extending it after mmap has undefined behavior.
        if (cfg.session_initiator || !cfg.is_shared) {
            // We can only safely extend the file if we're the session initiator, or if
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
            // done to ensure well defined behavior for memory mappings. It does not matter
            // that the free space management isn't informed
            size = round_up_to_page_size(size);
            detach(true); // keep m_file open
            m_file.prealloc(size);
            m_file.close();
            return true;
        }
        else {
            // Getting here, we have a file of a size that will not work, and without being
            // allowed to extend it. This should not be possible. But allowing a retry is
            // arguably better than giving up and crashing...
            throw Retry();
        }
    }
    return false;
}
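
// Worked example for the page-alignment logic above, assuming 4096-byte
// pages: a 12,488-byte file fails the size != round_up_to_page_size(size)
// test (which yields 16,384), so the session initiator preallocates the file
// out to 16,384 bytes before it is ever mapped. This matters because
// mmapping a range that extends beyond the last page of a file has undefined
// behavior, as the comments in this function note.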

ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
{
    m_cfg = cfg;
    m_write_observer = write_observer;
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    // When 'read_only' is true, this function will throw InvalidDatabase if the
    // file exists already but is empty. This can happen if another process is
    // currently creating it. Note however, that it is only legal for multiple
    // processes to access a database file concurrently if it is done via a
    // DB, and in that case 'read_only' can never be true.
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
    // session_initiator can be set *only* if we're shared.
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
                    get_file_path_for_assertions());
    // clear_file can be set *only* if we're the first session.
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
                    get_file_path_for_assertions());

    using namespace realm::util;
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
    set_read_only(cfg.read_only);
    try {
        m_file.open(path.c_str(), access, create, 0); // Throws
    }
    catch (const FileAccessError& ex) {
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
        if (ex.code() == ErrorCodes::PermissionDenied) {
            msg += util::format(". Please use a path where your app has %1 permissions.",
                                cfg.read_only ? "read" : "read-write");
        }
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
    }
    File::CloseGuard fcg(m_file);
    auto physical_file_size = m_file.get_size();
    // Note that get_size() may (will) return a different size before and after
    // the call below to set_encryption_key.
    m_file.set_encryption_key(cfg.encryption_key);

    note_reader_start(this);
    util::ScopeExit reader_end_guard([this]() noexcept {
        note_reader_end(this);
    });
    size_t size = 0;
    // The size of a database file must not exceed what can be encoded in
    // size_t.
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
        throw InvalidDatabase("Realm file too large", path);
    if (cfg.clear_file_on_error && cfg.session_initiator) {
        if (size == 0 && physical_file_size != 0) {
            cfg.clear_file = true;
        }
        else if (size > 0) {
            try {
                read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
            }
            catch (const InvalidDatabase&) {
                cfg.clear_file = true;
            }
        }
    }
    if (cfg.clear_file) {
        m_file.resize(0);
        size = 0;
        physical_file_size = 0;
    }
    else if (cfg.encryption_key && !cfg.clear_file && size == 0 && physical_file_size != 0) {
        // The opened file holds data, but is so small it cannot have
        // been created with encryption
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
    }
    if (size == 0) {
        if (REALM_UNLIKELY(cfg.read_only))
            throw InvalidDatabase("Read-only access to empty Realm file", path);

        size_t initial_size = page_size();
        // exFAT does not allocate a unique id for the file until it is non-empty. It must be
        // valid at this point because File::get_unique_id() is used to distinguish
        // mappings_for_file in the encryption layer. So the prealloc() is required before
        // interacting with the encryption layer in File::write().
        // Pre-alloc initial space
        m_file.prealloc(initial_size); // Throws
        // seek() back to the start of the file in preparation for writing the header
        // This sequence of File operations is protected from races by
        // DB::m_controlmutex, so we know we are the only ones operating on the file
        m_file.seek(0);
        const char* data = reinterpret_cast<const char*>(&empty_file_header);
        m_file.write(data, sizeof empty_file_header); // Throws

        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws

        size = initial_size;
    }

    ref_type top_ref = read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
    m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
    // m_data not valid at this point!
    m_baseline = 0;
    // make sure that any call to begin_read cause any slab to be placed in free
    // lists correctly
    m_free_space_state = free_space_Invalid;

    // Ensure clean up, if we need to back out:
    DetachGuard dg(*this);

    reset_free_space_tracking();

    // the file could have been produced on a device with a different
    // page size than our own so don't expect the size to be aligned
    if (cfg.encryption_key && size != 0 && size != round_up_to_page_size(size)) {
        size = round_up_to_page_size(size);
    }
    update_reader_view(size);
    REALM_ASSERT(m_mappings.size());
    m_data = m_mappings[0].primary_mapping.get_addr();
    realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    dg.release();  // Do not detach
    fcg.release(); // Do not close
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = util::get_file_info_for_file(m_file);
#endif
    return top_ref;
}

void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
{
    auto header = reinterpret_cast<const Header*>(m_data);
    if (!is_file_on_streaming_form(*header))
        return;

    // Make sure the database is not on streaming format. If we did not do this,
    // a later commit would have to do it. That would require coordination with
    // anybody concurrently joining the session, so it seems easier to do it at
    // session initialization, even if it means writing the database during open.
    {
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
        Header& writable_header = *writable_map.get_addr();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_top_ref[1] = top_ref;
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_flags |= flags_SelectBit;
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();

        realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    }
}

void SlabAlloc::note_reader_start(const void* reader_id)
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_start(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

void SlabAlloc::note_reader_end(const void* reader_id) noexcept
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_end(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());

    // Verify the data structures
    std::string path;                                     // No path
    ref_type top_ref = validate_header(data, size, path); // Throws

    m_data = data;
    size = align_size_to_section_boundary(size);
    m_baseline = size;
    m_attach_mode = attach_UsersBuffer;

    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    return top_ref;
}

void SlabAlloc::init_in_memory_buffer()
{
    m_attach_mode = attach_Heap;
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
    m_data = m_virtual_file_buffer.back().addr;
    m_virtual_file_size = sizeof(empty_file_header);
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);

    m_baseline = m_virtual_file_size;
    m_translation_table_size = 1;
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
    m_ref_translation_ptr = ref_translation_ptr;
}

char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
{
    auto idx = get_section_index(ref);
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
    auto& buf = m_virtual_file_buffer[idx];
    return buf.addr + (ref - buf.start_ref);
}

void SlabAlloc::attach_empty()
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    m_attach_mode = attach_OwnedBuffer;
    m_data = nullptr; // Empty buffer

    // Below this point (assignment to `m_attach_mode`), nothing must throw.

    // No ref must ever be less than the header size, so we will use that as the
    // baseline here.
    size_t size = align_size_to_section_boundary(sizeof(Header));
    m_baseline = size;
    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1];
}

ref_type SlabAlloc::read_and_validate_header(util::File& file, const std::string& path, size_t size,
                                             bool session_initiator, util::WriteObserver* write_observer)
{
    try {
        // we'll read header and (potentially) footer
        File::Map<char> map_header(file, File::access_ReadOnly, sizeof(Header), 0, write_observer);
        realm::util::encryption_read_barrier(map_header, 0, sizeof(Header));
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());

        File::Map<char> map_footer;
        const StreamingFooter* footer = nullptr;
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
            size_t footer_ref = size - sizeof(StreamingFooter);
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
            size_t footer_offset = footer_ref - footer_page_base;
            map_footer = File::Map<char>(file, footer_page_base, File::access_ReadOnly,
                                         sizeof(StreamingFooter) + footer_offset, 0, write_observer);
            realm::util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
        }

        auto top_ref = validate_header(header, footer, size, path, file.get_encryption_key() != nullptr); // Throws

        if (session_initiator && is_file_on_streaming_form(*header)) {
            // Don't compare file format version fields as they are allowed to differ.
            // Also don't compare reserved fields.
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, path);
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0], path);
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1], path);
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2], path);
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3], path);
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0], path);
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], path);
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie, path);
        }
        return top_ref;
    }
    catch (const InvalidDatabase&) {
        throw;
    }
    catch (const DecryptionFailed& e) {
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
    }
    catch (const std::exception& e) {
        throw InvalidDatabase(e.what(), path);
    }
    catch (...) {
        throw InvalidDatabase("unknown error", path);
    }
}

void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
{
    char buf[256];
    snprintf(buf, sizeof(buf),
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
             header.m_flags);
    msg += buf;
    throw InvalidDatabase(msg, path);
}

// Note: This relies on proper mappings having been established by the caller
// for both the header and the streaming footer
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
{
    auto header = reinterpret_cast<const Header*>(data);
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
    return validate_header(header, footer, size, path);
}

ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
                                    const std::string& path, bool is_encrypted)
{
    // Verify that size is sane and 8-byte aligned
    if (REALM_UNLIKELY(size < sizeof(Header)))
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
                              path);
    if (REALM_UNLIKELY(size % 8 != 0))
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);

    // First four bytes of info block is file format id
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
        if (is_encrypted) {
            // Encrypted files check the hmac on read, so there's a lot less
            // which could go wrong and have us still reach this point
            throw_header_exception("header has invalid mnemonic. The file does not appear to be a Realm file.",
                                   *header, path);
        }
        else {
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
                                   *header, path);
        }
    }

    // Last bit in info block indicates which top_ref block is valid
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);

    // Top-ref must always point within buffer
    auto top_ref = header->m_top_ref[slot_selector];
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
            throw InvalidDatabase(
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
                path);
        }
        REALM_ASSERT(footer);
        top_ref = footer->m_top_ref;
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
                                               "The file is probably truncated.",
                                               footer->m_magic_cookie),
                                  path);
        }
    }
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
        throw_header_exception("top ref is not aligned", *header, path);
    }
    if (REALM_UNLIKELY(top_ref >= size)) {
        throw_header_exception(
            util::format(
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
                top_ref),
            *header, path);
    }
    return ref_type(top_ref);
}
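
// The header that validate_header checks is double-buffered: two top_ref
// slots and two file-format slots, with the low bit of m_flags
// (flags_SelectBit) selecting the active pair. A commit can write the
// inactive slot and then flip the select bit, so a crash between the two
// writes always leaves one consistent slot behind. The streaming-form
// sentinel (slot 0 holding 0xFFFFFFFFFFFFFFFF) is the one exception; it
// redirects the lookup to the footer instead.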

1144

1145
size_t SlabAlloc::get_total_size() const noexcept
1146
{
1,089,666✔
1147
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
1,089,666✔
1148
}
1,089,666✔
1149

1150

1151
void SlabAlloc::reset_free_space_tracking()
1152
{
742,101✔
1153
    CriticalSection cs(changes);
742,101✔
1154
    if (is_free_space_clean())
742,101✔
1155
        return;
9,528✔
1156

1157
    // Free all scratch space (done after all data has
1158
    // been commited to persistent space)
1159
    m_free_read_only.clear();
732,573✔
1160

1161
    // release slabs.. keep the initial allocation if it's a minimal allocation,
1162
    // otherwise release it as well. This saves map/unmap for small transactions.
1163
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
744,903✔
1164
        auto& last_slab = m_slabs.back();
12,330✔
1165
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
12,330✔
1166
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
12,330✔
1167
        --m_translation_table_size;
12,330✔
1168
        m_slabs.pop_back();
12,330✔
1169
    }
12,330✔
1170
    rebuild_freelists_from_slab();
732,573✔
1171
    m_free_space_state = free_space_Clean;
732,573✔
1172
    m_commit_size = 0;
732,573✔
1173
}
732,573✔
1174

1175
inline bool randomly_false_in_debug(bool x)
1176
{
×
1177
#ifdef REALM_DEBUG
×
1178
    if (x)
×
1179
        return (std::rand() & 1);
×
1180
#endif
×
1181
    return x;
×
1182
}
×
1183

1184

1185
/*
  Memory mapping

  To make ref->ptr translation fast while also avoiding having to memory map the entire file
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
  essential to map the file in evenly sized sections.

  These sections must be large enough to hold one or more of the largest arrays, which can be up
  to 16MB. You can only mmap file space which has been allocated to a file. If you mmap a range
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
  We don't want to extend the file in increments as large as the section size.

  As the file grows, we grow the mapping by creating a new larger one, which replaces the
  old one in the mapping table. However, we must keep the old mapping open, because older
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
  and only cleaned out once we know that no transaction can refer to them anymore.

  Interaction with encryption

  When encryption is enabled, the memory mapping is to temporary memory, not the file.
  The binding to the file is done by software. This allows us to "cheat" and allocate
  entire sections. With encryption, it doesn't matter if the mapped memory logically
  extends beyond the end of the file, because it will not be accessed.

  Growing/Changing the mapping table.

  There are two mapping tables:

  * m_mappings: This is the "source of truth" about what the current mapping is.
    It is only accessed under lock.
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
    mostly lock-free fashion from the translate function. Because of the lock-free operation this
    table can only be extended. Only selected members in each entry can be changed.
    See RefTranslation in alloc.hpp for more details.
    The fast mapping also maps the slab area used for allocations - as mappings are added,
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
    to memory allocation and release, which is already serialized (since write transactions are
    single threaded).

  When m_mappings is changed due to an extend operation changing a mapping, or when
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:

  * A new fast mapping table is created. The old one is not modified.
  * The old one is held in a waiting area until it is no longer relevant because no
    live transaction can refer to it any more.
 */
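
// A minimal sketch of the resulting translation path, with hypothetical
// names (the real logic lives in translate() / RefTranslation in alloc.hpp):
// with evenly sized sections of 2^section_shift bytes, a ref splits into a
// table index and an offset using two cheap bit operations.
//
//     char* translate_sketch(char* const* mapping_table, size_t ref) noexcept
//     {
//         constexpr size_t section_shift = 26; // assumed section size: 64MB
//         size_t index = ref >> section_shift;                      // which mapping
//         size_t offset = ref & ((size_t(1) << section_shift) - 1); // within it
//         return mapping_table[index] + offset;
//     }
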
void SlabAlloc::update_reader_view(size_t file_size)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
    if (file_size <= old_baseline) {
        schedule_refresh_of_outdated_encrypted_pages();
        return;
    }

    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
    bool replace_last_mapping = false;
    size_t old_num_mappings = get_section_index(old_slab_base);

    if (!is_in_memory()) {
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
                        get_file_path_for_assertions());
        REALM_ASSERT_DEBUG(is_free_space_clean());

        // Create the new mappings we need to cover the new size. We don't mutate
        // any of the member variables until we've successfully created all of the
        // mappings so that we leave things in a consistent state if one of them
        // hits an allocation failure.

        std::vector<MapEntry> new_mappings;
        REALM_ASSERT(m_mappings.size() == old_num_mappings);

        {
            // If the old slab base was greater than the old baseline then the final
            // mapping was a partial section and we need to replace it with a larger
            // mapping.
            if (old_baseline < old_slab_base) {
                // old_slab_base should be 0 if we had no mappings previously
                REALM_ASSERT(old_num_mappings > 0);
                // try to extend the old mapping in-place instead of replacing it.
                MapEntry& cur_entry = m_mappings.back();
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
                    replace_last_mapping = true;
                    --old_num_mappings;
                }
            }

            // Create new mappings covering from the end of the last complete
            // section to the end of the new file size.
            const auto new_slab_base = align_size_to_section_boundary(file_size);
            const size_t num_mappings = get_section_index(new_slab_base);
            new_mappings.reserve(num_mappings - old_num_mappings);
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
                const size_t section_start_offset = get_section_base(k);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (section_size == (1 << section_shift)) {
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
                                                                  section_size, 0, m_write_observer)});
                }
                else {
                    new_mappings.push_back({util::File::Map<char>()});
                    auto& mapping = new_mappings.back().primary_mapping;
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
                                                        section_start_offset, m_write_observer);
                    if (reserved) {
                        // if reservation is supported, the first attempt at extending must succeed
                        if (!mapping.try_extend_to(section_size))
                            throw std::bad_alloc();
                    }
                    else {
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size, 0,
                                                                section_start_offset, m_write_observer);
                    }
                }
            }
        }

        // Now that we've successfully created our mappings, update our member
        // variables (and assume that resizing a simple vector won't produce memory
        // allocation failures, unlike 64 MB mmaps).
        if (replace_last_mapping) {
            MapEntry& cur_entry = m_mappings.back();
            // We should not have a xover mapping here because that would mean
            // that there was already something mapped after the last section
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
            // save the old mapping/keep it open
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
            m_mappings.pop_back();
            m_mapping_version++;
        }

        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
    }

    m_baseline.store(file_size, std::memory_order_relaxed);

    const size_t ref_start = align_size_to_section_boundary(file_size);
    const size_t ref_displacement = ref_start - old_slab_base;
    if (ref_displacement > 0) {
        // Rebase slabs as m_baseline is now bigger than old_slab_base
        for (auto& e : m_slabs) {
            e.ref_end += ref_displacement;
        }
    }

    rebuild_freelists_from_slab();

    // Build the fast path mapping

    // The fast path mapping is an array which is used from multiple threads
    // without locking - see translate().

    // Addition of a new mapping may require a completely new fast mapping table.
    //
    // Being used in a multithreaded scenario, the old mappings must be retained open,
    // until the realm version for which they were established has been closed/detached.
    //
    // This assumes that only write transactions call do_alloc() or do_free() or need to
    // translate refs in the slab area, and that all these uses are serialized, whether
    // that is achieved by being single threaded, interlocked or run from a sequential
    // scheduling queue.
    //
    rebuild_translations(replace_last_mapping, old_num_mappings);

    schedule_refresh_of_outdated_encrypted_pages();
}

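// A minimal sketch of the read-copy-update scheme used above, with assumed
// names (not the real types): writers build a fresh table, publish it with a
// single pointer store, and park the retired table together with the version
// at which it was replaced.
//
//     struct RetiredTable {
//         uint64_t replaced_at_version;
//         RefTranslation* table;
//     };
//     std::atomic<RefTranslation*> published;
//     std::vector<RetiredTable> retired; // drained by purge_old_mappings()
//
//     void publish_sketch(std::unique_ptr<RefTranslation[]> fresh, uint64_t version)
//     {
//         retired.push_back({version, published.load()});
//         published.store(fresh.release()); // readers switch over here
//     }
//
// A retired table is freed only once the oldest live transaction is newer
// than its replaced_at_version (see purge_old_mappings() below).
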
void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
{
#if REALM_ENABLE_ENCRYPTION
    // callers must already hold m_mapping_mutex
    for (auto& e : m_mappings) {
        if (auto m = e.primary_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
        if (auto m = e.xover_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
    }
    // unsafe to do outside writing thread: verify();
#endif // REALM_ENABLE_ENCRYPTION
}

size_t SlabAlloc::get_allocated_size() const noexcept
{
    size_t sz = 0;
    for (const auto& s : m_slabs)
        sz += s.size;
    return sz;
}

void SlabAlloc::extend_fast_mapping_with_slab(char* address)
{
    ++m_translation_table_size;
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
        new_fast_mapping[i] = m_ref_translation_ptr[i];
    }
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
                                    m_ref_translation_ptr.load());
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
    // Memory ranges with slab (working memory) can never have arrays that straddle a boundary,
    // so optimize by clamping the lowest possible xover offset to the end of the section.
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
    m_ref_translation_ptr = new_fast_mapping.release();
}

void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
{
    size_t free_space_size = m_slabs.size();
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
    if (m_translation_table_size < num_mappings + free_space_size) {
        requires_new_translation = true;
    }
    RefTranslation* new_translation_table = m_ref_translation_ptr;
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
    if (requires_new_translation) {
        // we need a new translation table, but must preserve the old one, as translations
        // using it may be in progress concurrently
        if (m_translation_table_size)
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
                                            m_ref_translation_ptr.load());
        m_translation_table_size = num_mappings + free_space_size;
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
        new_translation_table = new_translation_table_owner.get();
        old_num_sections = 0;
    }
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
        if (is_in_memory()) {
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
        }
        else {
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
#if REALM_ENABLE_ENCRYPTION
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
#endif
        }
        REALM_ASSERT(new_translation_table[i].mapping_addr);
        // We don't copy over data for the cross-over mapping. If the mapping is needed,
        // copying will happen on demand (in get_or_add_xover_mapping).
        // Note: that may never be needed, because if the array that needed the original cross-over
        // mapping is freed, any new array allocated at the same position will NOT need a cross-over
        // mapping, but will just use the primary mapping.
    }
    for (size_t k = 0; k < free_space_size; ++k) {
        char* base = m_slabs[k].addr;
        REALM_ASSERT(base);
        new_translation_table[num_mappings + k].mapping_addr = base;
    }

    // This will either be null or the same as new_translation_table, which is about to become owned by
    // m_ref_translation_ptr.
    (void)new_translation_table_owner.release();

    m_ref_translation_ptr = new_translation_table;
}

void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
{
    auto _page_size = page_size();
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
        // some other thread already added a mapping
        // it MUST have been for the exact same address:
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
        return;
    }
    MapEntry* map_entry = &m_mappings[index];
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
    if (!map_entry->xover_mapping.is_attached()) {
        // Create a xover mapping
        auto file_offset = get_section_base(index) + offset;
        auto end_offset = file_offset + size;
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
        auto minimal_mapping_size = end_offset - mapping_file_offset;
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size, 0,
                                      m_write_observer);
        map_entry->xover_mapping = std::move(mapping);
    }
    txl.xover_mapping_base = offset & ~(_page_size - 1);
#if REALM_ENABLE_ENCRYPTION
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
#endif
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
}

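// Illustrative arithmetic for the mapping window above (example numbers,
// not taken from the code): with _page_size == 4096, an array starting at
// file_offset == 0x1001F0 with size == 64 gives
//
//     mapping_file_offset  = 0x1001F0 & ~0xFFF      = 0x100000 // page-aligned start
//     minimal_mapping_size = (0x1001F0 + 64) - 0x100000 = 0x230 bytes
//
// i.e. the xover mapping starts on the page containing the array's first
// byte and is just large enough to cover its last byte.
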
void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
{
    // Verify that each old ref translation pointer still points to a valid
    // thing that we haven't released yet.
#if REALM_DEBUG
    std::unordered_set<const char*> mappings;
    for (auto& m : m_old_mappings) {
        REALM_ASSERT(m.mapping.is_attached());
        mappings.insert(m.mapping.get_addr());
    }
    for (auto& m : m_mappings) {
        REALM_ASSERT(m.primary_mapping.is_attached());
        mappings.insert(m.primary_mapping.get_addr());
        if (m.xover_mapping.is_attached())
            mappings.insert(m.xover_mapping.get_addr());
    }
    for (auto& m : m_virtual_file_buffer) {
        mappings.insert(m.addr);
    }
    if (m_data)
        mappings.insert(m_data);
    for (auto& t : m_old_translations) {
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
                        youngest_live_version, t.replaced_at_version);
        if (nonempty_attachment()) {
            for (size_t i = 0; i < t.translation_count; ++i)
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
        }
    }
#else
    static_cast<void>(youngest_live_version);
#endif
}

void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    verify_old_translations(youngest_live_version);

    auto pred = [=](auto& oldie) {
        return oldie.replaced_at_version < oldest_live_version;
    };
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
                             m_old_translations.end());
    m_youngest_live_version = youngest_live_version;
    verify_old_translations(youngest_live_version);
}

void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
{
    m_youngest_live_version = currently_live_version;
}

const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
{
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();
    return m_free_read_only;
}

size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
{
    size_t end_of_block = start_pos + free_chunk_size;
    size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
        if (alloc_pos + request_size <= next_section_boundary) {
            return alloc_pos;
        }
        alloc_pos = next_section_boundary;
    }
    return 0;
}

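// Illustrative standalone predicate (hypothetical helper, not used by the
// allocator): does [pos, pos + request_size) stay inside a single section?
//
//     bool fits_in_one_section_sketch(size_t pos, size_t request_size) noexcept
//     {
//         constexpr size_t section_shift = 26; // assumed section size: 64MB
//         // assumes request_size > 0
//         return (pos >> section_shift) == ((pos + request_size - 1) >> section_shift);
//     }
//
// find_section_in_range() returns the first position in the free chunk for
// which this holds (bumping alloc_pos to the next boundary when it does
// not), or 0 when no such position exists within the chunk.
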
void SlabAlloc::resize_file(size_t new_file_size)
{
    if (m_attach_mode == attach_SharedFile) {
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
        m_file.prealloc(new_file_size); // Throws
        // resizing is done based on the logical file size. It is ok for the file
        // to actually be bigger, but never smaller.
        REALM_ASSERT(new_file_size <= static_cast<size_t>(m_file.get_size()));

        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws
    }
    else {
        size_t current_size = 0;
        for (auto& b : m_virtual_file_buffer) {
            current_size += b.size;
        }
        if (new_file_size > current_size) {
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
        }
        m_virtual_file_size = new_file_size;
    }
}

#ifdef REALM_DEBUG
void SlabAlloc::reserve_disk_space(size_t size)
{
    if (size != round_up_to_page_size(size))
        size = round_up_to_page_size(size);
    m_file.prealloc(size); // Throws

    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
    if (!disable_sync)
        m_file.sync(); // Throws
}
#endif

void SlabAlloc::verify() const
{
#ifdef REALM_DEBUG
    if (!m_slabs.empty()) {
        // Make sure that all free blocks are within a slab. This is done
        // implicitly by using for_all_free_entries()
        size_t first_possible_ref = m_baseline;
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
        for_all_free_entries([&](size_t ref, size_t size) {
            REALM_ASSERT(ref >= first_possible_ref);
            REALM_ASSERT(ref + size <= first_impossible_ref);
            first_possible_ref = ref;
        });
    }
#endif
}

#ifdef REALM_DEBUG

bool SlabAlloc::is_all_free() const
{
    // verify that slabs contain only free space.
    // this is equivalent to each slab holding BetweenBlocks only at the ends.
    for (const auto& e : m_slabs) {
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
        REALM_ASSERT(first->block_before_size == 0);
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
        REALM_ASSERT(last->block_after_size == 0);
        if (first->block_after_size != last->block_before_size)
            return false;
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
        range -= sizeof(BetweenBlocks);
        // the size of the free area must match the distance between the two BetweenBlocks:
        if (range != first->block_after_size)
            return false;
    }
    return true;
}

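// Illustrative layout note, inferred from the fields used above (see the
// allocator's free-list code for the real definition): BetweenBlocks acts
// as a boundary tag between adjacent blocks in a slab, roughly
//
//     [BB: before=0, after=N]  free block of N bytes  [BB: before=N, after=0]
//
// so a fully free slab holds exactly one free block: the leading tag sees
// nothing before it, the trailing tag sees nothing after it, and both tags
// must agree that the block between them is N bytes long - which is what
// is_all_free() checks against the measured distance between the two tags.
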
// LCOV_EXCL_START
void SlabAlloc::print() const
{
    /* TODO
     *

    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;

    size_t free = 0;
    for (const auto& free_block : m_free_space) {
        free += free_block.size;
    }

    size_t allocated = allocated_for_slabs - free;
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";

    if (!m_slabs.empty()) {
        std::cout << "Slabs: ";
        ref_type first_ref = m_baseline;

        for (const auto& slab : m_slabs) {
            if (&slab != &m_slabs.front())
                std::cout << ", ";

            ref_type last_ref = slab.ref_end - 1;
            size_t size = slab.ref_end - first_ref;
            void* addr = slab.addr;
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
            first_ref = slab.ref_end;
        }
        std::cout << "\n";
    }

    if (!m_free_space.empty()) {
        std::cout << "FreeSpace: ";
        for (const auto& free_block : m_free_space) {
            if (&free_block != &m_free_space.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    if (!m_free_read_only.empty()) {
        std::cout << "FreeSpace (ro): ";
        for (const auto& free_block : m_free_read_only) {
            if (&free_block != &m_free_read_only.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    std::cout << std::flush;
    */
}
// LCOV_EXCL_STOP

#endif // REALM_DEBUG