
realm / realm-core, build 2303 (push, Evergreen, web-flow)

09 May 2024 05:59PM UTC coverage: 90.741% (-0.03%) from 90.769%

RCORE-2120 Throw useful error messages if baasaas fails to start a container (#7480)

101984 of 180590 branches covered (56.47%)
15 of 43 new or added lines in 1 file covered (34.88%)
87 existing lines in 10 files now uncovered
213153 of 234903 relevant lines covered (90.74%)
5893508.36 hits per line
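
The summary percentages follow directly from the raw counts above. As a quick sanity check, a minimal C++ snippet (editor's illustration; not part of the report or of the source file below):

    #include <cstdio>

    int main()
    {
        // relevant-line and branch rates from the raw counts above
        std::printf("lines:    %.3f%%\n", 100.0 * 213153 / 234903); // 90.741%
        std::printf("branches: %.2f%%\n", 100.0 * 101984 / 180590); // 56.47%
    }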

Source File: /src/realm/alloc_slab.cpp (90.81% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <cinttypes>
#include <type_traits>
#include <exception>
#include <algorithm>
#include <memory>
#include <mutex>
#include <map>
#include <atomic>
#include <cstring>

#if REALM_DEBUG
#include <iostream>
#include <unordered_set>
#endif

#ifdef REALM_SLAB_ALLOC_DEBUG
#include <cstdlib>
#endif

#include <realm/util/errno.hpp>
#include <realm/util/encrypted_file_mapping.hpp>
#include <realm/util/terminate.hpp>
#include <realm/util/thread.hpp>
#include <realm/util/scope_exit.hpp>
#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/group.hpp>

using namespace realm;
using namespace realm::util;


namespace {

#ifdef REALM_SLAB_ALLOC_DEBUG
std::map<ref_type, void*> malloc_debug_map;
#endif

class InvalidFreeSpace : std::exception {
public:
    const char* what() const noexcept override
    {
        return "Free space tracking was lost due to out-of-memory. The Realm file must be closed and reopened before "
               "further writes can be performed.";
    }
};

std::atomic<size_t> total_slab_allocated(0);

} // anonymous namespace

size_t SlabAlloc::get_total_slab_size() noexcept
{
    return total_slab_allocated;
}

SlabAlloc::SlabAlloc()
{
    m_initial_section_size = section_size();
    m_free_space_state = free_space_Clean;
    m_baseline = 0;
}

util::File& SlabAlloc::get_file()
{
    return m_file;
}


const SlabAlloc::Header SlabAlloc::empty_file_header = {
    {0, 0}, // top-refs
    {'T', '-', 'D', 'B'},
    {0, 0}, // undecided file format
    0,      // reserved
    0       // flags (lsb is select bit)
};


void SlabAlloc::init_streaming_header(Header* streaming_header, int file_format_version)
{
    using storage_type = std::remove_reference<decltype(Header::m_file_format[0])>::type;
    REALM_ASSERT(!util::int_cast_has_overflow<storage_type>(file_format_version));
    *streaming_header = {
        {0xFFFFFFFFFFFFFFFFULL, 0}, // top-refs
        {'T', '-', 'D', 'B'},
        {storage_type(file_format_version), 0},
        0, // reserved
        0  // flags (lsb is select bit)
    };
}

inline SlabAlloc::Slab::Slab(ref_type r, size_t s)
    : ref_end(r)
    , size(s)
{
    // Ensure that allocation is aligned to at least 8 bytes
    static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 8);

    total_slab_allocated.fetch_add(s, std::memory_order_relaxed);
    addr = new char[size];
    REALM_ASSERT((reinterpret_cast<size_t>(addr) & 0x7ULL) == 0);
#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
}

SlabAlloc::Slab::~Slab()
{
    total_slab_allocated.fetch_sub(size, std::memory_order_relaxed);
    if (addr)
        delete[] addr;
}

void SlabAlloc::detach(bool keep_file_open) noexcept
{
    delete[] m_ref_translation_ptr;
    m_ref_translation_ptr.store(nullptr);
    m_translation_table_size = 0;
    set_read_only(true);
    purge_old_mappings(static_cast<uint64_t>(-1), 0);
    switch (m_attach_mode) {
        case attach_None:
            break;
        case attach_UsersBuffer:
            break;
        case attach_OwnedBuffer:
            delete[] m_data;
            break;
        case attach_SharedFile:
        case attach_UnsharedFile:
            m_data = 0;
            m_mappings.clear();
            m_youngest_live_version = 0;
            if (!keep_file_open)
                m_file.close();
            break;
        case attach_Heap:
            m_data = 0;
            break;
        default:
            REALM_UNREACHABLE();
    }

    // Release all allocated memory - this forces us to create new
    // slabs after re-attaching thereby ensuring that the slabs are
    // placed correctly (logically) after the end of the file.
    m_slabs.clear();
    clear_freelists();
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = nullptr;
#endif

    m_attach_mode = attach_None;
}


SlabAlloc::~SlabAlloc() noexcept
{
#ifdef REALM_DEBUG
    if (is_attached()) {
        // A shared group does not guarantee that all space is free
        if (m_attach_mode != attach_SharedFile) {
            // No point in checking if free space info is invalid
            if (m_free_space_state != free_space_Invalid) {
                if (REALM_COVER_NEVER(!is_all_free())) {
                    print();
#ifndef REALM_SLAB_ALLOC_DEBUG
                    std::cerr << "To get the stack-traces of the corresponding allocations, "
                                 "first compile with REALM_SLAB_ALLOC_DEBUG defined, "
                                 "then run under Valgrind with --leak-check=full\n";
                    REALM_TERMINATE("SlabAlloc detected a leak");
#endif
                }
            }
        }
    }
#endif

    if (is_attached())
        detach();
}


MemRef SlabAlloc::do_alloc(size_t size)
{
    CriticalSection cs(changes);
    REALM_ASSERT_EX(0 < size, size, get_file_path_for_assertions());
    REALM_ASSERT_EX((size & 0x7) == 0, size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8
    REALM_ASSERT_EX(is_attached(), get_file_path_for_assertions());
    // This limits the size of any array to ensure it can fit within a memory section.
    // NOTE: This limit is lower than the limit set by the encoding in node_header.hpp
    REALM_ASSERT_RELEASE_EX(size < (1 << section_shift), size, get_file_path_for_assertions());

    // If we failed to correctly record free space, new allocations cannot be
    // carried out until the free space record is reset.
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    m_free_space_state = free_space_Dirty;
    m_commit_size += size;

    // minimal allocation is sizeof(FreeListEntry)
    if (size < sizeof(FreeBlock))
        size = sizeof(FreeBlock);
    // align to multiple of 8
    if (size & 0x7)
        size = (size + 7) & ~0x7;

    FreeBlock* entry = allocate_block(static_cast<int>(size));
    mark_allocated(entry);
    ref_type ref = entry->ref;

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Alloc ref: " << ref << " size: " << size << "\n";
#endif

    char* addr = reinterpret_cast<char*>(entry);
    REALM_ASSERT_EX(addr == translate(ref), addr, ref, get_file_path_for_assertions());

#if REALM_ENABLE_ALLOC_SET_ZERO
    std::fill(addr, addr + size, 0);
#endif
#ifdef REALM_SLAB_ALLOC_DEBUG
    malloc_debug_map[ref] = malloc(1);
#endif
    REALM_ASSERT_EX(ref >= m_baseline, ref, m_baseline, get_file_path_for_assertions());
    return MemRef(addr, ref, *this);
}

SlabAlloc::FreeBlock* SlabAlloc::get_prev_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_before(entry);
    if (bb->block_before_size <= 0)
        return nullptr; // no prev block, or it is in use
    return block_before(bb);
}

SlabAlloc::FreeBlock* SlabAlloc::get_next_block_if_mergeable(SlabAlloc::FreeBlock* entry)
{
    auto bb = bb_after(entry);
    if (bb->block_after_size <= 0)
        return nullptr; // no next block, or it is in use
    return block_after(bb);
}

SlabAlloc::FreeList SlabAlloc::find(int size)
{
    FreeList retval;
    retval.it = m_block_map.lower_bound(size);
    if (retval.it != m_block_map.end()) {
        retval.size = retval.it->first;
    }
    else {
        retval.size = 0;
    }
    return retval;
}

SlabAlloc::FreeList SlabAlloc::find_larger(FreeList hint, int size)
{
    int needed_size = size + sizeof(BetweenBlocks) + sizeof(FreeBlock);
    while (hint.it != m_block_map.end() && hint.it->first < needed_size)
        ++hint.it;
    if (hint.it == m_block_map.end())
        hint.size = 0; // indicate "not found"
    return hint;
}

SlabAlloc::FreeBlock* SlabAlloc::pop_freelist_entry(FreeList list)
{
    FreeBlock* retval = list.it->second;
    FreeBlock* header = retval->next;
    if (header == retval)
        m_block_map.erase(list.it);
    else
        list.it->second = header;
    retval->unlink();
    return retval;
}

void SlabAlloc::FreeBlock::unlink()
{
    auto _next = next;
    auto _prev = prev;
    _next->prev = prev;
    _prev->next = next;
    clear_links();
}

void SlabAlloc::remove_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    auto it = m_block_map.find(size);
    REALM_ASSERT_EX(it != m_block_map.end(), get_file_path_for_assertions());
    auto header = it->second;
    if (header == entry) {
        header = entry->next;
        if (header == entry)
            m_block_map.erase(it);
        else
            it->second = header;
    }
    entry->unlink();
}

void SlabAlloc::push_freelist_entry(FreeBlock* entry)
{
    int size = bb_before(entry)->block_after_size;
    FreeBlock* header;
    auto it = m_block_map.find(size);
    if (it != m_block_map.end()) {
        header = it->second;
        it->second = entry;
        entry->next = header;
        entry->prev = header->prev;
        entry->prev->next = entry;
        entry->next->prev = entry;
    }
    else {
        header = nullptr;
        m_block_map[size] = entry;
        entry->next = entry->prev = entry;
    }
}

void SlabAlloc::mark_freed(FreeBlock* entry, int size)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size < 0, bb->block_after_size, get_file_path_for_assertions());
    auto alloc_size = -bb->block_after_size;
    int max_waste = sizeof(FreeBlock) + sizeof(BetweenBlocks);
    REALM_ASSERT_EX(alloc_size >= size && alloc_size <= size + max_waste, alloc_size, size,
                    get_file_path_for_assertions());
    bb->block_after_size = alloc_size;
    bb = bb_after(entry);
    REALM_ASSERT_EX(bb->block_before_size < 0, bb->block_before_size, get_file_path_for_assertions());
    REALM_ASSERT(-bb->block_before_size == alloc_size);
    bb->block_before_size = alloc_size;
}

void SlabAlloc::mark_allocated(FreeBlock* entry)
{
    auto bb = bb_before(entry);
    REALM_ASSERT_EX(bb->block_after_size > 0, bb->block_after_size, get_file_path_for_assertions());
    auto bb2 = bb_after(entry);
    bb->block_after_size = 0 - bb->block_after_size;
    REALM_ASSERT_EX(bb2->block_before_size > 0, bb2->block_before_size, get_file_path_for_assertions());
    bb2->block_before_size = 0 - bb2->block_before_size;
}

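// [Editor's illustrative sketch, not part of alloc_slab.cpp.]
// The BetweenBlocks boundary tags between neighbouring blocks encode block
// state in the sign of the stored size: positive means the adjacent block is
// free, negative means it is allocated, and zero marks the edge of a slab
// (see slab_to_entry() below). Assuming that layout, the checks used by the
// merge helpers above reduce to:
//
//     bool prev_is_free(const BetweenBlocks* bb) { return bb->block_before_size > 0; }
//     bool next_is_free(const BetweenBlocks* bb) { return bb->block_after_size > 0; }
//
// mark_allocated() and mark_freed() simply flip these signs on the tags on
// both sides of a block, which is exactly what get_prev_block_if_mergeable()
// and get_next_block_if_mergeable() test before coalescing.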
SlabAlloc::FreeBlock* SlabAlloc::allocate_block(int size)
{
    FreeList list = find(size);
    if (list.found_exact(size)) {
        return pop_freelist_entry(list);
    }
    // no exact matches.
    list = find_larger(list, size);
    FreeBlock* block;
    if (list.found_something()) {
        block = pop_freelist_entry(list);
    }
    else {
        block = grow_slab(size);
    }
    FreeBlock* remaining = break_block(block, size);
    if (remaining)
        push_freelist_entry(remaining);
    REALM_ASSERT_EX(size_from_block(block) >= size, size_from_block(block), size, get_file_path_for_assertions());
    return block;
}

SlabAlloc::FreeBlock* SlabAlloc::slab_to_entry(const Slab& slab, ref_type ref_start)
{
    auto bb = reinterpret_cast<BetweenBlocks*>(slab.addr);
    bb->block_before_size = 0;
    int block_size = static_cast<int>(slab.ref_end - ref_start - 2 * sizeof(BetweenBlocks));
    bb->block_after_size = block_size;
    auto entry = block_after(bb);
    entry->clear_links();
    entry->ref = ref_start + sizeof(BetweenBlocks);
    bb = bb_after(entry);
    bb->block_before_size = block_size;
    bb->block_after_size = 0;
    return entry;
}

void SlabAlloc::clear_freelists()
{
    m_block_map.clear();
}

void SlabAlloc::rebuild_freelists_from_slab()
{
    clear_freelists();
    ref_type ref_start = align_size_to_section_boundary(m_baseline.load(std::memory_order_relaxed));
    for (const auto& e : m_slabs) {
        FreeBlock* entry = slab_to_entry(e, ref_start);
        push_freelist_entry(entry);
        ref_start = align_size_to_section_boundary(e.ref_end);
    }
}

SlabAlloc::FreeBlock* SlabAlloc::break_block(FreeBlock* block, int new_size)
{
    int size = size_from_block(block);
    int remaining_size = size - (new_size + sizeof(BetweenBlocks));
    if (remaining_size < static_cast<int>(sizeof(FreeBlock)))
        return nullptr;
    bb_after(block)->block_before_size = remaining_size;
    bb_before(block)->block_after_size = new_size;
    auto bb_between = bb_after(block);
    bb_between->block_before_size = new_size;
    bb_between->block_after_size = remaining_size;
    FreeBlock* remaining_block = block_after(bb_between);
    remaining_block->ref = block->ref + new_size + sizeof(BetweenBlocks);
    remaining_block->clear_links();
    block->clear_links();
    return remaining_block;
}

SlabAlloc::FreeBlock* SlabAlloc::merge_blocks(FreeBlock* first, FreeBlock* last)
{
    int size_first = size_from_block(first);
    int size_last = size_from_block(last);
    int new_size = size_first + size_last + sizeof(BetweenBlocks);
    bb_before(first)->block_after_size = new_size;
    bb_after(last)->block_before_size = new_size;
    return first;
}

SlabAlloc::FreeBlock* SlabAlloc::grow_slab(int size)
{
    // Allocate new slab.
    // - Always allocate at least 128K. This is also the amount of
    //   memory that we allow the slab allocator to keep between
    //   transactions. Allowing it to keep a small amount between
    //   transactions makes very small transactions faster by avoiding
    //   repeated unmap/mmap system calls.
    // - When allocating, allocate as much as we already have, but
    // - Never allocate more than a full section (64MB). This policy
    //   leads to gradual allocation of larger and larger blocks until
    //   we reach allocation of entire sections.
    size += 2 * sizeof(BetweenBlocks);
    size_t new_size = minimal_alloc;
    while (new_size < uint64_t(size))
        new_size += minimal_alloc;
    size_t already_allocated = get_allocated_size();
    if (new_size < already_allocated)
        new_size = already_allocated;
    if (new_size > maximal_alloc)
        new_size = maximal_alloc;

    ref_type ref;
    if (m_slabs.empty()) {
        ref = m_baseline.load(std::memory_order_relaxed);
    }
    else {
        // Find size of memory that has been modified (through copy-on-write) in current write transaction
        ref_type curr_ref_end = to_size_t(m_slabs.back().ref_end);
        REALM_ASSERT_DEBUG_EX(curr_ref_end >= m_baseline, curr_ref_end, m_baseline, get_file_path_for_assertions());
        ref = curr_ref_end;
    }
    ref = align_size_to_section_boundary(ref);
    size_t ref_end = ref;
    if (REALM_UNLIKELY(int_add_with_overflow_detect(ref_end, new_size))) {
        throw MaximumFileSizeExceeded("AllocSlab slab ref_end size overflow: " + util::to_string(ref) + " + " +
                                      util::to_string(new_size));
    }

    REALM_ASSERT(matches_section_boundary(ref));

    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    // Create new slab and add to list of slabs
    m_slabs.emplace_back(ref_end, new_size); // Throws
    const Slab& slab = m_slabs.back();
    extend_fast_mapping_with_slab(slab.addr);

    // build a single block from that entry
    return slab_to_entry(slab, ref);
}
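
// [Editor's illustrative sketch, not part of alloc_slab.cpp.]
// Restating the growth policy implemented above in isolation, with
// minimal_alloc (128 KiB) and maximal_alloc (one 64 MiB section) taken as
// assumed constants from alloc_slab.hpp:
//
//     size_t next_slab_size(size_t request, size_t already_allocated)
//     {
//         size_t new_size = minimal_alloc;          // never less than 128 KiB
//         while (new_size < request)
//             new_size += minimal_alloc;            // round up to what is needed
//         if (new_size < already_allocated)
//             new_size = already_allocated;         // roughly doubles total slab space
//         return std::min(new_size, maximal_alloc); // cap at one full section
//     }
//
// Successive slabs therefore grow geometrically until they reach the section
// size, after which each new slab is exactly one section.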


void SlabAlloc::do_free(ref_type ref, char* addr)
{
    REALM_ASSERT_EX(translate(ref) == addr, translate(ref), addr, get_file_path_for_assertions());
    CriticalSection cs(changes);

    bool read_only = is_read_only(ref);
#ifdef REALM_SLAB_ALLOC_DEBUG
    free(malloc_debug_map[ref]);
#endif

    // Get size from segment
    size_t size =
        read_only ? NodeHeader::get_byte_size_from_header(addr) : NodeHeader::get_capacity_from_header(addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out))
        std::cerr << "Free ref: " << ref << " size: " << size << "\n";
#endif

    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        return;

    // Mutable memory cannot be freed unless it has first been allocated, and
    // any allocation puts free space tracking into the "dirty" state.
    REALM_ASSERT_EX(read_only || m_free_space_state == free_space_Dirty, read_only, m_free_space_state,
                    free_space_Dirty, get_file_path_for_assertions());

    m_free_space_state = free_space_Dirty;

    if (read_only) {
        // Free space in read only segment is tracked separately
        try {
            REALM_ASSERT_RELEASE_EX(ref != 0, ref, get_file_path_for_assertions());
            REALM_ASSERT_RELEASE_EX(!(ref & 7), ref, get_file_path_for_assertions());
            auto next = m_free_read_only.lower_bound(ref);
            if (next != m_free_read_only.end()) {
                REALM_ASSERT_RELEASE_EX(ref + size <= next->first, ref, size, next->first, next->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with next element
                if (ref + size == next->first) {
                    // if so, combine to include next element and remove that from collection
                    size += next->second;
                    next = m_free_read_only.erase(next);
                }
            }
            if (!m_free_read_only.empty() && next != m_free_read_only.begin()) {
                // There must be a previous element - see if we can merge
                auto prev = next;
                prev--;

                REALM_ASSERT_RELEASE_EX(prev->first + prev->second <= ref, ref, size, prev->first, prev->second,
                                        get_file_path_for_assertions());
                // See if element can be combined with previous element
                // We can do that just by adding the size
                if (prev->first + prev->second == ref) {
                    prev->second += size;
                    return; // Done!
                }
                m_free_read_only.emplace_hint(next, ref, size); // Throws
            }
            else {
                m_free_read_only.emplace(ref, size); // Throws
            }
        }
        catch (...) {
            m_free_space_state = free_space_Invalid;
        }
    }
    else {
        m_commit_size -= size;

        // fixup size to take into account the allocator's need to store a FreeBlock in a freed block
        if (size < sizeof(FreeBlock))
            size = sizeof(FreeBlock);
        // align to multiple of 8
        if (size & 0x7)
            size = (size + 7) & ~0x7;

        FreeBlock* e = reinterpret_cast<FreeBlock*>(addr);
        REALM_ASSERT_RELEASE_EX(size < 2UL * 1024 * 1024 * 1024, size, get_file_path_for_assertions());
        mark_freed(e, static_cast<int>(size));
        free_block(ref, e);
    }
}

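// [Editor's illustrative sketch, not part of alloc_slab.cpp.]
// The read-only branch of do_free() above is interval coalescing over an
// ordered map keyed by start ref (assuming m_free_read_only is a
// std::map<ref_type, size_t> mapping start ref to range size):
//
//     auto next = free_map.lower_bound(ref);
//     if (next != free_map.end() && ref + size == next->first) {
//         size += next->second;               // absorb the following free range
//         next = free_map.erase(next);
//     }
//     if (next != free_map.begin()) {
//         auto prev = std::prev(next);
//         if (prev->first + prev->second == ref) {
//             prev->second += size;           // extend the preceding free range
//             return;
//         }
//     }
//     free_map.emplace_hint(next, ref, size); // no neighbour to merge with
//
// Keeping the ranges maximal keeps m_free_read_only small, which is why
// consolidate_free_read_only() below can simply return its size.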
void SlabAlloc::free_block(ref_type ref, SlabAlloc::FreeBlock* block)
{
    // merge with surrounding blocks if possible
    block->ref = ref;
    FreeBlock* prev = get_prev_block_if_mergeable(block);
    if (prev) {
        remove_freelist_entry(prev);
        block = merge_blocks(prev, block);
    }
    FreeBlock* next = get_next_block_if_mergeable(block);
    if (next) {
        remove_freelist_entry(next);
        block = merge_blocks(block, next);
    }
    push_freelist_entry(block);
}

size_t SlabAlloc::consolidate_free_read_only()
{
    CriticalSection cs(changes);
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();

    return m_free_read_only.size();
}


MemRef SlabAlloc::do_realloc(size_t ref, char* addr, size_t old_size, size_t new_size)
{
    REALM_ASSERT_DEBUG(translate(ref) == addr);
    REALM_ASSERT_EX(0 < new_size, new_size, get_file_path_for_assertions());
    REALM_ASSERT_EX((new_size & 0x7) == 0, new_size,
                    get_file_path_for_assertions()); // only allow sizes that are multiples of 8

    // Possible future enhancement: check if we can extend current space instead
    // of unconditionally allocating new space. In that case, remember to
    // check whether m_free_space_state == free_state_Invalid. Also remember to
    // fill with zero if REALM_ENABLE_ALLOC_SET_ZERO is non-zero.

    // Allocate new space
    MemRef new_mem = do_alloc(new_size); // Throws

    // Copy existing segment
    char* new_addr = new_mem.get_addr();
    realm::safe_copy_n(addr, old_size, new_addr);

    // Add old segment to freelist
    do_free(ref, addr);

#ifdef REALM_DEBUG
    if (REALM_COVER_NEVER(m_debug_out)) {
        std::cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " new_ref: " << new_mem.get_ref()
                  << " new_size: " << new_size << "\n";
    }
#endif // REALM_DEBUG

    return new_mem;
}


char* SlabAlloc::do_translate(ref_type) const noexcept
{
    REALM_ASSERT(false); // never come here
    return nullptr;
}


int SlabAlloc::get_committed_file_format_version() noexcept
{
    {
        std::lock_guard<std::mutex> lock(m_mapping_mutex);
        if (m_mappings.size()) {
            // if we have mapped a file, m_mappings will have at least one mapping and
            // the first will be to the start of the file. Don't come here, if we're
            // just attaching a buffer. They don't have mappings.
            realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
        }
    }
    const Header& header = *reinterpret_cast<const Header*>(m_data);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    int file_format_version = int(header.m_file_format[slot_selector]);
    return file_format_version;
}

bool SlabAlloc::is_file_on_streaming_form(const Header& header)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    uint_fast64_t ref = uint_fast64_t(header.m_top_ref[slot_selector]);
    return (slot_selector == 0 && ref == 0xFFFFFFFFFFFFFFFFULL);
}

ref_type SlabAlloc::get_top_ref(const char* buffer, size_t len)
{
    // LIMITATION: Only come here if we've already had a read barrier for the affected part of the file
    const Header& header = reinterpret_cast<const Header&>(*buffer);
    int slot_selector = ((header.m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);
    if (is_file_on_streaming_form(header)) {
        const StreamingFooter& footer = *(reinterpret_cast<const StreamingFooter*>(buffer + len) - 1);
        return ref_type(footer.m_top_ref);
    }
    else {
        return to_ref(header.m_top_ref[slot_selector]);
    }
}

std::string SlabAlloc::get_file_path_for_assertions() const
{
    return m_file.get_path();
}

bool SlabAlloc::align_filesize_for_mmap(ref_type top_ref, Config& cfg)
{
    if (cfg.read_only) {
        // If the file is opened read-only, we cannot change it. This is not a problem,
        // because for a read-only file we assume that it will not change while we use it,
        // hence there will be no need to grow memory mappings.
        // This assumption obviously will not hold, if the file is shared by multiple
        // processes or threads with different opening modes.
        // Currently, there is no way to detect if this assumption is violated.
        return false;
    }
    size_t expected_size = size_t(-1);
    size_t size = static_cast<size_t>(m_file.get_size());

    // It is not safe to change the size of a file on streaming form, since the footer
    // must remain available and remain at the very end of the file.
    REALM_ASSERT(!is_file_on_streaming_form());

    // check if online compaction allows us to shrink the file:
    if (top_ref) {
        // Get the expected file size by looking up logical file size stored in top array
        Array top(*this);
        top.init_from_ref(top_ref);
        size_t logical_size = Group::get_logical_file_size(top);
        // make sure we're page aligned, so the code below doesn't first
        // truncate the file, then expand it again
        expected_size = round_up_to_page_size(logical_size);
    }

    // Check if we can shrink the file
    if (cfg.session_initiator && expected_size < size && !cfg.read_only) {
        detach(true); // keep m_file open
        m_file.resize(expected_size);
        m_file.close();
        return true;
    }

    // We can only safely mmap the file, if its size matches a page boundary. If not,
    // we must change the size to match before mmaping it.
    if (size != round_up_to_page_size(size)) {
        // The file size did not match a page boundary.
        // We must extend the file to a page boundary (unless already there)
        // The file must be extended to match in size prior to being mmapped,
        // as extending it after mmap has undefined behavior.
        if (cfg.session_initiator || !cfg.is_shared) {
            // We can only safely extend the file if we're the session initiator, or if
            // the file isn't shared at all. Extending the file to a page boundary is ONLY
            // done to ensure well defined behavior for memory mappings. It does not matter,
            // that the free space management isn't informed
            size = round_up_to_page_size(size);
            detach(true); // keep m_file open
            m_file.prealloc(size);
            m_file.close();
            return true;
        }
        else {
            // Getting here, we have a file of a size that will not work, and without being
            // allowed to extend it. This should not be possible. But allowing a retry is
            // arguably better than giving up and crashing...
            throw Retry();
        }
    }
    return false;
}

ref_type SlabAlloc::attach_file(const std::string& path, Config& cfg, util::WriteObserver* write_observer)
{
    m_cfg = cfg;
    m_write_observer = write_observer;
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    // When 'read_only' is true, this function will throw InvalidDatabase if the
    // file exists already but is empty. This can happen if another process is
    // currently creating it. Note however, that it is only legal for multiple
    // processes to access a database file concurrently if it is done via a
    // DB, and in that case 'read_only' can never be true.
    REALM_ASSERT_EX(!(cfg.is_shared && cfg.read_only), cfg.is_shared, cfg.read_only, get_file_path_for_assertions());
    // session_initiator can be set *only* if we're shared.
    REALM_ASSERT_EX(cfg.is_shared || !cfg.session_initiator, cfg.is_shared, cfg.session_initiator,
                    get_file_path_for_assertions());
    // clear_file can be set *only* if we're the first session.
    REALM_ASSERT_EX(cfg.session_initiator || !cfg.clear_file, cfg.session_initiator, cfg.clear_file,
                    get_file_path_for_assertions());

    using namespace realm::util;
    File::AccessMode access = cfg.read_only ? File::access_ReadOnly : File::access_ReadWrite;
    File::CreateMode create = cfg.read_only || cfg.no_create ? File::create_Never : File::create_Auto;
    set_read_only(cfg.read_only);
    try {
        m_file.open(path.c_str(), access, create, 0); // Throws
    }
    catch (const FileAccessError& ex) {
        auto msg = util::format_errno("Failed to open Realm file at path '%2': %1", ex.get_errno(), path);
        if (ex.code() == ErrorCodes::PermissionDenied) {
            msg += util::format(". Please use a path where your app has %1 permissions.",
                                cfg.read_only ? "read" : "read-write");
        }
        throw FileAccessError(ex.code(), msg, path, ex.get_errno());
    }
    File::CloseGuard fcg(m_file);
    auto physical_file_size = m_file.get_size();
    // Note that get_size() may (will) return a different size before and after
    // the call below to set_encryption_key.
    m_file.set_encryption_key(cfg.encryption_key);

    note_reader_start(this);
    util::ScopeExit reader_end_guard([this]() noexcept {
        note_reader_end(this);
    });
    size_t size = 0;
    // The size of a database file must not exceed what can be encoded in
    // size_t.
    if (REALM_UNLIKELY(int_cast_with_overflow_detect(m_file.get_size(), size)))
        throw InvalidDatabase("Realm file too large", path);
    if (cfg.clear_file_on_error && cfg.session_initiator) {
        if (size == 0 && physical_file_size != 0) {
            cfg.clear_file = true;
        }
        else if (size > 0) {
            try {
                read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
            }
            catch (const InvalidDatabase&) {
                cfg.clear_file = true;
            }
        }
    }
    if (cfg.clear_file) {
        m_file.resize(0);
        size = 0;
        physical_file_size = 0;
    }
    else if (cfg.encryption_key && !cfg.clear_file && size == 0 && physical_file_size != 0) {
        // The opened file holds data, but is so small it cannot have
        // been created with encryption
        throw InvalidDatabase("Attempt to open unencrypted file with encryption key", path);
    }
    if (size == 0) {
        if (REALM_UNLIKELY(cfg.read_only))
            throw InvalidDatabase("Read-only access to empty Realm file", path);

        size_t initial_size = page_size();
        // exFAT does not allocate a unique id for the file until it is non-empty. It must be
        // valid at this point because File::get_unique_id() is used to distinguish
        // mappings_for_file in the encryption layer. So the prealloc() is required before
        // interacting with the encryption layer in File::write().
        // Pre-alloc initial space
        m_file.prealloc(initial_size); // Throws
        // seek() back to the start of the file in preparation for writing the header
        // This sequence of File operations is protected from races by
        // DB::m_controlmutex, so we know we are the only ones operating on the file
        m_file.seek(0);
        const char* data = reinterpret_cast<const char*>(&empty_file_header);
        m_file.write(data, sizeof empty_file_header); // Throws

        bool disable_sync = get_disable_sync_to_disk() || cfg.disable_sync;
        if (!disable_sync)
            m_file.sync(); // Throws

        size = initial_size;
    }

    ref_type top_ref = read_and_validate_header(m_file, path, size, cfg.session_initiator, m_write_observer);
    m_attach_mode = cfg.is_shared ? attach_SharedFile : attach_UnsharedFile;
    // m_data not valid at this point!
    m_baseline = 0;
    // make sure that any call to begin_read cause any slab to be placed in free
    // lists correctly
    m_free_space_state = free_space_Invalid;

    // Ensure clean up, if we need to back out:
    DetachGuard dg(*this);

    reset_free_space_tracking();

    // the file could have been produced on a device with a different
    // page size than our own so don't expect the size to be aligned
    if (cfg.encryption_key && size != 0 && size != round_up_to_page_size(size)) {
        size = round_up_to_page_size(size);
    }
    update_reader_view(size);
    REALM_ASSERT(m_mappings.size());
    m_data = m_mappings[0].primary_mapping.get_addr();
    realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    dg.release();  // Do not detach
    fcg.release(); // Do not close
#if REALM_ENABLE_ENCRYPTION
    m_realm_file_info = util::get_file_info_for_file(m_file);
#endif
    return top_ref;
}

void SlabAlloc::convert_from_streaming_form(ref_type top_ref)
{
    auto header = reinterpret_cast<const Header*>(m_data);
    if (!is_file_on_streaming_form(*header))
        return;

    // Make sure the database is not on streaming format. If we did not do this,
    // a later commit would have to do it. That would require coordination with
    // anybody concurrently joining the session, so it seems easier to do it at
    // session initialization, even if it means writing the database during open.
    {
        File::Map<Header> writable_map(m_file, File::access_ReadWrite, sizeof(Header)); // Throws
        Header& writable_header = *writable_map.get_addr();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_top_ref[1] = top_ref;
        writable_header.m_file_format[1] = writable_header.m_file_format[0];
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();
        realm::util::encryption_read_barrier_for_write(writable_map, 0);
        writable_header.m_flags |= flags_SelectBit;
        realm::util::encryption_write_barrier(writable_map, 0);
        writable_map.sync();

        realm::util::encryption_read_barrier(m_mappings[0].primary_mapping, 0, sizeof(Header));
    }
}

void SlabAlloc::note_reader_start(const void* reader_id)
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_start(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

void SlabAlloc::note_reader_end(const void* reader_id) noexcept
{
#if REALM_ENABLE_ENCRYPTION
    if (m_realm_file_info)
        util::encryption_note_reader_end(*m_realm_file_info, reader_id);
#else
    static_cast<void>(reader_id);
#endif
}

ref_type SlabAlloc::attach_buffer(const char* data, size_t size)
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());
    REALM_ASSERT_EX(size <= (1UL << section_shift), get_file_path_for_assertions());

    // Verify the data structures
    std::string path;                                     // No path
    ref_type top_ref = validate_header(data, size, path); // Throws

    m_data = data;
    size = align_size_to_section_boundary(size);
    m_baseline = size;
    m_attach_mode = attach_UsersBuffer;

    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    return top_ref;
}

void SlabAlloc::init_in_memory_buffer()
{
    m_attach_mode = attach_Heap;
    m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, 0);
    m_data = m_virtual_file_buffer.back().addr;
    m_virtual_file_size = sizeof(empty_file_header);
    memcpy(const_cast<char*>(m_data), &empty_file_header, m_virtual_file_size);

    m_baseline = m_virtual_file_size;
    m_translation_table_size = 1;
    auto ref_translation_ptr = new RefTranslation[1]{RefTranslation{const_cast<char*>(m_data)}};
    ref_translation_ptr->lowest_possible_xover_offset = m_virtual_file_buffer.back().size;
    m_ref_translation_ptr = ref_translation_ptr;
}

char* SlabAlloc::translate_memory_pos(ref_type ref) const noexcept
{
    auto idx = get_section_index(ref);
    REALM_ASSERT(idx < m_virtual_file_buffer.size());
    auto& buf = m_virtual_file_buffer[idx];
    return buf.addr + (ref - buf.start_ref);
}

void SlabAlloc::attach_empty()
{
    // ExceptionSafety: If this function throws, it must leave the allocator in
    // the detached state.

    REALM_ASSERT_EX(!is_attached(), get_file_path_for_assertions());

    m_attach_mode = attach_OwnedBuffer;
    m_data = nullptr; // Empty buffer

    // Below this point (assignment to `m_attach_mode`), nothing must throw.

    // No ref must ever be less than the header size, so we will use that as the
    // baseline here.
    size_t size = align_size_to_section_boundary(sizeof(Header));
    m_baseline = size;
    m_translation_table_size = 1;
    m_ref_translation_ptr = new RefTranslation[1];
}

ref_type SlabAlloc::read_and_validate_header(util::File& file, const std::string& path, size_t size,
                                             bool session_initiator, util::WriteObserver* write_observer)
{
    try {
        // we'll read header and (potentially) footer
        File::Map<char> map_header(file, File::access_ReadOnly, sizeof(Header), 0, write_observer);
        realm::util::encryption_read_barrier(map_header, 0, sizeof(Header));
        auto header = reinterpret_cast<const Header*>(map_header.get_addr());

        File::Map<char> map_footer;
        const StreamingFooter* footer = nullptr;
        if (is_file_on_streaming_form(*header) && size >= sizeof(StreamingFooter) + sizeof(Header)) {
            size_t footer_ref = size - sizeof(StreamingFooter);
            size_t footer_page_base = footer_ref & ~(page_size() - 1);
            size_t footer_offset = footer_ref - footer_page_base;
            map_footer = File::Map<char>(file, footer_page_base, File::access_ReadOnly,
                                         sizeof(StreamingFooter) + footer_offset, 0, write_observer);
            realm::util::encryption_read_barrier(map_footer, footer_offset, sizeof(StreamingFooter));
            footer = reinterpret_cast<const StreamingFooter*>(map_footer.get_addr() + footer_offset);
        }

        auto top_ref = validate_header(header, footer, size, path, file.get_encryption_key() != nullptr); // Throws

        if (session_initiator && is_file_on_streaming_form(*header)) {
            // Don't compare file format version fields as they are allowed to differ.
            // Also don't compare reserved fields.
            REALM_ASSERT_EX(header->m_flags == 0, header->m_flags, path);
            REALM_ASSERT_EX(header->m_mnemonic[0] == uint8_t('T'), header->m_mnemonic[0], path);
            REALM_ASSERT_EX(header->m_mnemonic[1] == uint8_t('-'), header->m_mnemonic[1], path);
            REALM_ASSERT_EX(header->m_mnemonic[2] == uint8_t('D'), header->m_mnemonic[2], path);
            REALM_ASSERT_EX(header->m_mnemonic[3] == uint8_t('B'), header->m_mnemonic[3], path);
            REALM_ASSERT_EX(header->m_top_ref[0] == 0xFFFFFFFFFFFFFFFFULL, header->m_top_ref[0], path);
            REALM_ASSERT_EX(header->m_top_ref[1] == 0, header->m_top_ref[1], path);
            REALM_ASSERT_EX(footer->m_magic_cookie == footer_magic_cookie, footer->m_magic_cookie, path);
        }
        return top_ref;
    }
    catch (const InvalidDatabase&) {
        throw;
    }
    catch (const DecryptionFailed& e) {
        throw InvalidDatabase(util::format("Realm file decryption failed (%1)", e.what()), path);
    }
    catch (const std::exception& e) {
        throw InvalidDatabase(e.what(), path);
    }
    catch (...) {
        throw InvalidDatabase("unknown error", path);
    }
}

void SlabAlloc::throw_header_exception(std::string msg, const Header& header, const std::string& path)
{
    char buf[256];
    snprintf(buf, sizeof(buf),
             " top_ref[0]: %" PRIX64 ", top_ref[1]: %" PRIX64 ", "
             "mnemonic: %X %X %X %X, fmt[0]: %d, fmt[1]: %d, flags: %X",
             header.m_top_ref[0], header.m_top_ref[1], header.m_mnemonic[0], header.m_mnemonic[1],
             header.m_mnemonic[2], header.m_mnemonic[3], header.m_file_format[0], header.m_file_format[1],
             header.m_flags);
    msg += buf;
    throw InvalidDatabase(msg, path);
}

// Note: This relies on proper mappings having been established by the caller
// for both the header and the streaming footer
ref_type SlabAlloc::validate_header(const char* data, size_t size, const std::string& path)
{
    auto header = reinterpret_cast<const Header*>(data);
    auto footer = reinterpret_cast<const StreamingFooter*>(data + size - sizeof(StreamingFooter));
    return validate_header(header, footer, size, path);
}

ref_type SlabAlloc::validate_header(const Header* header, const StreamingFooter* footer, size_t size,
                                    const std::string& path, bool is_encrypted)
{
    // Verify that size is sane and 8-byte aligned
    if (REALM_UNLIKELY(size < sizeof(Header)))
        throw InvalidDatabase(util::format("file is non-empty but too small (%1 bytes) to be a valid Realm.", size),
                              path);
    if (REALM_UNLIKELY(size % 8 != 0))
        throw InvalidDatabase(util::format("file has an invalid size (%1).", size), path);

    // First four bytes of info block is file format id
    if (REALM_UNLIKELY(!(char(header->m_mnemonic[0]) == 'T' && char(header->m_mnemonic[1]) == '-' &&
                         char(header->m_mnemonic[2]) == 'D' && char(header->m_mnemonic[3]) == 'B'))) {
        if (is_encrypted) {
            // Encrypted files check the hmac on read, so there's a lot less
            // which could go wrong and have us still reach this point
            throw_header_exception("header has invalid mnemonic. The file does not appear to be a Realm file.",
                                   *header, path);
        }
        else {
            throw_header_exception("header has invalid mnemonic. The file is either not a Realm file, is an "
                                   "encrypted Realm file but no encryption key was supplied, or is corrupted.",
                                   *header, path);
        }
    }

    // Last bit in info block indicates which top_ref block is valid
    int slot_selector = ((header->m_flags & SlabAlloc::flags_SelectBit) != 0 ? 1 : 0);

    // Top-ref must always point within buffer
    auto top_ref = header->m_top_ref[slot_selector];
    if (slot_selector == 0 && top_ref == 0xFFFFFFFFFFFFFFFFULL) {
        if (REALM_UNLIKELY(size < sizeof(Header) + sizeof(StreamingFooter))) {
            throw InvalidDatabase(
                util::format("file is in streaming format but too small (%1 bytes) to be a valid Realm.", size),
                path);
        }
        REALM_ASSERT(footer);
        top_ref = footer->m_top_ref;
        if (REALM_UNLIKELY(footer->m_magic_cookie != footer_magic_cookie)) {
            throw InvalidDatabase(util::format("file is in streaming format but has an invalid footer cookie (%1). "
                                               "The file is probably truncated.",
                                               footer->m_magic_cookie),
                                  path);
        }
    }
    if (REALM_UNLIKELY(top_ref % 8 != 0)) {
        throw_header_exception("top ref is not aligned", *header, path);
    }
    if (REALM_UNLIKELY(top_ref >= size)) {
        throw_header_exception(
            util::format(
                "top ref is outside of the file (size: %1, top_ref: %2). The file has probably been truncated.", size,
                top_ref),
            *header, path);
    }
    return ref_type(top_ref);
}

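// [Editor's illustrative sketch, not part of alloc_slab.cpp.]
// validate_header() above reflects the dual-slot commit scheme: the file
// header carries two top-ref/file-format slots, and the low bit of m_flags
// (flags_SelectBit) selects the currently valid one. A commit writes the new
// top ref into the inactive slot, syncs, then flips the select bit, so a
// crash at any point leaves one consistent slot. In outline:
//
//     int active = (header.m_flags & flags_SelectBit) ? 1 : 0;
//     header.m_top_ref[1 - active] = new_top_ref; // write the inactive slot
//     sync();                                     // make it durable
//     header.m_flags ^= flags_SelectBit;          // atomically switch slots
//     sync();
//
// The streaming form is the special case where slot 0 holds
// 0xFFFFFFFFFFFFFFFF and the real top ref lives in a footer at the end of
// the file; convert_from_streaming_form() above performs exactly this
// write-then-flip sequence when rewriting such a file in place.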

size_t SlabAlloc::get_total_size() const noexcept
{
    return m_slabs.empty() ? size_t(m_baseline.load(std::memory_order_relaxed)) : m_slabs.back().ref_end;
}


void SlabAlloc::reset_free_space_tracking()
{
    CriticalSection cs(changes);
    if (is_free_space_clean())
        return;

    // Free all scratch space (done after all data has
    // been committed to persistent space)
    m_free_read_only.clear();

    // release slabs.. keep the initial allocation if it's a minimal allocation,
    // otherwise release it as well. This saves map/unmap for small transactions.
    while (m_slabs.size() > 1 || (m_slabs.size() == 1 && m_slabs[0].size > minimal_alloc)) {
        auto& last_slab = m_slabs.back();
        auto& last_translation = m_ref_translation_ptr[m_translation_table_size - 1];
        REALM_ASSERT(last_translation.mapping_addr == last_slab.addr);
        --m_translation_table_size;
        m_slabs.pop_back();
    }
    rebuild_freelists_from_slab();
    m_free_space_state = free_space_Clean;
    m_commit_size = 0;
}
1169

1170
inline bool randomly_false_in_debug(bool x)
1171
{
×
1172
#ifdef REALM_DEBUG
×
1173
    if (x)
×
1174
        return (std::rand() & 1);
×
1175
#endif
×
1176
    return x;
×
1177
}
×
1178

1179

1180
/*
  Memory mapping

  To make ref->ptr translation fast while also avoiding having to memory-map the entire file
  contiguously (which is a problem for large files on 32-bit devices and most iOS devices), it is
  essential to map the file in evenly sized sections.

  These sections must be large enough to hold one or more of the largest arrays, which can be up
  to 16 MB. You can only mmap file space which has been allocated to a file. If you mmap a range
  which extends beyond the last page of a file, the result is undefined, so we can't do that.
  We don't want to extend the file in increments as large as the chunk size.

  As the file grows, we grow the mapping by creating a new, larger one, which replaces the
  old one in the mapping table. However, we must keep the old mapping open, because older
  read transactions will continue to use it. Hence, the replaced mappings are accumulated
  and only cleaned out once we know that no transaction can refer to them anymore.

  Interaction with encryption

  When encryption is enabled, the memory mapping is to temporary memory, not the file.
  The binding to the file is done by software. This allows us to "cheat" and allocate
  entire sections. With encryption, it doesn't matter if the mapped memory logically
  extends beyond the end of the file, because it will not be accessed.

  Growing/Changing the mapping table.

  There are two mapping tables:

  * m_mappings: This is the "source of truth" about what the current mapping is.
    It is only accessed under lock.
  * m_fast_mapping: This is generated to match m_mappings, but is also accessed in a
    mostly lock-free fashion from the translate function. Because of the lock-free operation this
    table can only be extended. Only selected members in each entry can be changed.
    See RefTranslation in alloc.hpp for more details.
    The fast mapping also maps the slab area used for allocations - as mappings are added,
    the slab area *moves*, corresponding to the movement of m_baseline. This movement does
    not need to trigger generation of a new m_fast_mapping table, because it is only relevant
    to memory allocation and release, which is already serialized (since write transactions are
    single threaded).

  When m_mappings is changed due to an extend operation changing a mapping, or when
  it has grown such that it cannot be reflected in m_fast_mapping, we use read-copy-update:

  * A new fast mapping table is created. The old one is not modified.
  * The old one is held in a waiting area until it is no longer relevant, because no
    live transaction can refer to it any more.
 */
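
// Illustrative sketch of the read-copy-update scheme described above, reduced to
// its essentials. It is not part of the allocator; all names are hypothetical.
// Readers load the published table pointer once and then index it without
// locking; the (serialized) writer never mutates a published table - it builds a
// copy, publishes it with a release store, and retires the old table until no
// live reader can still reference it.
namespace rcu_sketch {

struct Entry {
    char* mapping_addr = nullptr;
};

std::atomic<Entry*> g_table{nullptr}; // published table, read lock-free
size_t g_table_size = 0;              // touched only by the single writer

// Reader: one acquire load, then plain indexing. Safe because published tables
// are immutable and retired tables outlive every reader that could hold them.
inline char* lookup(size_t index)
{
    Entry* t = g_table.load(std::memory_order_acquire);
    return t[index].mapping_addr;
}

// Writer: copy, extend, publish. The old table must be kept alive elsewhere
// (cf. m_old_translations) until every transaction that could use it is gone.
inline Entry* grow_and_publish(char* new_addr)
{
    Entry* old_table = g_table.load(std::memory_order_relaxed);
    auto bigger = std::make_unique<Entry[]>(g_table_size + 1);
    for (size_t i = 0; i < g_table_size; ++i)
        bigger[i] = old_table[i];
    bigger[g_table_size].mapping_addr = new_addr;
    ++g_table_size;
    g_table.store(bigger.get(), std::memory_order_release);
    return bigger.release(); // caller owns the new table and retires old_table
}

} // namespace rcu_sketch
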
void SlabAlloc::update_reader_view(size_t file_size)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    size_t old_baseline = m_baseline.load(std::memory_order_relaxed);
    if (file_size <= old_baseline) {
        schedule_refresh_of_outdated_encrypted_pages();
        return;
    }

    const auto old_slab_base = align_size_to_section_boundary(old_baseline);
    bool replace_last_mapping = false;
    size_t old_num_mappings = get_section_index(old_slab_base);

    if (!is_in_memory()) {
        REALM_ASSERT_EX(file_size % 8 == 0, file_size, get_file_path_for_assertions()); // 8-byte alignment required
        REALM_ASSERT_EX(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile, m_attach_mode,
                        get_file_path_for_assertions());
        REALM_ASSERT_DEBUG(is_free_space_clean());

        // Create the new mappings we need to cover the new size. We don't mutate
        // any of the member variables until we've successfully created all of the
        // mappings, so that we leave things in a consistent state if one of them
        // hits an allocation failure.

        std::vector<MapEntry> new_mappings;
        REALM_ASSERT(m_mappings.size() == old_num_mappings);

        {
            // If the old slab base was greater than the old baseline, then the final
            // mapping was a partial section and we need to replace it with a larger
            // mapping.
            if (old_baseline < old_slab_base) {
                // old_slab_base should be 0 if we had no mappings previously
                REALM_ASSERT(old_num_mappings > 0);
                // try to extend the old mapping in-place instead of replacing it.
                MapEntry& cur_entry = m_mappings.back();
                const size_t section_start_offset = get_section_base(old_num_mappings - 1);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (!cur_entry.primary_mapping.try_extend_to(section_size)) {
                    replace_last_mapping = true;
                    --old_num_mappings;
                }
            }

            // Create new mappings covering from the end of the last complete
            // section to the end of the new file size.
            const auto new_slab_base = align_size_to_section_boundary(file_size);
            const size_t num_mappings = get_section_index(new_slab_base);
            new_mappings.reserve(num_mappings - old_num_mappings);
            for (size_t k = old_num_mappings; k < num_mappings; ++k) {
                const size_t section_start_offset = get_section_base(k);
                const size_t section_size = std::min<size_t>(1 << section_shift, file_size - section_start_offset);
                if (section_size == (1 << section_shift)) {
                    new_mappings.push_back({util::File::Map<char>(m_file, section_start_offset, File::access_ReadOnly,
                                                                  section_size, 0, m_write_observer)});
                }
                else {
                    new_mappings.push_back({util::File::Map<char>()});
                    auto& mapping = new_mappings.back().primary_mapping;
                    bool reserved = mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift,
                                                        section_start_offset, m_write_observer);
                    if (reserved) {
                        // if reservation is supported, the first attempt at extending must succeed
                        if (!mapping.try_extend_to(section_size))
                            throw std::bad_alloc();
                    }
                    else {
                        new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size, 0,
                                                                section_start_offset, m_write_observer);
                    }
                }
            }
        }

        // Now that we've successfully created our mappings, update our member
        // variables (and assume that resizing a simple vector won't produce memory
        // allocation failures, unlike 64 MB mmaps).
        if (replace_last_mapping) {
            MapEntry& cur_entry = m_mappings.back();
            // We should not have a xover mapping here, because that would mean
            // that there was already something mapped after the last section.
            REALM_ASSERT(!cur_entry.xover_mapping.is_attached());
            // save the old mapping/keep it open
            m_old_mappings.push_back({m_youngest_live_version, std::move(cur_entry.primary_mapping)});
            m_mappings.pop_back();
            m_mapping_version++;
        }

        std::move(new_mappings.begin(), new_mappings.end(), std::back_inserter(m_mappings));
    }

    m_baseline.store(file_size, std::memory_order_relaxed);

    const size_t ref_start = align_size_to_section_boundary(file_size);
    const size_t ref_displacement = ref_start - old_slab_base;
    if (ref_displacement > 0) {
        // Rebase the slabs, as m_baseline is now bigger than old_slab_base
        for (auto& e : m_slabs) {
            e.ref_end += ref_displacement;
        }
    }

    rebuild_freelists_from_slab();

    // Build the fast path mapping

    // The fast path mapping is an array which is used from multiple threads
    // without locking - see translate().

    // Addition of a new mapping may require a completely new fast mapping table.
    //
    // Being used in a multithreaded scenario, the old mappings must be retained open,
    // until the realm version for which they were established has been closed/detached.
    //
    // This assumes that only write transactions call do_alloc() or do_free() or need to
    // translate refs in the slab area, and that all these uses are serialized, whether
    // that is achieved by being single threaded, interlocked or run from a sequential
    // scheduling queue.
    //
    rebuild_translations(replace_last_mapping, old_num_mappings);

    schedule_refresh_of_outdated_encrypted_pages();
}


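// Illustrative sketch of the section arithmetic update_reader_view() relies on,
// assuming a fixed power-of-two section size of 64 MB (1 << 26); the helper
// names are hypothetical stand-ins for get_section_index(), get_section_base()
// and align_size_to_section_boundary().
namespace section_math_sketch {

constexpr size_t k_section_shift = 26; // assumption: 64 MB sections
constexpr size_t k_section_size = size_t(1) << k_section_shift;

constexpr size_t section_index(size_t pos)
{
    return pos >> k_section_shift; // which mapping covers this position
}

constexpr size_t section_base(size_t index)
{
    return index << k_section_shift; // file offset where that section starts
}

constexpr size_t align_to_section(size_t size)
{
    return (size + k_section_size - 1) & ~(k_section_size - 1); // round up
}

// A 100 MB file spans two sections: one full 64 MB mapping plus one partial
// 36 MB mapping, and its section-aligned size is 128 MB.
static_assert(section_index(size_t(100) << 20) == 1, "100 MB lies in the second section");
static_assert(align_to_section(size_t(100) << 20) == (size_t(128) << 20), "rounds up to 128 MB");

} // namespace section_math_sketch
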
void SlabAlloc::schedule_refresh_of_outdated_encrypted_pages()
{
#if REALM_ENABLE_ENCRYPTION
    // callers must already hold m_mapping_mutex
    for (auto& e : m_mappings) {
        if (auto m = e.primary_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
        if (auto m = e.xover_mapping.get_encrypted_mapping()) {
            encryption_mark_pages_for_IV_check(m);
        }
    }
    // unsafe to do outside writing thread: verify();
#endif // REALM_ENABLE_ENCRYPTION
}

size_t SlabAlloc::get_allocated_size() const noexcept
{
    size_t sz = 0;
    for (const auto& s : m_slabs)
        sz += s.size;
    return sz;
}

void SlabAlloc::extend_fast_mapping_with_slab(char* address)
{
    ++m_translation_table_size;
    auto new_fast_mapping = std::make_unique<RefTranslation[]>(m_translation_table_size);
    for (size_t i = 0; i < m_translation_table_size - 1; ++i) {
        new_fast_mapping[i] = m_ref_translation_ptr[i];
    }
    m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - m_slabs.size(),
                                    m_ref_translation_ptr.load());
    new_fast_mapping[m_translation_table_size - 1].mapping_addr = address;
    // Memory ranges with slab (working memory) can never have arrays that straddle a boundary,
    // so optimize by clamping the lowest possible xover offset to the end of the section.
    new_fast_mapping[m_translation_table_size - 1].lowest_possible_xover_offset = 1ULL << section_shift;
    m_ref_translation_ptr = new_fast_mapping.release();
}

void SlabAlloc::rebuild_translations(bool requires_new_translation, size_t old_num_sections)
{
    size_t free_space_size = m_slabs.size();
    auto num_mappings = is_in_memory() ? m_virtual_file_buffer.size() : m_mappings.size();
    if (m_translation_table_size < num_mappings + free_space_size) {
        requires_new_translation = true;
    }
    RefTranslation* new_translation_table = m_ref_translation_ptr;
    std::unique_ptr<RefTranslation[]> new_translation_table_owner;
    if (requires_new_translation) {
        // we need a new translation table, but must preserve the old one, as
        // translations using it may be in progress concurrently
        if (m_translation_table_size)
            m_old_translations.emplace_back(m_youngest_live_version, m_translation_table_size - free_space_size,
                                            m_ref_translation_ptr.load());
        m_translation_table_size = num_mappings + free_space_size;
        new_translation_table_owner = std::make_unique<RefTranslation[]>(m_translation_table_size);
        new_translation_table = new_translation_table_owner.get();
        old_num_sections = 0;
    }
    for (size_t i = old_num_sections; i < num_mappings; ++i) {
        if (is_in_memory()) {
            new_translation_table[i].mapping_addr = m_virtual_file_buffer[i].addr;
        }
        else {
            new_translation_table[i].mapping_addr = m_mappings[i].primary_mapping.get_addr();
#if REALM_ENABLE_ENCRYPTION
            new_translation_table[i].encrypted_mapping = m_mappings[i].primary_mapping.get_encrypted_mapping();
#endif
        }
        REALM_ASSERT(new_translation_table[i].mapping_addr);
        // We don't copy over data for the cross-over mapping. If the mapping is needed,
        // copying will happen on demand (in get_or_add_xover_mapping).
        // Note: it may never be needed, because if the array that needed the original cross-over
        // mapping is freed, any new array allocated at the same position will NOT need a cross-over
        // mapping, but will just use the primary mapping.
    }
    for (size_t k = 0; k < free_space_size; ++k) {
        char* base = m_slabs[k].addr;
        REALM_ASSERT(base);
        new_translation_table[num_mappings + k].mapping_addr = base;
    }

    // This will either be null or the same as new_translation_table, which is about to become owned by
    // m_ref_translation_ptr.
    (void)new_translation_table_owner.release();

    m_ref_translation_ptr = new_translation_table;
}

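// Illustrative sketch (simplified, hypothetical names): how a table built by
// rebuild_translations() is consumed. A ref's high bits select the table entry
// via the section index, and its low bits are the offset within that section's
// mapping. The real translate() additionally handles slabs, cross-over mappings
// and encryption; this sketch assumes none of those and 64 MB sections.
namespace translate_sketch {

constexpr size_t k_section_shift = 26; // assumption: 64 MB sections

struct Translation {
    char* mapping_addr = nullptr; // start of the mapped section
};

inline char* translate(const Translation* table, size_t ref)
{
    const size_t index = ref >> k_section_shift;                      // table slot
    const size_t offset = ref & ((size_t(1) << k_section_shift) - 1); // within the section
    return table[index].mapping_addr + offset;
}

} // namespace translate_sketch
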
void SlabAlloc::get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size)
{
    auto _page_size = page_size();
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    if (txl.xover_mapping_addr.load(std::memory_order_relaxed)) {
        // some other thread already added a mapping.
        // It MUST have been for the exact same address:
        REALM_ASSERT(offset == txl.lowest_possible_xover_offset.load(std::memory_order_relaxed));
        return;
    }
    MapEntry* map_entry = &m_mappings[index];
    REALM_ASSERT(map_entry->primary_mapping.get_addr() == txl.mapping_addr);
    if (!map_entry->xover_mapping.is_attached()) {
        // Create a xover mapping
        auto file_offset = get_section_base(index) + offset;
        auto end_offset = file_offset + size;
        auto mapping_file_offset = file_offset & ~(_page_size - 1);
        auto minimal_mapping_size = end_offset - mapping_file_offset;
        util::File::Map<char> mapping(m_file, mapping_file_offset, File::access_ReadOnly, minimal_mapping_size, 0,
                                      m_write_observer);
        map_entry->xover_mapping = std::move(mapping);
    }
    txl.xover_mapping_base = offset & ~(_page_size - 1);
#if REALM_ENABLE_ENCRYPTION
    txl.xover_encrypted_mapping = map_entry->xover_mapping.get_encrypted_mapping();
#endif
    txl.xover_mapping_addr.store(map_entry->xover_mapping.get_addr(), std::memory_order_release);
}

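// Illustrative sketch: the page-alignment masks used by get_or_add_xover_mapping()
// above. For a power-of-two page size, `x & ~(page_size - 1)` rounds x down to a
// page boundary, which is what makes the xover mapping start at a mappable file
// offset. Helper names are hypothetical.
namespace page_align_sketch {

constexpr size_t round_down_to_page(size_t x, size_t page_size)
{
    return x & ~(page_size - 1); // assumes page_size is a power of two
}

constexpr size_t round_up_to_page(size_t x, size_t page_size)
{
    return (x + page_size - 1) & ~(page_size - 1);
}

// With 4096-byte pages, an array at file offset 10000 needs a mapping that
// starts at offset 8192; a 500-byte array ending at 10500 needs the mapping
// to cover at least 10500 - 8192 bytes.
static_assert(round_down_to_page(10000, 4096) == 8192, "rounds down to a page boundary");
static_assert(round_up_to_page(10500, 4096) == 12288, "rounds up to a page boundary");

} // namespace page_align_sketch
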
void SlabAlloc::verify_old_translations(uint64_t youngest_live_version)
{
    // Verify that each old ref translation pointer still points to a valid
    // thing that we haven't released yet.
#if REALM_DEBUG
    std::unordered_set<const char*> mappings;
    for (auto& m : m_old_mappings) {
        REALM_ASSERT(m.mapping.is_attached());
        mappings.insert(m.mapping.get_addr());
    }
    for (auto& m : m_mappings) {
        REALM_ASSERT(m.primary_mapping.is_attached());
        mappings.insert(m.primary_mapping.get_addr());
        if (m.xover_mapping.is_attached())
            mappings.insert(m.xover_mapping.get_addr());
    }
    for (auto& m : m_virtual_file_buffer) {
        mappings.insert(m.addr);
    }
    if (m_data)
        mappings.insert(m_data);
    for (auto& t : m_old_translations) {
        REALM_ASSERT_EX(youngest_live_version == 0 || t.replaced_at_version < youngest_live_version,
                        youngest_live_version, t.replaced_at_version);
        if (nonempty_attachment()) {
            for (size_t i = 0; i < t.translation_count; ++i)
                REALM_ASSERT(mappings.count(t.translations[i].mapping_addr));
        }
    }
#else
    static_cast<void>(youngest_live_version);
#endif
}


void SlabAlloc::purge_old_mappings(uint64_t oldest_live_version, uint64_t youngest_live_version)
{
    std::lock_guard<std::mutex> lock(m_mapping_mutex);
    verify_old_translations(youngest_live_version);

    auto pred = [=](auto& oldie) {
        return oldie.replaced_at_version < oldest_live_version;
    };
    m_old_mappings.erase(std::remove_if(m_old_mappings.begin(), m_old_mappings.end(), pred), m_old_mappings.end());
    m_old_translations.erase(std::remove_if(m_old_translations.begin(), m_old_translations.end(), pred),
                             m_old_translations.end());
    m_youngest_live_version = youngest_live_version;
    verify_old_translations(youngest_live_version);
}

void SlabAlloc::init_mapping_management(uint64_t currently_live_version)
{
    m_youngest_live_version = currently_live_version;
}

const SlabAlloc::Chunks& SlabAlloc::get_free_read_only() const
{
    if (REALM_COVER_NEVER(m_free_space_state == free_space_Invalid))
        throw InvalidFreeSpace();
    return m_free_read_only;
}


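// Illustrative sketch (hypothetical types; assumes std::vector is available via
// the headers above): the retire/purge pattern used by purge_old_mappings(). A
// retired object is stamped with the version at which it was replaced; once the
// oldest live transaction has moved past that version, nothing can reference it,
// and it can be dropped with the erase/remove_if idiom.
namespace purge_sketch {

struct Retired {
    uint64_t replaced_at_version;
    // ... the resource kept alive for older readers ...
};

inline void purge(std::vector<Retired>& retired, uint64_t oldest_live_version)
{
    auto pred = [=](const Retired& r) {
        // safe to drop: every live reader started at or after the replacement
        return r.replaced_at_version < oldest_live_version;
    };
    retired.erase(std::remove_if(retired.begin(), retired.end(), pred), retired.end());
}

} // namespace purge_sketch
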
size_t SlabAlloc::find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept
{
    size_t end_of_block = start_pos + free_chunk_size;
    size_t alloc_pos = start_pos;
    while (alloc_pos + request_size <= end_of_block) {
        size_t next_section_boundary = get_upper_section_boundary(alloc_pos);
        if (alloc_pos + request_size <= next_section_boundary) {
            return alloc_pos;
        }
        alloc_pos = next_section_boundary;
    }
    return 0;
}
1547

1548

1549
void SlabAlloc::resize_file(size_t new_file_size)
1550
{
87,687✔
1551
    if (m_attach_mode == attach_SharedFile) {
87,687✔
1552
        REALM_ASSERT_EX(new_file_size == round_up_to_page_size(new_file_size), get_file_path_for_assertions());
60,762✔
1553
        m_file.prealloc(new_file_size); // Throws
60,762✔
1554
        // resizing is done based on the logical file size. It is ok for the file
1555
        // to actually be bigger, but never smaller.
1556
        REALM_ASSERT(new_file_size <= static_cast<size_t>(m_file.get_size()));
60,762✔
1557

1558
        bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
60,762✔
1559
        if (!disable_sync)
60,762✔
1560
            m_file.sync(); // Throws
555✔
1561
    }
60,762✔
1562
    else {
26,925✔
1563
        size_t current_size = 0;
26,925✔
1564
        for (auto& b : m_virtual_file_buffer) {
27,141✔
1565
            current_size += b.size;
27,141✔
1566
        }
27,141✔
1567
        if (new_file_size > current_size) {
26,925✔
1568
            m_virtual_file_buffer.emplace_back(64 * 1024 * 1024, current_size);
6✔
1569
        }
6✔
1570
        m_virtual_file_size = new_file_size;
26,925✔
1571
    }
26,925✔
1572
}
87,687✔
1573

1574
#ifdef REALM_DEBUG
void SlabAlloc::reserve_disk_space(size_t size)
{
    if (size != round_up_to_page_size(size))
        size = round_up_to_page_size(size);
    m_file.prealloc(size); // Throws

    bool disable_sync = get_disable_sync_to_disk() || m_cfg.disable_sync;
    if (!disable_sync)
        m_file.sync(); // Throws
}
#endif

void SlabAlloc::verify() const
{
#ifdef REALM_DEBUG
    if (!m_slabs.empty()) {
        // Make sure that all free blocks are within a slab. This is done
        // implicitly by using for_all_free_entries()
        size_t first_possible_ref = m_baseline;
        size_t first_impossible_ref = align_size_to_section_boundary(m_slabs.back().ref_end);
        for_all_free_entries([&](size_t ref, size_t size) {
            REALM_ASSERT(ref >= first_possible_ref);
            REALM_ASSERT(ref + size <= first_impossible_ref);
            first_possible_ref = ref;
        });
    }
#endif
}

#ifdef REALM_DEBUG

bool SlabAlloc::is_all_free() const
{
    // verify that slabs contain only free space.
    // this is equivalent to each slab holding BetweenBlocks only at the ends.
    for (const auto& e : m_slabs) {
        auto first = reinterpret_cast<BetweenBlocks*>(e.addr);
        REALM_ASSERT(first->block_before_size == 0);
        auto last = reinterpret_cast<BetweenBlocks*>(e.addr + e.size) - 1;
        REALM_ASSERT(last->block_after_size == 0);
        if (first->block_after_size != last->block_before_size)
            return false;
        auto range = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first);
        range -= sizeof(BetweenBlocks);
        // the size of the free area must match the distance between the two BetweenBlocks:
        if (range != first->block_after_size)
            return false;
    }
    return true;
}

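// Illustrative sketch (hypothetical layout, inferred from is_all_free() above):
// slabs are described by boundary tags. A tag sits between any two blocks and
// records the size of the block before and after it, with zero marking a slab
// end, so a fully free slab of payload size N looks like:
//
//   [Tag{before: 0, after: N}] [N free bytes] [Tag{before: N, after: 0}]
namespace boundary_tag_sketch {

struct Tag { // stand-in for BetweenBlocks; real field widths may differ
    int32_t block_before_size;
    int32_t block_after_size;
};

// Mirror of the per-slab check above: one free block must fill the whole gap
// between the two end tags.
inline bool slab_is_one_free_block(char* addr, size_t size)
{
    auto first = reinterpret_cast<Tag*>(addr);           // leading tag
    auto last = reinterpret_cast<Tag*>(addr + size) - 1; // trailing tag
    if (first->block_before_size != 0 || last->block_after_size != 0)
        return false; // slab ends must be marked with zero sizes
    auto gap = reinterpret_cast<char*>(last) - reinterpret_cast<char*>(first) -
               static_cast<std::ptrdiff_t>(sizeof(Tag));
    return first->block_after_size == last->block_before_size && gap == first->block_after_size;
}

} // namespace boundary_tag_sketch
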
// LCOV_EXCL_START
void SlabAlloc::print() const
{
    /* TODO
     *

    size_t allocated_for_slabs = m_slabs.empty() ? 0 : m_slabs.back().ref_end - m_baseline;

    size_t free = 0;
    for (const auto& free_block : m_free_space) {
        free += free_block.size;
    }

    size_t allocated = allocated_for_slabs - free;
    std::cout << "Attached: " << (m_data ? size_t(m_baseline) : 0) << " Allocated: " << allocated << "\n";

    if (!m_slabs.empty()) {
        std::cout << "Slabs: ";
        ref_type first_ref = m_baseline;

        for (const auto& slab : m_slabs) {
            if (&slab != &m_slabs.front())
                std::cout << ", ";

            ref_type last_ref = slab.ref_end - 1;
            size_t size = slab.ref_end - first_ref;
            void* addr = slab.addr;
            std::cout << "(" << first_ref << "->" << last_ref << ", size=" << size << ", addr=" << addr << ")";
            first_ref = slab.ref_end;
        }
        std::cout << "\n";
    }

    if (!m_free_space.empty()) {
        std::cout << "FreeSpace: ";
        for (const auto& free_block : m_free_space) {
            if (&free_block != &m_free_space.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    if (!m_free_read_only.empty()) {
        std::cout << "FreeSpace (ro): ";
        for (const auto& free_block : m_free_read_only) {
            if (&free_block != &m_free_read_only.front())
                std::cout << ", ";

            ref_type last_ref = free_block.ref + free_block.size - 1;
            std::cout << "(" << free_block.ref << "->" << last_ref << ", size=" << free_block.size << ")";
        }
        std::cout << "\n";
    }
    std::cout << std::flush;
    */
}
// LCOV_EXCL_STOP

#endif // REALM_DEBUG