realm / realm-core — build jorgen.edelbo_338 (Evergreen)

Pull Request #7803: Feature/string compression
Latest commit by nicola-cab: Merge branch 'next-major' into feature/string-compression

Coverage as of 03 Jul 2024 03:00PM UTC: 90.856% (-0.008% from 90.864%)

  • 218583 of 240583 relevant lines covered (90.86%)
  • 103028 of 180606 branches covered (57.05%)
  • 1144 of 1267 new or added lines in 33 files covered (90.29%)
  • 155 existing lines in 24 files now uncovered
  • 7959624.7 hits per line

Source File: /src/realm/alloc.hpp — 86.57% covered
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#ifndef REALM_ALLOC_HPP
#define REALM_ALLOC_HPP

#include <cstdint>
#include <cstddef>
#include <atomic>

#include <realm/util/features.h>
#include <realm/util/terminate.hpp>
#include <realm/util/assert.hpp>
#include <realm/util/file.hpp>
#include <realm/exceptions.hpp>
#include <realm/util/safe_int_ops.hpp>
#include <realm/node_header.hpp>
#include <realm/util/file_mapper.hpp>

// Temporary workaround for
// https://developercommunity.visualstudio.com/content/problem/994075/64-bit-atomic-load-ices-cl-1924-with-o2-ob1.html
#if defined REALM_ARCHITECTURE_X86_32 && defined REALM_WINDOWS
#define REALM_WORKAROUND_MSVC_BUG REALM_NOINLINE
#else
#define REALM_WORKAROUND_MSVC_BUG
#endif

namespace realm {

class Allocator;

using ref_type = size_t;

int_fast64_t from_ref(ref_type) noexcept;
ref_type to_ref(int_fast64_t) noexcept;
int64_t to_int64(size_t value) noexcept;

class MemRef {
public:
    MemRef() noexcept = default;

    MemRef(char* addr, ref_type ref, Allocator& alloc) noexcept;
    MemRef(ref_type ref, Allocator& alloc) noexcept;

    char* get_addr() const;
    ref_type get_ref() const;
    void set_ref(ref_type ref);
    void set_addr(char* addr);

private:
    char* m_addr = nullptr;
    ref_type m_ref = 0;
#if REALM_ENABLE_MEMDEBUG
    // Allocator that created m_ref. Used to verify that the ref is still valid
    // (e.g. has not been freed) whenever get_ref()/get_addr() is called.
    const Allocator* m_alloc = nullptr;
#endif
};
static_assert(std::is_trivially_copyable_v<MemRef>);

/// The common interface for Realm allocators.
///
/// A Realm allocator must associate a 'ref' to each allocated
/// object and be able to efficiently map any 'ref' to the
/// corresponding memory address. The 'ref' is an integer and it must
/// always be divisible by 8. Also, a value of zero is used to
/// indicate a null-reference, and must therefore never be returned by
/// Allocator::alloc().
///
/// The purpose of the 'refs' is to decouple the memory reference from
/// the actual address and thereby allow objects to be relocated in
/// memory without having to modify stored references.
///
/// \sa SlabAlloc
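///
/// A minimal usage sketch (illustrative only, not part of this interface;
/// any concrete Allocator, e.g. the one returned by get_default(), behaves
/// the same way):
///
/// \code
/// Allocator& alloc = Allocator::get_default();
/// MemRef mem = alloc.alloc(64);                  // size: non-zero, multiple of 8
/// char* addr = alloc.translate(mem.get_ref());   // map the ref back to an address
/// REALM_ASSERT(addr == mem.get_addr());
/// alloc.free_(mem);                              // release the chunk again
/// \endcode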
class Allocator {
public:
    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    MemRef alloc(size_t size);

    /// Calls do_realloc().
    ///
    /// Note: The underscore has been added because the name `realloc`
    /// would conflict with a macro on the Windows platform.
    MemRef realloc_(ref_type, const char* addr, size_t old_size, size_t new_size);

    /// Calls do_free().
    ///
    /// Note: The underscore has been added because the name `free`
    /// would conflict with a macro on the Windows platform.
    void free_(ref_type, const char* addr) noexcept;

    /// Shorthand for free_(mem.get_ref(), mem.get_addr()).
    void free_(MemRef mem) noexcept;

    /// Calls do_translate().
    char* translate(ref_type ref) const noexcept;

    /// Simpler version if we know the ref points inside the slab area
    char* translate_in_slab(ref_type ref) const noexcept;

    /// Returns true if, and only if, the object at the specified 'ref'
    /// is in the immutable part of the memory managed by this
    /// allocator. The method by which some objects become part of the
    /// immutable part is entirely up to the class that implements
    /// this interface.
    bool is_read_only(ref_type) const noexcept;

    void set_read_only(bool ro)
    {
        m_is_read_only = ro;
    }

    /// Returns a simple allocator that can be used with free-standing
    /// Realm objects (such as a free-standing table). A
    /// free-standing object is one that is not part of a Group, and
    /// therefore, is not part of an actual database.
    static Allocator& get_default() noexcept;

    virtual ~Allocator() noexcept = default;

    // Disable copying. Copying an allocator can produce double frees.
    Allocator(const Allocator&) = delete;
    Allocator& operator=(const Allocator&) = delete;

    virtual void verify() const = 0;

#ifdef REALM_DEBUG
    /// Terminate the program precisely when the specified 'ref' is
    /// freed (or reallocated). You can use this to detect whether the
    /// ref is freed (or reallocated), and even to get a stacktrace at
    /// the point where it happens. Call watch(0) to stop watching
    /// that ref.
    void watch(ref_type ref)
    {
        m_debug_watch = ref;
    }
#endif

    struct MappedFile;

    static constexpr size_t section_size() noexcept
    {
        return 1UL << section_shift;
    }

protected:
    constexpr static int section_shift = 26;
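    // With section_shift == 26, each section covers 1 << 26 bytes (64 MiB) of
    // ref-space; see the "64MB chunks" notes in get_section_base() and
    // get_section_index() below.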

    std::atomic<size_t> m_baseline; // Separation line between immutable and mutable refs.

    ref_type m_debug_watch = 0;

    // The following logically belongs in the slab allocator, but is placed
    // here to optimize a critical path:

    // The ref translation splits the full ref-space (both below and above baseline)
    // into equal chunks.
    struct RefTranslation {
        char* mapping_addr;
        uint64_t cookie = 0x1234567890;
        std::atomic<size_t> lowest_possible_xover_offset = 0;

        // member 'xover_mapping_addr' is used for memory synchronization of the fields
        // 'xover_mapping_base' and 'xover_encrypted_mapping'. It also imposes an ordering
        // on 'lowest_possible_xover_offset' such that once a non-null value of 'xover_mapping_addr'
        // has been acquired, 'lowest_possible_xover_offset' will never change.
        std::atomic<char*> xover_mapping_addr = nullptr;
        size_t xover_mapping_base = 0;
#if REALM_ENABLE_ENCRYPTION
        util::EncryptedFileMapping* encrypted_mapping = nullptr;
        util::EncryptedFileMapping* xover_encrypted_mapping = nullptr;
#else
        static inline util::EncryptedFileMapping* const encrypted_mapping = nullptr;
        static inline util::EncryptedFileMapping* const xover_encrypted_mapping = nullptr;
#endif
        explicit RefTranslation(char* addr = nullptr)
            : mapping_addr(addr)
        {
        }
        ~RefTranslation()
        {
            cookie = 0xdeadbeefdeadbeef;
        }
        RefTranslation& operator=(const RefTranslation& from)
        {
            if (&from != this) {
                mapping_addr = from.mapping_addr;
#if REALM_ENABLE_ENCRYPTION
                encrypted_mapping = from.encrypted_mapping;
#endif
                const auto local_xover_mapping_addr = from.xover_mapping_addr.load(std::memory_order_acquire);

                // This must be loaded after xover_mapping_addr to ensure it isn't stale.
                lowest_possible_xover_offset.store(from.lowest_possible_xover_offset, std::memory_order_relaxed);

                if (local_xover_mapping_addr) {
                    xover_mapping_base = from.xover_mapping_base;
#if REALM_ENABLE_ENCRYPTION
                    xover_encrypted_mapping = from.xover_encrypted_mapping;
#endif
                    xover_mapping_addr.store(local_xover_mapping_addr, std::memory_order_release);
                }
            }
            return *this;
        }
    };
    // This pointer may be changed concurrently with access, so make sure it is
    // atomic!
    std::atomic<RefTranslation*> m_ref_translation_ptr{nullptr};

    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    virtual MemRef do_alloc(const size_t size) = 0;

    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// The default version of this function simply allocates a new
    /// chunk of memory, copies over the old contents, and then frees
    /// the old chunk.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    virtual MemRef do_realloc(ref_type, char* addr, size_t old_size, size_t new_size) = 0;

    /// Release the specified chunk of memory.
    virtual void do_free(ref_type, char* addr) = 0;

    /// Map the specified \a ref to the corresponding memory
    /// address. Note that if is_read_only(ref) returns true, then the
    /// referenced object is to be considered immutable, and it is
    /// then entirely the responsibility of the caller that the memory
    /// is not modified by way of the returned memory pointer.
    virtual char* do_translate(ref_type ref) const noexcept = 0;
    char* translate_critical(RefTranslation*, ref_type ref, bool known_in_slab = false) const noexcept;
    char* translate_less_critical(RefTranslation*, ref_type ref, bool known_in_slab = false) const noexcept;
    virtual void get_or_add_xover_mapping(RefTranslation&, size_t, size_t, size_t) = 0;
    Allocator() noexcept = default;
    size_t get_section_index(size_t pos) const noexcept;
    inline size_t get_section_base(size_t index) const noexcept;


    // The following counters are used to ensure accessor refresh,
    // and allow us to report many errors related to attempts to
    // access data which is no longer current.
    //
    // * storage_versioning: monotonically increasing counter
    //   bumped whenever the underlying storage layout is changed,
    //   or if the owning accessor has been detached.
    // * content_versioning: monotonically increasing counter
    //   bumped whenever the data is changed. Used to detect
    //   if queries are stale.
    // * instance_versioning: monotonically increasing counter
    //   used to detect if the allocator (and owning structure, e.g. Table)
    //   is recycled. Mismatch on this counter will cause accessors
    //   lower in the hierarchy to throw if access is attempted.
    std::atomic<uint_fast64_t> m_content_versioning_counter{0};
    std::atomic<uint_fast64_t> m_storage_versioning_counter{0};
    std::atomic<uint_fast64_t> m_instance_versioning_counter{0};
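
    // How the counters cooperate (illustrative): an accessor caches
    // m_instance_versioning_counter when it is created. On later access it
    // passes the cached value to get_storage_version(instance_version) below;
    // if the allocator (and its owner) has been recycled in the meantime, the
    // counters no longer match and a StaleAccessor exception is thrown.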

    inline uint_fast64_t get_storage_version(uint64_t instance_version)
    {
        if (instance_version != m_instance_versioning_counter) {
            throw StaleAccessor("Stale accessor version");
        }
        return m_storage_versioning_counter.load(std::memory_order_acquire);
    }

public:
    inline uint_fast64_t get_storage_version()
    {
        return m_storage_versioning_counter.load(std::memory_order_acquire);
    }

protected:
    inline void bump_storage_version() noexcept
    {
        m_storage_versioning_counter.fetch_add(1, std::memory_order_acq_rel);
    }

public:
    REALM_WORKAROUND_MSVC_BUG inline uint_fast64_t get_content_version() noexcept
    {
        return m_content_versioning_counter.load(std::memory_order_acquire);
    }

protected:
    inline uint_fast64_t bump_content_version() noexcept
    {
        return m_content_versioning_counter.fetch_add(1, std::memory_order_acq_rel) + 1;
    }

    REALM_WORKAROUND_MSVC_BUG inline uint_fast64_t get_instance_version() noexcept
    {
        return m_instance_versioning_counter.load(std::memory_order_relaxed);
    }

    inline void bump_instance_version() noexcept
    {
        m_instance_versioning_counter.fetch_add(1, std::memory_order_relaxed);
    }

private:
    bool m_is_read_only = false; // prevent any alloc or free operations

    friend class Table;
    friend class ClusterTree;
    friend class Group;
    friend class WrappedAllocator;
    friend class Obj;
    template <class>
    friend class CollectionBaseImpl;
    friend class Dictionary;
};


class WrappedAllocator : public Allocator {
public:
    WrappedAllocator(Allocator& underlying_allocator)
        : m_alloc(&underlying_allocator)
    {
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_debug_watch = 0;
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
    }

    ~WrappedAllocator() {}

    void switch_underlying_allocator(Allocator& underlying_allocator)
    {
        m_alloc = &underlying_allocator;
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_debug_watch = 0;
        refresh_ref_translation();
    }

    void update_from_underlying_allocator(bool writable)
    {
        switch_underlying_allocator(*m_alloc);
        set_read_only(!writable);
    }

    void refresh_ref_translation()
    {
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
    }

protected:
    void get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size) override
    {
        m_alloc->get_or_add_xover_mapping(txl, index, offset, size);
    }

private:
    Allocator* m_alloc;
    MemRef do_alloc(const size_t size) override
    {
        auto result = m_alloc->do_alloc(size);
        bump_storage_version();
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
        return result;
    }
    virtual MemRef do_realloc(ref_type ref, char* addr, size_t old_size, size_t new_size) override
    {
        auto result = m_alloc->do_realloc(ref, addr, old_size, new_size);
        bump_storage_version();
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
        return result;
    }

    virtual void do_free(ref_type ref, char* addr) noexcept override
    {
        return m_alloc->do_free(ref, addr);
    }

    virtual char* do_translate(ref_type ref) const noexcept override
    {
        return m_alloc->translate(ref);
    }

    virtual void verify() const override
    {
        m_alloc->verify();
    }
};
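
// Illustrative use of WrappedAllocator (hypothetical names): every operation
// is delegated to the underlying allocator, and the underlying allocator can
// be swapped out later without disturbing holders of the wrapper itself.
//
//     Allocator& base = Allocator::get_default();
//     WrappedAllocator wrapped(base);
//     MemRef mem = wrapped.alloc(16);                      // delegates to `base`
//     wrapped.free_(mem);
//     wrapped.switch_underlying_allocator(other_alloc);    // retarget later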


// Implementation:

inline int_fast64_t from_ref(ref_type v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);

    static_assert(std::is_same<ref_type, size_t>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");

    // Make sure that we preserve the bit pattern of the ref_type (without sign extension).
    return int_fast64_t(uint_fast64_t(v));
}

inline ref_type to_ref(int_fast64_t v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG_EX(v % 8 == 0, v);

    // C++11 standard, paragraph 4.7.2 [conv.integral]:
    // If the destination type is unsigned, the resulting value is the least unsigned integer congruent to the source
    // integer (modulo 2^n where n is the number of bits used to represent the unsigned type). [ Note: In a two's
    // complement representation, this conversion is conceptual and there is no change in the bit pattern (if there is
    // no truncation). - end note ]
    static_assert(std::is_unsigned<ref_type>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");
    return ref_type(v);
}

inline int64_t to_int64(size_t value) noexcept
{
    int64_t res = static_cast<int64_t>(value);
    REALM_ASSERT_DEBUG(res >= 0);
    return res;
}
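
// Illustrative round trip (assuming a 64-bit size_t): the conversions above
// preserve the bit pattern in both directions, so
//
//     ref_type r = 1024;                        // divisible by 8
//     REALM_ASSERT(to_ref(from_ref(r)) == r);   // holds for all valid refs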

inline MemRef::MemRef(char* addr, ref_type ref, Allocator& alloc) noexcept
    : m_addr(addr)
    , m_ref(ref)
{
    static_cast<void>(alloc);
#if REALM_ENABLE_MEMDEBUG
    m_alloc = &alloc;
#endif
}

inline MemRef::MemRef(ref_type ref, Allocator& alloc) noexcept
    : m_addr(alloc.translate(ref))
    , m_ref(ref)
{
    static_cast<void>(alloc);
#if REALM_ENABLE_MEMDEBUG
    m_alloc = &alloc;
#endif
}

inline char* MemRef::get_addr() const
{
#if REALM_ENABLE_MEMDEBUG
    // Asserts if the ref has been freed
    m_alloc->translate(m_ref);
#endif
    return m_addr;
}

inline ref_type MemRef::get_ref() const
{
#if REALM_ENABLE_MEMDEBUG
    // Asserts if the ref has been freed
    m_alloc->translate(m_ref);
#endif
    return m_ref;
}

inline void MemRef::set_ref(ref_type ref)
{
#if REALM_ENABLE_MEMDEBUG
    // Asserts if the ref has been freed
    m_alloc->translate(ref);
#endif
    m_ref = ref;
}

inline void MemRef::set_addr(char* addr)
{
    m_addr = addr;
}

inline MemRef Allocator::alloc(size_t size)
{
    if (m_is_read_only)
        throw realm::LogicError(ErrorCodes::WrongTransactionState,
                                "Trying to modify database while in read transaction");
    return do_alloc(size);
}

inline MemRef Allocator::realloc_(ref_type ref, const char* addr, size_t old_size, size_t new_size)
{
#ifdef REALM_DEBUG
    if (ref == m_debug_watch)
        REALM_TERMINATE("Allocator watch: Ref was reallocated");
#endif
    if (m_is_read_only)
        throw realm::LogicError(ErrorCodes::WrongTransactionState,
                                "Trying to modify database while in read transaction");
    return do_realloc(ref, const_cast<char*>(addr), old_size, new_size);
}

inline void Allocator::free_(ref_type ref, const char* addr) noexcept
{
#ifdef REALM_DEBUG
    if (ref == m_debug_watch)
        REALM_TERMINATE("Allocator watch: Ref was freed");
#endif
    REALM_ASSERT(!m_is_read_only);

    return do_free(ref, const_cast<char*>(addr));
}

inline void Allocator::free_(MemRef mem) noexcept
{
    free_(mem.get_ref(), mem.get_addr());
}

inline size_t Allocator::get_section_base(size_t index) const noexcept
{
    return index << section_shift; // 64MB chunks
}

inline size_t Allocator::get_section_index(size_t pos) const noexcept
{
    return pos >> section_shift; // 64MB chunks
}
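
// Illustrative section arithmetic (section_shift == 26): ref 0x0C000010 lies
// in section 3, 16 bytes past that section's base.
//
//     size_t idx    = size_t(0x0C000010) >> 26;   // == 3
//     size_t base   = idx << 26;                  // == 0x0C000000
//     size_t offset = 0x0C000010 - base;          // == 16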

inline bool Allocator::is_read_only(ref_type ref) const noexcept
{
    REALM_ASSERT_DEBUG(ref != 0);
    // REALM_ASSERT_DEBUG(m_baseline != 0); // Attached SlabAlloc
    return ref < m_baseline.load(std::memory_order_relaxed);
}

// Performance-critical part of the translation process. Less critical code is in translate_less_critical.
inline char* Allocator::translate_critical(RefTranslation* ref_translation_ptr, ref_type ref,
                                           bool known_in_slab) const noexcept
{
    size_t idx = get_section_index(ref);
    RefTranslation& txl = ref_translation_ptr[idx];
    if (REALM_LIKELY(txl.cookie == 0x1234567890)) {
        size_t offset = ref - get_section_base(idx);
        size_t lowest_possible_xover_offset = txl.lowest_possible_xover_offset.load(std::memory_order_relaxed);
        if (REALM_LIKELY(offset < lowest_possible_xover_offset)) {
            // the lowest possible xover offset may grow concurrently, but that will not affect this code path
            char* addr = txl.mapping_addr + offset;
            util::encryption_read_barrier(addr, NodeHeader::header_size, txl.encrypted_mapping);
            size_t size = NodeHeader::get_byte_size_from_header(addr);
            util::encryption_read_barrier(addr, size, txl.encrypted_mapping);
            return addr;
        }
        // the lowest possible xover offset may grow concurrently, but that will be handled inside the call
        return translate_less_critical(ref_translation_ptr, ref, known_in_slab);
    }
    realm::util::terminate("Invalid ref translation entry", __FILE__, __LINE__, txl.cookie, 0x1234567890, ref, idx);
}
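
// Note: RefTranslation's constructor sets the cookie to 0x1234567890 and its
// destructor overwrites it with 0xdeadbeefdeadbeef, so the terminate() above
// fires if a translation entry is used after it has been destroyed.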

inline char* Allocator::translate(ref_type ref) const noexcept
{
    if (auto ptr = m_ref_translation_ptr.load(std::memory_order_acquire); REALM_LIKELY(ptr)) {
        return translate_critical(ptr, ref);
    }
    return do_translate(ref);
}

inline char* Allocator::translate_in_slab(ref_type ref) const noexcept
{
    auto ref_translation_ptr = m_ref_translation_ptr.load(std::memory_order_acquire);
    if (REALM_LIKELY(ref_translation_ptr)) {
        return translate_critical(ref_translation_ptr, ref, true);
    }
    else {
        return do_translate(ref);
    }
}


} // namespace realm

#endif // REALM_ALLOC_HPP