
realm / realm-core / jorgen.edelbo_402

21 Aug 2024 11:10AM UTC coverage: 91.054% (-0.03%) from 91.085%
Pull #7803 (Evergreen CI) · jedelbo
Small fix to Table::typed_write

When writing the Realm to a new file from a write transaction, the Table
may have been copied on write (COW), changing its top ref. So don't use
the ref that was present in the group when the operation started.

Pull Request #7803: Feature/string compression
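The pitfall the commit describes can be illustrated with a minimal, self-contained model of a copy-on-write file format (invented names; this is not realm-core code): once a node is cloned by COW, any ref captured before the modification is stale.

#include <cstdint>
#include <map>

// Minimal model of the pitfall described above (not realm-core code).
// In a copy-on-write file format, modifying a node clones it to a new
// ref; any ref captured before the modification is then stale.
using ref_type = std::uint64_t;

struct Node {
    int payload;
};

std::map<ref_type, Node> image;   // the "file": ref -> node
ref_type next_free = 8;

// Clone `ref` to a fresh location and return the new ref (the COW step).
ref_type copy_on_write(ref_type ref)
{
    ref_type new_ref = next_free;
    next_free += 8;
    image[new_ref] = image[ref];
    return new_ref;
}

int main()
{
    ref_type top_ref = next_free;      // ref captured when the operation starts
    next_free += 8;
    image[top_ref] = Node{42};

    ref_type current = copy_on_write(top_ref);  // a write moves the node...
    image[current].payload = 43;                // ...and updates the new copy

    // Writing the file via the stale `top_ref` would persist payload 42;
    // the commit's fix is to re-read the current ref at write time.
    return image[current].payload == 43 ? 0 : 1;
}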

103494 of 181580 branches covered (57.0%)

1929 of 1999 new or added lines in 46 files covered. (96.5%)

695 existing lines in 51 files now uncovered.

220142 of 241772 relevant lines covered (91.05%)

7344461.76 hits per line

Source File: /src/realm/alloc.cpp (79.03% covered)
/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#include <cerrno>
#include <cstdlib>
#include <stdexcept>
#include <algorithm>

#include <realm/array.hpp>
#include <realm/alloc_slab.hpp>
#include <realm/group.hpp>
using namespace realm;


namespace {

/// For use with free-standing objects (objects that are not part of a
/// Realm group)
///
/// Note that it is essential that this class is stateless as it may
/// be used by multiple threads. Although it has m_replication, this
/// is not a problem, as there is no way to modify it, so it will
/// remain zero.
class DefaultAllocator : public realm::Allocator {
public:
    DefaultAllocator()
    {
        m_baseline = 0;
    }

    MemRef do_alloc(const size_t size) override
    {
        char* addr = static_cast<char*>(::malloc(size));
        if (REALM_UNLIKELY(REALM_COVER_NEVER(!addr))) {
            // LCOV_EXCL_START
            REALM_ASSERT_DEBUG(errno == ENOMEM);
            throw std::bad_alloc();
            // LCOV_EXCL_STOP
        }
#if REALM_ENABLE_ALLOC_SET_ZERO
        std::fill(addr, addr + size, 0);
#endif
        return MemRef(addr, reinterpret_cast<size_t>(addr), *this);
    }

    MemRef do_realloc(ref_type, char* addr, size_t old_size, size_t new_size) override
    {
        char* new_addr = static_cast<char*>(::realloc(const_cast<char*>(addr), new_size));
        if (REALM_UNLIKELY(REALM_COVER_NEVER(!new_addr))) {
            // LCOV_EXCL_START
            REALM_ASSERT_DEBUG(errno == ENOMEM);
            throw std::bad_alloc();
            // LCOV_EXCL_STOP
        }
#if REALM_ENABLE_ALLOC_SET_ZERO
        std::fill(new_addr + old_size, new_addr + new_size, 0);
#else
        static_cast<void>(old_size);
#endif
        return MemRef(new_addr, reinterpret_cast<size_t>(new_addr), *this);
    }

    void do_free(ref_type, char* addr) override
    {
        ::free(addr);
    }

    char* do_translate(ref_type ref) const noexcept override
    {
        return reinterpret_cast<char*>(ref);
    }

    void verify() const override {}
    void get_or_add_xover_mapping(RefTranslation&, size_t, size_t, size_t) override
    {
        REALM_ASSERT(false);
    }
};

// This variable is declared such that get_default() can return it. It could be a static local variable, but
// Valgrind/Helgrind gives a false error report because it doesn't recognize gcc's static variable initialization
// mutex
DefaultAllocator default_alloc;

} // anonymous namespace

namespace realm {

Allocator& Allocator::get_default() noexcept
{
    return default_alloc;
}
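As an aside, a minimal sketch of how this allocator is reached in practice. It assumes the non-virtual Allocator wrappers alloc(), translate() and free_() forward to the do_* overrides above; treat the exact signatures as assumptions, not as the verified realm-core API.

#include <realm/alloc.hpp>

// Minimal usage sketch (assumed wrapper signatures; not part of alloc.cpp).
void default_alloc_example()
{
    realm::Allocator& alloc = realm::Allocator::get_default();
    realm::MemRef mem = alloc.alloc(64);          // DefaultAllocator::do_alloc -> ::malloc
    // For this allocator a ref is simply the heap address, so translation
    // is a reinterpret_cast (see do_translate above):
    char* addr = alloc.translate(mem.get_ref());
    addr[0] = 1;                                  // use the memory
    alloc.free_(mem.get_ref(), mem.get_addr());   // DefaultAllocator::do_free -> ::free
}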

// This function is called to handle translation of a ref which is above the limit for its
// memory mapping. This requires one of three things:
// * bumping the limit of the mapping (if the entire array is inside the mapping)
// * adding a cross-over mapping (if the array crosses a mapping boundary)
// * using an already established cross-over mapping (ditto)
// This can proceed concurrently with other calls to translate().
char* Allocator::translate_less_critical(RefTranslation* ref_translation_ptr, ref_type ref,
                                         bool known_in_slab) const noexcept
{
    size_t idx = get_section_index(ref);
    RefTranslation& txl = ref_translation_ptr[idx];
    size_t offset = ref - get_section_base(idx);
    char* addr = txl.mapping_addr + offset;
    util::encryption_read_barrier(addr, NodeHeader::header_size, txl.encrypted_mapping);
    // If we know the translation is inside the slab area, we don't need to check
    // for anything beyond the header, and we don't need to check if decryption is needed.
    auto size = known_in_slab ? 8 : NodeHeader::get_byte_size_from_header(addr);
    bool crosses_mapping = offset + size > (1 << section_shift);
    // Move the limit on use of the existing primary mapping.
    // Take into account that another thread may attempt to change / have changed it concurrently.
    size_t lowest_possible_xover_offset = txl.lowest_possible_xover_offset.load(std::memory_order_relaxed);
    auto new_lowest_possible_xover_offset = offset + (crosses_mapping ? 0 : size);
    while (new_lowest_possible_xover_offset > lowest_possible_xover_offset) {
        if (txl.lowest_possible_xover_offset.compare_exchange_weak(
                lowest_possible_xover_offset, new_lowest_possible_xover_offset, std::memory_order_relaxed))
            break;
    }
    if (REALM_LIKELY(!crosses_mapping)) {
        // Array fits inside primary mapping, no new mapping needed.
        util::encryption_read_barrier(addr, size, txl.encrypted_mapping);
        return addr;
    }
    // We need a cross-over mapping. If one is already established, use that.
    auto xover_mapping_addr = txl.xover_mapping_addr.load(std::memory_order_acquire);
    if (!xover_mapping_addr) {
        // We need to establish a xover mapping - or wait for another thread to finish
        // establishing one:
        const_cast<Allocator*>(this)->get_or_add_xover_mapping(txl, idx, offset, size);
        // Reload (can be relaxed since the call above synchronizes on a mutex).
        xover_mapping_addr = txl.xover_mapping_addr.load(std::memory_order_relaxed);
    }
    // The array is now known to be inside the established xover mapping:
    addr = xover_mapping_addr + (offset - txl.xover_mapping_base);
    util::encryption_read_barrier(addr, size, txl.xover_encrypted_mapping);
    return addr;
}
} // namespace realm
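The lock-free loop above that raises lowest_possible_xover_offset is a standard monotonic-watermark pattern. A self-contained sketch of the same idea:

#include <atomic>
#include <cstddef>

// Standalone sketch of the pattern used in translate_less_critical: raise
// an atomic watermark monotonically, tolerating concurrent updates. On CAS
// failure, compare_exchange_weak reloads `current`, so the loop re-checks
// whether the candidate still improves on the stored value.
void raise_watermark(std::atomic<std::size_t>& watermark, std::size_t candidate)
{
    std::size_t current = watermark.load(std::memory_order_relaxed);
    while (candidate > current) {
        if (watermark.compare_exchange_weak(current, candidate, std::memory_order_relaxed))
            break;
    }
}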