• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

realm / realm-core / 2213

10 Apr 2024 11:21PM UTC coverage: 91.792% (-0.8%) from 92.623%
2213

push

Evergreen

web-flow
Add missing availability checks for SecCopyErrorMessageString (#7577)

This requires iOS 11.3 and we currently target iOS 11.

94842 of 175770 branches covered (53.96%)

7 of 22 new or added lines in 2 files covered. (31.82%)

1861 existing lines in 82 files now uncovered.

242866 of 264583 relevant lines covered (91.79%)

5593111.45 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

83.48
/src/realm/util/encrypted_file_mapping.cpp
1
/*************************************************************************
2
 *
3
 * Copyright 2016 Realm Inc.
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 * http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 *
17
 **************************************************************************/
18

19
#include <realm/util/encrypted_file_mapping.hpp>
20

21
#include <realm/util/file_mapper.hpp>
22

23
#if REALM_ENABLE_ENCRYPTION
24
#include <realm/util/aes_cryptor.hpp>
25
#include <realm/util/errno.hpp>
26
#include <realm/utilities.hpp>
27
#include <realm/util/sha_crypto.hpp>
28
#include <realm/util/terminate.hpp>
29

30
#include <cstdlib>
31
#include <algorithm>
32
#include <chrono>
33
#include <stdexcept>
34
#include <string_view>
35
#include <system_error>
36
#include <thread>
37

38
#ifdef REALM_DEBUG
39
#include <cstdio>
40
#endif
41

42
#include <array>
43
#include <cstring>
44
#include <iostream>
45

46
#if defined(_WIN32)
47
#include <Windows.h>
48
#include <bcrypt.h>
49
#pragma comment(lib, "bcrypt.lib")
50
#else
51
#include <sys/mman.h>
52
#include <unistd.h>
53
#include <pthread.h>
54
#endif
55

56
namespace realm::util {
57
// Shared per-file encryption state. The 64-byte key is forwarded to the
// AESCryptor, which splits it into a 32-byte AES-256 key and a 32-byte HMAC key.
SharedFileInfo::SharedFileInfo(const uint8_t* key)
    : cryptor(key)
{
}
61

62
// We have the following constraints here:
63
//
64
// 1. When writing, we only know which 4k page is dirty, and not what bytes
65
//    within the page are dirty, so we always have to write in 4k blocks.
66
// 2. Pages being written need to be entirely within an 8k-aligned block to
67
//    ensure that they're written to the hardware in atomic blocks.
68
// 3. We need to store the IV used for each 4k page somewhere, so that we can
69
//    ensure that we never reuse an IV (and still be decryptable).
70
//
71
// Because pages need to be aligned, we can't just prepend the IV to each page,
72
// or we'd have to double the size of the file (as the rest of the 4k block
73
// containing the IV would not be usable). Writing the IVs to a different part
74
// of the file from the data results in them not being in the same 8k block, and
75
// so it is possible that only the IV or only the data actually gets updated on
76
// disk. We deal with this by storing four pieces of data about each page: the
77
// hash of the encrypted data, the current IV, the hash of the previous encrypted
78
// data, and the previous IV. To write, we encrypt the data, hash the ciphertext,
79
// then write the new IV/ciphertext hash, fsync(), and then write the new
80
// ciphertext. This ensures that if an error occurs between writing the IV and
81
// the ciphertext, we can still determine that we should use the old IV, since
82
// the ciphertext's hash will match the old ciphertext.
83

84
// Per-4k-block encryption metadata: the current IV and HMAC of the current
// ciphertext, plus the previous IV/HMAC pair. Keeping the previous pair lets
// a reader detect a torn write (IV updated but data not yet written) and fall
// back to the old IV. Field order and sizes are load-bearing: the code
// elsewhere memcpy()s the (iv, hmac) pairs as contiguous 32-byte units.
struct iv_table {
    uint32_t iv1 = 0;
    std::array<uint8_t, 28> hmac1 = {};
    uint32_t iv2 = 0;
    std::array<uint8_t, 28> hmac2 = {};
    // All four fields must match for two entries to compare equal.
    bool operator==(const iv_table& other) const
    {
        return hmac1 == other.hmac1 && hmac2 == other.hmac2 && iv1 == other.iv1 && iv2 == other.iv2;
    }
    bool operator!=(const iv_table& other) const
    {
        return !(*this == other);
    }
};
98

99
namespace {
// AES block size in bytes (CBC mode operates on 16-byte cipher blocks).
const int aes_block_size = 16;
// Unit of encryption and of IV bookkeeping: one 4k block per iv_table entry.
const size_t block_size = 4096;

// One iv_table per data block; a single 4k metadata block therefore holds the
// iv_tables for the following block_size / sizeof(iv_table) == 64 data blocks.
const size_t metadata_size = sizeof(iv_table);
const size_t blocks_per_metadata_block = block_size / metadata_size;

// map an offset in the data to the actual location in the file.
// The on-disk layout interleaves one metadata block before every
// blocks_per_metadata_block data blocks, so the real position is the logical
// one shifted forward by the number of preceding metadata blocks (at least 1).
template <typename Int>
Int real_offset(Int pos)
{
    REALM_ASSERT(pos >= 0);
    const size_t index = static_cast<size_t>(pos) / block_size;
    const size_t metadata_page_count = index / blocks_per_metadata_block + 1;
    return Int(pos + metadata_page_count * block_size);
}

// map a location in the file to the offset in the data
// (the inverse of real_offset(): subtract the metadata blocks that precede
// the given file position).
template <typename Int>
Int fake_offset(Int pos)
{
    REALM_ASSERT(pos >= 0);
    const size_t index = static_cast<size_t>(pos) / block_size;
    const size_t metadata_page_count = (index + blocks_per_metadata_block) / (blocks_per_metadata_block + 1);
    return pos - metadata_page_count * block_size;
}

// get the location of the iv_table for the given data (not file) position.
// Note: the `& (blocks_per_metadata_block - 1)` masking is index modulo
// blocks_per_metadata_block and relies on that constant being a power of two
// (4096 / 64 == 64).
off_t iv_table_pos(off_t pos)
{
    REALM_ASSERT(pos >= 0);
    const size_t index = static_cast<size_t>(pos) / block_size;
    const size_t metadata_block = index / blocks_per_metadata_block;
    const size_t metadata_index = index & (blocks_per_metadata_block - 1);
    return off_t(metadata_block * (blocks_per_metadata_block + 1) * block_size + metadata_index * metadata_size);
}

// Positioned write that saves and restores the file descriptor's current
// offset, so callers' sequential I/O position is unaffected.
void check_write(FileDesc fd, off_t pos, const void* data, size_t len)
{
    uint64_t orig = File::get_file_pos(fd);
    File::seek_static(fd, pos);
    File::write_static(fd, static_cast<const char*>(data), len);
    File::seek_static(fd, orig);
}

// Positioned read that saves and restores the file descriptor's current
// offset. Returns the number of bytes actually read (may be short at EOF).
size_t check_read(FileDesc fd, off_t pos, void* dst, size_t len)
{
    uint64_t orig = File::get_file_pos(fd);
    File::seek_static(fd, pos);
    size_t ret = File::read_static(fd, static_cast<char*>(dst), len);
    File::seek_static(fd, orig);
    return ret;
}

} // anonymous namespace
154

155
// Construct a cryptor from a 64-byte key: the first 32 bytes are the AES-256
// key, the last 32 bytes the HMAC-SHA224 key. Allocates the scratch buffers
// used for one block of ciphertext (m_rw_buffer) and plaintext (m_dst_buffer),
// and sets up the platform-specific cipher context.
AESCryptor::AESCryptor(const uint8_t* key)
    : m_rw_buffer(new char[block_size])
    , m_dst_buffer(new char[block_size])
{
    memcpy(m_aesKey.data(), key, 32);
    memcpy(m_hmacKey.data(), key + 32, 32);

#if REALM_PLATFORM_APPLE
    // A random iv is passed to CCCryptorCreate. This iv is *not used* by Realm; crypt() sets the
    // real IV via CCCryptorReset() before every encrypt/decrypt. We pass this random iv as an
    // attempt to suppress a false encryption security warning from the IBM Bluemix Security
    // Analyzer (PR[#2911])
    unsigned char u_iv[kCCKeySizeAES256];
    arc4random_buf(u_iv, kCCKeySizeAES256);
    void* iv = u_iv;
    CCCryptorCreate(kCCEncrypt, kCCAlgorithmAES, 0 /* options */, key, kCCKeySizeAES256, iv, &m_encr);
    CCCryptorCreate(kCCDecrypt, kCCAlgorithmAES, 0 /* options */, key, kCCKeySizeAES256, iv, &m_decr);
#elif defined(_WIN32)
    // Windows CNG: open the AES provider, select CBC chaining, and import the
    // raw key into a reusable key handle (m_aes_key_handle).
    BCRYPT_ALG_HANDLE hAesAlg = NULL;
    int ret;
    ret = BCryptOpenAlgorithmProvider(&hAesAlg, BCRYPT_AES_ALGORITHM, NULL, 0);
    REALM_ASSERT_RELEASE_EX(ret == 0 && "BCryptOpenAlgorithmProvider()", ret);

    ret = BCryptSetProperty(hAesAlg, BCRYPT_CHAINING_MODE, (PBYTE)BCRYPT_CHAIN_MODE_CBC,
                            sizeof(BCRYPT_CHAIN_MODE_CBC), 0);
    REALM_ASSERT_RELEASE_EX(ret == 0 && "BCryptSetProperty()", ret);

    ret = BCryptGenerateSymmetricKey(hAesAlg, &m_aes_key_handle, nullptr, 0, (PBYTE)key, 32, 0);
    REALM_ASSERT_RELEASE_EX(ret == 0 && "BCryptGenerateSymmetricKey()", ret);
#else
    // OpenSSL: one EVP context is reused for both directions; crypt()
    // re-initializes it per call with the desired mode and IV.
    m_ctx = EVP_CIPHER_CTX_new();
    if (!m_ctx)
        handle_error();
#endif
}
189

190
// Release the platform-specific cipher state acquired in the constructor.
// noexcept: none of the teardown APIs used here throw.
AESCryptor::~AESCryptor() noexcept
{
#if REALM_PLATFORM_APPLE
    CCCryptorRelease(m_encr);
    CCCryptorRelease(m_decr);
#elif defined(_WIN32)
    // The constructor imported the key with BCryptGenerateSymmetricKey();
    // destroy the key handle here so it is not leaked per cryptor instance.
    BCryptDestroyKey(m_aes_key_handle);
#else
    EVP_CIPHER_CTX_cleanup(m_ctx);
    EVP_CIPHER_CTX_free(m_ctx);
#endif
}
201

202
void AESCryptor::check_key(const uint8_t* key)
203
{
1,497✔
204
    if (memcmp(m_aesKey.data(), key, 32) != 0 || memcmp(m_hmacKey.data(), key + 32, 32) != 0)
1,497✔
205
        throw DecryptionFailed();
6✔
206
}
1,497✔
207

208
// Central funnel for unrecoverable failures reported by the underlying
// crypto library (e.g. OpenSSL EVP calls). Always throws.
void AESCryptor::handle_error()
{
    throw std::runtime_error("Error occurred in encryption layer");
}
212

213
// Pre-reserve the IV buffers for a file of `new_size` bytes. get_iv_table()
// is noexcept and asserts that it never has to grow capacity, so this must be
// called before the file is mapped at a larger size. Capacity is rounded up
// to a whole metadata chunk (relies on blocks_per_metadata_block being a
// power of two).
void AESCryptor::set_file_size(off_t new_size)
{
    REALM_ASSERT(new_size >= 0 && !int_cast_has_overflow<size_t>(new_size));
    size_t new_size_casted = size_t(new_size);
    size_t block_count = (new_size_casted + block_size - 1) / block_size;
    m_iv_buffer.reserve((block_count + blocks_per_metadata_block - 1) & ~(blocks_per_metadata_block - 1));
    m_iv_buffer_cache.reserve(m_iv_buffer.capacity());
}
221

222
// Return the iv_table entry for the 4k block containing `data_pos`.
// With IVLookupMode::UseCache a previously-read entry is returned as-is;
// with Refetch (or on a cache miss) the whole metadata chunk covering the
// block is (re)read from disk. noexcept: capacity was pre-reserved by
// set_file_size(), so resize() below cannot allocate (asserted).
iv_table& AESCryptor::get_iv_table(FileDesc fd, off_t data_pos, IVLookupMode mode) noexcept
{
    REALM_ASSERT(!int_cast_has_overflow<size_t>(data_pos));
    size_t data_pos_casted = size_t(data_pos);
    size_t idx = data_pos_casted / block_size;
    if (mode == IVLookupMode::UseCache && idx < m_iv_buffer.size())
        return m_iv_buffer[idx];

    // Read every metadata chunk from the first unloaded one (or the chunk
    // containing idx when refetching) up to and including idx's chunk.
    size_t block_start = std::min(m_iv_buffer.size(), (idx / blocks_per_metadata_block) * blocks_per_metadata_block);
    size_t block_end = 1 + idx / blocks_per_metadata_block;
    REALM_ASSERT(block_end * blocks_per_metadata_block <= m_iv_buffer.capacity()); // not safe to allocate here
    if (block_end * blocks_per_metadata_block > m_iv_buffer.size()) {
        m_iv_buffer.resize(block_end * blocks_per_metadata_block);
        m_iv_buffer_cache.resize(m_iv_buffer.size());
    }

    for (size_t i = block_start; i < block_end * blocks_per_metadata_block; i += blocks_per_metadata_block) {
        off_t iv_pos = iv_table_pos(off_t(i * block_size));
        size_t bytes = check_read(fd, iv_pos, &m_iv_buffer[i], block_size);
        if (bytes < block_size)
            break; // rest is zero-filled by resize()
    }

    return m_iv_buffer[idx];
}
247

248
// Compute HMAC-SHA224 of `src`/`len` with the cryptor's HMAC key and compare
// it against the stored `hmac`. Returns true when they match.
bool AESCryptor::check_hmac(const void* src, size_t len, const std::array<uint8_t, 28>& hmac) const
{
    std::array<uint8_t, 224 / 8> buffer;
    hmac_sha224(Span(reinterpret_cast<const uint8_t*>(src), len), buffer, m_hmacKey);

    // Constant-time memcmp to avoid timing attacks: OR together all byte
    // differences instead of early-exiting on the first mismatch.
    uint8_t result = 0;
    for (size_t i = 0; i < 224 / 8; ++i)
        result |= buffer[i] ^ hmac[i];
    return result == 0;
}
259

260
// Re-read the metadata chunk covering `data_pos` from disk and report, per
// page, whether the cached IVs show the page as unchanged (UpToDate) or as
// modified by another writer (RequiresRefresh). The result always contains an
// entry for `page_ndx_in_file_expected` (asserted).
util::FlatMap<size_t, IVRefreshState>
AESCryptor::refresh_ivs(FileDesc fd, off_t data_pos, size_t page_ndx_in_file_expected, size_t end_page_ndx_in_file)
{
    REALM_ASSERT_EX(page_ndx_in_file_expected < end_page_ndx_in_file, page_ndx_in_file_expected,
                    end_page_ndx_in_file);
    // the indices returned are page indices, not block indices
    util::FlatMap<size_t, IVRefreshState> page_states;

    REALM_ASSERT(!int_cast_has_overflow<size_t>(data_pos));
    size_t data_pos_casted = size_t(data_pos);
    // the call to get_iv_table() below reads in all ivs in a chunk with size = blocks_per_metadata_block
    // so we will know if any iv in this chunk has changed
    const size_t block_ndx_refresh_start =
        ((data_pos_casted / block_size) / blocks_per_metadata_block) * blocks_per_metadata_block;
    const size_t block_ndx_refresh_end = block_ndx_refresh_start + blocks_per_metadata_block;
    REALM_ASSERT_EX(block_ndx_refresh_end <= m_iv_buffer.size(), block_ndx_refresh_start, block_ndx_refresh_end,
                    m_iv_buffer.size());

    // Refetch forces the chunk to be re-read from disk into m_iv_buffer,
    // while m_iv_buffer_cache still holds the previously observed values.
    get_iv_table(fd, data_pos, IVLookupMode::Refetch);

    size_t number_of_identical_blocks = 0;
    size_t last_page_index = -1; // wraps to SIZE_MAX: sentinel that can't equal a real page index
    constexpr iv_table uninitialized_iv = {};
    // there may be multiple iv blocks per page so all must be unchanged for a page
    // to be considered unchanged. If any one of the ivs has changed then the entire page
    // must be refreshed. Eg. with a page_size() of 16k and block_size of 4k, if any of
    // the 4 ivs in that page are different, the entire page must be refreshed.
    const size_t num_required_identical_blocks_for_page_match = page_size() / block_size;
    for (size_t block_ndx = block_ndx_refresh_start; block_ndx < block_ndx_refresh_end; ++block_ndx) {
        size_t page_index = block_ndx * block_size / page_size();
        if (page_index >= end_page_ndx_in_file) {
            break;
        }
        if (page_index != last_page_index) {
            // crossed into a new page: restart the identical-block count
            number_of_identical_blocks = 0;
        }
        if (m_iv_buffer_cache[block_ndx] != m_iv_buffer[block_ndx] || m_iv_buffer[block_ndx] == uninitialized_iv) {
            // changed (or never written) block: the whole page needs a refresh,
            // and the cache is updated to the just-read value
            page_states[page_index] = IVRefreshState::RequiresRefresh;
            m_iv_buffer_cache[block_ndx] = m_iv_buffer[block_ndx];
        }
        else {
            ++number_of_identical_blocks;
        }
        if (number_of_identical_blocks >= num_required_identical_blocks_for_page_match) {
            // every block of this page matched the cache, so the page is clean
            REALM_ASSERT_EX(page_states.count(page_index) == 0, page_index, page_ndx_in_file_expected);
            page_states[page_index] = IVRefreshState::UpToDate;
        }
        last_page_index = page_index;
    }
    REALM_ASSERT_EX(page_states.count(page_ndx_in_file_expected) == 1, page_states.size(), page_ndx_in_file_expected,
                    block_ndx_refresh_start, blocks_per_metadata_block);
    return page_states;
}
313

314
// Read and decrypt `size` bytes (a multiple of block_size, asserted) starting
// at data offset `pos` into `dst`. Returns the number of bytes decrypted,
// which is short when the tail of the range has never been written. Throws
// DecryptionFailed when the key is wrong / data corrupt and no concurrent
// writer can explain the mismatch.
size_t AESCryptor::read(FileDesc fd, off_t pos, char* dst, size_t size, WriteObserver* observer)
{
    REALM_ASSERT_EX(size % block_size == 0, size, block_size);
    // We need to throw DecryptionFailed if the key is incorrect or there has been a corruption in the data but
    // not in a reader starvation scenario where a different process is writing pages and ivs faster than we can read
    // them. We also want to optimize for a single process writer since in that case all the cached ivs are correct.
    // To do this, we first attempt to use the cached IV, and if it is invalid, read from disk again. During reader
    // starvation, the just read IV could already be out of date with the data page, so continue trying to read until
    // a match is found (for up to 5 seconds before giving up entirely).
    size_t retry_count = 0;
    std::pair<iv_table, size_t> last_iv_and_data_hash;
    auto retry_start_time = std::chrono::steady_clock::now();
    size_t num_identical_reads = 1;
    // Record one failed validation attempt; throws DecryptionFailed once we
    // are provably alone or the retry budget is exhausted.
    auto retry = [&](std::string_view page_data, const iv_table& iv, const char* debug_from) {
        constexpr auto max_retry_period = std::chrono::seconds(5);
        auto elapsed = std::chrono::steady_clock::now() - retry_start_time;
        bool we_are_alone = true;
        // not having an observer set means that we're alone. (or should mean it)
        if (observer) {
            we_are_alone = observer->no_concurrent_writer_seen();
        }
        if (we_are_alone || (retry_count > 0 && elapsed > max_retry_period)) {
            auto str = util::format("unable to decrypt after %1 seconds (retry_count=%2, from=%3, size=%4)",
                                    std::chrono::duration_cast<std::chrono::seconds>(elapsed).count(), retry_count,
                                    debug_from, size);
            // std::cerr << std::endl << "*Timeout: " << str << std::endl;
            throw DecryptionFailed(str);
        }
        else {
            // don't wait on the first retry as we want to optimize the case where the first read
            // from the iv table cache didn't validate and we are fetching the iv block from disk for the first time
            auto cur_iv_and_data_hash = std::make_pair(iv, std::hash<std::string_view>{}(page_data));
            if (retry_count != 0) {
                if (last_iv_and_data_hash == cur_iv_and_data_hash) {
                    ++num_identical_reads;
                }
                // don't retry right away if there are potentially other external writers
                std::this_thread::yield();
            }
            last_iv_and_data_hash = cur_iv_and_data_hash;
            ++retry_count;
        }
    };

    // Decide whether a failed validation is worth retrying at all.
    auto should_retry = [&]() -> bool {
        // if we don't have an observer object, we're guaranteed to be alone in the world,
        // and retrying will not help us, since the file is not being changed.
        if (!observer)
            return false;
        // if no-one is mutating the file, retrying will also not help:
        if (observer && observer->no_concurrent_writer_seen())
            return false;
        // if we do not observe identical data or iv within several sequential reads then
        // this is a multiprocess reader starvation scenario so keep trying until we get a match
        return retry_count <= 5 || (retry_count - num_identical_reads > 1 && retry_count < 20);
    };

    size_t bytes_read = 0;
    while (bytes_read < size) {
        ssize_t actual = check_read(fd, real_offset(pos), m_rw_buffer.get(), block_size);

        if (actual == 0)
            return bytes_read;

        // First attempt uses the cached IV; subsequent attempts refetch it from disk.
        iv_table& iv = get_iv_table(fd, pos, retry_count == 0 ? IVLookupMode::UseCache : IVLookupMode::Refetch);
        if (iv.iv1 == 0) {
            if (should_retry()) {
                retry(std::string_view{m_rw_buffer.get(), block_size}, iv, "iv1 == 0");
                continue;
            }
            // This block has never been written to, so we've just read pre-allocated
            // space. No memset() since the code using this doesn't rely on
            // pre-allocated space being zeroed.
            return bytes_read;
        }

        if (!check_hmac(m_rw_buffer.get(), actual, iv.hmac1)) {
            // Either the DB is corrupted or we were interrupted between writing the
            // new IV and writing the data
            if (iv.iv2 == 0) {
                if (should_retry()) {
                    retry(std::string_view{m_rw_buffer.get(), block_size}, iv, "iv2 == 0");
                    continue;
                }
                // Very first write was interrupted
                return bytes_read;
            }

            if (check_hmac(m_rw_buffer.get(), actual, iv.hmac2)) {
                // Un-bump the IV since the write with the bumped IV never actually
                // happened
                memcpy(&iv.iv1, &iv.iv2, 32);
            }
            else {
                // If the file has been shrunk and then re-expanded, we may have
                // old hmacs that don't go with this data. ftruncate() is
                // required to fill any added space with zeroes, so assume that's
                // what happened if the buffer is all zeroes
                ssize_t i;
                for (i = 0; i < actual; ++i) {
                    if (m_rw_buffer[i] != 0) {
                        break;
                    }
                }
                if (i != actual) {
                    // at least one byte wasn't zero
                    retry(std::string_view{m_rw_buffer.get(), block_size}, iv, "i != bytes_read");
                    continue;
                }
                return bytes_read;
            }
        }

        // We may expect some address ranges of the destination buffer of
        // AESCryptor::read() to stay unmodified, i.e. being overwritten with
        // the same bytes as already present, and may have read-access to these
        // from other threads while decryption is taking place.
        //
        // However, some implementations of AES_cbc_encrypt(), in particular
        // OpenSSL, will put garbled bytes as an intermediate step during the
        // operation which will lead to incorrect data being read by other
        // readers concurrently accessing that page. Incorrect data leads to
        // crashes.
        //
        // We therefore decrypt to a temporary buffer first and then copy the
        // completely decrypted data after.
        crypt(mode_Decrypt, pos, m_dst_buffer.get(), m_rw_buffer.get(), reinterpret_cast<const char*>(&iv.iv1));
        memcpy(dst, m_dst_buffer.get(), block_size);

        pos += block_size;
        dst += block_size;
        bytes_read += block_size;
        retry_count = 0; // this block validated, so the next block starts fresh
    }
    return bytes_read;
}
450

451
// Best-effort diagnostic read of a single block: never throws and always
// fills `dst` (with 0x55 on read failure, 0xAA for a never-written block, or
// the decryption result otherwise), logging each anomaly to stderr.
void AESCryptor::try_read_block(FileDesc fd, off_t pos, char* dst) noexcept
{
    ssize_t bytes_read = check_read(fd, real_offset(pos), m_rw_buffer.get(), block_size);

    if (bytes_read == 0) {
        std::cerr << "Read failed: 0x" << std::hex << pos << std::endl;
        memset(dst, 0x55, block_size);
        return;
    }

    iv_table& iv = get_iv_table(fd, pos, IVLookupMode::Refetch);
    if (iv.iv1 == 0) {
        std::cerr << "Block never written: 0x" << std::hex << pos << std::endl;
        memset(dst, 0xAA, block_size);
        return;
    }

    if (!check_hmac(m_rw_buffer.get(), bytes_read, iv.hmac1)) {
        if (iv.iv2 == 0) {
            std::cerr << "First write interrupted: 0x" << std::hex << pos << std::endl;
        }

        if (check_hmac(m_rw_buffer.get(), bytes_read, iv.hmac2)) {
            std::cerr << "Restore old IV: 0x" << std::hex << pos << std::endl;
            // roll back to the previous (iv, hmac) pair — see read()
            memcpy(&iv.iv1, &iv.iv2, 32);
        }
        else {
            std::cerr << "Checksum failed: 0x" << std::hex << pos << std::endl;
        }
    }
    // Decrypt with whichever IV survived the checks, even if the HMAC failed:
    // this is a diagnostic path, so produce *something* for inspection.
    crypt(mode_Decrypt, pos, dst, m_rw_buffer.get(), reinterpret_cast<const char*>(&iv.iv1));
}
483

484
// Encrypt and write `size` bytes (a multiple of block_size, asserted) from
// `src` to data offset `pos`. For each block: save the old (iv, hmac) pair,
// bump the IV, encrypt, write the metadata entry first and then the
// ciphertext, so a crash between the two writes is detectable by read().
void AESCryptor::write(FileDesc fd, off_t pos, const char* src, size_t size, WriteMarker* marker) noexcept
{
    REALM_ASSERT(size % block_size == 0);
    while (size > 0) {
        iv_table& iv = get_iv_table(fd, pos);

        memcpy(&iv.iv2, &iv.iv1, 32); // this is also copying the hmac
        do {
            ++iv.iv1;
            // 0 is reserved for never-been-used, so bump if we just wrapped around
            if (iv.iv1 == 0)
                ++iv.iv1;

            crypt(mode_Encrypt, pos, m_rw_buffer.get(), src, reinterpret_cast<const char*>(&iv.iv1));
            hmac_sha224(Span(reinterpret_cast<uint8_t*>(m_rw_buffer.get()), block_size), iv.hmac1, m_hmacKey);
            // In the extremely unlikely case that both the old and new versions have
            // the same hash we won't know which IV to use, so bump the IV until
            // they're different.
        } while (REALM_UNLIKELY(iv.hmac1 == iv.hmac2));

        // The marker (when present) brackets the two file writes so a crash
        // mid-write can be detected on recovery.
        if (marker)
            marker->mark(pos);
        check_write(fd, iv_table_pos(pos), &iv, sizeof(iv));
        check_write(fd, real_offset(pos), m_rw_buffer.get(), block_size);
        if (marker)
            marker->unmark();

        pos += block_size;
        src += block_size;
        size -= block_size;
    }
}
516

517
// Encrypt or decrypt exactly one block_size chunk from `src` into `dst` using
// AES-256-CBC. The 16-byte CBC IV is built from the 4-byte stored IV counter
// followed by the file position (rest zero), so every (block, version) pair
// gets a distinct IV.
void AESCryptor::crypt(EncryptionMode mode, off_t pos, char* dst, const char* src, const char* stored_iv) noexcept
{
    uint8_t iv[aes_block_size] = {0};
    memcpy(iv, stored_iv, 4);
    memcpy(iv + 4, &pos, sizeof(pos));

#if REALM_PLATFORM_APPLE
    // Reset the persistent cryptor with this block's IV before each operation.
    CCCryptorRef cryptor = mode == mode_Encrypt ? m_encr : m_decr;
    CCCryptorReset(cryptor, iv);

    size_t bytesEncrypted = 0;
    CCCryptorStatus err = CCCryptorUpdate(cryptor, src, block_size, dst, block_size, &bytesEncrypted);
    REALM_ASSERT(err == kCCSuccess);
    REALM_ASSERT(bytesEncrypted == block_size);
#elif defined(_WIN32)
    // CNG updates the iv buffer in place; block_size input always yields
    // block_size output since no padding is involved.
    ULONG cbData;
    int i;

    if (mode == mode_Encrypt) {
        i = BCryptEncrypt(m_aes_key_handle, (PUCHAR)src, block_size, nullptr, (PUCHAR)iv, sizeof(iv), (PUCHAR)dst,
                          block_size, &cbData, 0);
        REALM_ASSERT_RELEASE_EX(i == 0 && "BCryptEncrypt()", i);
        REALM_ASSERT_RELEASE_EX(cbData == block_size && "BCryptEncrypt()", cbData);
    }
    else if (mode == mode_Decrypt) {
        i = BCryptDecrypt(m_aes_key_handle, (PUCHAR)src, block_size, nullptr, (PUCHAR)iv, sizeof(iv), (PUCHAR)dst,
                          block_size, &cbData, 0);
        REALM_ASSERT_RELEASE_EX(i == 0 && "BCryptDecrypt()", i);
        REALM_ASSERT_RELEASE_EX(cbData == block_size && "BCryptDecrypt()", cbData);
    }
    else {
        REALM_UNREACHABLE();
    }

#else
    if (!EVP_CipherInit_ex(m_ctx, EVP_aes_256_cbc(), NULL, m_aesKey.data(), iv, mode))
        handle_error();

    int len;
    // Use zero padding - we always write a whole page
    EVP_CIPHER_CTX_set_padding(m_ctx, 0);

    if (!EVP_CipherUpdate(m_ctx, reinterpret_cast<uint8_t*>(dst), &len, reinterpret_cast<const uint8_t*>(src),
                          block_size))
        handle_error();

    // Finalize the encryption. Should not output further data.
    if (!EVP_CipherFinal_ex(m_ctx, reinterpret_cast<uint8_t*>(dst) + len, &len))
        handle_error();
#endif
}
568

569
// Create a mapping over [file_offset, file_offset + size) of an encrypted
// file and register it in the file's shared mapping list so sibling mappings
// can copy up-to-date pages from each other.
EncryptedFileMapping::EncryptedFileMapping(SharedFileInfo& file, size_t file_offset, void* addr, size_t size,
                                           File::AccessMode access, util::WriteObserver* observer,
                                           util::WriteMarker* marker)
    : m_file(file)
    , m_page_shift(log2(realm::util::page_size()))
    , m_blocks_per_page(static_cast<size_t>(1ULL << m_page_shift) / block_size)
    , m_num_decrypted(0)
    , m_access(access)
    , m_observer(observer)
    , m_marker(marker)
#ifdef REALM_DEBUG
    , m_validate_buffer(new char[static_cast<size_t>(1ULL << m_page_shift)])
#endif
{
    // page size must be a whole multiple of the 4k encryption block size
    REALM_ASSERT(m_blocks_per_page * block_size == static_cast<size_t>(1ULL << m_page_shift));
    set(addr, size, file_offset); // throws
    file.mappings.push_back(this);
}
587

588
// Flush and sync any dirty pages (writable mappings only) and deregister
// this mapping from the file's shared mapping list.
EncryptedFileMapping::~EncryptedFileMapping()
{
    // no page may still be in the middle of a write transaction
    for (auto& e : m_page_state) {
        REALM_ASSERT(is_not(e, Writable));
    }
    if (m_access == File::access_ReadWrite) {
        flush();
        sync();
    }
    // `this` appears exactly once in mappings (pushed in the constructor), so
    // erasing the single iterator returned by remove() is sufficient.
    m_file.mappings.erase(remove(m_file.mappings.begin(), m_file.mappings.end(), this));
}
599

600
char* EncryptedFileMapping::page_addr(size_t local_page_ndx) const noexcept
601
{
440,406✔
602
    REALM_ASSERT_EX(local_page_ndx < m_page_state.size(), local_page_ndx, m_page_state.size());
440,406✔
603
    return static_cast<char*>(m_addr) + (local_page_ndx << m_page_shift);
440,406✔
604
}
440,406✔
605

606
// Note that a page's on-disk contents may have changed: re-enable scanning of
// the chunk containing it. Silently ignores pages outside this mapping. Must
// only be called for pages that are not UpToDate/Dirty/Writable (asserted).
void EncryptedFileMapping::mark_outdated(size_t local_page_ndx) noexcept
{
    if (local_page_ndx >= m_page_state.size())
        return;
    REALM_ASSERT(is_not(m_page_state[local_page_ndx], UpToDate));
    REALM_ASSERT(is_not(m_page_state[local_page_ndx], Dirty));
    REALM_ASSERT(is_not(m_page_state[local_page_ndx], Writable));

    // clear the chunk's dont-scan flag so it is examined again
    size_t chunk_ndx = local_page_ndx >> page_to_chunk_shift;
    if (m_chunk_dont_scan[chunk_ndx])
        m_chunk_dont_scan[chunk_ndx] = 0;
}
618

619
bool EncryptedFileMapping::copy_up_to_date_page(size_t local_page_ndx) noexcept
620
{
129,354✔
621
    REALM_ASSERT_EX(local_page_ndx < m_page_state.size(), local_page_ndx, m_page_state.size());
129,354✔
622
    // Precondition: this method must never be called for a page which
102,078✔
623
    // is already up to date.
102,078✔
624
    REALM_ASSERT(is_not(m_page_state[local_page_ndx], UpToDate));
129,354✔
625
    for (size_t i = 0; i < m_file.mappings.size(); ++i) {
382,533✔
626
        EncryptedFileMapping* m = m_file.mappings[i];
254,094✔
627
        size_t page_ndx_in_file = local_page_ndx + m_first_page;
254,094✔
628
        if (m == this || !m->contains_page(page_ndx_in_file))
254,094✔
629
            continue;
253,002✔
630

534✔
631
        size_t shadow_mapping_local_ndx = page_ndx_in_file - m->m_first_page;
1,092✔
632
        if (is(m->m_page_state[shadow_mapping_local_ndx], UpToDate)) {
1,092✔
633
            memcpy(page_addr(local_page_ndx), m->page_addr(shadow_mapping_local_ndx),
915✔
634
                   static_cast<size_t>(1ULL << m_page_shift));
915✔
635
            return true;
915✔
636
        }
915✔
637
    }
1,092✔
638
    return false;
128,883✔
639
}
129,354✔
640

641
// Bring the page at `local_page_ndx` up to date: first by trying to copy it
// from a sibling mapping, otherwise by decrypting it from the file.
// `required` is the number of bytes from the start of the page the caller
// actually needs; a short decrypt is tolerated (the tail is filled with 0x55)
// as long as at least `required` bytes were read, else DecryptionFailed is
// thrown. On exit the page is UpToDate and not StaleIV.
void EncryptedFileMapping::refresh_page(size_t local_page_ndx, size_t required)
{
    REALM_ASSERT_EX(local_page_ndx < m_page_state.size(), local_page_ndx, m_page_state.size());
    REALM_ASSERT(is_not(m_page_state[local_page_ndx], Dirty));
    REALM_ASSERT(is_not(m_page_state[local_page_ndx], Writable));
    char* addr = page_addr(local_page_ndx);

    if (!copy_up_to_date_page(local_page_ndx)) {
        const size_t page_ndx_in_file = local_page_ndx + m_first_page;
        const size_t end_page_ndx_in_file = m_first_page + m_page_state.size();
        off_t data_pos = off_t(page_ndx_in_file << m_page_shift);
        if (is(m_page_state[local_page_ndx], StaleIV)) {
            // The page's IV may have changed on disk (presumably written by
            // another process — see mark_pages_for_IV_check). Re-read the IVs
            // for this whole mapping range and update all affected pages at once.
            auto refreshed_ivs =
                m_file.cryptor.refresh_ivs(m_file.fd, data_pos, page_ndx_in_file, end_page_ndx_in_file);
            for (const auto& [page_ndx, state] : refreshed_ivs) {
                size_t local_page_ndx_of_iv_change = page_ndx - m_first_page;
                REALM_ASSERT_EX(contains_page(page_ndx), page_ndx, m_first_page, m_page_state.size());
                // Pages with local modifications keep their in-memory content
                // regardless of what the on-disk IV says.
                if (is(m_page_state[local_page_ndx_of_iv_change], Dirty | Writable)) {
                    continue;
                }
                switch (state) {
                    case IVRefreshState::UpToDate:
                        // On-disk data is unchanged: a StaleIV page can be
                        // promoted straight back to UpToDate without re-decrypting.
                        if (is(m_page_state[local_page_ndx_of_iv_change], StaleIV)) {
                            set(m_page_state[local_page_ndx_of_iv_change], UpToDate);
                            clear(m_page_state[local_page_ndx_of_iv_change], StaleIV);
                        }
                        break;
                    case IVRefreshState::RequiresRefresh:
                        // Data changed on disk: drop both flags so the page is
                        // re-decrypted (below for this page, lazily for others).
                        clear(m_page_state[local_page_ndx_of_iv_change], StaleIV);
                        clear(m_page_state[local_page_ndx_of_iv_change], UpToDate);
                        break;
                }
            }
            REALM_ASSERT_EX(refreshed_ivs.count(page_ndx_in_file) == 1, page_ndx_in_file, refreshed_ivs.size());
            // If this page's IV turned out to be unchanged it was already
            // promoted to UpToDate in the loop above; nothing to decrypt.
            if (refreshed_ivs[page_ndx_in_file] == IVRefreshState::UpToDate) {
                return;
            }
        }
        size_t size = static_cast<size_t>(1ULL << m_page_shift);
        size_t actual = m_file.cryptor.read(m_file.fd, data_pos, addr, size, m_observer);
        if (actual < size) {
            if (actual >= required) {
                // Caller doesn't need the missing tail; fill it with a
                // recognizable pattern instead of leaving it undefined.
                memset(addr + actual, 0x55, size - actual);
            }
            else {
                throw DecryptionFailed();
            }
        }
    }
    // Only count the page as newly decrypted if it wasn't already UpToDate.
    if (is_not(m_page_state[local_page_ndx], UpToDate))
        m_num_decrypted++;
    set(m_page_state[local_page_ndx], UpToDate);
    clear(m_page_state[local_page_ndx], StaleIV);
}
695

696
// Downgrade every clean UpToDate page, in every mapping of this file, to
// StaleIV. Such pages will have their on-disk IV re-checked on next access
// (see refresh_page) before their decrypted content is trusted again.
// Dirty/Writable pages are left untouched.
void EncryptedFileMapping::mark_pages_for_IV_check()
{
    for (size_t i = 0; i < m_file.mappings.size(); ++i) {
        EncryptedFileMapping* m = m_file.mappings[i];
        for (size_t pg = m->get_start_index(); pg < m->get_end_index(); ++pg) {
            size_t local_page_ndx = pg - m->m_first_page;
            if (is(m->m_page_state[local_page_ndx], UpToDate) &&
                is_not(m->m_page_state[local_page_ndx], Dirty | Writable)) {
                // UpToDate and StaleIV are mutually exclusive by construction.
                REALM_ASSERT(is_not(m->m_page_state[local_page_ndx], StaleIV));
                clear(m->m_page_state[local_page_ndx], UpToDate);
                set(m->m_page_state[local_page_ndx], StaleIV);
            }
        }
    }
}
711

712
// Propagate the modified byte range [begin_offset, end_offset) of a local page
// into every other mapping of the same file page, then mark this page Dirty
// (and no longer Writable). The page must have been made Writable and UpToDate
// beforehand (via read_barrier with to_modify=true).
void EncryptedFileMapping::write_and_update_all(size_t local_page_ndx, size_t begin_offset,
                                                size_t end_offset) noexcept
{
    REALM_ASSERT(is(m_page_state[local_page_ndx], Writable));
    REALM_ASSERT(is(m_page_state[local_page_ndx], UpToDate));
    // Go through all other mappings of this file and copy changes into those mappings
    size_t page_ndx_in_file = local_page_ndx + m_first_page;
    for (size_t i = 0; i < m_file.mappings.size(); ++i) {
        EncryptedFileMapping* m = m_file.mappings[i];
        if (m != this && m->contains_page(page_ndx_in_file)) {
            size_t shadow_local_page_ndx = page_ndx_in_file - m->m_first_page;
            if (is(m->m_page_state[shadow_local_page_ndx], UpToDate) ||
                is(m->m_page_state[shadow_local_page_ndx], StaleIV)) { // only keep up to data pages up to date
                memcpy(m->page_addr(shadow_local_page_ndx) + begin_offset, page_addr(local_page_ndx) + begin_offset,
                       end_offset - begin_offset);
                // A StaleIV shadow page now holds the freshest content, so it
                // can be promoted back to UpToDate.
                if (is(m->m_page_state[shadow_local_page_ndx], StaleIV)) {
                    set(m->m_page_state[shadow_local_page_ndx], UpToDate);
                    clear(m->m_page_state[shadow_local_page_ndx], StaleIV);
                }
            }
            else {
                // Shadow page wasn't current anyway; just flag its chunk for rescanning.
                m->mark_outdated(shadow_local_page_ndx);
            }
        }
    }
    set(m_page_state[local_page_ndx], Dirty);
    clear(m_page_state[local_page_ndx], Writable);
    clear(m_page_state[local_page_ndx], StaleIV);
    // Dirty pages must be visible to the page reclaimer's scan.
    size_t chunk_ndx = local_page_ndx >> page_to_chunk_shift;
    if (m_chunk_dont_scan[chunk_ndx])
        m_chunk_dont_scan[chunk_ndx] = 0;
}
744

745

746
// Debug-only consistency check: verify that an UpToDate page's in-memory
// content matches what decrypting the file yields (or, if another mapping has
// the page Dirty, matches that mapping's content). Terminates on mismatch.
// No-op in release builds and for pages that are not UpToDate.
void EncryptedFileMapping::validate_page(size_t local_page_ndx) noexcept
{
#ifdef REALM_DEBUG
    REALM_ASSERT(local_page_ndx < m_page_state.size());
    if (is_not(m_page_state[local_page_ndx], UpToDate))
        return;

    const size_t page_ndx_in_file = local_page_ndx + m_first_page;
    // If the page can't be decrypted from disk there is nothing to compare against.
    if (!m_file.cryptor.read(m_file.fd, off_t(page_ndx_in_file << m_page_shift), m_validate_buffer.get(),
                             static_cast<size_t>(1ULL << m_page_shift), m_observer))
        return;

    // A Dirty copy in another mapping supersedes the on-disk content as the
    // expected value for this page.
    for (size_t i = 0; i < m_file.mappings.size(); ++i) {
        EncryptedFileMapping* m = m_file.mappings[i];
        size_t shadow_mapping_local_ndx = page_ndx_in_file - m->m_first_page;
        if (m != this && m->contains_page(page_ndx_in_file) && is(m->m_page_state[shadow_mapping_local_ndx], Dirty)) {
            memcpy(m_validate_buffer.get(), m->page_addr(shadow_mapping_local_ndx),
                   static_cast<size_t>(1ULL << m_page_shift));
            break;
        }
    }

    if (memcmp(m_validate_buffer.get(), page_addr(local_page_ndx), static_cast<size_t>(1ULL << m_page_shift))) {
        std::cerr << "mismatch " << this << ": fd(" << m_file.fd << ")"
                  << "page(" << local_page_ndx << "/" << m_page_state.size() << ") " << m_validate_buffer.get() << " "
                  << page_addr(local_page_ndx) << std::endl;
        REALM_TERMINATE("");
    }
#else
    static_cast<void>(local_page_ndx);
#endif
}
778

779
// Debug-only: run validate_page() over every page of this mapping.
// No-op in release builds.
void EncryptedFileMapping::validate() noexcept
{
#ifdef REALM_DEBUG
    for (size_t page = 0, page_count = m_page_state.size(); page < page_count; ++page)
        validate_page(page);
#endif
}
787

788
void EncryptedFileMapping::reclaim_page(size_t page_ndx)
789
{
×
790
#ifdef _WIN32
791
    // On windows we don't know how to replace a page within a page range with a fresh one.
792
    // instead we clear it. If the system runs with same-page-merging, this will reduce
793
    // the number of used pages.
794
    memset(page_addr(page_ndx), 0, static_cast<size_t>(1) << m_page_shift);
795
#else
796
    // On Posix compatible, we can request a new page in the middle of an already
797
    // requested range, so that's what we do. This releases the backing store for the
798
    // old page and gives us a shared zero-page that we can later demand-allocate, thus
799
    // reducing the overall amount of used physical pages.
800
    void* addr = page_addr(page_ndx);
×
801
    void* addr2 = ::mmap(addr, 1 << m_page_shift, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
×
802
    if (addr != addr2) {
×
803
        if (addr2 == 0) {
×
804
            int err = errno;
×
805
            throw SystemError(err, get_errno_msg("using mmap() to clear page failed", err));
×
806
        }
×
807
        throw std::runtime_error("internal error in mmap()");
×
808
    }
×
809
#endif
×
810
}
×
811

812
/* This function is a bit convoluted. It reclaims pages, but only does a limited amount of work
 * each time it's called. It saves the progress in 'progress_index' so that it can resume later
 * from where it was stopped.
 *
 * The workload is composed of workunits, each unit signifying
 * 1) A scanning of the state of 4K pages
 * 2) One system call (to mmap to release a page and get a new one)
 * 3) A scanning of 1K entries in the "don't scan" array (corresponding to 4M pages)
 * Approximately
 */
void EncryptedFileMapping::reclaim_untouched(size_t& progress_index, size_t& work_limit) noexcept
{
    const auto scan_amount_per_workunit = 4096;
    bool contiguous_scan = false;
    size_t next_scan_payment = scan_amount_per_workunit;
    const size_t last_index = get_end_index();

    // Consume one workunit from the caller-supplied budget.
    auto done_some_work = [&]() {
        if (work_limit > 0)
            work_limit--;
    };

    // Reclaim a page if it is UpToDate but neither Touched, Dirty nor
    // Writable; always clear the Touched bit so a page must be re-touched
    // between scans to survive the next pass.
    auto visit_and_potentially_reclaim = [&](size_t page_ndx) {
        PageState& ps = m_page_state[page_ndx];
        if (is(ps, UpToDate)) {
            if (is_not(ps, Touched) && is_not(ps, Dirty) && is_not(ps, Writable)) {
                clear(ps, UpToDate);
                reclaim_page(page_ndx);
                m_num_decrypted--;
                done_some_work();
            }
            // An UpToDate page (reclaimed or not) breaks the "fully scanned
            // and empty" property of the current chunk.
            contiguous_scan = false;
        }
        clear(ps, Touched);
    };

    auto skip_chunk_if_possible = [&](size_t& page_ndx) // update vars corresponding to skipping a chunk if possible
    {
        size_t chunk_ndx = page_ndx >> page_to_chunk_shift;
        if (m_chunk_dont_scan[chunk_ndx]) {
            // skip to end of chunk
            page_ndx = ((chunk_ndx + 1) << page_to_chunk_shift) - 1;
            progress_index = m_first_page + page_ndx;
            // postpone next scan payment
            next_scan_payment += page_to_chunk_factor;
            return true;
        }
        else
            return false;
    };

    auto is_last_page_in_chunk = [](size_t page_ndx) {
        auto page_to_chunk_mask = page_to_chunk_factor - 1;
        return (page_ndx & page_to_chunk_mask) == page_to_chunk_mask;
    };
    auto is_first_page_in_chunk = [](size_t page_ndx) {
        auto page_to_chunk_mask = page_to_chunk_factor - 1;
        return (page_ndx & page_to_chunk_mask) == 0;
    };

    while (work_limit > 0 && progress_index < last_index) {
        size_t page_ndx = progress_index - m_first_page;
        if (!skip_chunk_if_possible(page_ndx)) {
            if (is_first_page_in_chunk(page_ndx)) {
                contiguous_scan = true;
            }
            visit_and_potentially_reclaim(page_ndx);
            // if we've scanned a full chunk contiguously, mark it as not needing scans
            if (is_last_page_in_chunk(page_ndx)) {
                if (contiguous_scan) {
                    m_chunk_dont_scan[page_ndx >> page_to_chunk_shift] = 1;
                }
                contiguous_scan = false;
            }
        }
        // account for work performed:
        if (page_ndx >= next_scan_payment) {
            next_scan_payment = page_ndx + scan_amount_per_workunit;
            done_some_work();
        }
        ++progress_index;
    }
    return;
}
896

897
// Encrypt and write every Dirty page of this mapping back to the file, then
// clear its Dirty bit. Non-dirty pages are instead cross-checked against the
// on-disk content (debug builds only, via validate_page). Does not fsync;
// see sync() for durability.
void EncryptedFileMapping::flush() noexcept
{
    const size_t num_dirty_pages = m_page_state.size();
    for (size_t local_page_ndx = 0; local_page_ndx < num_dirty_pages; ++local_page_ndx) {
        if (is_not(m_page_state[local_page_ndx], Dirty)) {
            validate_page(local_page_ndx);
            continue;
        }

        size_t page_ndx_in_file = local_page_ndx + m_first_page;
        m_file.cryptor.write(m_file.fd, off_t(page_ndx_in_file << m_page_shift), page_addr(local_page_ndx),
                             static_cast<size_t>(1ULL << m_page_shift), m_marker);
        clear(m_page_state[local_page_ndx], Dirty);
    }

    validate();
}
914

915
#ifdef _MSC_VER
916
#pragma warning(disable : 4297) // throw in noexcept
917
#endif
918
// Flush OS buffers for the file to stable storage. On Windows a failing
// FlushFileBuffers() throws (despite the noexcept — see the pragma above,
// which deliberately terminates on failure); on POSIX the fsync() result
// is not checked.
void EncryptedFileMapping::sync() noexcept
{
#ifdef _WIN32
    if (FlushFileBuffers(m_file.fd))
        return;
    throw std::system_error(GetLastError(), std::system_category(), "FlushFileBuffers() failed");
#else
    fsync(m_file.fd);
    // FIXME: on iOS/OSX fsync may not be enough to ensure crash safety.
    // Consider adding fcntl(F_FULLFSYNC). This most likely also applies to msync.
    //
    // See description of fsync on iOS here:
    // https://developer.apple.com/library/ios/documentation/System/Conceptual/ManPages_iPhoneOS/man2/fsync.2.html
    //
    // See also
    // https://developer.apple.com/library/ios/documentation/Cocoa/Conceptual/CoreData/Articles/cdPersistentStores.html
    // for a discussion of this related to core data.
#endif
}
937
#ifdef _MSC_VER
938
#pragma warning(default : 4297)
939
#endif
940

941
// Called after a write to [addr, addr + size) inside this mapping: pushes the
// modified bytes into every other mapping of the same file pages (via
// write_and_update_all) and marks the affected pages Dirty. All affected pages
// must already be UpToDate and Writable (from a prior read_barrier with
// to_modify=true).
void EncryptedFileMapping::write_barrier(const void* addr, size_t size) noexcept
{
    // Propagate changes to all other decrypted pages mapping the same memory

    REALM_ASSERT(m_access == File::access_ReadWrite);
    size_t first_accessed_local_page = get_local_index_of_address(addr);
    size_t first_offset = static_cast<const char*>(addr) - page_addr(first_accessed_local_page);
    // Last *inclusive* byte of the range; size == 0 degenerates to addr itself.
    const char* last_accessed_address = static_cast<const char*>(addr) + (size == 0 ? 0 : size - 1);
    size_t last_accessed_local_page = get_local_index_of_address(last_accessed_address);
    size_t pages_size = m_page_state.size();

    // propagate changes to first page (update may be partial, may also be to last page)
    if (first_accessed_local_page < pages_size) {
        REALM_ASSERT_EX(is(m_page_state[first_accessed_local_page], UpToDate),
                        m_page_state[first_accessed_local_page]);
        if (first_accessed_local_page == last_accessed_local_page) {
            size_t last_offset = last_accessed_address - page_addr(first_accessed_local_page);
            write_and_update_all(first_accessed_local_page, first_offset, last_offset + 1);
        }
        else
            write_and_update_all(first_accessed_local_page, first_offset, static_cast<size_t>(1) << m_page_shift);
    }
    // propagate changes to pages between first and last page (update only full pages)
    for (size_t idx = first_accessed_local_page + 1; idx < last_accessed_local_page && idx < pages_size; ++idx) {
        REALM_ASSERT(is(m_page_state[idx], UpToDate));
        write_and_update_all(idx, 0, static_cast<size_t>(1) << m_page_shift);
    }
    // propagate changes to the last page (update may be partial)
    if (first_accessed_local_page < last_accessed_local_page && last_accessed_local_page < pages_size) {
        REALM_ASSERT(is(m_page_state[last_accessed_local_page], UpToDate));
        size_t last_offset = last_accessed_address - page_addr(last_accessed_local_page);
        write_and_update_all(last_accessed_local_page, 0, last_offset + 1);
    }
}
975

976
// Ensure every page covering [addr, addr + size) is decrypted and UpToDate
// before the caller reads (or, with to_modify=true, writes) it. Pages are
// marked Touched so the reclaimer keeps them, and Writable when to_modify is
// set. `header_to_size`, when given, derives the real object size from the
// array header located at addr (the header itself lies in the first page).
// The `required` byte count passed to refresh_page allows short reads at the
// end of the file when the caller is about to overwrite the page anyway
// (to_modify => required 0).
void EncryptedFileMapping::read_barrier(const void* addr, size_t size, Header_to_size header_to_size, bool to_modify)
{
    size_t first_accessed_local_page = get_local_index_of_address(addr);
    size_t page_size = 1ULL << m_page_shift;
    size_t required = get_offset_of_address(addr) + size;
    {
        // make sure the first page is available
        PageState& ps = m_page_state[first_accessed_local_page];
        if (is_not(ps, Touched))
            set(ps, Touched);
        if (is_not(ps, UpToDate))
            refresh_page(first_accessed_local_page, to_modify ? 0 : required);
        if (to_modify)
            set(ps, Writable);
    }

    // force the page reclaimer to look into pages in this chunk:
    size_t chunk_ndx = first_accessed_local_page >> page_to_chunk_shift;
    if (m_chunk_dont_scan[chunk_ndx])
        m_chunk_dont_scan[chunk_ndx] = 0;

    if (header_to_size) {
        // We know it's an array, and array headers are 8-byte aligned, so it is
        // included in the first page which was handled above.
        size = header_to_size(static_cast<const char*>(addr));
        required = get_offset_of_address(addr) + size;
    }

    size_t last_idx = get_local_index_of_address(addr, size == 0 ? 0 : size - 1);
    size_t pages_size = m_page_state.size();

    // We already checked first_accessed_local_page above, so we start the loop
    // at first_accessed_local_page + 1 to check the following page.
    for (size_t idx = first_accessed_local_page + 1; idx <= last_idx && idx < pages_size; ++idx) {
        // `required` counts down as we pass each page boundary; for the last
        // page it is the number of bytes still needed within that page.
        required -= page_size;
        // force the page reclaimer to look into pages in this chunk
        chunk_ndx = idx >> page_to_chunk_shift;
        if (m_chunk_dont_scan[chunk_ndx])
            m_chunk_dont_scan[chunk_ndx] = 0;

        PageState& ps = m_page_state[idx];
        if (is_not(ps, Touched))
            set(ps, Touched);
        if (is_not(ps, UpToDate))
            refresh_page(idx, to_modify ? 0 : required);
        if (to_modify)
            set(ps, Writable);
    }
}
1025

1026
void EncryptedFileMapping::extend_to(size_t offset, size_t new_size)
1027
{
363✔
1028
    REALM_ASSERT(new_size % (1ULL << m_page_shift) == 0);
363✔
1029
    size_t num_pages = new_size >> m_page_shift;
363✔
1030
    m_page_state.resize(num_pages, PageState::Clean);
363✔
1031
    m_chunk_dont_scan.resize((num_pages + page_to_chunk_factor - 1) >> page_to_chunk_shift, false);
363✔
1032
    m_file.cryptor.set_file_size((off_t)(offset + new_size));
363✔
1033
}
363✔
1034

1035
// Re-point this mapping at a new memory region (`new_addr`) of `new_size`
// bytes at file offset `new_file_offset`, both page-aligned. Any dirty pages
// of the old mapping are flushed first, then all per-page state is reset, so
// nothing is considered decrypted afterwards.
void EncryptedFileMapping::set(void* new_addr, size_t new_size, size_t new_file_offset)
{
    REALM_ASSERT(new_file_offset % (1ULL << m_page_shift) == 0);
    REALM_ASSERT(new_size % (1ULL << m_page_shift) == 0);

    // This seems dangerous - correct operation in a setting with multiple (partial)
    // mappings of the same file would rely on ordering of individual mapping requests.
    // Currently we only ever extend the file - but when we implement continuous defrag,
    // this design should be revisited.
    m_file.cryptor.set_file_size(off_t(new_size + new_file_offset));

    // Flush while m_addr still refers to the old region.
    flush();
    m_addr = new_addr;

    m_first_page = new_file_offset >> m_page_shift;
    size_t num_pages = new_size >> m_page_shift;

    m_num_decrypted = 0;
    m_page_state.clear();
    m_chunk_dont_scan.clear();

    m_page_state.resize(num_pages, PageState(0));
    m_chunk_dont_scan.resize((num_pages + page_to_chunk_factor - 1) >> page_to_chunk_shift, false);
}
1059

1060
// Translate an on-disk (encrypted) file size to the size visible to readers
// of the decrypted data; zero maps to zero.
File::SizeType encrypted_size_to_data_size(File::SizeType size) noexcept
{
    return size == 0 ? 0 : fake_offset(size);
}
1066

1067
// Translate a logical (decrypted) data size to the corresponding on-disk
// (encrypted) size, rounding the data size up to a whole number of pages.
File::SizeType data_size_to_encrypted_size(File::SizeType size) noexcept
{
    const size_t ps = page_size();
    const auto page_aligned = (size + ps - 1) & ~(ps - 1); // round up to page multiple
    return real_offset(page_aligned);
}
1072
} // namespace realm::util
1073
#else
1074

1075
namespace realm::util {
1076
// Without encryption the on-disk and logical sizes coincide.
File::SizeType encrypted_size_to_data_size(File::SizeType size) noexcept
{
    return size;
}
1080

1081
// Without encryption the on-disk and logical sizes coincide.
File::SizeType data_size_to_encrypted_size(File::SizeType size) noexcept
{
    return size;
}
1085
} // namespace realm::util
1086
#endif // REALM_ENABLE_ENCRYPTION
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc