
OSGeo / gdal / build 13705748401

06 Mar 2025 06:36PM UTC coverage: 70.348% (-0.001%) from 70.349%

Pull Request #11928: gdal reproject: add a --size argument, add --bbox-crs as well
Merge 00ef26df0 into 6d20ab96f (github / web-flow)

26 of 28 new or added lines in 1 file covered. (92.86%)
56 existing lines in 32 files now uncovered.
552790 of 785788 relevant lines covered (70.35%)
221695.43 hits per line

Source File: /port/cpl_virtualmem.cpp (file coverage: 40.25%)

/**********************************************************************
 *
 * Name:     cpl_virtualmem.cpp
 * Project:  CPL - Common Portability Library
 * Purpose:  Virtual memory
 * Author:   Even Rouault, <even dot rouault at spatialys.com>
 *
 **********************************************************************
 * Copyright (c) 2014, Even Rouault <even dot rouault at spatialys.com>
 *
 * SPDX-License-Identifier: MIT
 ****************************************************************************/

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

// To have a 64-bit off_t, possibly.
#ifndef _FILE_OFFSET_BITS
#define _FILE_OFFSET_BITS 64
#endif

#include "cpl_virtualmem.h"

#include <cassert>
// TODO(schwehr): Should ucontext.h be included?
// #include <ucontext.h>

#include "cpl_atomic_ops.h"
#include "cpl_config.h"
#include "cpl_conv.h"
#include "cpl_error.h"
#include "cpl_multiproc.h"

#ifdef NDEBUG
// NDEBUG (release): Ignore the result.
#define IGNORE_OR_ASSERT_IN_DEBUG(expr) CPL_IGNORE_RET_VAL((expr))
#else
// Debug: Assert.
#define IGNORE_OR_ASSERT_IN_DEBUG(expr) assert((expr))
#endif

#if defined(__linux) && defined(CPL_MULTIPROC_PTHREAD)
#ifndef HAVE_5ARGS_MREMAP
// FIXME? gcore/virtualmem.py tests fail/crash when HAVE_5ARGS_MREMAP
// is not defined.
#warning "HAVE_5ARGS_MREMAP not found. Disabling HAVE_VIRTUAL_MEM_VMA"
#else
#define HAVE_VIRTUAL_MEM_VMA
#endif
#endif

#if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
#include <unistd.h>    // read, write, close, pipe, sysconf
#include <sys/mman.h>  // mmap, munmap, mremap
#endif

typedef enum
{
    VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED,
    VIRTUAL_MEM_TYPE_VMA
} CPLVirtualMemType;

struct CPLVirtualMem
{
    CPLVirtualMemType eType;

    struct CPLVirtualMem *pVMemBase;
    int nRefCount;

    CPLVirtualMemAccessMode eAccessMode;

    size_t nPageSize;
    // Aligned on nPageSize.
    void *pData;
    // Returned by mmap(), potentially lower than pData.
    void *pDataToFree;
    // Requested size (unrounded).
    size_t nSize;

    bool bSingleThreadUsage;

    void *pCbkUserData;
    CPLVirtualMemFreeUserData pfnFreeUserData;
};

#ifdef HAVE_VIRTUAL_MEM_VMA

#include <sys/select.h>  // select
#include <sys/stat.h>    // open()
#include <sys/types.h>   // open()
#include <errno.h>
#include <fcntl.h>   // open()
#include <signal.h>  // sigaction
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#ifndef HAVE_5ARGS_MREMAP
#include "cpl_atomic_ops.h"
#endif

/* Linux specific (i.e. non POSIX compliant) features used:
   - returning from a SIGSEGV handler is clearly a POSIX violation, but in
     practice most POSIX systems should be happy.
   - mremap() with 5 args is Linux specific. It is used when the user
     callback is invited to fill a page: we currently mmap() a
     writable page, let the callback fill it, and afterwards mremap() that
     temporary page onto the location where the fault occurred.
     If we have no mremap(), the workaround is to pause other threads that
     consume the current view while we are updating the faulted page, otherwise
     a non-paused thread could access a page that is in the middle of being
     filled... The way we pause those threads is quite original: we send them
     a SIGUSR1 and wait until they are stuck in the temporary SIGUSR1 handler...
   - MAP_ANONYMOUS isn't documented in POSIX, but very commonly found
     (sometimes called MAP_ANON)
   - dealing with the limitation of the number of memory mapping regions,
     and the 65536 limit.
   - other things I've not identified
*/
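
// Sketch of the 5-arg mremap() trick described above (illustration only;
// "fault_page_addr" is a hypothetical name for the page-aligned faulting
// address; see CPLVirtualMemAddPage() below for the real sequence):
//   void *tmp = mmap(nullptr, pagesize, PROT_READ | PROT_WRITE,
//                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   // ... user callback fills tmp ...
//   mremap(tmp, pagesize, pagesize, MREMAP_MAYMOVE | MREMAP_FIXED,
//          fault_page_addr);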

#define ALIGN_DOWN(p, pagesize)                                                \
    reinterpret_cast<void *>((reinterpret_cast<GUIntptr_t>(p)) / (pagesize) *  \
                             (pagesize))
#define ALIGN_UP(p, pagesize)                                                  \
    reinterpret_cast<void *>(                                                  \
        (reinterpret_cast<GUIntptr_t>(p) + (pagesize)-1) / (pagesize) *        \
        (pagesize))
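// For instance, with pagesize == 0x1000, ALIGN_DOWN(0x12345, 0x1000)
// yields 0x12000 and ALIGN_UP(0x12345, 0x1000) yields 0x13000.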

#define DEFAULT_PAGE_SIZE (256 * 256)
#define MAXIMUM_PAGE_SIZE (32 * 1024 * 1024)
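// i.e. 64 KiB logical pages by default, capped at 32 MiB.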

// Linux Kernel limit.
#define MAXIMUM_COUNT_OF_MAPPINGS 65536

#define BYEBYE_ADDR (reinterpret_cast<void *>(~static_cast<size_t>(0)))

#define MAPPING_FOUND "yeah"
#define MAPPING_NOT_FOUND "doh!"

#define SET_BIT(ar, bitnumber) ar[(bitnumber) / 8] |= 1 << ((bitnumber) % 8)
#define UNSET_BIT(ar, bitnumber)                                               \
    ar[(bitnumber) / 8] &= ~(1 << ((bitnumber) % 8))
#define TEST_BIT(ar, bitnumber) (ar[(bitnumber) / 8] & (1 << ((bitnumber) % 8)))
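// These macros treat a byte array as a packed bitset; e.g. SET_BIT(ar, 10)
// sets bit 2 of ar[1], since 10 / 8 == 1 and 10 % 8 == 2.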

typedef enum
{
    OP_LOAD,
    OP_STORE,
    OP_MOVS_RSI_RDI,
    OP_UNKNOWN
} OpType;

typedef struct
{
    CPLVirtualMem sBase;

    GByte *pabitMappedPages;
    GByte *pabitRWMappedPages;

    int nCacheMaxSizeInPages;  // Maximum size of page array.
    int *panLRUPageIndices;    // Array with indices of cached pages.
    int iLRUStart;             // Index in array where to
                               // write next page index.
    int nLRUSize;              // Current size of the array.

    int iLastPage;  // Last page accessed.
    int nRetry;     // Number of consecutive
                    // retries to that last page.

    CPLVirtualMemCachePageCbk pfnCachePage;      // Called when a page is
                                                 // mapped.
    CPLVirtualMemUnCachePageCbk pfnUnCachePage;  // Called when a (writable)
                                                 // page is unmapped.

#ifndef HAVE_5ARGS_MREMAP
    CPLMutex *hMutexThreadArray;
    int nThreads;
    pthread_t *pahThreads;
#endif
} CPLVirtualMemVMA;

typedef struct
{
    // hVirtualMemManagerMutex protects the 2 following variables.
    CPLVirtualMemVMA **pasVirtualMem;
    int nVirtualMemCount;

    int pipefd_to_thread[2];
    int pipefd_from_thread[2];
    int pipefd_wait_thread[2];
    CPLJoinableThread *hHelperThread;

    // Using sigaction without testing HAVE_SIGACTION since we are in a Linux
    // specific code path
    struct sigaction oldact;
} CPLVirtualMemManager;

typedef struct
{
    void *pFaultAddr;
    OpType opType;
    pthread_t hRequesterThread;
} CPLVirtualMemMsgToWorkerThread;

// TODO: Singletons.
static CPLVirtualMemManager *pVirtualMemManager = nullptr;
static CPLMutex *hVirtualMemManagerMutex = nullptr;

static bool CPLVirtualMemManagerInit();

#ifdef DEBUG_VIRTUALMEM

/************************************************************************/
/*                           fprintfstderr()                            */
/************************************************************************/

// This function may be called from signal handlers where most functions
// from the C library are unsafe to be called. fprintf() is clearly one
// of those functions (see
// http://stackoverflow.com/questions/4554129/linux-glibc-can-i-use-fprintf-in-signal-handler)
// vsnprintf() is *probably* safer with respect to that (but there is no
// guarantee though).
// write() is async-signal-safe.
static void fprintfstderr(const char *fmt, ...)
{
    char buffer[80] = {};
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buffer, sizeof(buffer), fmt, ap);
    va_end(ap);
    int offset = 0;
    while (true)
    {
        const size_t nSizeToWrite = strlen(buffer + offset);
        int ret = static_cast<int>(write(2, buffer + offset, nSizeToWrite));
        if (ret < 0 && errno == EINTR)
        {
        }
        else
        {
            if (ret == static_cast<int>(nSizeToWrite))
                break;
            offset += ret;
        }
    }
}

#endif

/************************************************************************/
/*              CPLVirtualMemManagerRegisterVirtualMem()                */
/************************************************************************/

static bool CPLVirtualMemManagerRegisterVirtualMem(CPLVirtualMemVMA *ctxt)
{
    if (!CPLVirtualMemManagerInit())
        return false;

    bool bSuccess = true;
    IGNORE_OR_ASSERT_IN_DEBUG(ctxt);
    CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
    CPLVirtualMemVMA **pasVirtualMemNew = static_cast<CPLVirtualMemVMA **>(
        VSI_REALLOC_VERBOSE(pVirtualMemManager->pasVirtualMem,
                            sizeof(CPLVirtualMemVMA *) *
                                (pVirtualMemManager->nVirtualMemCount + 1)));
    if (pasVirtualMemNew == nullptr)
    {
        bSuccess = false;
    }
    else
    {
        pVirtualMemManager->pasVirtualMem = pasVirtualMemNew;
        pVirtualMemManager
            ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount] = ctxt;
        pVirtualMemManager->nVirtualMemCount++;
    }
    CPLReleaseMutex(hVirtualMemManagerMutex);
    return bSuccess;
}

/************************************************************************/
/*               CPLVirtualMemManagerUnregisterVirtualMem()             */
/************************************************************************/

static void CPLVirtualMemManagerUnregisterVirtualMem(CPLVirtualMemVMA *ctxt)
{
    CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
    for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
    {
        if (pVirtualMemManager->pasVirtualMem[i] == ctxt)
        {
            if (i < pVirtualMemManager->nVirtualMemCount - 1)
            {
                memmove(pVirtualMemManager->pasVirtualMem + i,
                        pVirtualMemManager->pasVirtualMem + i + 1,
                        sizeof(CPLVirtualMem *) *
                            (pVirtualMemManager->nVirtualMemCount - i - 1));
            }
            pVirtualMemManager->nVirtualMemCount--;
            break;
        }
    }
    CPLReleaseMutex(hVirtualMemManagerMutex);
}

/************************************************************************/
/*                           CPLVirtualMemNew()                         */
/************************************************************************/

static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt);

CPLVirtualMem *CPLVirtualMemNew(size_t nSize, size_t nCacheSize,
                                size_t nPageSizeHint, int bSingleThreadUsage,
                                CPLVirtualMemAccessMode eAccessMode,
                                CPLVirtualMemCachePageCbk pfnCachePage,
                                CPLVirtualMemUnCachePageCbk pfnUnCachePage,
                                CPLVirtualMemFreeUserData pfnFreeUserData,
                                void *pCbkUserData)
{
    size_t nMinPageSize = CPLGetPageSize();
    size_t nPageSize = DEFAULT_PAGE_SIZE;

    IGNORE_OR_ASSERT_IN_DEBUG(nSize > 0);
    IGNORE_OR_ASSERT_IN_DEBUG(pfnCachePage != nullptr);

    if (nPageSizeHint >= nMinPageSize && nPageSizeHint <= MAXIMUM_PAGE_SIZE)
    {
        if ((nPageSizeHint % nMinPageSize) == 0)
            nPageSize = nPageSizeHint;
        else
        {
            int nbits = 0;
            nPageSize = static_cast<size_t>(nPageSizeHint);
            do
            {
                nPageSize >>= 1;
                nbits++;
            } while (nPageSize > 0);
            nPageSize = static_cast<size_t>(1) << (nbits - 1);
            if (nPageSize < static_cast<size_t>(nPageSizeHint))
                nPageSize <<= 1;
        }
    }
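    // The else branch above rounds the hint up to the next power of two:
    // e.g. a hint of 12345 gives nbits == 14 and 1 << 13 == 8192 < 12345,
    // hence a page size of 16384.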

    if ((nPageSize % nMinPageSize) != 0)
        nPageSize = nMinPageSize;

    if (nCacheSize > nSize)
        nCacheSize = nSize;
    else if (nCacheSize == 0)
        nCacheSize = 1;

    int nMappings = 0;

    // Linux specific:
    // Count the number of existing memory mappings.
    FILE *f = fopen("/proc/self/maps", "rb");
    if (f != nullptr)
    {
        char buffer[80] = {};
        while (fgets(buffer, sizeof(buffer), f) != nullptr)
            nMappings++;
        fclose(f);
    }
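    // Note: a /proc/self/maps line longer than the 80-byte buffer is read by
    // several fgets() calls and thus counted more than once; the overestimate
    // only makes the page-size choice below more conservative.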

    size_t nCacheMaxSizeInPages = 0;
    while (true)
    {
        // /proc/self/maps must not have more than 65K lines.
        nCacheMaxSizeInPages = (nCacheSize + 2 * nPageSize - 1) / nPageSize;
        if (nCacheMaxSizeInPages >
            static_cast<size_t>((MAXIMUM_COUNT_OF_MAPPINGS * 9 / 10) -
                                nMappings))
            nPageSize <<= 1;
        else
            break;
    }
    size_t nRoundedMappingSize =
        ((nSize + 2 * nPageSize - 1) / nPageSize) * nPageSize;
    void *pData = mmap(nullptr, nRoundedMappingSize, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pData == MAP_FAILED)
    {
        perror("mmap");
        return nullptr;
    }
    CPLVirtualMemVMA *ctxt = static_cast<CPLVirtualMemVMA *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMemVMA)));
    if (ctxt == nullptr)
    {
        munmap(pData, nRoundedMappingSize);
        return nullptr;
    }
    ctxt->sBase.nRefCount = 1;
    ctxt->sBase.eType = VIRTUAL_MEM_TYPE_VMA;
    ctxt->sBase.eAccessMode = eAccessMode;
    ctxt->sBase.pDataToFree = pData;
    ctxt->sBase.pData = ALIGN_UP(pData, nPageSize);
    ctxt->sBase.nPageSize = nPageSize;
    ctxt->sBase.nSize = nSize;
    ctxt->sBase.bSingleThreadUsage = CPL_TO_BOOL(bSingleThreadUsage);
    ctxt->sBase.pfnFreeUserData = pfnFreeUserData;
    ctxt->sBase.pCbkUserData = pCbkUserData;

    ctxt->pabitMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    ctxt->pabitRWMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitRWMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    // Need at least 2 pages in case a rep movs instruction
    // operates in the view.
    ctxt->nCacheMaxSizeInPages = static_cast<int>(nCacheMaxSizeInPages);
    ctxt->panLRUPageIndices = static_cast<int *>(
        VSI_MALLOC_VERBOSE(ctxt->nCacheMaxSizeInPages * sizeof(int)));
    if (ctxt->panLRUPageIndices == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    ctxt->iLRUStart = 0;
    ctxt->nLRUSize = 0;
    ctxt->iLastPage = -1;
    ctxt->nRetry = 0;
    ctxt->pfnCachePage = pfnCachePage;
    ctxt->pfnUnCachePage = pfnUnCachePage;

#ifndef HAVE_5ARGS_MREMAP
    if (!ctxt->sBase.bSingleThreadUsage)
    {
        ctxt->hMutexThreadArray = CPLCreateMutex();
        IGNORE_OR_ASSERT_IN_DEBUG(ctxt->hMutexThreadArray != nullptr);
        CPLReleaseMutex(ctxt->hMutexThreadArray);
        ctxt->nThreads = 0;
        ctxt->pahThreads = nullptr;
    }
#endif

    if (!CPLVirtualMemManagerRegisterVirtualMem(ctxt))
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }

    return reinterpret_cast<CPLVirtualMem *>(ctxt);
}
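
// Illustrative usage sketch (hypothetical names; the callback shape is
// inferred from the pfnUnCachePage invocations elsewhere in this file):
//
//   static void MyCachePage(CPLVirtualMem * /*ctxt*/, size_t nOffset,
//                           void *pPageToFill, size_t nToFill, void *pUser)
//   {
//       // Produce nToFill bytes of data for view offset nOffset.
//       memset(pPageToFill, 0, nToFill);
//   }
//
//   CPLVirtualMem *vm =
//       CPLVirtualMemNew(nSize, nCacheSize, /* nPageSizeHint = */ 0,
//                        /* bSingleThreadUsage = */ false,
//                        VIRTUALMEM_READONLY, MyCachePage, nullptr, nullptr,
//                        nullptr);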

/************************************************************************/
/*                  CPLVirtualMemFreeFileMemoryMapped()                 */
/************************************************************************/

static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt)
{
    CPLVirtualMemManagerUnregisterVirtualMem(ctxt);

    size_t nRoundedMappingSize =
        ((ctxt->sBase.nSize + 2 * ctxt->sBase.nPageSize - 1) /
         ctxt->sBase.nPageSize) *
        ctxt->sBase.nPageSize;
    if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
        ctxt->pabitRWMappedPages != nullptr && ctxt->pfnUnCachePage != nullptr)
    {
        for (size_t i = 0; i < nRoundedMappingSize / ctxt->sBase.nPageSize; i++)
        {
            if (TEST_BIT(ctxt->pabitRWMappedPages, i))
            {
                void *addr = static_cast<char *>(ctxt->sBase.pData) +
                             i * ctxt->sBase.nPageSize;
                ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                     i * ctxt->sBase.nPageSize, addr,
                                     ctxt->sBase.nPageSize,
                                     ctxt->sBase.pCbkUserData);
            }
        }
    }
    int nRet = munmap(ctxt->sBase.pDataToFree, nRoundedMappingSize);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    CPLFree(ctxt->pabitMappedPages);
    CPLFree(ctxt->pabitRWMappedPages);
    CPLFree(ctxt->panLRUPageIndices);
#ifndef HAVE_5ARGS_MREMAP
    if (!ctxt->sBase.bSingleThreadUsage)
    {
        CPLFree(ctxt->pahThreads);
        CPLDestroyMutex(ctxt->hMutexThreadArray);
    }
#endif
}

#ifndef HAVE_5ARGS_MREMAP

static volatile int nCountThreadsInSigUSR1 = 0;
static volatile int nWaitHelperThread = 0;

/************************************************************************/
/*                   CPLVirtualMemSIGUSR1Handler()                      */
/************************************************************************/

static void CPLVirtualMemSIGUSR1Handler(int /* signum_unused */,
                                        siginfo_t * /* the_info_unused */,
                                        void * /* the_ctxt_unused */)
{
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
    // Rouault guesses this is only POSIX correct if it is implemented by an
    // intrinsic.
    CPLAtomicInc(&nCountThreadsInSigUSR1);
    while (nWaitHelperThread)
        // Not explicitly indicated as async-signal-safe, but hopefully ok.
        usleep(1);
    CPLAtomicDec(&nCountThreadsInSigUSR1);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
}
#endif

/************************************************************************/
/*                      CPLVirtualMemDeclareThread()                    */
/************************************************************************/

void CPLVirtualMemDeclareThread(CPLVirtualMem *ctxt)
{
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        return;
#ifndef HAVE_5ARGS_MREMAP
    CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
    IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
    CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
    ctxtVMA->pahThreads = static_cast<pthread_t *>(CPLRealloc(
        ctxtVMA->pahThreads, (ctxtVMA->nThreads + 1) * sizeof(pthread_t)));
    ctxtVMA->pahThreads[ctxtVMA->nThreads] = pthread_self();
    ctxtVMA->nThreads++;

    CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
#endif
}

/************************************************************************/
/*                     CPLVirtualMemUnDeclareThread()                   */
/************************************************************************/

void CPLVirtualMemUnDeclareThread(CPLVirtualMem *ctxt)
{
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        return;
#ifndef HAVE_5ARGS_MREMAP
    CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
    pthread_t self = pthread_self();
    IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
    CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
    for (int i = 0; i < ctxtVMA->nThreads; i++)
    {
        if (ctxtVMA->pahThreads[i] == self)
        {
            if (i < ctxtVMA->nThreads - 1)
                memmove(ctxtVMA->pahThreads + i, ctxtVMA->pahThreads + i + 1,
                        (ctxtVMA->nThreads - 1 - i) * sizeof(pthread_t));
            ctxtVMA->nThreads--;
            break;
        }
    }

    CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
#endif
}

/************************************************************************/
/*                     CPLVirtualMemGetPageToFill()                     */
/************************************************************************/

// Must be paired with CPLVirtualMemAddPage.
static void *CPLVirtualMemGetPageToFill(CPLVirtualMemVMA *ctxt,
                                        void *start_page_addr)
{
    void *pPageToFill = nullptr;

    if (ctxt->sBase.bSingleThreadUsage)
    {
        pPageToFill = start_page_addr;
        const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                  PROT_READ | PROT_WRITE);
        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    }
    else
    {
#ifndef HAVE_5ARGS_MREMAP
        CPLAcquireMutex(ctxt->hMutexThreadArray, 1000.0);
        if (ctxt->nThreads == 1)
        {
            pPageToFill = start_page_addr;
            const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                      PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        else
#endif
        {
            // Allocate a temporary writable page that the user
            // callback can fill.
            pPageToFill =
                mmap(nullptr, ctxt->sBase.nPageSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            IGNORE_OR_ASSERT_IN_DEBUG(pPageToFill != MAP_FAILED);
        }
    }
    return pPageToFill;
}

/************************************************************************/
/*                        CPLVirtualMemAddPage()                        */
/************************************************************************/

static void CPLVirtualMemAddPage(CPLVirtualMemVMA *ctxt, void *target_addr,
                                 void *pPageToFill, OpType opType,
                                 pthread_t hRequesterThread)
{
    const int iPage =
        static_cast<int>((static_cast<char *>(target_addr) -
                          static_cast<char *>(ctxt->sBase.pData)) /
                         ctxt->sBase.nPageSize);
    if (ctxt->nLRUSize == ctxt->nCacheMaxSizeInPages)
    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("uncaching page %d\n", iPage);
#endif
        int nOldPage = ctxt->panLRUPageIndices[ctxt->iLRUStart];
        void *addr = static_cast<char *>(ctxt->sBase.pData) +
                     nOldPage * ctxt->sBase.nPageSize;
        if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
            ctxt->pfnUnCachePage != nullptr &&
            TEST_BIT(ctxt->pabitRWMappedPages, nOldPage))
        {
            size_t nToBeEvicted = ctxt->sBase.nPageSize;
            if (static_cast<char *>(addr) + nToBeEvicted >=
                static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
                nToBeEvicted = static_cast<char *>(ctxt->sBase.pData) +
                               ctxt->sBase.nSize - static_cast<char *>(addr);

            ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                 nOldPage * ctxt->sBase.nPageSize, addr,
                                 nToBeEvicted, ctxt->sBase.pCbkUserData);
        }
        // "Free" the least recently used page.
        UNSET_BIT(ctxt->pabitMappedPages, nOldPage);
        UNSET_BIT(ctxt->pabitRWMappedPages, nOldPage);
        // Free the old page.
        // Not sure how portable it is to do that that way.
        const void *const pRet =
            mmap(addr, ctxt->sBase.nPageSize, PROT_NONE,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == addr);
        // cppcheck-suppress memleak
    }
    ctxt->panLRUPageIndices[ctxt->iLRUStart] = iPage;
    ctxt->iLRUStart = (ctxt->iLRUStart + 1) % ctxt->nCacheMaxSizeInPages;
    if (ctxt->nLRUSize < ctxt->nCacheMaxSizeInPages)
    {
        ctxt->nLRUSize++;
    }
    SET_BIT(ctxt->pabitMappedPages, iPage);

    if (ctxt->sBase.bSingleThreadUsage)
    {
        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            const int nRet =
                mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
    }
    else
    {
#ifdef HAVE_5ARGS_MREMAP
        (void)hRequesterThread;

        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            // Turn the temporary page read-only before remapping it.
            // Only turn it writable when a new fault occurs (and the
            // mapping is writable).
            const int nRet =
                mprotect(pPageToFill, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        /* Can now remap the pPageToFill onto the target page */
        const void *const pRet =
            mremap(pPageToFill, ctxt->sBase.nPageSize, ctxt->sBase.nPageSize,
                   MREMAP_MAYMOVE | MREMAP_FIXED, target_addr);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == target_addr);

#else
        if (ctxt->nThreads > 1)
        {
            /* Pause threads that share this mem view */
            CPLAtomicInc(&nWaitHelperThread);

            /* Install temporary SIGUSR1 signal handler */
            struct sigaction act, oldact;
            act.sa_sigaction = CPLVirtualMemSIGUSR1Handler;
            sigemptyset(&act.sa_mask);
            /* We don't want the sigsegv handler to be called when we are */
            /* running the sigusr1 handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaddset(&act.sa_mask, SIGSEGV) == 0);
            act.sa_flags = 0;
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &act, &oldact) == 0);

            for (int i = 0; i < ctxt->nThreads; i++)
            {
                if (ctxt->pahThreads[i] != hRequesterThread)
                {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("stopping thread %X\n", ctxt->pahThreads[i]);
#endif
                    IGNORE_OR_ASSERT_IN_DEBUG(
                        pthread_kill(ctxt->pahThreads[i], SIGUSR1) == 0);
                }
            }

            /* Wait until they are all paused */
            while (nCountThreadsInSigUSR1 != ctxt->nThreads - 1)
                usleep(1);

            /* Restore old SIGUSR1 signal handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &oldact, nullptr) ==
                                      0);

            int nRet = mprotect(target_addr, ctxt->sBase.nPageSize,
                                PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("memcpying page %d\n", iPage);
#endif
            memcpy(target_addr, pPageToFill, ctxt->sBase.nPageSize);

            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else
            {
                nRet = mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
            }

            /* Wake up sleeping threads */
            CPLAtomicDec(&nWaitHelperThread);
            while (nCountThreadsInSigUSR1 != 0)
                usleep(1);

            IGNORE_OR_ASSERT_IN_DEBUG(
                munmap(pPageToFill, ctxt->sBase.nPageSize) == 0);
        }
        else
        {
            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
            {
                const int nRet2 =
                    mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet2 == 0);
            }
        }

        CPLReleaseMutex(ctxt->hMutexThreadArray);
#endif
    }
    // cppcheck-suppress memleak
}

/************************************************************************/
/*                    CPLVirtualMemGetOpTypeImm()                       */
/************************************************************************/

#if defined(__x86_64__) || defined(__i386__)
static OpType CPLVirtualMemGetOpTypeImm(GByte val_rip)
{
    OpType opType = OP_UNKNOWN;
    if ((/*val_rip >= 0x00 &&*/ val_rip <= 0x07) ||
        (val_rip >= 0x40 && val_rip <= 0x47))  // add $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x08 && val_rip <= 0x0f) ||
        (val_rip >= 0x48 && val_rip <= 0x4f))  // or $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x20 && val_rip <= 0x27) ||
        (val_rip >= 0x60 && val_rip <= 0x67))  // and $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x28 && val_rip <= 0x2f) ||
        (val_rip >= 0x68 && val_rip <= 0x6f))  // sub $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x30 && val_rip <= 0x37) ||
        (val_rip >= 0x70 && val_rip <= 0x77))  // xor $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x38 && val_rip <= 0x3f) ||
        (val_rip >= 0x78 && val_rip <= 0x7f))  // cmp $, (X)
        opType = OP_LOAD;
    return opType;
}
#endif

/************************************************************************/
/*                      CPLVirtualMemGetOpType()                        */
/************************************************************************/

// Exhaustiveness is not needed. This is just a hint for an optimization:
// if the fault occurs on a store operation, then we can directly put
// the page in writable mode if the mapping allows it.
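// For example, a fault on "mov %eax,(%rax)" (opcode 0x89) is decoded as
// OP_STORE below, so the freshly filled page can be remapped read-write
// right away instead of taking a second fault on the write.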

#if defined(__x86_64__) || defined(__i386__)
static OpType CPLVirtualMemGetOpType(const GByte *rip)
{
    OpType opType = OP_UNKNOWN;

#if defined(__x86_64__) || defined(__i386__)
    switch (rip[0])
    {
        case 0x00: /* add %al,(%rax) */
        case 0x01: /* add %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x02: /* add (%rax),%al */
        case 0x03: /* add (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x08: /* or %al,(%rax) */
        case 0x09: /* or %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x0a: /* or (%rax),%al */
        case 0x0b: /* or (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x0f:
        {
            switch (rip[1])
            {
                case 0xb6: /* movzbl (%rax),%eax */
                case 0xb7: /* movzwl (%rax),%eax */
                case 0xbe: /* movsbl (%rax),%eax */
                case 0xbf: /* movswl (%rax),%eax */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
        case 0xc6: /* movb $,(%rax) */
        case 0xc7: /* movl $,(%rax) */
            opType = OP_STORE;
            break;

        case 0x20: /* and %al,(%rax) */
        case 0x21: /* and %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x22: /* and (%rax),%al */
        case 0x23: /* and (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x28: /* sub %al,(%rax) */
        case 0x29: /* sub %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x2a: /* sub (%rax),%al */
        case 0x2b: /* sub (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x30: /* xor %al,(%rax) */
        case 0x31: /* xor %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x32: /* xor (%rax),%al */
        case 0x33: /* xor (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x38: /* cmp %al,(%rax) */
        case 0x39: /* cmp %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x40:
        {
            switch (rip[1])
            {
                case 0x00: /* add %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x28: /* sub %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x3a: /* cmp (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x8a: /* mov (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
#if defined(__x86_64__)
        case 0x41: /* reg=%al/%eax, X=%r8 */
        case 0x42: /* reg=%al/%eax, X=%rax,%r8,1 */
        case 0x43: /* reg=%al/%eax, X=%r8,%r8,1 */
        case 0x44: /* reg=%r8b/%r8w, X = %rax */
        case 0x45: /* reg=%r8b/%r8w, X = %r8 */
        case 0x46: /* reg=%r8b/%r8w, X = %rax,%r8,1 */
        case 0x47: /* reg=%r8b/%r8w, X = %r8,%r8,1 */
        {
            switch (rip[1])
            {
                case 0x00: /* add regb,(X) */
                case 0x01: /* add regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (X),regb */
                case 0x03: /* add (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xb6: /* movzbl (X),regl */
                        case 0xb7: /* movzwl (X),regl */
                        case 0xbe: /* movsbl (X),regl */
                        case 0xbf: /* movswl (X),regl */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x28: /* sub regb,(X) */
                case 0x29: /* sub regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (X),regb */
                case 0x2b: /* sub (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x38: /* cmp regb,(X) */
                case 0x39: /* cmp regl,(X) */
                    opType = OP_LOAD;
                    break;
                case 0x80: /* cmpb,... $,(X) */
                case 0x81: /* cmpl,... $,(X) */
                case 0x83: /* cmpl,... $,(X) */
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;
                case 0x88: /* mov regb,(X) */
                case 0x89: /* mov regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8a: /* mov (X),regb */
                case 0x8b: /* mov (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0xc6: /* movb $,(X) */
                case 0xc7: /* movl $,(X) */
                    opType = OP_STORE;
                    break;
                case 0x84: /* test %al,(X) */
                    opType = OP_LOAD;
                    break;
                case 0xf6: /* testb $,(X) or notb (X) */
                case 0xf7: /* testl $,(X) or notl (X)*/
                {
                    if (rip[2] < 0x10) /* test (X) */
                        opType = OP_LOAD;
                    else /* not (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        case 0x48: /* reg=%rax, X=%rax or %rax,%rax,1 */
        case 0x49: /* reg=%rax, X=%r8 or %r8,%rax,1 */
        case 0x4a: /* reg=%rax, X=%rax,%r8,1 */
        case 0x4b: /* reg=%rax, X=%r8,%r8,1 */
        case 0x4c: /* reg=%r8, X=%rax or %rax,%rax,1 */
        case 0x4d: /* reg=%r8, X=%r8 or %r8,%rax,1 */
        case 0x4e: /* reg=%r8, X=%rax,%r8,1 */
        case 0x4f: /* reg=%r8, X=%r8,%r8,1 */
        {
            switch (rip[1])
            {
                case 0x01: /* add reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x09: /* or reg,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x0b: /* or (%rax),reg */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xc3: /* movnti reg,(X) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x21: /* and reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x23: /* and (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x29: /* sub reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x31: /* xor reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x33: /* xor (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x39: /* cmp reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x81:
                case 0x83:
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x89: /* mov reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0xc7: /* movq $,(X) */
                    opType = OP_STORE;
                    break;

                case 0xf7:
                {
                    if (rip[2] < 0x10) /* testq $,(X) */
                        opType = OP_LOAD;
                    else /* notq (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
#endif
        case 0x66:
        {
            switch (rip[1])
            {
                case 0x01: /* add %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x2e: /* ucomisd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x6f: /* movdqa (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqa %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0xb6: /* movzbw (%rax),%ax */
                            opType = OP_LOAD;
                            break;
                        case 0xe7: /* movntdq %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x29: /* sub %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x39: /* cmp %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
#if defined(__x86_64__)
                case 0x41: /* reg = %ax (or %xmm0), X = %r8 */
                case 0x42: /* reg = %ax (or %xmm0), X = %rax,%r8,1 */
                case 0x43: /* reg = %ax (or %xmm0), X = %r8,%r8,1 */
                case 0x44: /* reg = %r8w (or %xmm8), X = %rax */
                case 0x45: /* reg = %r8w (or %xmm8), X = %r8 */
                case 0x46: /* reg = %r8w (or %xmm8), X = %rax,%r8,1 */
                case 0x47: /* reg = %r8w (or %xmm8), X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x01: /* add reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x03: /* add (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x2e: /* ucomisd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x6f: /* movdqa (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x7f: /* movdqa reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0xb6: /* movzbw (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0xe7: /* movntdq reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        case 0x29: /* sub reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x2b: /* sub (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x39: /* cmp reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x81: /* cmpw,... $,(X) */
                        case 0x83: /* cmpw,... $,(X) */
                            opType = CPLVirtualMemGetOpTypeImm(rip[3]);
                            break;
                        case 0x85: /* test reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x89: /* mov reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x8b: /* mov (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0xc7: /* movw $,(X) */
                            opType = OP_STORE;
                            break;
                        case 0xf7:
                        {
                            if (rip[3] < 0x10) /* testw $,(X) */
                                opType = OP_LOAD;
                            else /* notw (X) */
                                opType = OP_STORE;
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                case 0x81: /* cmpw,... $,(%rax) */
                case 0x83: /* cmpw,... $,(%rax) */
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
                case 0x89: /* mov %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0xc7: /* movw $,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0xf3:
                {
                    switch (rip[2])
                    {
                        case 0xa5: /* rep movsw %ds:(%rsi),%es:(%rdi) */
                            opType = OP_MOVS_RSI_RDI;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0xf7: /* testw $,(%rax) or notw (%rax) */
                {
                    if (rip[2] < 0x10) /* test */
                        opType = OP_LOAD;
                    else /* not */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        case 0x80: /* cmpb,... $,(%rax) */
        case 0x81: /* cmpl,... $,(%rax) */
        case 0x83: /* cmpl,... $,(%rax) */
            opType = CPLVirtualMemGetOpTypeImm(rip[1]);
            break;
        case 0x84: /* test %al,(%rax) */
        case 0x85: /* test %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x88: /* mov %al,(%rax) */
            opType = OP_STORE;
            break;
        case 0x89: /* mov %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x8a: /* mov (%rax),%al */
            opType = OP_LOAD;
            break;
        case 0x8b: /* mov (%rax),%eax */
            opType = OP_LOAD;
            break;
        case 0xd9: /* 387 float */
        {
            if (rip[1] < 0x08) /* flds (%eax) */
                opType = OP_LOAD;
            else if (rip[1] >= 0x18 && rip[1] <= 0x20) /* fstps (%eax) */
                opType = OP_STORE;
            break;
        }
        case 0xf2: /* SSE 2 */
        {
            switch (rip[1])
            {
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movsd %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x58: /* addsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x59: /* mulsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5c: /* subsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5e: /* divsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 or %r8,%rax,1 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X=%rax or %rax,%rax,1*/
                case 0x45: /* reg=%xmm8, X=%r8 or %r8,%rax,1 */
                case 0x46: /* reg=%xmm8, X=%rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X=%r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movsd reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x58: /* addsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x59: /* mulsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5c: /* subsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5e: /* divsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                default:
                    break;
            }
            break;
        }
        case 0xf3:
        {
            switch (rip[1])
            {
                case 0x0f: /* SSE 2 */
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movss (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movss %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x6f: /* movdqu (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqu %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X = %rax */
                case 0x45: /* reg=%xmm8, X = %r8 */
                case 0x46: /* reg=%xmm8, X = %rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f: /* SSE 2 */
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movss (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movss reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x6f: /* movdqu (X),reg */
                                    opType = OP_LOAD;
1448
                                    break;
4✔
1449
                                case 0x7f: /* movdqu reg,(X) */
×
1450
                                    opType = OP_STORE;
×
1451
                                    break;
×
1452
                                default:
×
1453
                                    break;
×
1454
                            }
1455
                            break;
4✔
1456
                        }
1457
                        default:
×
1458
                            break;
×
1459
                    }
1460
                    break;
4✔
1461
                }
1462
                case 0x48:
×
1463
                {
1464
                    switch (rip[2])
×
1465
                    {
1466
                        case 0xa5: /* rep movsq %ds:(%rsi),%es:(%rdi) */
×
1467
                            opType = OP_MOVS_RSI_RDI;
×
1468
                            break;
×
1469
                        default:
×
1470
                            break;
×
1471
                    }
1472
                    break;
×
1473
                }
1474
#endif
1475
                case 0xa4: /* rep movsb %ds:(%rsi),%es:(%rdi) */
×
1476
                case 0xa5: /* rep movsl %ds:(%rsi),%es:(%rdi) */
1477
                    opType = OP_MOVS_RSI_RDI;
×
1478
                    break;
×
1479
                case 0xa6: /* repz cmpsb %es:(%rdi),%ds:(%rsi) */
×
1480
                    opType = OP_LOAD;
×
1481
                    break;
×
1482
                default:
2✔
1483
                    break;
2✔
1484
            }
1485
            break;
6✔
1486
        }
1487
        case 0xf6: /* testb $,(%rax) or notb (%rax) */
×
1488
        case 0xf7: /* testl $,(%rax) or notl (%rax) */
1489
        {
1490
            if (rip[1] < 0x10) /* test */
×
1491
                opType = OP_LOAD;
×
1492
            else /* not */
1493
                opType = OP_STORE;
×
1494
            break;
×
1495
        }
1496
        default:
4✔
1497
            break;
4✔
1498
    }
1499
#endif
1500
    return opType;
113,280✔
1501
}
1502
#endif
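
// Illustrative summary of the decoding above: the two-byte sequence
// 0x8b 0x00 ("mov (%rax),%eax") is classified as OP_LOAD, 0x89 0x00
// ("mov %eax,(%rax)") as OP_STORE, and a rep-prefixed string move
// (0xf3 0xa4/0xa5) as OP_MOVS_RSI_RDI, which the SIGSEGV handler below
// disambiguates by comparing the fault address against %rsi/%rdi.
// Any unrecognized sequence is left as OP_UNKNOWN.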

/************************************************************************/
/*                    CPLVirtualMemManagerPinAddrInternal()             */
/************************************************************************/

static int
CPLVirtualMemManagerPinAddrInternal(CPLVirtualMemMsgToWorkerThread *msg)
{
    char wait_ready = '\0';
    char response_buf[4] = {};

    // Wait for the helper thread to be ready to process another request.
    while (true)
    {
        const int ret = static_cast<int>(
            read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1));
        if (ret < 0 && errno == EINTR)
        {
            // NOP
        }
        else
        {
            IGNORE_OR_ASSERT_IN_DEBUG(ret == 1);
            break;
        }
    }

    // Pass the address that caused the fault to the helper thread.
    const ssize_t nRetWrite =
        write(pVirtualMemManager->pipefd_to_thread[1], msg, sizeof(*msg));
    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(*msg));

    // Wait until the helper thread has fixed the fault.
    while (true)
    {
        const int ret = static_cast<int>(
            read(pVirtualMemManager->pipefd_from_thread[0], response_buf, 4));
        if (ret < 0 && errno == EINTR)
        {
            // NOP
        }
        else
        {
            IGNORE_OR_ASSERT_IN_DEBUG(ret == 4);
            break;
        }
    }

    // In case the helper thread did not recognize the address as being
    // one that it should take care of, just rely on the previous SIGSEGV
    // handler (which might abort the process).
    return (memcmp(response_buf, MAPPING_FOUND, 4) == 0);
}
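
// Summary of the handshake implemented above, using the manager's three
// pipes (only async-signal-safe read()/write() on the requester side,
// since this may run from the SIGSEGV handler):
//   1. pipefd_wait_thread: the helper thread writes one byte when it is
//      ready to service a request.
//   2. pipefd_to_thread: the requester sends the
//      CPLVirtualMemMsgToWorkerThread describing the fault.
//   3. pipefd_from_thread: the helper replies with the 4-byte
//      MAPPING_FOUND or MAPPING_NOT_FOUND token.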

/************************************************************************/
/*                      CPLVirtualMemPin()                              */
/************************************************************************/

void CPLVirtualMemPin(CPLVirtualMem *ctxt, void *pAddr, size_t nSize,
                      int bWriteOp)
{
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        return;

    CPLVirtualMemMsgToWorkerThread msg;

    memset(&msg, 0, sizeof(msg));
    msg.hRequesterThread = pthread_self();
    msg.opType = (bWriteOp) ? OP_STORE : OP_LOAD;

    char *pBase = reinterpret_cast<char *>(ALIGN_DOWN(pAddr, ctxt->nPageSize));
    const size_t n = (reinterpret_cast<char *>(pAddr) - pBase + nSize +
                      ctxt->nPageSize - 1) /
                     ctxt->nPageSize;
    for (size_t i = 0; i < n; i++)
    {
        msg.pFaultAddr = reinterpret_cast<char *>(pBase) + i * ctxt->nPageSize;
        CPLVirtualMemManagerPinAddrInternal(&msg);
    }
}
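
// Worked example of the page-count computation above, assuming a
// 4096-byte page size: pinning nSize = 6000 bytes starting 100 bytes
// into a page gives n = (100 + 6000 + 4095) / 4096 = 2, i.e. the two
// pages that contain byte offsets 100 through 6099 of the region.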

/************************************************************************/
/*                   CPLVirtualMemManagerSIGSEGVHandler()               */
/************************************************************************/

#if defined(__x86_64__)
#define REG_IP REG_RIP
#define REG_SI REG_RSI
#define REG_DI REG_RDI
#elif defined(__i386__)
#define REG_IP REG_EIP
#define REG_SI REG_ESI
#define REG_DI REG_EDI
#endif

// Must take care of only using "asynchronous-signal-safe" functions in a
// signal handler. pthread_self(), read() and write() are such. See:
// https://www.securecoding.cert.org/confluence/display/seccode/SIG30-C.+Call+only+asynchronous-safe+functions+within+signal+handlers
static void CPLVirtualMemManagerSIGSEGVHandler(int the_signal,
                                               siginfo_t *the_info,
                                               void *the_ctxt)
{
    CPLVirtualMemMsgToWorkerThread msg;

    memset(&msg, 0, sizeof(msg));
    msg.pFaultAddr = the_info->si_addr;
    msg.hRequesterThread = pthread_self();

#if defined(__x86_64__) || defined(__i386__)
    ucontext_t *the_ucontext = static_cast<ucontext_t *>(the_ctxt);
    const GByte *rip = reinterpret_cast<const GByte *>(
        the_ucontext->uc_mcontext.gregs[REG_IP]);
    msg.opType = CPLVirtualMemGetOpType(rip);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("at rip %p, bytes: %02x %02x %02x %02x\n", rip, rip[0],
                  rip[1], rip[2], rip[3]);
#endif
    if (msg.opType == OP_MOVS_RSI_RDI)
    {
        void *rsi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_SI]);
        void *rdi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_DI]);

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("fault=%p rsi=%p rdi=%p\n", msg.pFaultAddr, rsi, rdi);
#endif
        if (msg.pFaultAddr == rsi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("load\n");
#endif
            msg.opType = OP_LOAD;
        }
        else if (msg.pFaultAddr == rdi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("store\n");
#endif
            msg.opType = OP_STORE;
        }
    }
#ifdef DEBUG_VIRTUALMEM
    else if (msg.opType == OP_UNKNOWN)
    {
        static bool bHasWarned = false;
        if (!bHasWarned)
        {
            bHasWarned = true;
            fprintfstderr("at rip %p, unknown bytes: %02x %02x %02x %02x\n",
                          rip, rip[0], rip[1], rip[2], rip[3]);
        }
    }
#endif
#else
    msg.opType = OP_UNKNOWN;
#endif

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif

    if (the_info->si_code != SEGV_ACCERR)
    {
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
        return;
    }

    if (!CPLVirtualMemManagerPinAddrInternal(&msg))
    {
        // In case the helper thread did not recognize the address as being
        // one that it should take care of, just rely on the previous SIGSEGV
        // handler (which might abort the process).
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
    }

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif
}
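
// Note that only SEGV_ACCERR faults (invalid permissions on a mapped
// page) are forwarded to the helper thread; anything else, e.g.
// SEGV_MAPERR on a genuinely unmapped address, goes straight to the
// previously installed handler.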

/************************************************************************/
/*                      CPLVirtualMemManagerThread()                    */
/************************************************************************/

static void CPLVirtualMemManagerThread(void * /* unused_param */)
{
    while (true)
    {
        char i_m_ready = 1;
        CPLVirtualMemVMA *ctxt = nullptr;
        bool bMappingFound = false;
        CPLVirtualMemMsgToWorkerThread msg;

        // Signal that we are ready to process a new request.
        ssize_t nRetWrite =
            write(pVirtualMemManager->pipefd_wait_thread[1], &i_m_ready, 1);
        IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 1);

        // Fetch the address to process.
        const ssize_t nRetRead =
            read(pVirtualMemManager->pipefd_to_thread[0], &msg, sizeof(msg));
        IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == sizeof(msg));

        // If CPLVirtualMemManagerTerminate() is called, it will use BYEBYE_ADDR
        // as a means to ask for our termination.
        if (msg.pFaultAddr == BYEBYE_ADDR)
            break;

        /* Look up a mapping that contains addr */
        CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
        for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
        {
            ctxt = pVirtualMemManager->pasVirtualMem[i];
            if (static_cast<char *>(msg.pFaultAddr) >=
                    static_cast<char *>(ctxt->sBase.pData) &&
                static_cast<char *>(msg.pFaultAddr) <
                    static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
            {
                bMappingFound = true;
                break;
            }
        }
        CPLReleaseMutex(hVirtualMemManagerMutex);

        if (bMappingFound)
        {
            char *const start_page_addr = static_cast<char *>(
                ALIGN_DOWN(msg.pFaultAddr, ctxt->sBase.nPageSize));
            const int iPage =
                static_cast<int>((static_cast<char *>(start_page_addr) -
                                  static_cast<char *>(ctxt->sBase.pData)) /
                                 ctxt->sBase.nPageSize);

            if (iPage == ctxt->iLastPage)
            {
                // In case two threads try to access the same page
                // concurrently, it is possible that we are asked to map the
                // page again even though it is already mapped. However, if
                // the number of successive retries is too high, this is
                // certainly a sign that something else is happening, like
                // trying to write-access a read-only page. 100 is a bit of a
                // magic number. Rouault believes it must be at least the
                // number of concurrent threads. 100 seems to be really safe!
                ctxt->nRetry++;
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                fprintfstderr("retry on page %d : %d\n", iPage, ctxt->nRetry);
#endif
                if (ctxt->nRetry >= 100)
                {
                    CPLError(CE_Failure, CPLE_AppDefined,
                             "CPLVirtualMemManagerThread: trying to "
                             "write into read-only mapping");
                    nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                                      MAPPING_NOT_FOUND, 4);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
                    break;
                }
                else if (msg.opType != OP_LOAD &&
                         ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                         !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("switching page %d to write mode\n", iPage);
#endif
                    SET_BIT(ctxt->pabitRWMappedPages, iPage);
                    const int nRet =
                        mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                 PROT_READ | PROT_WRITE);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                }
            }
            else
            {
                ctxt->iLastPage = iPage;
                ctxt->nRetry = 0;

                if (TEST_BIT(ctxt->pabitMappedPages, iPage))
                {
                    if (msg.opType != OP_LOAD &&
                        ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                        !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("switching page %d to write mode\n",
                                      iPage);
#endif
                        SET_BIT(ctxt->pabitRWMappedPages, iPage);
                        const int nRet =
                            mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                     PROT_READ | PROT_WRITE);
                        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                    }
                    else
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("unexpected case for page %d\n", iPage);
#endif
                    }
                }
                else
                {
                    void *const pPageToFill =
                        CPLVirtualMemGetPageToFill(ctxt, start_page_addr);

                    size_t nToFill = ctxt->sBase.nPageSize;
                    if (start_page_addr + nToFill >=
                        static_cast<char *>(ctxt->sBase.pData) +
                            ctxt->sBase.nSize)
                    {
                        nToFill = static_cast<char *>(ctxt->sBase.pData) +
                                  ctxt->sBase.nSize - start_page_addr;
                    }

                    ctxt->pfnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                       start_page_addr - static_cast<char *>(
                                                             ctxt->sBase.pData),
                                       pPageToFill, nToFill,
                                       ctxt->sBase.pCbkUserData);

                    // Now remap this page to its target address and
                    // register it in the LRU.
                    CPLVirtualMemAddPage(ctxt, start_page_addr, pPageToFill,
                                         msg.opType, msg.hRequesterThread);
                }
            }

            // Warn the segfault handler that we have finished our job.
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
        else
        {
            // Warn the segfault handler that we have finished our job
            // but that the fault didn't occur in a memory range that
            // is under our responsibility.
            CPLError(CE_Failure, CPLE_AppDefined,
                     "CPLVirtualMemManagerThread: no mapping found");
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_NOT_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
    }
}
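
// In steady state, the helper thread thus answers each fault in one of
// three ways: upgrade an already-mapped page to PROT_READ | PROT_WRITE
// when a store hits it, fill a missing page through pfnCachePage() and
// remap it at its target address (replying MAPPING_FOUND in both cases),
// or reply MAPPING_NOT_FOUND so that the requester falls back to the
// previously installed SIGSEGV handler.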

/************************************************************************/
/*                       CPLVirtualMemManagerInit()                     */
/************************************************************************/

static bool CPLVirtualMemManagerInit()
{
    CPLMutexHolderD(&hVirtualMemManagerMutex);
    if (pVirtualMemManager != nullptr)
        return true;

    struct sigaction act;
    pVirtualMemManager = static_cast<CPLVirtualMemManager *>(
        VSI_MALLOC_VERBOSE(sizeof(CPLVirtualMemManager)));
    if (pVirtualMemManager == nullptr)
        return false;
    pVirtualMemManager->pasVirtualMem = nullptr;
    pVirtualMemManager->nVirtualMemCount = 0;
    int nRet = pipe(pVirtualMemManager->pipefd_to_thread);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    nRet = pipe(pVirtualMemManager->pipefd_from_thread);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    nRet = pipe(pVirtualMemManager->pipefd_wait_thread);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);

    // Install our custom SIGSEGV handler.
    act.sa_sigaction = CPLVirtualMemManagerSIGSEGVHandler;
    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    nRet = sigaction(SIGSEGV, &act, &pVirtualMemManager->oldact);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);

    // Start the helper thread.
    pVirtualMemManager->hHelperThread =
        CPLCreateJoinableThread(CPLVirtualMemManagerThread, nullptr);
    if (pVirtualMemManager->hHelperThread == nullptr)
    {
        VSIFree(pVirtualMemManager);
        pVirtualMemManager = nullptr;
        return false;
    }
    return true;
}

/************************************************************************/
/*                      CPLVirtualMemManagerTerminate()                 */
/************************************************************************/

void CPLVirtualMemManagerTerminate(void)
{
    if (pVirtualMemManager == nullptr)
        return;

    CPLVirtualMemMsgToWorkerThread msg;
    msg.pFaultAddr = BYEBYE_ADDR;
    msg.opType = OP_UNKNOWN;
    memset(&msg.hRequesterThread, 0, sizeof(msg.hRequesterThread));

    // Wait for the helper thread to be ready.
    char wait_ready;
    const ssize_t nRetRead =
        read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1);
    IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == 1);

    // Ask it to terminate.
    const ssize_t nRetWrite =
        write(pVirtualMemManager->pipefd_to_thread[1], &msg, sizeof(msg));
    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(msg));

    // Wait for its termination.
    CPLJoinThread(pVirtualMemManager->hHelperThread);

    // Clean up everything.
    while (pVirtualMemManager->nVirtualMemCount > 0)
        CPLVirtualMemFree(reinterpret_cast<CPLVirtualMem *>(
            pVirtualMemManager
                ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount - 1]));
    CPLFree(pVirtualMemManager->pasVirtualMem);

    close(pVirtualMemManager->pipefd_to_thread[0]);
    close(pVirtualMemManager->pipefd_to_thread[1]);
    close(pVirtualMemManager->pipefd_from_thread[0]);
    close(pVirtualMemManager->pipefd_from_thread[1]);
    close(pVirtualMemManager->pipefd_wait_thread[0]);
    close(pVirtualMemManager->pipefd_wait_thread[1]);

    // Restore previous handler.
    sigaction(SIGSEGV, &pVirtualMemManager->oldact, nullptr);

    CPLFree(pVirtualMemManager);
    pVirtualMemManager = nullptr;

    CPLDestroyMutex(hVirtualMemManagerMutex);
    hVirtualMemManagerMutex = nullptr;
}

#else  // HAVE_VIRTUAL_MEM_VMA

CPLVirtualMem *CPLVirtualMemNew(
    size_t /* nSize */, size_t /* nCacheSize */, size_t /* nPageSizeHint */,
    int /* bSingleThreadUsage */, CPLVirtualMemAccessMode /* eAccessMode */,
    CPLVirtualMemCachePageCbk /* pfnCachePage */,
    CPLVirtualMemUnCachePageCbk /* pfnUnCachePage */,
    CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
{
    CPLError(CE_Failure, CPLE_NotSupported,
             "CPLVirtualMemNew() unsupported on "
             "this operating system / configuration");
    return nullptr;
}

void CPLVirtualMemDeclareThread(CPLVirtualMem * /* ctxt */)
{
}

void CPLVirtualMemUnDeclareThread(CPLVirtualMem * /* ctxt */)
{
}

void CPLVirtualMemPin(CPLVirtualMem * /* ctxt */, void * /* pAddr */,
                      size_t /* nSize */, int /* bWriteOp */)
{
}

void CPLVirtualMemManagerTerminate(void)
{
}

#endif  // HAVE_VIRTUAL_MEM_VMA

#ifdef HAVE_MMAP

/************************************************************************/
/*                     CPLVirtualMemFreeFileMemoryMapped()              */
/************************************************************************/

static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMem *ctxt)
{
    const size_t nMappingSize = ctxt->nSize +
                                static_cast<GByte *>(ctxt->pData) -
                                static_cast<GByte *>(ctxt->pDataToFree);
    const int nRet = munmap(ctxt->pDataToFree, nMappingSize);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
}

/************************************************************************/
/*                       CPLVirtualMemFileMapNew()                      */
/************************************************************************/

CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE *fp, vsi_l_offset nOffset, vsi_l_offset nLength,
    CPLVirtualMemAccessMode eAccessMode,
    CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
{
#if SIZEOF_VOIDP == 4
    if (nLength != static_cast<size_t>(nLength))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nLength = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nLength);
        return nullptr;
    }
    if (nOffset + CPLGetPageSize() !=
        static_cast<vsi_l_offset>(
            static_cast<off_t>(nOffset + CPLGetPageSize())))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nOffset = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nOffset);
        return nullptr;
    }
#endif

    int fd = static_cast<int>(
        reinterpret_cast<GUIntptr_t>(VSIFGetNativeFileDescriptorL(fp)));
    if (fd == 0)
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "Cannot operate on a virtual file");
        return nullptr;
    }

    const off_t nAlignedOffset =
        static_cast<off_t>((nOffset / CPLGetPageSize()) * CPLGetPageSize());
    size_t nAlignment = static_cast<size_t>(nOffset - nAlignedOffset);
    size_t nMappingSize = static_cast<size_t>(nLength + nAlignment);

    // Need to ensure that the requested extent fits into the file size;
    // otherwise SIGBUS errors will occur when using the mapping.
    vsi_l_offset nCurPos = VSIFTellL(fp);
    if (VSIFSeekL(fp, 0, SEEK_END) != 0)
        return nullptr;
    vsi_l_offset nFileSize = VSIFTellL(fp);
    if (nFileSize < nOffset + nLength)
    {
        if (eAccessMode != VIRTUALMEM_READWRITE)
        {
            CPLError(CE_Failure, CPLE_AppDefined,
                     "Trying to map an extent outside of the file");
            CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
            return nullptr;
        }
        else
        {
            char ch = 0;
            if (VSIFSeekL(fp, nOffset + nLength - 1, SEEK_SET) != 0 ||
                VSIFWriteL(&ch, 1, 1, fp) != 1)
            {
                CPLError(CE_Failure, CPLE_AppDefined,
                         "Cannot extend file to mapping size");
                CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
                return nullptr;
            }
        }
    }
    if (VSIFSeekL(fp, nCurPos, SEEK_SET) != 0)
        return nullptr;

    CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
    if (ctxt == nullptr)
        return nullptr;

    void *addr =
        mmap(nullptr, nMappingSize,
             eAccessMode == VIRTUALMEM_READWRITE ? PROT_READ | PROT_WRITE
                                                 : PROT_READ,
             MAP_SHARED, fd, nAlignedOffset);
    if (addr == MAP_FAILED)
    {
        int myerrno = errno;
        CPLError(CE_Failure, CPLE_AppDefined, "mmap() failed : %s",
                 strerror(myerrno));
        VSIFree(ctxt);
        // cppcheck thinks we are leaking addr.
        // cppcheck-suppress memleak
        return nullptr;
    }

    ctxt->eType = VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
    ctxt->nRefCount = 1;
    ctxt->eAccessMode = eAccessMode;
    ctxt->pData = static_cast<GByte *>(addr) + nAlignment;
    ctxt->pDataToFree = addr;
    ctxt->nSize = static_cast<size_t>(nLength);
    ctxt->nPageSize = CPLGetPageSize();
    ctxt->bSingleThreadUsage = false;
    ctxt->pfnFreeUserData = pfnFreeUserData;
    ctxt->pCbkUserData = pCbkUserData;

    return ctxt;
}
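
// Typical usage sketch (illustrative only; "in.bin" is a hypothetical
// file name): map the first megabyte of a file read-only, access it as
// ordinary memory, then release the mapping.
//
//     VSILFILE *fp = VSIFOpenL("in.bin", "rb");
//     CPLVirtualMem *psMem = fp ? CPLVirtualMemFileMapNew(
//         fp, 0, 1024 * 1024, VIRTUALMEM_READONLY, nullptr, nullptr)
//                               : nullptr;
//     if (psMem)
//     {
//         const GByte *pabyData =
//             static_cast<GByte *>(CPLVirtualMemGetAddr(psMem));
//         // ... read up to CPLVirtualMemGetSize(psMem) bytes ...
//         CPLVirtualMemFree(psMem);
//     }
//     if (fp)
//         VSIFCloseL(fp);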

#else  // HAVE_MMAP

CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE * /* fp */, vsi_l_offset /* nOffset */, vsi_l_offset /* nLength */,
    CPLVirtualMemAccessMode /* eAccessMode */,
    CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
{
    CPLError(CE_Failure, CPLE_NotSupported,
             "CPLVirtualMemFileMapNew() unsupported on this "
             "operating system / configuration");
    return nullptr;
}

#endif  // HAVE_MMAP

/************************************************************************/
/*                         CPLGetPageSize()                             */
/************************************************************************/

size_t CPLGetPageSize(void)
{
#if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
    return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#else
    return 0;
#endif
}

/************************************************************************/
/*                   CPLIsVirtualMemFileMapAvailable()                  */
/************************************************************************/

int CPLIsVirtualMemFileMapAvailable(void)
{
#ifdef HAVE_MMAP
    return TRUE;
#else
    return FALSE;
#endif
}

/************************************************************************/
/*                        CPLVirtualMemFree()                           */
/************************************************************************/

void CPLVirtualMemFree(CPLVirtualMem *ctxt)
{
    if (ctxt == nullptr || --(ctxt->nRefCount) > 0)
        return;

    if (ctxt->pVMemBase != nullptr)
    {
        CPLVirtualMemFree(ctxt->pVMemBase);
        if (ctxt->pfnFreeUserData != nullptr)
            ctxt->pfnFreeUserData(ctxt->pCbkUserData);
        CPLFree(ctxt);
        return;
    }

#ifdef HAVE_MMAP
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
#endif
#ifdef HAVE_VIRTUAL_MEM_VMA
    if (ctxt->eType == VIRTUAL_MEM_TYPE_VMA)
        CPLVirtualMemFreeFileMemoryMapped(
            reinterpret_cast<CPLVirtualMemVMA *>(ctxt));
#endif

    if (ctxt->pfnFreeUserData != nullptr)
        ctxt->pfnFreeUserData(ctxt->pCbkUserData);
    CPLFree(ctxt);
}

/************************************************************************/
/*                      CPLVirtualMemGetAddr()                          */
/************************************************************************/

void *CPLVirtualMemGetAddr(CPLVirtualMem *ctxt)
{
    return ctxt->pData;
}

/************************************************************************/
/*                     CPLVirtualMemIsFileMapping()                     */
/************************************************************************/

int CPLVirtualMemIsFileMapping(CPLVirtualMem *ctxt)
{
    return ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
}

/************************************************************************/
/*                     CPLVirtualMemGetAccessMode()                     */
/************************************************************************/

CPLVirtualMemAccessMode CPLVirtualMemGetAccessMode(CPLVirtualMem *ctxt)
{
    return ctxt->eAccessMode;
}

/************************************************************************/
/*                      CPLVirtualMemGetPageSize()                      */
/************************************************************************/

size_t CPLVirtualMemGetPageSize(CPLVirtualMem *ctxt)
{
    return ctxt->nPageSize;
}

/************************************************************************/
/*                        CPLVirtualMemGetSize()                        */
/************************************************************************/

size_t CPLVirtualMemGetSize(CPLVirtualMem *ctxt)
{
    return ctxt->nSize;
}

/************************************************************************/
/*                   CPLVirtualMemIsAccessThreadSafe()                  */
/************************************************************************/

int CPLVirtualMemIsAccessThreadSafe(CPLVirtualMem *ctxt)
{
    return !ctxt->bSingleThreadUsage;
}

/************************************************************************/
/*                       CPLVirtualMemDerivedNew()                      */
/************************************************************************/

CPLVirtualMem *CPLVirtualMemDerivedNew(
    CPLVirtualMem *pVMemBase, vsi_l_offset nOffset, vsi_l_offset nSize,
    CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
{
    if (nOffset + nSize > pVMemBase->nSize)
        return nullptr;

    CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
    if (ctxt == nullptr)
        return nullptr;

    ctxt->eType = pVMemBase->eType;
    ctxt->nRefCount = 1;
    ctxt->pVMemBase = pVMemBase;
    pVMemBase->nRefCount++;
    ctxt->eAccessMode = pVMemBase->eAccessMode;
    ctxt->pData = static_cast<GByte *>(pVMemBase->pData) + nOffset;
    ctxt->pDataToFree = nullptr;
    ctxt->nSize = static_cast<size_t>(nSize);
    ctxt->nPageSize = pVMemBase->nPageSize;
    ctxt->bSingleThreadUsage = CPL_TO_BOOL(pVMemBase->bSingleThreadUsage);
    ctxt->pfnFreeUserData = pfnFreeUserData;
    ctxt->pCbkUserData = pCbkUserData;

    return ctxt;
}
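
// A derived mapping is a window into its base mapping: it shares the
// base's pages (pDataToFree stays null, so nothing extra is unmapped)
// and takes a reference on the base, which CPLVirtualMemFree() releases
// again, so the base outlives every derived view. Illustrative sketch,
// reusing a psMem obtained from one of the constructors above:
//
//     CPLVirtualMem *psView =
//         CPLVirtualMemDerivedNew(psMem, 4096, 8192, nullptr, nullptr);
//     // psView now exposes bytes 4096..12287 of psMem.
//     CPLVirtualMemFree(psView);  // Also drops the reference on psMem.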