• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 15263807472

26 May 2025 08:53PM UTC coverage: 72.046% (-0.002%) from 72.048%
15263807472

push

github

yuwata
src/core/manager.c: log preset activity on first boot

This gives us a little more information about what units were enabled
or disabled on that first boot and will be useful for OS developers
tracking down the source of unit state.

An example with this enabled looks like:

```
NET: Registered PF_VSOCK protocol family
systemd[1]: Applying preset policy.
systemd[1]: Unit /etc/systemd/system/dnsmasq.service is masked, ignoring.
systemd[1]: Unit /etc/systemd/system/systemd-repart.service is masked, ignoring.
systemd[1]: Removed '/etc/systemd/system/sockets.target.wants/systemd-resolved-monitor.socket'.
systemd[1]: Removed '/etc/systemd/system/sockets.target.wants/systemd-resolved-varlink.socket'.
systemd[1]: Created symlink '/etc/systemd/system/multi-user.target.wants/var-mnt-workdir.mount' → '/etc/systemd/system/var-mnt-workdir.mount'.
systemd[1]: Created symlink '/etc/systemd/system/multi-user.target.wants/var-mnt-workdir\x2dtmp.mount' → '/etc/systemd/system/var-mnt-workdir\x2dtmp.mount'.
systemd[1]: Created symlink '/etc/systemd/system/afterburn-sshkeys.target.requires/afterburn-sshkeys@core.service' → '/usr/lib/systemd/system/afterburn-sshkeys@.service'.
systemd[1]: Created symlink '/etc/systemd/system/sockets.target.wants/systemd-resolved-varlink.socket' → '/usr/lib/systemd/system/systemd-resolved-varlink.socket'.
systemd[1]: Created symlink '/etc/systemd/system/sockets.target.wants/systemd-resolved-monitor.socket' → '/usr/lib/systemd/system/systemd-resolved-monitor.socket'.
systemd[1]: Populated /etc with preset unit settings.
```

Considering it only happens on first boot and not on every boot I think
the extra information is worth the extra verbosity in the logs just for
that boot.

5 of 6 new or added lines in 1 file covered. (83.33%)

5463 existing lines in 165 files now uncovered.

299151 of 415222 relevant lines covered (72.05%)

702386.45 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.64
/src/basic/hashmap.c
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#include <fnmatch.h>
4
#include <pthread.h>
5
#if HAVE_VALGRIND_VALGRIND_H
6
#  include <valgrind/valgrind.h>
7
#endif
8

9
#include "alloc-util.h"
10
#include "extract-word.h"
11
#include "hashmap.h"
12
#include "log.h"
13
#include "logarithm.h"
14
#include "memory-util.h"
15
#include "mempool.h"
16
#include "process-util.h"
17
#include "random-util.h"
18
#include "set.h"
19
#include "siphash24.h"
20
#include "sort-util.h"
21
#include "string-util.h"
22
#include "strv.h"
23

24
#if ENABLE_DEBUG_HASHMAP
25
#include "list.h"
26
#endif
27

28
/*
29
 * Implementation of hashmaps.
30
 * Addressing: open
31
 *   - uses less RAM compared to closed addressing (chaining), because
32
 *     our entries are small (especially in Sets, which tend to contain
33
 *     the majority of entries in systemd).
34
 * Collision resolution: Robin Hood
35
 *   - tends to equalize displacement of entries from their optimal buckets.
36
 * Probe sequence: linear
37
 *   - though theoretically worse than random probing/uniform hashing/double
38
 *     hashing, it is good for cache locality.
39
 *
40
 * References:
41
 * Celis, P. 1986. Robin Hood Hashing.
42
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
43
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
44
 * - The results are derived for random probing. Suggests deletion with
45
 *   tombstones and two mean-centered search methods. None of that works
46
 *   well for linear probing.
47
 *
48
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
49
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
50
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
51
 * http://www.math.uu.se/~svante/papers/sj157.pdf
52
 * - Applies to Robin Hood with linear probing. Contains remarks on
53
 *   the unsuitability of mean-centered search with linear probing.
54
 *
55
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
56
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
57
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
58
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
59
 *   in a successful search), and Janson writes about displacement. C = d + 1.
60
 *
61
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
62
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
63
 * - Explanation of backward shift deletion with pictures.
64
 *
65
 * Khuong, P. 2013. The Other Robin Hood Hashing.
66
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
67
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
68
 */
69

70
/*
71
 * XXX Ideas for improvement:
72
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
73
 * http://blog.booking.com/hardening-perls-hash-function.html
74
 */
75

76
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE            5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        /* Doubly-linked list (by bucket index) recording insertion order. */
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

/* The decoded (non-raw) representation of "free", see bucket_calculate_dib(). */
#define DIB_FREE UINT_MAX

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while (hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         *              or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

/* Per-type layout/allocation parameters, indexed by enum HashmapType. */
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
271

272
void hashmap_trim_pools(void) {
2✔
273
        int r;
2✔
274

275
        /* The pool is only allocated by the main thread, but the memory can be passed to other
276
         * threads. Let's clean up if we are the main thread and no other threads are live. */
277

278
        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
279
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
280
        if (getpid() != gettid())
2✔
UNCOV
281
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");
×
282

283
        r = get_process_threads(0);
2✔
284
        if (r < 0)
2✔
UNCOV
285
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
×
286
        if (r != 1)
2✔
UNCOV
287
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");
×
288

289
        mempool_trim(&hashmap_pool);
2✔
290
        mempool_trim(&ordered_hashmap_pool);
2✔
291
}
292

293
#if HAVE_VALGRIND_VALGRIND_H
/* Runs at process exit (GCC destructor) so that valgrind does not report the
 * mempool allocations as leaked. */
_destructor_ static void cleanup_pools(void) {
        /* Be nice to valgrind */
        if (RUNNING_ON_VALGRIND)
                hashmap_trim_pools();
}
#endif
301
/* Current bucket count: indirect storage tracks it explicitly, direct
 * storage has a fixed per-type capacity. */
static unsigned n_buckets(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.n_buckets;

        return hashmap_type_info[h->type].n_direct_buckets;
}
/* Number of entries currently stored, regardless of storage kind. */
static unsigned n_entries(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.n_entries;

        return h->n_direct_entries;
}
/* Bumps the entry count in whichever storage representation is active. */
static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}
/* Decrements the entry count in whichever storage representation is active. */
static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
/* Start of the bucket/DIB storage area for this map. */
static void* storage_ptr(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.storage;

        return h->direct.storage;
}
/* Hash key to use: per-map key for indirect storage, the process-wide
 * shared key while the map is still in direct storage. */
static uint8_t* hash_key(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.hash_key;

        return shared_hash_key;
}
/* Maps a key to its initial bucket index: siphash24 of the key with this
 * map's hash key, reduced modulo the bucket count. */
static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;

        siphash24_init(&state, hash_key(h));
        h->hash_ops->hash(p, &state);

        return (unsigned) (siphash24_finalize(&state) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
348

349
/* Marks the map as modified since the last iterated_cache_get(). */
static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
353

354
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
3,264,715✔
355
        static uint8_t current[HASH_KEY_SIZE];
3,264,715✔
356
        static bool current_initialized = false;
3,264,715✔
357

358
        /* Returns a hash function key to use. In order to keep things
359
         * fast we will not generate a new key each time we allocate a
360
         * new hash table. Instead, we'll just reuse the most recently
361
         * generated one, except if we never generated one or when we
362
         * are rehashing an entire hash table because we reached a
363
         * fill level */
364

365
        if (!current_initialized || !reuse_is_ok) {
3,264,715✔
366
                random_bytes(current, sizeof(current));
2,009,754✔
367
                current_initialized = true;
2,009,754✔
368
        }
369

370
        memcpy(hash_key, current, sizeof(current));
3,264,715✔
371
}
3,264,715✔
372

373
/* Returns the entry stored in bucket 'idx'; entry size depends on the map
 * type, hence the byte arithmetic. */
static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

379
/* Typed accessor for a plain-hashmap bucket. */
static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

383
/* Typed accessor for an ordered-hashmap bucket. */
static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

387
/* Typed accessor for a Set bucket. Declaration style ('entry*' with the star
 * attached to the type) made consistent with plain_bucket_at() and
 * ordered_bucket_at() above. */
static struct set_entry* set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

391
/* Returns the virtual "swap" bucket for one of the reserved indexes
 * (IDX_PUT, IDX_TMP). */
static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

395
/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        /* IDX_FIRST/IDX_NIL are never valid bucket addresses. */
        assert_not_reached();
}

408
/* The raw DIB array is stored immediately after the last bucket. */
static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

UNCOV
413
/* Forward distance from bucket 'from' to bucket 'idx', wrapping around the
 * end of the table. */
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        if (idx >= from)
                return idx - from;

        return n_buckets(h) + idx - from;
}

418
/* Decodes the raw DIB byte for bucket 'idx' into an actual distance,
 * returning DIB_FREE for a free bucket. */
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

441
/* Encodes and stores the DIB for bucket 'idx'; values too large to represent
 * are clamped to DIB_RAW_OVERFLOW (decoded later by recomputing the hash). */
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}
444

445
/* Returns the index of the first occupied bucket at or after 'idx', or
 * IDX_NIL if the rest of the table is free. */
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        const dib_raw_t *dibs = dib_raw_ptr(h);
        unsigned n = n_buckets(h);

        while (idx < n) {
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;
                idx++;
        }

        return IDX_NIL;
}

457
/* Zeroes the entry and marks its bucket as free in the DIB array. */
static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
461

462
/* Copies the entry in bucket 'from' into bucket 'to' (either may be a virtual
 * swap bucket) and, for ordered hashmaps, repairs the insertion-order linked
 * list so the moved entry's neighbours and the list head/tail refer to 'to'. */
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                /* Point the successor's back-link at the new location. */
                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                /* Point the predecessor's forward-link at the new location. */
                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
497

498
/* Next bucket index, wrapping around the end of the table. */
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

502
/* Previous bucket index, wrapping around the start of the table. */
static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

506
/* Returns the "value" of an entry: the stored value pointer for (ordered)
 * hashmaps, or the key itself for sets, which store no separate value. */
static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached();
        }
}

521
/* Removes the entry at bucket 'idx' using Robin Hood backward-shift deletion:
 * every occupied bucket between idx and the next free-or-DIB-0 bucket is
 * shifted one slot back and its DIB decremented, so no tombstones are needed.
 * For ordered hashmaps the entry is first unlinked from the insertion-order
 * list. */
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                /* Unlink from the insertion-order list, fixing up head/tail
                 * if this was the first/last entry. */
                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
574
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
575

576
/* Advances an iterator over an ordered hashmap, following the insertion-order
 * linked list. Returns the bucket index of the current entry, or IDX_NIL at
 * the end. */
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember where to resume next call: the list successor's bucket and
         * its key pointer (for the backward-shift check above). */
        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
626

627
/* Advances an iterator over an unordered hashmap/set, walking the buckets in
 * storage order. Returns the bucket index of the current entry, or IDX_NIL at
 * the end. */
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember where to resume next call, plus the key pointer for the
         * backward-shift check above. */
        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
681

682
/* Common iteration step: treats a NULL map as empty, runs the debug-mode
 * modification checks, then dispatches to insertion-order iteration for
 * ordered hashmaps and bucket-order iteration otherwise. Returns the bucket
 * index of the current entry, or IDX_NIL at the end. */
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
707

708
bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
46,538,845✔
709
        struct hashmap_base_entry *e;
46,538,845✔
710
        void *data;
46,538,845✔
711
        unsigned idx;
46,538,845✔
712

713
        idx = hashmap_iterate_entry(h, i);
46,538,845✔
714
        if (idx == IDX_NIL) {
46,538,845✔
715
                if (value)
14,265,326✔
716
                        *value = NULL;
13,678,717✔
717
                if (key)
14,265,326✔
718
                        *key = NULL;
3,463,214✔
719

720
                return false;
14,265,326✔
721
        }
722

723
        e = bucket_at(h, idx);
32,273,519✔
724
        data = entry_value(h, e);
32,273,519✔
725
        if (value)
32,273,519✔
726
                *value = data;
26,099,126✔
727
        if (key)
32,273,519✔
728
                *key = e->key;
19,601,283✔
729

730
        return true;
731
}
732

733
#define HASHMAP_FOREACH_IDX(idx, h, i) \
734
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
735
             (idx != IDX_NIL); \
736
             (idx) = hashmap_iterate_entry((h), &(i)))
737

738
IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
6,305✔
739
        IteratedCache *cache;
6,305✔
740

741
        assert(h);
6,305✔
742
        assert(!h->cached);
6,305✔
743

744
        if (h->cached)
6,305✔
745
                return NULL;
746

747
        cache = new0(IteratedCache, 1);
6,305✔
748
        if (!cache)
6,305✔
749
                return NULL;
750

751
        cache->hashmap = h;
6,305✔
752
        h->cached = true;
6,305✔
753

754
        return cache;
6,305✔
755
}
756

757
static void reset_direct_storage(HashmapBase *h) {
6,942,049✔
758
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
6,942,049✔
759
        void *p;
6,942,049✔
760

761
        assert(!h->has_indirect);
6,942,049✔
762

763
        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
6,942,049✔
764
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
6,942,049✔
765
}
6,942,049✔
766

767
static void shared_hash_key_initialize(void) {
73,123✔
768
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
73,123✔
769
}
73,123✔
770

771
static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type) {
3,482,133✔
772
        HashmapBase *h;
3,482,133✔
773
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
3,482,133✔
774

775
        bool use_pool = mempool_enabled && mempool_enabled();  /* mempool_enabled is a weak symbol */
3,482,133✔
776

777
        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
3,562,993✔
778
        if (!h)
3,482,133✔
779
                return NULL;
780

781
        h->type = type;
3,482,133✔
782
        h->from_pool = use_pool;
3,482,133✔
783
        h->hash_ops = hash_ops ?: &trivial_hash_ops;
3,482,133✔
784

785
        if (type == HASHMAP_TYPE_ORDERED) {
3,482,133✔
786
                OrderedHashmap *lh = (OrderedHashmap*)h;
1,126,026✔
787
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
1,126,026✔
788
        }
789

790
        reset_direct_storage(h);
3,482,133✔
791

792
        static pthread_once_t once = PTHREAD_ONCE_INIT;
3,482,133✔
793
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);
3,482,133✔
794

795
#if ENABLE_DEBUG_HASHMAP
796
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
797
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
798
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
799
#endif
800

801
        return h;
802
}
803

804
Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
413,382✔
805
        return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN);
413,382✔
806
}
807

808
OrderedHashmap *ordered_hashmap_new(const struct hash_ops *hash_ops) {
50,894✔
809
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED);
50,894✔
810
}
811

812
Set *set_new(const struct hash_ops *hash_ops) {
53,969✔
813
        return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET);
53,969✔
814
}
815

816
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
31,270,900✔
817
                                         enum HashmapType type) {
818
        HashmapBase *q;
31,270,900✔
819

820
        assert(h);
31,270,900✔
821

822
        if (*h) {
31,270,900✔
823
                assert((*h)->hash_ops == (hash_ops ?: &trivial_hash_ops));
31,393,621✔
824
                return 0;
825
        }
826

827
        q = hashmap_base_new(hash_ops, type);
2,963,885✔
828
        if (!q)
2,963,885✔
829
                return -ENOMEM;
830

831
        *h = q;
2,963,885✔
832
        return 1;
2,963,885✔
833
}
834

835
int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
6,069,833✔
836
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN);
6,069,833✔
837
}
838

839
int ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops) {
20,237,330✔
840
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED);
20,237,330✔
841
}
842

843
int set_ensure_allocated(Set **s, const struct hash_ops *hash_ops) {
4,963,737✔
844
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET);
4,963,737✔
845
}
846

847
int hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
5,448,277✔
848
        int r;
5,448,277✔
849

850
        r = hashmap_ensure_allocated(h, hash_ops);
5,448,277✔
851
        if (r < 0)
5,448,277✔
852
                return r;
853

854
        return hashmap_put(*h, key, value);
5,448,277✔
855
}
856

857
int ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
7,788,432✔
858
        int r;
7,788,432✔
859

860
        r = ordered_hashmap_ensure_allocated(h, hash_ops);
7,788,432✔
861
        if (r < 0)
7,788,432✔
862
                return r;
863

864
        return ordered_hashmap_put(*h, key, value);
7,788,432✔
865
}
866

867
int ordered_hashmap_ensure_replace(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
6,734✔
868
        int r;
6,734✔
869

870
        r = ordered_hashmap_ensure_allocated(h, hash_ops);
6,734✔
871
        if (r < 0)
6,734✔
872
                return r;
873

874
        return ordered_hashmap_replace(*h, key, value);
6,734✔
875
}
876

877
int hashmap_ensure_replace(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
24,142✔
878
        int r;
24,142✔
879

880
        r = hashmap_ensure_allocated(h, hash_ops);
24,142✔
881
        if (r < 0)
24,142✔
882
                return r;
883

884
        return hashmap_replace(*h, key, value);
24,142✔
885
}
886

887
static void hashmap_free_no_clear(HashmapBase *h) {
3,443,496✔
888
        assert(!h->has_indirect);
3,443,496✔
889
        assert(h->n_direct_entries == 0);
3,443,496✔
890

891
#if ENABLE_DEBUG_HASHMAP
892
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
893
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
894
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
895
#endif
896

897
        if (h->from_pool) {
3,443,496✔
898
                /* Ensure that the object didn't get migrated between threads. */
899
                assert_se(is_main_thread());
3,363,301✔
900
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
3,363,301✔
901
        } else
902
                free(h);
80,195✔
903
}
3,443,496✔
904

905
HashmapBase* _hashmap_free(HashmapBase *h) {
14,590,627✔
906
        if (h) {
14,590,627✔
907
                _hashmap_clear(h);
3,443,496✔
908
                hashmap_free_no_clear(h);
3,443,496✔
909
        }
910

911
        return NULL;
14,590,627✔
912
}
913

914
void _hashmap_clear(HashmapBase *h) {
3,591,039✔
915
        if (!h)
3,591,039✔
916
                return;
917

918
        if (h->hash_ops->free_key || h->hash_ops->free_value) {
3,459,916✔
919

920
                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
921
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
922
                 * themselves from our hash table a second time, the entry is already gone. */
923

924
                while (_hashmap_size(h) > 0) {
20,818,588✔
925
                        void *k = NULL;
18,017,584✔
926
                        void *v;
18,017,584✔
927

928
                        v = _hashmap_first_key_and_value(h, true, &k);
18,017,584✔
929

930
                        if (h->hash_ops->free_key)
18,017,584✔
931
                                h->hash_ops->free_key(k);
13,831,069✔
932

933
                        if (h->hash_ops->free_value)
18,017,584✔
934
                                h->hash_ops->free_value(v);
16,208,977✔
935
                }
936
        }
937

938
        if (h->has_indirect) {
3,459,916✔
939
                free(h->indirect.storage);
1,294,822✔
940
                h->has_indirect = false;
1,294,822✔
941
        }
942

943
        h->n_direct_entries = 0;
3,459,916✔
944
        reset_direct_storage(h);
3,459,916✔
945

946
        if (h->type == HASHMAP_TYPE_ORDERED) {
3,459,916✔
947
                OrderedHashmap *lh = (OrderedHashmap*) h;
1,123,779✔
948
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
1,123,779✔
949
        }
950

951
        base_set_dirty(h);
3,459,916✔
952
}
953

954
static int resize_buckets(HashmapBase *h, unsigned entries_add);
955

956
/*
957
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
958
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
959
 * by the caller into swap slot IDX_PUT.
960
 * If used for in-place resizing, may leave a displaced entry in swap slot
961
 * IDX_PUT. Caller must rehash it next.
962
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
963
 *          false otherwise.
964
 */
965
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
62,062,889✔
966
                                   struct swap_entries *swap) {
967
        dib_raw_t raw_dib, *dibs;
62,062,889✔
968
        unsigned dib, distance;
62,062,889✔
969

970
#if ENABLE_DEBUG_HASHMAP
971
        h->debug.put_count++;
972
#endif
973

974
        dibs = dib_raw_ptr(h);
62,062,889✔
975

976
        for (distance = 0; ; distance++) {
62,062,889✔
977
                raw_dib = dibs[idx];
113,795,613✔
978
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
113,795,613✔
979
                        if (raw_dib == DIB_RAW_REHASH)
62,062,889✔
980
                                bucket_move_entry(h, swap, idx, IDX_TMP);
5,681,339✔
981

982
                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
62,062,889✔
983
                                h->indirect.idx_lowest_entry = idx;
34,306✔
984

985
                        bucket_set_dib(h, idx, distance);
62,062,889✔
986
                        bucket_move_entry(h, swap, IDX_PUT, idx);
62,062,889✔
987
                        if (raw_dib == DIB_RAW_REHASH) {
62,062,889✔
988
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
5,681,339✔
989
                                return true;
5,681,339✔
990
                        }
991

992
                        return false;
993
                }
994

995
                dib = bucket_calculate_dib(h, idx, raw_dib);
51,732,724✔
996

997
                if (dib < distance) {
51,732,724✔
998
                        /* Found a wealthier entry. Go Robin Hood! */
999
                        bucket_set_dib(h, idx, distance);
19,085,375✔
1000

1001
                        /* swap the entries */
1002
                        bucket_move_entry(h, swap, idx, IDX_TMP);
19,085,375✔
1003
                        bucket_move_entry(h, swap, IDX_PUT, idx);
19,085,375✔
1004
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
19,085,375✔
1005

1006
                        distance = dib;
19,085,375✔
1007
                }
1008

1009
                idx = next_idx(h, idx);
51,732,724✔
1010
        }
1011
}
1012

1013
/*
1014
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
1015
 * The caller must place the entry (only its key and value, not link indexes)
1016
 * in swap slot IDX_PUT.
1017
 * Caller must ensure: the key does not exist yet in the hashmap.
1018
 *                     that resize is not needed if !may_resize.
1019
 * Returns: 1 if entry was put successfully.
1020
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
1021
 *          Cannot return -ENOMEM if !may_resize.
1022
 */
1023
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
34,513,136✔
1024
                                   struct swap_entries *swap, bool may_resize) {
1025
        struct ordered_hashmap_entry *new_entry;
34,513,136✔
1026
        int r;
34,513,136✔
1027

1028
        assert(idx < n_buckets(h));
34,513,136✔
1029

1030
        new_entry = bucket_at_swap(swap, IDX_PUT);
34,513,136✔
1031

1032
        if (may_resize) {
34,513,136✔
1033
                r = resize_buckets(h, 1);
34,510,272✔
1034
                if (r < 0)
34,510,272✔
1035
                        return r;
1036
                if (r > 0)
34,510,272✔
1037
                        idx = bucket_hash(h, new_entry->p.b.key);
3,262,941✔
1038
        }
1039
        assert(n_entries(h) < n_buckets(h));
34,513,136✔
1040

1041
        if (h->type == HASHMAP_TYPE_ORDERED) {
34,513,136✔
1042
                OrderedHashmap *lh = (OrderedHashmap*) h;
19,470,733✔
1043

1044
                new_entry->iterate_next = IDX_NIL;
19,470,733✔
1045
                new_entry->iterate_previous = lh->iterate_list_tail;
19,470,733✔
1046

1047
                if (lh->iterate_list_tail != IDX_NIL) {
19,470,733✔
1048
                        struct ordered_hashmap_entry *old_tail;
18,216,437✔
1049

1050
                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
18,216,437✔
1051
                        assert(old_tail->iterate_next == IDX_NIL);
18,216,437✔
1052
                        old_tail->iterate_next = IDX_PUT;
18,216,437✔
1053
                }
1054

1055
                lh->iterate_list_tail = IDX_PUT;
19,470,733✔
1056
                if (lh->iterate_list_head == IDX_NIL)
19,470,733✔
1057
                        lh->iterate_list_head = IDX_PUT;
1,254,296✔
1058
        }
1059

1060
        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
34,513,136✔
1061

1062
        n_entries_inc(h);
34,513,136✔
1063
#if ENABLE_DEBUG_HASHMAP
1064
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1065
#endif
1066

1067
        base_set_dirty(h);
34,513,136✔
1068

1069
        return 1;
34,513,136✔
1070
}
1071
#define hashmap_put_boldly(h, idx, swap, may_resize) \
1072
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1073

1074
/*
1075
 * Returns 0 if resize is not needed.
1076
 *         1 if successfully resized.
1077
 *         -ENOMEM on allocation failure.
1078
 */
1079
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
34,514,698✔
1080
        struct swap_entries swap;
34,514,698✔
1081
        void *new_storage;
34,514,698✔
1082
        dib_raw_t *old_dibs, *new_dibs;
34,514,698✔
1083
        const struct hashmap_type_info *hi;
34,514,698✔
1084
        unsigned idx, optimal_idx;
34,514,698✔
1085
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
34,514,698✔
1086
        uint8_t new_shift;
34,514,698✔
1087
        bool rehash_next;
34,514,698✔
1088

1089
        assert(h);
34,514,698✔
1090

1091
        hi = &hashmap_type_info[h->type];
34,514,698✔
1092
        new_n_entries = n_entries(h) + entries_add;
34,514,698✔
1093

1094
        /* overflow? */
1095
        if (_unlikely_(new_n_entries < entries_add))
34,514,698✔
1096
                return -ENOMEM;
34,514,698✔
1097

1098
        /* For direct storage we allow 100% load, because it's tiny. */
1099
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
34,514,696✔
1100
                return 0;
1101

1102
        /*
1103
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1104
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
1105
         */
1106
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
27,454,794✔
1107
        /* overflow? */
1108
        if (_unlikely_(new_n_buckets < new_n_entries))
27,454,794✔
1109
                return -ENOMEM;
1110

1111
        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
27,454,792✔
1112
                return -ENOMEM;
1113

1114
        old_n_buckets = n_buckets(h);
27,454,792✔
1115

1116
        if (_likely_(new_n_buckets <= old_n_buckets))
27,454,792✔
1117
                return 0;
1118

1119
        new_shift = log2u_round_up(MAX(
3,264,715✔
1120
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1121
                        2 * sizeof(struct direct_storage)));
1122

1123
        /* Realloc storage (buckets and DIB array). */
1124
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1,957,576✔
1125
                              1U << new_shift);
3,264,715✔
1126
        if (!new_storage)
3,264,715✔
1127
                return -ENOMEM;
1128

1129
        /* Must upgrade direct to indirect storage. */
1130
        if (!h->has_indirect) {
3,264,715✔
1131
                memcpy(new_storage, h->direct.storage,
1,307,139✔
1132
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1,307,139✔
1133
                h->indirect.n_entries = h->n_direct_entries;
1,307,139✔
1134
                h->indirect.idx_lowest_entry = 0;
1,307,139✔
1135
                h->n_direct_entries = 0;
1,307,139✔
1136
        }
1137

1138
        /* Get a new hash key. If we've just upgraded to indirect storage,
1139
         * allow reusing a previously generated key. It's still a different key
1140
         * from the shared one that we used for direct storage. */
1141
        get_hash_key(h->indirect.hash_key, !h->has_indirect);
3,264,715✔
1142

1143
        h->has_indirect = true;
3,264,715✔
1144
        h->indirect.storage = new_storage;
3,264,715✔
1145
        h->indirect.n_buckets = (1U << new_shift) /
3,264,715✔
1146
                                (hi->entry_size + sizeof(dib_raw_t));
1147

1148
        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
3,264,715✔
1149
        new_dibs = dib_raw_ptr(h);
3,264,715✔
1150

1151
        /*
1152
         * Move the DIB array to the new place, replacing valid DIB values with
1153
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1154
         * Note: Overlap is not possible, because we have at least doubled the
1155
         * number of buckets and dib_raw_t is smaller than any entry type.
1156
         */
1157
        for (idx = 0; idx < old_n_buckets; idx++) {
41,549,549✔
1158
                assert(old_dibs[idx] != DIB_RAW_REHASH);
35,020,119✔
1159
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
63,490,835✔
1160
                                                              : DIB_RAW_REHASH;
1161
        }
1162

1163
        /* Zero the area of newly added entries (including the old DIB area) */
1164
        memzero(bucket_at(h, old_n_buckets),
3,264,715✔
1165
               (n_buckets(h) - old_n_buckets) * hi->entry_size);
1166

1167
        /* The upper half of the new DIB array needs initialization */
1168
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
6,529,430✔
1169
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
3,264,715✔
1170

1171
        /* Rehash entries that need it */
1172
        n_rehashed = 0;
3,264,715✔
1173
        for (idx = 0; idx < old_n_buckets; idx++) {
38,284,834✔
1174
                if (new_dibs[idx] != DIB_RAW_REHASH)
35,020,119✔
1175
                        continue;
12,230,742✔
1176

1177
                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
22,789,377✔
1178

1179
                /*
1180
                 * Not much to do if by luck the entry hashes to its current
1181
                 * location. Just set its DIB.
1182
                 */
1183
                if (optimal_idx == idx) {
22,789,377✔
1184
                        new_dibs[idx] = 0;
920,963✔
1185
                        n_rehashed++;
920,963✔
1186
                        continue;
920,963✔
1187
                }
1188

1189
                new_dibs[idx] = DIB_RAW_FREE;
21,868,414✔
1190
                bucket_move_entry(h, &swap, idx, IDX_PUT);
21,868,414✔
1191
                /* bucket_move_entry does not clear the source */
1192
                memzero(bucket_at(h, idx), hi->entry_size);
21,868,414✔
1193

1194
                do {
27,549,753✔
1195
                        /*
1196
                         * Find the new bucket for the current entry. This may make
1197
                         * another entry homeless and load it into IDX_PUT.
1198
                         */
1199
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
27,549,753✔
1200
                        n_rehashed++;
27,549,753✔
1201

1202
                        /* Did the current entry displace another one? */
1203
                        if (rehash_next)
27,549,753✔
1204
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
5,681,339✔
1205
                } while (rehash_next);
27,549,753✔
1206
        }
1207

1208
        assert_se(n_rehashed == n_entries(h));
3,264,715✔
1209

1210
        return 1;
1211
}
1212

1213
/*
1214
 * Finds an entry with a matching key
1215
 * Returns: index of the found entry, or IDX_NIL if not found.
1216
 */
1217
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
125,119,502✔
1218
        struct hashmap_base_entry *e;
125,119,502✔
1219
        unsigned dib, distance;
125,119,502✔
1220
        dib_raw_t *dibs = dib_raw_ptr(h);
125,119,502✔
1221

1222
        assert(idx < n_buckets(h));
125,119,502✔
1223

1224
        for (distance = 0; ; distance++) {
81,036,289✔
1225
                if (dibs[idx] == DIB_RAW_FREE)
206,155,791✔
1226
                        return IDX_NIL;
1227

1228
                dib = bucket_calculate_dib(h, idx, dibs[idx]);
139,448,121✔
1229

1230
                if (dib < distance)
139,448,121✔
1231
                        return IDX_NIL;
1232
                if (dib == distance) {
115,124,638✔
1233
                        e = bucket_at(h, idx);
87,094,157✔
1234
                        if (h->hash_ops->compare(e->key, key) == 0)
87,094,157✔
1235
                                return idx;
1236
                }
1237

1238
                idx = next_idx(h, idx);
81,036,289✔
1239
        }
1240
}
1241
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1242

1243
int hashmap_put(Hashmap *h, const void *key, void *value) {
17,100,435✔
1244
        struct swap_entries swap;
17,100,435✔
1245
        struct plain_hashmap_entry *e;
17,100,435✔
1246
        unsigned hash, idx;
17,100,435✔
1247

1248
        assert(h);
17,100,435✔
1249

1250
        hash = bucket_hash(h, key);
17,100,435✔
1251
        idx = bucket_scan(h, hash, key);
17,100,435✔
1252
        if (idx != IDX_NIL) {
17,100,435✔
1253
                e = plain_bucket_at(h, idx);
155,313✔
1254
                if (e->value == value)
155,313✔
1255
                        return 0;
17,100,435✔
1256
                return -EEXIST;
33,087✔
1257
        }
1258

1259
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
16,945,122✔
1260
        e->b.key = key;
16,945,122✔
1261
        e->value = value;
16,945,122✔
1262
        return hashmap_put_boldly(h, hash, &swap, true);
16,945,122✔
1263
}
1264

1265
int set_put(Set *s, const void *key) {
5,109,717✔
1266
        struct swap_entries swap;
5,109,717✔
1267
        struct hashmap_base_entry *e;
5,109,717✔
1268
        unsigned hash, idx;
5,109,717✔
1269

1270
        assert(s);
5,109,717✔
1271

1272
        hash = bucket_hash(s, key);
5,109,717✔
1273
        idx = bucket_scan(s, hash, key);
5,109,717✔
1274
        if (idx != IDX_NIL)
5,109,717✔
1275
                return 0;
5,109,717✔
1276

1277
        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
5,096,253✔
1278
        e->key = key;
5,096,253✔
1279
        return hashmap_put_boldly(s, hash, &swap, true);
5,096,253✔
1280
}
1281

1282
int set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key) {
4,034,790✔
1283
        int r;
4,034,790✔
1284

1285
        r = set_ensure_allocated(s, hash_ops);
4,034,790✔
1286
        if (r < 0)
4,034,790✔
1287
                return r;
1288

1289
        return set_put(*s, key);
4,034,790✔
1290
}
1291

1292
int set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key) {
1,299,075✔
1293
        int r;
1,299,075✔
1294

1295
        r = set_ensure_put(s, hash_ops, key);
1,299,075✔
1296
        if (r <= 0) {
1,299,075✔
1297
                if (hash_ops && hash_ops->free_key)
903✔
1298
                        hash_ops->free_key(key);
903✔
1299
                else
UNCOV
1300
                        free(key);
×
1301
        }
1302

1303
        return r;
1,299,075✔
1304
}
1305

1306
int hashmap_replace(Hashmap *h, const void *key, void *value) {
13,567,871✔
1307
        struct swap_entries swap;
13,567,871✔
1308
        struct plain_hashmap_entry *e;
13,567,871✔
1309
        unsigned hash, idx;
13,567,871✔
1310

1311
        assert(h);
13,567,871✔
1312

1313
        hash = bucket_hash(h, key);
13,567,871✔
1314
        idx = bucket_scan(h, hash, key);
13,567,871✔
1315
        if (idx != IDX_NIL) {
13,567,871✔
1316
                e = plain_bucket_at(h, idx);
1,104,913✔
1317
#if ENABLE_DEBUG_HASHMAP
1318
                /* Although the key is equal, the key pointer may have changed,
1319
                 * and this would break our assumption for iterating. So count
1320
                 * this operation as incompatible with iteration. */
1321
                if (e->b.key != key) {
1322
                        h->b.debug.put_count++;
1323
                        h->b.debug.rem_count++;
1324
                        h->b.debug.last_rem_idx = idx;
1325
                }
1326
#endif
1327
                e->b.key = key;
1,104,913✔
1328
                e->value = value;
1,104,913✔
1329
                hashmap_set_dirty(h);
1,104,913✔
1330

1331
                return 0;
1,104,913✔
1332
        }
1333

1334
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
12,462,958✔
1335
        e->b.key = key;
12,462,958✔
1336
        e->value = value;
12,462,958✔
1337
        return hashmap_put_boldly(h, hash, &swap, true);
12,462,958✔
1338
}
1339

1340
int hashmap_update(Hashmap *h, const void *key, void *value) {
9,555✔
1341
        struct plain_hashmap_entry *e;
9,555✔
1342
        unsigned hash, idx;
9,555✔
1343

1344
        assert(h);
9,555✔
1345

1346
        hash = bucket_hash(h, key);
9,555✔
1347
        idx = bucket_scan(h, hash, key);
9,555✔
1348
        if (idx == IDX_NIL)
9,555✔
1349
                return -ENOENT;
1350

1351
        e = plain_bucket_at(h, idx);
9,553✔
1352
        e->value = value;
9,553✔
1353
        hashmap_set_dirty(h);
9,553✔
1354

1355
        return 0;
9,553✔
1356
}
1357

1358
void* _hashmap_get(HashmapBase *h, const void *key) {
74,003,923✔
1359
        struct hashmap_base_entry *e;
74,003,923✔
1360
        unsigned hash, idx;
74,003,923✔
1361

1362
        if (!h)
74,003,923✔
1363
                return NULL;
1364

1365
        hash = bucket_hash(h, key);
65,005,040✔
1366
        idx = bucket_scan(h, hash, key);
65,005,040✔
1367
        if (idx == IDX_NIL)
65,005,040✔
1368
                return NULL;
1369

1370
        e = bucket_at(h, idx);
25,643,603✔
1371
        return entry_value(h, e);
25,643,603✔
1372
}
1373

1374
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
12,527,713✔
1375
        struct plain_hashmap_entry *e;
12,527,713✔
1376
        unsigned hash, idx;
12,527,713✔
1377

1378
        if (!h)
12,527,713✔
1379
                return NULL;
1380

1381
        hash = bucket_hash(h, key);
12,527,697✔
1382
        idx = bucket_scan(h, hash, key);
12,527,697✔
1383
        if (idx == IDX_NIL)
12,527,697✔
1384
                return NULL;
1385

1386
        e = plain_bucket_at(h, idx);
1,037,444✔
1387
        if (key2)
1,037,444✔
1388
                *key2 = (void*) e->b.key;
1,037,444✔
1389

1390
        return e->value;
1,037,444✔
1391
}
1392

1393
bool _hashmap_contains(HashmapBase *h, const void *key) {
3,111,008✔
1394
        unsigned hash;
3,111,008✔
1395

1396
        if (!h)
3,111,008✔
1397
                return false;
1398

1399
        hash = bucket_hash(h, key);
2,554,753✔
1400
        return bucket_scan(h, hash, key) != IDX_NIL;
2,554,753✔
1401
}
1402

1403
void* _hashmap_remove(HashmapBase *h, const void *key) {
9,416,919✔
1404
        struct hashmap_base_entry *e;
9,416,919✔
1405
        unsigned hash, idx;
9,416,919✔
1406
        void *data;
9,416,919✔
1407

1408
        if (!h)
9,416,919✔
1409
                return NULL;
1410

1411
        hash = bucket_hash(h, key);
8,397,446✔
1412
        idx = bucket_scan(h, hash, key);
8,397,446✔
1413
        if (idx == IDX_NIL)
8,397,446✔
1414
                return NULL;
1415

1416
        e = bucket_at(h, idx);
4,675,918✔
1417
        data = entry_value(h, e);
4,675,918✔
1418
        remove_entry(h, idx);
4,675,918✔
1419

1420
        return data;
4,675,918✔
1421
}
1422

1423
void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
582,645✔
1424
        struct plain_hashmap_entry *e;
582,645✔
1425
        unsigned hash, idx;
582,645✔
1426
        void *data;
582,645✔
1427

1428
        if (!h) {
582,645✔
1429
                if (rkey)
107,405✔
1430
                        *rkey = NULL;
107,405✔
1431
                return NULL;
107,405✔
1432
        }
1433

1434
        hash = bucket_hash(h, key);
475,240✔
1435
        idx = bucket_scan(h, hash, key);
475,240✔
1436
        if (idx == IDX_NIL) {
475,240✔
1437
                if (rkey)
464,083✔
1438
                        *rkey = NULL;
464,083✔
1439
                return NULL;
464,083✔
1440
        }
1441

1442
        e = plain_bucket_at(h, idx);
11,157✔
1443
        data = e->value;
11,157✔
1444
        if (rkey)
11,157✔
1445
                *rkey = (void*) e->b.key;
11,157✔
1446

1447
        remove_entry(h, idx);
11,157✔
1448

1449
        return data;
11,157✔
1450
}
1451

1452
/* Removes the entry stored under 'old_key' and inserts 'value' under 'new_key'
 * instead. Returns -ENOENT if 'old_key' is absent (or 'h' is NULL), -EEXIST if
 * 'new_key' is already present; in both error cases the map is left untouched. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        /* Stage the new entry in the swap area and insert it. The insertion
         * cannot fail for lack of space since we just vacated one slot. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1478

1479
/* Set analogue of hashmap_remove_and_put(): replaces the entry 'old_key' with
 * 'new_key'. Returns -ENOENT if 'old_key' is absent (or 's' is NULL), -EEXIST
 * if 'new_key' already exists; the set is unmodified on error. */
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        /* Insert the new key via the swap area; cannot fail since a slot was
         * just freed. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
1504

1505
/* Like hashmap_remove_and_put(), but if 'new_key' is already present its entry
 * is dropped first, i.e. the replacement always succeeds once 'old_key' was
 * found. Returns -ENOENT if 'old_key' is absent (or 'h' is NULL). */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        /* Remember the stored key pointer so we can re-locate the old entry
         * after removals may have shifted it. */
        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        /* Stage and insert the replacement entry; cannot fail since at least
         * one slot was just freed. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1540

1541
/* Removes the entry under 'key' only if its stored value equals 'value'.
 * Returns 'value' when removed, NULL otherwise. */
void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        unsigned idx;

        if (!h)
                return NULL;

        idx = bucket_scan(h, bucket_hash(h, key), key);
        if (idx == IDX_NIL)
                return NULL;

        if (entry_value(h, bucket_at(h, idx)) != value)
                return NULL;

        remove_entry(h, idx);
        return value;
}
1561

1562
/* Returns the bucket index of the first entry in iteration order, or IDX_NIL
 * when the map is NULL or empty. */
static unsigned find_first_entry(HashmapBase *h) {
        Iterator it = ITERATOR_FIRST;

        if (!h)
                return IDX_NIL;
        if (n_entries(h) == 0)
                return IDX_NIL;

        return hashmap_iterate_entry(h, &it);
}
1570

1571
/* Returns the value of the first entry (in iteration order), optionally its
 * key via 'ret_key', and optionally removes it when 'remove' is true. Returns
 * NULL (and NULL key) for an empty or NULL map. */
void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        /* Copy key and value out before the entry is potentially destroyed. */
        e = bucket_at(h, idx);
        void *k = (void*) e->key;
        void *v = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = k;

        return v;
}
1595

1596
/* Number of entries currently stored; 0 for a NULL map. */
unsigned _hashmap_size(HashmapBase *h) {
        return h ? n_entries(h) : 0;
}
1602

1603
/* Number of allocated buckets; 0 for a NULL map. */
unsigned _hashmap_buckets(HashmapBase *h) {
        return h ? n_buckets(h) : 0;
}
1609

1610
/* Copies every entry of 'other' into 'h'. Keys already present in 'h' are
 * silently skipped (-EEXIST is ignored); other errors abort the merge. */
int _hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);

                int r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
1627

1628
/* Adds every entry of 'other' to 's'. Unlike _hashmap_merge() all failures
 * abort, but set_put() returns 0 (not -EEXIST) for duplicates, so those are
 * effectively skipped too. */
int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);

                int r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
1645

1646
/* Pre-allocates room for 'entries_add' additional entries, so subsequent puts
 * cannot fail with -ENOMEM. */
int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        assert(h);

        int r = resize_buckets(h, entries_add);
        return r < 0 ? r : 0;
}
1657

1658
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                /* Key already in h: leave the entry behind in 'other'. */
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                /* Stage the entry (key, and value for non-set types) and
                 * insert into h; cannot fail thanks to the reservation above. */
                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                /* Only now drop it from 'other', completing the move. */
                remove_entry(other, idx);
        }

        return 0;
}
1708

1709
/* Moves the single entry stored under 'key' from 'other' into 'h'. Returns
 * -EEXIST if 'h' already has the key, -ENOENT if 'other' is NULL or lacks it,
 * or a negative errno if insertion into 'h' fails (entry stays in 'other'). */
int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        /* Stage key (and value for non-set types) and try the insertion —
         * this one may allocate, hence may fail. */
        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        /* Insertion succeeded; only now remove from the source map. */
        remove_entry(other, idx);
        return 0;
}
1745

1746
/* Returns a shallow copy of 'h' (entries reference the same keys/values), or
 * NULL on allocation failure. */
HashmapBase* _hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        default:
                assert_not_reached();
        }

        if (r < 0)
                return _hashmap_free(copy);

        return copy;
}
1773

1774
/* Returns a NULL-terminated array of all stored values (not copies). A NULL
 * map yields an empty strv; NULL is returned on allocation failure. */
char** _hashmap_get_strv(HashmapBase *h) {
        Iterator i;
        unsigned idx, n = 0;
        char **sv;

        if (!h)
                return new0(char*, 1);

        sv = new(char*, n_entries(h) + 1);
        if (!sv)
                return NULL;

        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
1793

1794
/* Drains '*s' into a newly allocated NULL-terminated strv (ownership of the
 * elements transfers to the array) and frees the set on success. Similar to
 * set_get_strv(), but invalidates the set. */
char** set_to_strv(Set **s) {
        assert(s);

        char **v = new(char*, set_size(*s) + 1);
        if (!v)
                return NULL;

        char **p = v;
        while ((*p = set_steal_first(*s)))
                p++;

        assert(set_isempty(*s));
        *s = set_free(*s);
        return v;
}
1810

1811
/* Returns the value of the entry that follows 'key' in insertion order, or
 * NULL if 'key' is absent or is the last entry. */
void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        if (!h)
                return NULL;

        idx = bucket_scan(h, bucket_hash(h, key), key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;

        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
1828

1829
/* Adds 'value' to 's', taking ownership: if the value was already present
 * (set_put() == 0) or insertion failed (< 0), it is freed. Returns the
 * set_put() result either way. */
int set_consume(Set *s, void *value) {
        assert(s);
        assert(value);

        int r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}
1841

1842
/* Inserts strdup() copies of key 'k' and (optional) value 'v' into '*h',
 * allocating the map on demand. If the key already maps to an equal value,
 * returns 0 instead of -EEXIST. On success > 0; the copies are owned by the
 * map afterwards. */
int hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v) {
        int r;

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        _cleanup_free_ char *key_copy = NULL, *value_copy = NULL;

        key_copy = strdup(k);
        if (!key_copy)
                return -ENOMEM;

        if (v) {
                value_copy = strdup(v);
                if (!value_copy)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, key_copy, value_copy);
        if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, key_copy)))
                return 0; /* same mapping already present — treat as success */
        if (r < 0)
                return r;

        /* 0 with a non-NULL value copy would mean our fresh allocation was
         * already stored, which is impossible. */
        assert(value_copy == NULL || r > 0);
        if (r > 0)
                key_copy = value_copy = NULL; /* ownership moved into the map */

        return r;
}
1875

1876
/* Adds a copy of 'p' (or its first 'n' bytes when n != SIZE_MAX) to '*s',
 * allocating the set on demand. For full strings a cheap containment check
 * avoids the strdup() for duplicates. Returns > 0 if newly added, 0 if it was
 * already present, negative errno on failure. */
int set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n) {
        char *c;
        int r;

        assert(s);
        assert(p);

        r = set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        if (n != SIZE_MAX)
                c = strndup(p, n);
        else {
                if (set_contains(*s, (char*) p))
                        return 0;

                c = strdup(p);
        }
        if (!c)
                return -ENOMEM;

        return set_consume(*s, c);
}
1899

1900
/* Adds a copy of every string in 'l' to '*s'. Returns the number of strings
 * actually added (duplicates don't count), or negative errno on failure. */
int set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l) {
        int n_added = 0;

        assert(s);

        STRV_FOREACH(i, l) {
                int r = set_put_strndup_full(s, hash_ops, *i, SIZE_MAX);
                if (r < 0)
                        return r;

                n_added += r;
        }

        return n_added;
}
1915

1916
/* Splits 'v' on 'separators' (per extract_first_word() semantics under
 * 'flags') and consumes each word into 's'. Returns 0 when the input is
 * exhausted, negative errno on failure. */
int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = ASSERT_PTR(v);

        assert(s);

        for (;;) {
                char *word;
                int r;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
1934

1935
/* expand the cachemem if needed, return true if newly (re)activated. */
1936
static int cachemem_maintain(CacheMem *mem, size_t size) {
325,616✔
1937
        assert(mem);
325,616✔
1938

1939
        if (!GREEDY_REALLOC(mem->ptr, size)) {
325,616✔
UNCOV
1940
                if (size > 0)
×
1941
                        return -ENOMEM;
1942
        }
1943

1944
        if (!mem->active) {
325,616✔
1945
                mem->active = true;
2,308✔
1946
                return true;
2,308✔
1947
        }
1948

1949
        return false;
1950
}
1951

1952
/* Returns cached arrays of the underlying hashmap's keys and/or values (each
 * optional via NULL out-parameters). The arrays are owned by the cache and
 * only rebuilt when the cache was just (re)activated or the hashmap was
 * modified ('dirty') since the last call. */
int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        size_t size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                /* > 0 means the keys cache was just (re)activated and must be
                 * filled from scratch. */
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                /* Caller doesn't want keys: deactivate so future puts/removes
                 * don't need to keep this array coherent. */
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        /* The hashmap changed since last time: refresh whichever arrays are
         * active, and clear the flag. */
        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
2016

2017
/* Frees the cache arrays and the cache itself; always returns NULL, so it can
 * be used to reset the owning pointer. */
IteratedCache* iterated_cache_free(IteratedCache *cache) {
        if (!cache)
                return NULL;

        free(cache->keys.ptr);
        free(cache->values.ptr);
        return mfree(cache);
}
2025

2026
/* Joins all strings of 's' (in set iteration order) into one newly allocated
 * string separated by 'separator'; when 'wrap_with_separator' is true the
 * result additionally starts and ends with the separator. An empty set yields
 * *ret == NULL. Returns 0 on success, -ENOMEM on allocation failure.
 *
 * Fix: previously, a non-empty set whose every entry was the empty string
 * (e.g. {""}) never allocated 'str' — every iteration hit the l == 0
 * 'continue' — and the trailing wrap memcpy()/NUL write then dereferenced a
 * NULL buffer. We now make sure the buffer exists before those writes. */
int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
        _cleanup_free_ char *str = NULL;
        size_t separator_len, len = 0;
        const char *value;
        bool first;

        assert(ret);

        if (set_isempty(s)) {
                *ret = NULL;
                return 0;
        }

        separator_len = strlen_ptr(separator);

        if (separator_len == 0)
                wrap_with_separator = false;

        /* With wrapping we start with a separator, so no element is "first". */
        first = !wrap_with_separator;

        SET_FOREACH(value, s) {
                size_t l = strlen_ptr(value);

                if (l == 0)
                        continue;

                if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
                        return -ENOMEM;

                if (separator_len > 0 && !first) {
                        memcpy(str + len, separator, separator_len);
                        len += separator_len;
                }

                memcpy(str + len, value, l);
                len += l;
                first = false;
        }

        /* If all entries were empty strings nothing was allocated above; make
         * sure the buffer exists before the wrap/terminator writes below. */
        if (!GREEDY_REALLOC(str, len + (wrap_with_separator ? separator_len : 0) + 1))
                return -ENOMEM;

        if (wrap_with_separator) {
                memcpy(str + len, separator, separator_len);
                len += separator_len;
        }

        str[len] = '\0';

        *ret = TAKE_PTR(str);
        return 0;
}
2075

2076
/* Returns true iff 'a' and 'b' contain exactly the same entries (NULL counts
 * as the empty set). */
bool set_equal(Set *a, Set *b) {
        void *p;

        /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
         * entries */

        if (a == b)
                return true;

        if (set_isempty(a) && set_isempty(b))
                return true;

        if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
                                         * already */
                return false;

        SET_FOREACH(p, a)
                if (!set_contains(b, p))
                        return false;

        /* If we have the same hashops, then we don't need to check things backwards given we compared the
         * size and that all of a is in b. */
        if (a->b.hash_ops == b->b.hash_ops)
                return true;

        /* Different hash ops may deduplicate differently, so verify the
         * reverse direction too. */
        SET_FOREACH(p, b)
                if (!set_contains(a, p))
                        return false;

        return true;
}
2107

2108
/* Returns true if any pattern in 'patterns' fnmatch()es 'needle'. Any failure
 * of fnmatch() is treated like FNM_NOMATCH, i.e. as a non-matching pattern. */
static bool set_fnmatch_one(Set *patterns, const char *needle) {
        const char *pattern;

        assert(needle);

        SET_FOREACH(pattern, patterns)
                if (fnmatch(pattern, needle, 0) == 0)
                        return true;

        return false;
}
2121

2122
/* Returns true if 'needle' matches no exclude pattern and either the include
 * set is empty (match-all) or some include pattern matches. */
bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
        assert(needle);

        if (set_fnmatch_one(exclude_patterns, needle))
                return false;

        return set_isempty(include_patterns) || set_fnmatch_one(include_patterns, needle);
}
2133

2134
static int hashmap_entry_compare(
545,363✔
2135
                struct hashmap_base_entry * const *a,
2136
                struct hashmap_base_entry * const *b,
2137
                compare_func_t compare) {
2138

2139
        assert(a && *a);
545,363✔
2140
        assert(b && *b);
545,363✔
2141
        assert(compare);
545,363✔
2142

2143
        return compare((*a)->key, (*b)->key);
545,363✔
2144
}
2145

2146
/* Collects pointers to all entries of 'h' into a newly allocated array sorted
 * with the map's compare function by key. An empty/NULL map yields *ret = NULL
 * and *ret_n = 0. Returns 0 on success, -ENOMEM on failure. */
static int _hashmap_dump_entries_sorted(
                HashmapBase *h,
                void ***ret,
                size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        Iterator iter;
        unsigned idx;
        size_t n = 0;

        assert(ret);
        assert(ret_n);

        if (_hashmap_size(h) == 0) {
                *ret = NULL;
                *ret_n = 0;
                return 0;
        }

        /* We append one more element than needed so that the resulting array can be used as a strv. We
         * don't count this entry in the returned size. */
        entries = new(void*, _hashmap_size(h) + 1);
        if (!entries)
                return -ENOMEM;

        HASHMAP_FOREACH_IDX(idx, h, iter)
                entries[n++] = bucket_at(h, idx);

        assert(n == _hashmap_size(h));
        entries[n] = NULL;

        typesafe_qsort_r((struct hashmap_base_entry**) entries, n,
                         hashmap_entry_compare, h->hash_ops->compare);

        *ret = TAKE_PTR(entries);
        *ret_n = n;
        return 0;
}
2183

2184
/* Returns all keys of 'h' in a newly allocated, compare-function-sorted,
 * NULL-terminated array. 'ret_n' (optional) receives the count. */
int _hashmap_dump_keys_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Rewrite each slot in place from entry pointer to key pointer. */
        for (size_t j = 0; j < n; j++)
                entries[j] = (void*) ((struct hashmap_base_entry*) entries[j])->key;

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}
2202

2203
/* Returns all values of 'h' in a newly allocated, key-sorted, NULL-terminated
 * array. 'ret_n' (optional) receives the count. */
int _hashmap_dump_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Rewrite each slot in place from entry pointer to value pointer. */
        for (size_t j = 0; j < n; j++)
                entries[j] = entry_value(h, (struct hashmap_base_entry*) entries[j]);

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc