
systemd / systemd / build 16687602495

01 Aug 2025 05:41PM UTC coverage: 72.21% (-0.04%) from 72.249%

push via github, committed by bluca:
build(deps): bump github/codeql-action from 3.29.2 to 3.29.5

Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.2 to 3.29.5.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/181d5eefc...51f77329a)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.29.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

302783 of 419308 relevant lines covered (72.21%)

648727.74 hits per line

Source File: /src/basic/hashmap.c (97.64% of lines covered)

/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <fnmatch.h>
#include <pthread.h>
#include <unistd.h>
#if HAVE_VALGRIND_VALGRIND_H
#  include <valgrind/valgrind.h>
#endif

#include "alloc-util.h"
#include "extract-word.h"
#include "hashmap.h"
#include "log.h"
#include "logarithm.h"
#include "memory-util.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "sort-util.h"
#include "string-util.h"
#include "strv.h"

#if ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE            5U
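
/* Worked example of that arithmetic, as used by resize_buckets() below:
 * INV_KEEP_FREE == 5 means a maximum load factor of 1 - 1/5 = 0.8, so for
 * n = 100 entries we size the table to m = n + n/(INV_KEEP_FREE - 1) =
 * 100 + 25 = 125 buckets, i.e. a load of 100/125 = 0.8 with one fifth (25)
 * of the buckets kept free. */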

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
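
/* Worked example: with linear probing, an entry whose key hashes to bucket 5
 * but which ended up stored in bucket 7 has DIB 2. DIBs 0..252 fit directly
 * into one dib_raw_t byte; anything larger is stored as DIB_RAW_OVERFLOW and
 * recomputed on demand by bucket_calculate_dib() below. */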

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while (hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         *              or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
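
/* E.g. on 64-bit: sizeof(struct direct_storage) is 39 and a set_entry is one
 * 8-byte pointer plus a 1-byte DIB, so DIRECT_BUCKETS(struct set_entry) ==
 * 39 / 9 == 4, matching the "4 set_entries + 4 DIB bytes + 3 unused bytes"
 * breakdown in the comment above. */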

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

void hashmap_trim_pools(void) {
        int r;

        /* The pool is only allocated by the main thread, but the memory can be passed to other
         * threads. Let's clean up if we are the main thread and no other threads are live. */

        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
        if (getpid() != gettid())
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");

        r = get_process_threads(0);
        if (r < 0)
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
        if (r != 1)
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");

        mempool_trim(&hashmap_pool);
        mempool_trim(&ordered_hashmap_pool);
}

#if HAVE_VALGRIND_VALGRIND_H
_destructor_ static void cleanup_pools(void) {
        /* Be nice to valgrind */
        if (RUNNING_ON_VALGRIND)
                hashmap_trim_pools();
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void* storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t* hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached();
}

static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached();
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
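
/* Worked example of the backward shift above: suppose buckets idx..idx+2 hold
 * entries A (DIB 0), B (DIB 1), C (DIB 1) and bucket idx+3 is free. Removing A
 * makes the scan stop at idx+3 ("right"); B and C then each move one bucket to
 * the left with their DIB decremented to 0, and bucket idx+2 is marked free.
 * No tombstone is left behind, which is what keeps lookups short (see the
 * Goossaert reference at the top of this file). */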

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

739
IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
6,385✔
740
        IteratedCache *cache;
6,385✔
741

742
        assert(h);
6,385✔
743
        assert(!h->cached);
6,385✔
744

745
        if (h->cached)
6,385✔
746
                return NULL;
747

748
        cache = new0(IteratedCache, 1);
6,385✔
749
        if (!cache)
6,385✔
750
                return NULL;
751

752
        cache->hashmap = h;
6,385✔
753
        h->cached = true;
6,385✔
754

755
        return cache;
6,385✔
756
}
757

758
static void reset_direct_storage(HashmapBase *h) {
6,404,286✔
759
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
6,404,286✔
760
        void *p;
6,404,286✔
761

762
        assert(!h->has_indirect);
6,404,286✔
763

764
        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
6,404,286✔
765
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
6,404,286✔
766
}
6,404,286✔
767

768
static void shared_hash_key_initialize(void) {
59,098✔
769
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
59,098✔
770
}
59,098✔
771

772
static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type) {
3,212,547✔
773
        HashmapBase *h;
3,212,547✔
774
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
3,212,547✔
775

776
        bool use_pool = mempool_enabled && mempool_enabled();  /* mempool_enabled is a weak symbol */
3,212,547✔
777

778
        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
3,296,025✔
779
        if (!h)
3,212,547✔
780
                return NULL;
781

782
        h->type = type;
3,212,547✔
783
        h->from_pool = use_pool;
3,212,547✔
784
        h->hash_ops = hash_ops ?: &trivial_hash_ops;
3,212,547✔
785

786
        if (type == HASHMAP_TYPE_ORDERED) {
3,212,547✔
787
                OrderedHashmap *lh = (OrderedHashmap*)h;
1,019,686✔
788
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
1,019,686✔
789
        }
790

791
        reset_direct_storage(h);
3,212,547✔
792

793
        static pthread_once_t once = PTHREAD_ONCE_INIT;
3,212,547✔
794
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);
3,212,547✔
795

796
#if ENABLE_DEBUG_HASHMAP
797
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
798
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
799
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
800
#endif
801

802
        return h;
803
}
804

805
Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
411,570✔
806
        return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN);
411,570✔
807
}
808

809
OrderedHashmap *ordered_hashmap_new(const struct hash_ops *hash_ops) {
53,595✔
810
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED);
53,595✔
811
}
812

813
Set *set_new(const struct hash_ops *hash_ops) {
48,832✔
814
        return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET);
48,832✔
815
}
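
/* A minimal usage sketch for the constructors above, assuming the public
 * wrappers declared in hashmap.h (e.g. hashmap_free() wrapping _hashmap_free(),
 * hashmap_get() wrapping _hashmap_get(), and the string_hash_ops vtable):
 *
 *     Hashmap *m = hashmap_new(&string_hash_ops);  // NULL ops would select trivial_hash_ops
 *     if (!m)
 *             return -ENOMEM;
 *     r = hashmap_put(m, "foo", value);            // see hashmap_put() below for return codes
 *     value = hashmap_get(m, "foo");
 *     m = hashmap_free(m);                         // returns NULL, convenient for resetting
 */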

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type) {
        HashmapBase *q;

        assert(h);

        if (*h) {
                assert((*h)->hash_ops == (hash_ops ?: &trivial_hash_ops));
                return 0;
        }

        q = hashmap_base_new(hash_ops, type);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 1;
}

int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN);
}

int ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED);
}

int set_ensure_allocated(Set **s, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET);
}

int hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_put(*h, key, value);
}

int ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}

int ordered_hashmap_ensure_replace(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_replace(*h, key, value);
}

int hashmap_ensure_replace(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_replace(*h, key, value);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase* _hashmap_free(HashmapBase *h) {
        if (h) {
                _hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

void _hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->hash_ops->free_key || h->hash_ops->free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (h->hash_ops->free_key)
                                h->hash_ops->free_key(k);

                        if (h->hash_ops->free_value)
                                h->hash_ops->free_value(v);
                }
        }

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
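
/* Worked example of a Robin Hood swap in the loop above: suppose the entry
 * being placed has already probed three buckets (distance == 3) and lands on
 * a resident entry with DIB 1. The resident is "wealthier" (closer to its
 * initial bucket), so the incoming entry takes the bucket with DIB 3 and the
 * displaced resident is reloaded into IDX_PUT, continuing the probe from the
 * next bucket with its distance counter resuming from its old DIB. This
 * equalizes probe lengths instead of letting one unlucky key absorb all the
 * collisions. */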

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert_se(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
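
/* Return-value semantics of hashmap_put(), in a nutshell:
 *
 *     r = hashmap_put(h, key, value);
 *     // r == 1:       key was not present, entry added
 *     // r == 0:       key present and already mapped to this exact value
 *     // r == -EEXIST: key present but mapped to a different value
 *     // r == -ENOMEM: a needed resize failed
 *
 * hashmap_replace() below overwrites the stored value (and key pointer)
 * instead of failing with -EEXIST. */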
1265

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key) {
        int r;

        r = set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        return set_put(*s, key);
}

int set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key) {
        int r;

        r = set_ensure_put(s, hash_ops, key);
        if (r <= 0) {
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}
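
/* Ownership rule of set_ensure_consume(), as a usage sketch (illustrative;
 * string_hash_ops_free is one hash_ops variant with a free_key callback):
 *
 *         char *k = strdup("foo");
 *         if (!k)
 *                 return -ENOMEM;
 *         r = set_ensure_consume(&s, &string_hash_ops_free, k);
 *         if (r < 0)
 *                 return r;
 *         // In every outcome 'k' is now owned by the set or already freed;
 *         // the caller must not free it.
 */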

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}
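
/* The three update flavours above differ only in how they treat an existing
 * key: hashmap_put() refuses with -EEXIST unless the stored value is
 * identical, hashmap_replace() overwrites unconditionally (returning 0 when
 * it did), and hashmap_update() only overwrites, failing with -ENOENT for an
 * absent key. */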

void* _hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool _hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void* _hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
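
/* Note that the lookup and removal helpers above deliberately accept
 * h == NULL and treat it as an empty map, so they may be called on maps that
 * were never allocated. */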

void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
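
/* Note on the compensation above: remove_entry() deletes via backward shift,
 * moving every following entry of the probe chain one bucket towards its
 * initial position. If the old entry was part of the shifted run, it now
 * lives one bucket earlier, which the prev_idx() step accounts for. */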

void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        if (entry_value(h, e) != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        void *key, *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        e = bucket_at(h, idx);
        key = (void*) e->key;
        data = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return data;
}

unsigned _hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned _hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}

int _hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
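
/* Usage sketch (illustrative; assumes the type-safe hashmap_move() wrapper
 * from hashmap.h and two maps created with the same hash_ops):
 *
 *         r = hashmap_move(dst, src);
 *         if (r < 0)
 *                 return r;   // -ENOMEM, nothing was moved
 *         // dst now holds all entries whose keys it lacked; entries with
 *         // colliding keys stay behind in src.
 */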

int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase* _hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached();
        }

        if (r < 0)
                return _hashmap_free(copy);

        return copy;
}

char** _hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        if (!h)
                return new0(char*, 1);

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

char** set_to_strv(Set **s) {
        assert(s);

        /* This is similar to set_get_strv(), but invalidates the set on success. */

        char **v = new(char*, set_size(*s) + 1);
        if (!v)
                return NULL;

        for (char **p = v; (*p = set_steal_first(*s)); p++)
                ;

        assert(set_isempty(*s));
        *s = set_free(*s);
        return v;
}
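
/* Usage sketch (illustrative; assumes a set of strdup()ed strings, e.g. one
 * filled with set_put_strdup()):
 *
 *         _cleanup_strv_free_ char **l = set_to_strv(&s);
 *         if (!l)
 *                 return -ENOMEM;
 *         // On success 's' is freed and set to NULL, and its former entries
 *         // are now owned by 'l'.
 */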

void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v) {
        int r;

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        _cleanup_free_ char *kdup = NULL, *vdup = NULL;

        kdup = strdup(k);
        if (!kdup)
                return -ENOMEM;

        if (v) {
                vdup = strdup(v);
                if (!vdup)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, kdup, vdup);
        if (r < 0) {
                if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
                        return 0;
                return r;
        }

        /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot be */
        assert(vdup == NULL || r > 0);
        if (r > 0)
                kdup = vdup = NULL;

        return r;
}
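
/* Usage sketch (illustrative; assumes the hashmap_put_strdup() convenience
 * macro from hashmap.h, which passes string_hash_ops_free_free):
 *
 *         _cleanup_hashmap_free_ Hashmap *m = NULL;
 *         r = hashmap_put_strdup(&m, "KEY", "VALUE");
 *         if (r < 0)
 *                 return r;   // -EEXIST if "KEY" maps to a different value
 *         // r == 0: identical pair was already present; r > 0: copies of
 *         // both strings were newly inserted.
 */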

int set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n) {
        char *c;
        int r;

        assert(s);
        assert(p);

        r = set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        if (n == SIZE_MAX) {
                if (set_contains(*s, (char*) p))
                        return 0;

                c = strdup(p);
        } else
                c = strndup(p, n);
        if (!c)
                return -ENOMEM;

        return set_consume(*s, c);
}

int set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l) {
        int n = 0, r;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strndup_full(s, hash_ops, *i, SIZE_MAX);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = ASSERT_PTR(v);
        int r;

        assert(s);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
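
/* Usage sketch (illustrative; WHITESPACE is the usual separator macro from
 * string-util.h):
 *
 *         _cleanup_set_free_ Set *s = set_new(&string_hash_ops_free);
 *         if (!s)
 *                 return -ENOMEM;
 *         r = set_put_strsplit(s, "one two three", WHITESPACE, 0);
 *         if (r < 0)
 *                 return r;   // words consumed so far remain in the set
 */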

/* expand the cachemem if needed, return true if newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, size_t size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        size_t size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
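
/* Usage sketch (illustrative; the creation helper name is an assumption, a
 * hashmap_iterated_cache_new()-style wrapper declared in hashmap.h):
 *
 *         const void **values;
 *         unsigned n;
 *         r = iterated_cache_get(cache, NULL, &values, &n);
 *         if (r < 0)
 *                 return r;
 *         // 'values' remains valid across calls and is re-synced only after
 *         // the underlying hashmap was marked dirty by a modification.
 */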

IteratedCache* iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
        }

        return mfree(cache);
}

int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
        _cleanup_free_ char *str = NULL;
        size_t separator_len, len = 0;
        const char *value;
        bool first;

        assert(ret);

        if (set_isempty(s)) {
                *ret = NULL;
                return 0;
        }

        separator_len = strlen_ptr(separator);

        if (separator_len == 0)
                wrap_with_separator = false;

        first = !wrap_with_separator;

        SET_FOREACH(value, s) {
                size_t l = strlen_ptr(value);

                if (l == 0)
                        continue;

                if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
                        return -ENOMEM;

                if (separator_len > 0 && !first) {
                        memcpy(str + len, separator, separator_len);
                        len += separator_len;
                }

                memcpy(str + len, value, l);
                len += l;
                first = false;
        }

        if (wrap_with_separator) {
                memcpy(str + len, separator, separator_len);
                len += separator_len;
        }

        str[len] = '\0';

        *ret = TAKE_PTR(str);
        return 0;
}
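
/* Usage sketch (illustrative; iteration order is hash order, so the element
 * order of the result is unspecified):
 *
 *         _cleanup_free_ char *joined = NULL;
 *         r = set_strjoin(s, ",", true, &joined);  // wrap_with_separator=true
 *         if (r < 0)
 *                 return r;
 *         // For entries "a" and "b" this yields ",a,b," or ",b,a,";
 *         // an empty or NULL set yields joined == NULL and r == 0.
 */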

bool set_equal(Set *a, Set *b) {
        void *p;

        /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
         * entries */

        if (a == b)
                return true;

        if (set_isempty(a) && set_isempty(b))
                return true;

        if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
                                         * already */
                return false;

        SET_FOREACH(p, a)
                if (!set_contains(b, p))
                        return false;

        /* If we have the same hashops, then we don't need to check things backwards given we compared the
         * size and that all of a is in b. */
        if (a->b.hash_ops == b->b.hash_ops)
                return true;

        SET_FOREACH(p, b)
                if (!set_contains(a, p))
                        return false;

        return true;
}

static bool set_fnmatch_one(Set *patterns, const char *needle) {
        const char *p;

        assert(needle);

        /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as non-matching pattern */

        SET_FOREACH(p, patterns)
                if (fnmatch(p, needle, 0) == 0)
                        return true;

        return false;
}

bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
        assert(needle);

        if (set_fnmatch_one(exclude_patterns, needle))
                return false;

        if (set_isempty(include_patterns))
                return true;

        return set_fnmatch_one(include_patterns, needle);
}
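
/* Matching policy of set_fnmatch() in short: an exclude match always wins;
 * with an empty include set everything not excluded passes; otherwise at
 * least one include pattern must match. E.g. with include { "eth*" } and
 * exclude { "eth1" }, "eth0" passes while "eth1" and "lo" do not. */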

static int hashmap_entry_compare(
                struct hashmap_base_entry * const *a,
                struct hashmap_base_entry * const *b,
                compare_func_t compare) {

        assert(a && *a);
        assert(b && *b);
        assert(compare);

        return compare((*a)->key, (*b)->key);
}

static int _hashmap_dump_entries_sorted(
                HashmapBase *h,
                void ***ret,
                size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        Iterator iter;
        unsigned idx;
        size_t n = 0;

        assert(ret);
        assert(ret_n);

        if (_hashmap_size(h) == 0) {
                *ret = NULL;
                *ret_n = 0;
                return 0;
        }

        /* We append one more element than needed so that the resulting array can be used as a strv. We
         * don't count this entry in the returned size. */
        entries = new(void*, _hashmap_size(h) + 1);
        if (!entries)
                return -ENOMEM;

        HASHMAP_FOREACH_IDX(idx, h, iter)
                entries[n++] = bucket_at(h, idx);

        assert(n == _hashmap_size(h));
        entries[n] = NULL;

        typesafe_qsort_r((struct hashmap_base_entry**) entries, n,
                         hashmap_entry_compare, h->hash_ops->compare);

        *ret = TAKE_PTR(entries);
        *ret_n = n;
        return 0;
}

int _hashmap_dump_keys_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array. */
        FOREACH_ARRAY(e, entries, n)
                *e = (void*) (*(struct hashmap_base_entry**) e)->key;

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}

int _hashmap_dump_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array. */
        FOREACH_ARRAY(e, entries, n)
                *e = entry_value(h, *(struct hashmap_base_entry**) e);

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}
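
/* Usage sketch (illustrative; a hashmap_dump_sorted() type-safe wrapper macro
 * is assumed to exist in hashmap.h):
 *
 *         _cleanup_free_ void **values = NULL;
 *         size_t n;
 *         r = hashmap_dump_sorted(m, &values, &n);
 *         if (r < 0)
 *                 return r;
 *         // 'values' holds the entry values ordered by hash_ops->compare on
 *         // the keys, with a trailing NULL so it can double as a strv when
 *         // the values are strings.
 */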