• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 14872145375

06 May 2025 09:07PM UTC coverage: 72.232% (+0.02%) from 72.214%
14872145375

push

github

DaanDeMeyer
string-table: annotate _to_string and _from_string with _const_ and _pure_, respectively

Follow-up for c94f6ab1b

297286 of 411572 relevant lines covered (72.23%)

695615.99 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.64
/src/basic/hashmap.c
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#include <errno.h>
4
#include <fnmatch.h>
5
#include <pthread.h>
6
#include <stdint.h>
7
#include <stdlib.h>
8
#if HAVE_VALGRIND_VALGRIND_H
9
#  include <valgrind/valgrind.h>
10
#endif
11

12
#include "alloc-util.h"
13
#include "fileio.h"
14
#include "hashmap.h"
15
#include "log.h"
16
#include "logarithm.h"
17
#include "macro.h"
18
#include "memory-util.h"
19
#include "mempool.h"
20
#include "missing_syscall.h"
21
#include "process-util.h"
22
#include "random-util.h"
23
#include "set.h"
24
#include "siphash24.h"
25
#include "sort-util.h"
26
#include "string-util.h"
27
#include "strv.h"
28

29
#if ENABLE_DEBUG_HASHMAP
30
#include "list.h"
31
#endif
32

33
/*
34
 * Implementation of hashmaps.
35
 * Addressing: open
36
 *   - uses less RAM compared to closed addressing (chaining), because
37
 *     our entries are small (especially in Sets, which tend to contain
38
 *     the majority of entries in systemd).
39
 * Collision resolution: Robin Hood
40
 *   - tends to equalize displacement of entries from their optimal buckets.
41
 * Probe sequence: linear
42
 *   - though theoretically worse than random probing/uniform hashing/double
43
 *     hashing, it is good for cache locality.
44
 *
45
 * References:
46
 * Celis, P. 1986. Robin Hood Hashing.
47
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
48
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
49
 * - The results are derived for random probing. Suggests deletion with
50
 *   tombstones and two mean-centered search methods. None of that works
51
 *   well for linear probing.
52
 *
53
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
54
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
55
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
56
 * http://www.math.uu.se/~svante/papers/sj157.pdf
57
 * - Applies to Robin Hood with linear probing. Contains remarks on
58
 *   the unsuitability of mean-centered search with linear probing.
59
 *
60
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
61
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
62
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
63
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
64
 *   in a successful search), and Janson writes about displacement. C = d + 1.
65
 *
66
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
67
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
68
 * - Explanation of backward shift deletion with pictures.
69
 *
70
 * Khuong, P. 2013. The Other Robin Hood Hashing.
71
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
72
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
73
 */
74

75
/*
76
 * XXX Ideas for improvement:
77
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
78
 * http://blog.booking.com/hardening-perls-hash-function.html
79
 */
80

81
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
82
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
83
#define INV_KEEP_FREE            5U
84

85
/* Fields common to entries of all hashmap/set types */
86
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        /* Bucket indexes of the neighboring entries in insertion order;
         * IDX_NIL when there is no neighbor. */
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};
106

107
/* In several functions it is advantageous to have the hash table extended
108
 * virtually by a couple of additional buckets. We reserve special index values
109
 * for these "swap" buckets. */
110
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
111
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
112
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
113
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)
114

115
#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
116
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */
117

118
assert_cc(IDX_FIRST == _IDX_SWAP_END);
119
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
120

121
/* Storage space for the "swap" buckets.
122
 * All entry types can fit into an ordered_hashmap_entry. */
123
struct swap_entries {
        /* One slot per swap index (IDX_PUT, IDX_TMP). An ordered_hashmap_entry
         * is the largest entry type, so any entry kind fits in a slot. */
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
126

127
/* Distance from Initial Bucket */
128
typedef uint8_t dib_raw_t;
129
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
130
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
131
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
132
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
133

134
#define DIB_FREE UINT_MAX
135

136
#if ENABLE_DEBUG_HASHMAP
137
/* Per-hashmap debug bookkeeping, only compiled in with ENABLE_DEBUG_HASHMAP. */
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};
146

147
/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
148
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
149
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
150
#endif
151

152
/* Discriminates the concrete container flavor stored in HashmapBase.type. */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
158

159
struct _packed_ indirect_storage {
        void *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while (hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
172

173
struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         *              or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};
179

180
#define DIRECT_BUCKETS(entry_t) \
181
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
182

183
/* We should be able to store at least one entry directly. */
184
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
185

186
/* We have 3 bits for n_direct_entries. */
187
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
188

189
/* Hashmaps with directly stored entries all use this shared hash key.
190
 * It's no big deal if the key is guessed, because there can be only
191
 * a handful of directly stored entries in a hashmap. When a hashmap
192
 * outgrows direct storage, it gets its own key for indirect storage. */
193
static uint8_t shared_hash_key[HASH_KEY_SIZE];
194

195
/* Fields that all hashmap/set types must have */
196
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};
216

217
/* Specific hash types
218
 * HashmapBase must be at the beginning of each hashmap struct. */
219

220
struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        /* Bucket indexes of the oldest/newest entry in insertion order;
         * IDX_NIL when the map is empty. */
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

/* One cached array (keys or values) of an IteratedCache. */
typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};
243

244
DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
245
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
246
/* No need for a separate Set pool */
247
assert_cc(sizeof(Hashmap) == sizeof(Set));
248

249
struct hashmap_type_info {
250
        size_t head_size;
251
        size_t entry_size;
252
        struct mempool *mempool;
253
        unsigned n_direct_buckets;
254
};
255

256
/* Per-type metadata: header/entry sizes, backing mempool, direct-storage capacity. */
static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
276

277
void hashmap_trim_pools(void) {
2✔
278
        int r;
2✔
279

280
        /* The pool is only allocated by the main thread, but the memory can be passed to other
281
         * threads. Let's clean up if we are the main thread and no other threads are live. */
282

283
        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
284
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
285
        if (getpid() != gettid())
2✔
286
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");
×
287

288
        r = get_process_threads(0);
2✔
289
        if (r < 0)
2✔
290
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
×
291
        if (r != 1)
2✔
292
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");
×
293

294
        mempool_trim(&hashmap_pool);
2✔
295
        mempool_trim(&ordered_hashmap_pool);
2✔
296
}
297

298
#if HAVE_VALGRIND_VALGRIND_H
299
_destructor_ static void cleanup_pools(void) {
300
        /* Be nice to valgrind */
301
        if (RUNNING_ON_VALGRIND)
302
                hashmap_trim_pools();
303
}
304
#endif
305

306
/* Number of buckets in the active storage area (indirect or direct). */
static unsigned n_buckets(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.n_buckets;

        return hashmap_type_info[h->type].n_direct_buckets;
}

/* Number of entries currently stored in the hashmap. */
static unsigned n_entries(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.n_entries;

        return h->n_direct_entries;
}

/* Bumps the entry counter of whichever storage area is active. */
static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

/* Drops the entry counter of whichever storage area is active. */
static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

/* Start of the bucket array (heap allocated or embedded in the header). */
static void* storage_ptr(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.storage;

        return h->direct.storage;
}

/* The siphash key to use; all direct-storage hashmaps share one key. */
static uint8_t* hash_key(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.hash_key;

        return shared_hash_key;
}
339

340
/* Maps key p to its ideal bucket index: siphash24 of the key, reduced
 * modulo the current bucket count. */
static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;

        siphash24_init(&state, hash_key(h));
        h->hash_ops->hash(p, &state);

        return (unsigned) (siphash24_finalize(&state) % n_buckets(h));
}
352
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
353

354
/* Flags the hashmap as modified since the last iterated_cache_get(). */
static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
69,595,097✔
357
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
358

359
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
3,214,924✔
360
        static uint8_t current[HASH_KEY_SIZE];
3,214,924✔
361
        static bool current_initialized = false;
3,214,924✔
362

363
        /* Returns a hash function key to use. In order to keep things
364
         * fast we will not generate a new key each time we allocate a
365
         * new hash table. Instead, we'll just reuse the most recently
366
         * generated one, except if we never generated one or when we
367
         * are rehashing an entire hash table because we reached a
368
         * fill level */
369

370
        if (!current_initialized || !reuse_is_ok) {
3,214,924✔
371
                random_bytes(current, sizeof(current));
1,983,581✔
372
                current_initialized = true;
1,983,581✔
373
        }
374

375
        memcpy(hash_key, current, sizeof(current));
3,214,924✔
376
}
3,214,924✔
377

378
/* Returns the bucket at index idx within the active storage area. */
static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

/* Returns one of the "virtual" swap buckets (IDX_PUT/IDX_TMP). */
static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}
399

400
/* Returns a pointer to the bucket at index idx.
401
 * Understands real indexes and swap indexes, hence "_virtual". */
402
/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached();
}
412

413
/* The per-bucket DIB array is stored immediately after the bucket array. */
static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

/* Forward distance from bucket "from" to bucket "idx", with wrap-around. */
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
422

423
/* Converts a raw (possibly saturated) DIB byte into the actual distance
 * from the entry's initial bucket, recomputing the hash if it overflowed. */
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}
445

446
/* Stores a DIB for bucket idx, saturating at DIB_RAW_OVERFLOW; DIB_FREE
 * marks the bucket as unused. */
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        if (dib == DIB_FREE)
                dib_raw_ptr(h)[idx] = DIB_RAW_FREE;
        else
                dib_raw_ptr(h)[idx] = MIN(dib, DIB_RAW_OVERFLOW);
}
127,275,855✔
449

450
/* Returns the index of the first occupied bucket at or after idx,
 * or IDX_NIL if the rest of the table is free. */
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs = dib_raw_ptr(h);
        unsigned nb = n_buckets(h);

        while (idx < nb) {
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;
                idx++;
        }

        return IDX_NIL;
}
461

462
/* Zeroes out the entry and marks its DIB as free. */
static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
31,155,367✔
466

467
/* Copies the entry in bucket "from" into bucket "to" (either may be a
 * virtual swap bucket). For ordered hashmaps the insertion-order links of
 * the moved entry's neighbors, and the list head/tail, are patched to
 * point at the new location. The source bucket is left untouched. */
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                /* Repoint the successor's back-link at the new bucket. */
                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                /* Repoint the predecessor's forward-link at the new bucket. */
                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
168,202,462✔
502

503
/* Next bucket index in probe order, wrapping around the table. */
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

/* Previous bucket index in probe order, wrapping around the table. */
static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}
510

511
/* Extracts the user-visible "value" of an entry: the stored value pointer
 * for (ordered) hashmaps, the key itself for sets. */
static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        if (IN_SET(h->type, HASHMAP_TYPE_PLAIN, HASHMAP_TYPE_ORDERED))
                return ((struct plain_hashmap_entry*) e)->value;

        if (h->type == HASHMAP_TYPE_SET)
                return (void*) e->key;

        assert_not_reached();
}
525

526
/* Removes the entry in bucket idx using Robin Hood backward-shift deletion:
 * all following entries up to the next free/DIB==0 bucket are shifted one
 * bucket back (each losing one unit of displacement), so no tombstones are
 * needed. For ordered hashmaps the entry is also unlinked from the
 * insertion-order list first. */
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                /* Unlink from the insertion-order doubly-linked list,
                 * updating head/tail when removing at either end. */
                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
31,155,367✔
579
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
580

581
/* Advances iterator i over an ordered hashmap, following the per-entry
 * insertion-order links. Returns the bucket index of the current entry,
 * or IDX_NIL when iteration is finished. */
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember where to resume: the next entry's index and its key, so a
         * backward shift can be detected on the following call. */
        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
631

632
/* Advances iterator i over an unordered hashmap/set in bucket order.
 * Returns the bucket index of the current entry, or IDX_NIL at the end. */
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        /* Cache the position so later scans skip the known-free prefix. */
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember the next occupied bucket and its key for shift detection. */
        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
686

687
/* Advances iterator i, dispatching to insertion-order iteration for ordered
 * hashmaps and bucket-order iteration otherwise. NULL maps iterate as empty. */
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
712

713
/* Public iteration step: advances i and reports the current entry's value
 * and key through the optional out-parameters. Returns false (and NULLs the
 * out-parameters) when iteration is exhausted. */
bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        unsigned idx = hashmap_iterate_entry(h, i);

        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        struct hashmap_base_entry *e = bucket_at(h, idx);
        void *data = entry_value(h, e);

        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}
737

738
/* Iterates over all used bucket indexes of hashmap 'h', using 'i' as iteration
 * state. The loop variable 'idx' takes each entry's bucket index in turn. */
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
742

743
/* Attaches a fresh IteratedCache to 'h'. Only one cache per hashmap is supported:
 * on debug builds a second attempt trips the assert; on release builds it fails
 * gracefully by returning NULL (as does allocation failure). */
IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        /* Runtime duplicate of the assert above, so non-debug builds don't corrupt state. */
        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}
761

762
/* Re-initializes the in-object ("direct") storage of 'h': zeroes all entry slots,
 * then fills the DIB array that follows them with DIB_RAW_INIT (i.e. marks every
 * bucket free). Must only be called while the map uses direct storage. */
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        /* mempset() returns the first byte past the zeroed entry area, which is
         * exactly where the DIB array starts. */
        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
771

772
/* pthread_once() callback: fills the process-wide shared hash key with random
 * bytes. Used by all maps while they are still in direct storage. */
static void shared_hash_key_initialize(void) {
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
}
775

776
/* Allocates and initializes a new hashmap head of the given 'type'. Storage comes
 * from the per-type mempool when mempools are enabled, otherwise from the heap.
 * Falls back to trivial_hash_ops when 'hash_ops' is NULL. Returns NULL on OOM. */
static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];

        bool use_pool = mempool_enabled && mempool_enabled();  /* mempool_enabled is a weak symbol */

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;  /* remember the origin so hashmap_free_no_clear() releases correctly */
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                /* Ordered maps additionally maintain an insertion-order list; start it empty. */
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        /* Lazily generate the process-wide shared hash key, exactly once. */
        static pthread_once_t once = PTHREAD_ONCE_INIT;
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}
808

809
Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
410,595✔
810
        return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN);
410,595✔
811
}
812

813
OrderedHashmap *ordered_hashmap_new(const struct hash_ops *hash_ops) {
50,767✔
814
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED);
50,767✔
815
}
816

817
Set *set_new(const struct hash_ops *hash_ops) {
54,330✔
818
        return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET);
54,330✔
819
}
820

821
/* Allocates *h lazily: a no-op when the map already exists. Returns 0 if it was
 * already allocated, 1 if a new map was created, -ENOMEM on allocation failure. */
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type) {
        assert(h);

        /* Already there? Nothing to do. */
        if (*h)
                return 0;

        HashmapBase *m = hashmap_base_new(hash_ops, type);
        if (!m)
                return -ENOMEM;

        *h = m;
        return 1;
}
837

838
/* Lazily allocates *h as a plain Hashmap. 0 if already allocated, 1 if newly
 * created, -ENOMEM on failure. */
int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**) h, hash_ops, HASHMAP_TYPE_PLAIN);
}
841

842
/* Lazily allocates *h as an OrderedHashmap. 0 if already allocated, 1 if newly
 * created, -ENOMEM on failure. */
int ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**) h, hash_ops, HASHMAP_TYPE_ORDERED);
}
845

846
/* Lazily allocates *s as a Set. 0 if already allocated, 1 if newly created,
 * -ENOMEM on failure. */
int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**) s, hash_ops, HASHMAP_TYPE_SET);
}
849

850
/* Allocates *h if necessary, then inserts key/value via hashmap_put(). Propagates
 * any allocation error, otherwise returns hashmap_put()'s result. */
int hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_put(*h, key, value);
}
859

860
/* Allocates *h if necessary, then inserts key/value via ordered_hashmap_put().
 * Propagates any allocation error, otherwise returns the put result. */
int ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}
869

870
/* Allocates *h if necessary, then stores key/value via ordered_hashmap_replace()
 * (overwriting any existing entry for the key). */
int ordered_hashmap_ensure_replace(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_replace(*h, key, value);
}
879

880
/* Allocates *h if necessary, then stores key/value via hashmap_replace()
 * (overwriting any existing entry for the key). */
int hashmap_ensure_replace(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_replace(*h, key, value);
}
889

890
/* Releases the hashmap head itself. The map must already be empty and back on
 * direct storage (i.e. _hashmap_clear() must have run first). Frees either into
 * the per-type mempool or via free(), matching how it was allocated. */
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}
907

908
/* Destroys 'h' entirely: clears all entries (running destructors if configured)
 * and frees the head. NULL-safe; always returns NULL so callers can write
 * 'h = _hashmap_free(h);'. */
HashmapBase* _hashmap_free(HashmapBase *h) {
        if (!h)
                return NULL;

        _hashmap_clear(h);
        hashmap_free_no_clear(h);

        return NULL;
}
916

917
/* Removes all entries from 'h', invoking the configured key/value destructors,
 * releases any indirect storage and resets the map to its initial (direct
 * storage) state. NULL-safe. */
void _hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->hash_ops->free_key || h->hash_ops->free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (h->hash_ops->free_key)
                                h->hash_ops->free_key(k);

                        if (h->hash_ops->free_value)
                                h->hash_ops->free_value(v);
                }
        }

        /* Drop the heap-allocated bucket array, if any, and fall back to the
         * small in-object storage. */
        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                /* Also reset the insertion-order list of ordered maps. */
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}
956

957
static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        /* 'distance' tracks how far the entry in IDX_PUT currently is from its
         * optimal bucket (its DIB, distance-from-initial-bucket). */
        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        /* A bucket we may claim. If it held a not-yet-rehashed
                         * entry (resize in progress), stash that entry first. */
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                /* Hand the displaced entry back to the caller in IDX_PUT. */
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
1015

1016
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                /* A resize re-seeds the hash key, so the target bucket must be
                 * recomputed. */
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                /* Link the new entry at the tail of the insertion-order list.
                 * IDX_PUT is used as a placeholder index here; the entry's final
                 * bucket is only known after hashmap_put_robin_hood() below. */
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        /* Must not displace anything: we are not resizing here, so no bucket may
         * carry DIB_RAW_REHASH. */
        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1076

1077
/*
 * Grows the bucket array (if needed) so that 'entries_add' more entries fit
 * within the configured load factor, rehashing all existing entries in place.
 *
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        /* Cap so that the byte size of storage (entries + DIBs) fits in unsigned. */
        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        /* Round the allocation up to a power of two. */
        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        /* The old DIB array sits right after the old (smaller) entry area. */
        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        /* Every entry must have been rehashed exactly once. */
        assert_se(n_rehashed == n_entries(h));

        return 1;
}
1215

1216
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                /* Empty bucket: the key cannot be further along the probe chain. */
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                /* Robin Hood invariant: once we meet an entry closer to its home
                 * than we are to ours, our key cannot appear later in the chain. */
                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1245

1246
/* Inserts key/value. Returns 1 on insertion, 0 if the exact same key/value pair
 * is already present, -EEXIST if the key maps to a different value, -ENOMEM if
 * a needed resize fails. */
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                /* Storing the identical pair again is a harmless no-op. */
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        /* Stage the new entry in the swap area and let the bold put place it. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
1267

1268
/* Adds 'key' to the set. Returns 1 on insertion, 0 if it was already present,
 * -ENOMEM if a needed resize fails. */
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
1284

1285
/* Allocates *s if necessary, then adds 'key' via set_put(). Propagates any
 * allocation error, otherwise returns set_put()'s result. */
int set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key) {
        int r = _set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        return set_put(*s, key);
}
1294

1295
/* Like set_ensure_put(), but takes ownership of 'key': if the key is not stored
 * in the set (duplicate, or error), it is freed here — with the hash_ops key
 * destructor when one is defined, otherwise with plain free(). */
int set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key) {
        int r;

        r = set_ensure_put(s, hash_ops, key);
        if (r <= 0) {
                /* Not inserted: release the caller's key so it doesn't leak. */
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}
1308

1309
/* Stores key/value, overwriting both the key pointer and value of an existing
 * entry with an equal key. Returns 0 when an existing entry was updated, 1 when
 * a new entry was inserted, -ENOMEM if a needed resize fails. */
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        /* Key not present: insert a fresh entry. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
1342

1343
/* Replaces the value of an existing entry, leaving the stored key pointer
 * untouched. Returns 0 on success, -ENOENT if the key is not present. */
int hashmap_update(Hashmap *h, const void *key, void *value) {
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        plain_bucket_at(h, idx)->value = value;
        hashmap_set_dirty(h);

        return 0;
}
1360

1361
/* Looks up 'key' and returns the associated value, or NULL if the map is NULL
 * or the key is not present. */
void* _hashmap_get(HashmapBase *h, const void *key) {
        if (!h)
                return NULL;

        unsigned hash = bucket_hash(h, key);
        unsigned idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}
1376

1377
/* Looks up 'key' and returns the associated value; additionally reports the
 * stored key pointer through *key2 (which may differ from 'key' while comparing
 * equal). Returns NULL if the map is NULL or the key is not present; *key2 is
 * left unmodified in that case. */
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}
1395

1396
/* Returns true if 'key' is present in 'h'. NULL-safe: a NULL map contains
 * nothing. */
bool _hashmap_contains(HashmapBase *h, const void *key) {
        if (!h)
                return false;

        return bucket_scan(h, bucket_hash(h, key), key) != IDX_NIL;
}
1405

1406
/* Removes the entry for 'key' and returns its value (ownership passes to the
 * caller; destructors are NOT invoked). Returns NULL if the map is NULL or the
 * key is not present. */
void* _hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        /* Grab the value before remove_entry() recycles the bucket. */
        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
1425

1426
/* Like hashmap_remove(), but additionally hands the stored key pointer back via
 * *rkey so the caller can free it. On miss (or NULL map) returns NULL and sets
 * *rkey to NULL. Destructors are NOT invoked. */
void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        /* Capture value and stored key before the bucket is recycled. */
        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
1454

1455
/* Atomically (from the map's perspective) re-keys an entry: removes 'old_key'
 * and inserts 'value' under 'new_key'. Returns 0 on success, -ENOENT if
 * 'old_key' is absent (or the map is NULL), -EEXIST if 'new_key' is already
 * taken. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* We just removed one entry, so there is guaranteed room: no resize needed. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1481

1482
/* Replaces 'old_key' with 'new_key' in the set. Returns 0 on success, -ENOENT
 * if 'old_key' is absent (or the set is NULL), -EEXIST if 'new_key' is already
 * present. */
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        /* We just removed one entry, so there is guaranteed room: no resize needed. */
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
1507

1508
/* Removes the entry for 'old_key' and inserts 'value' under 'new_key',
 * overwriting any entry that may already exist under 'new_key'.
 *
 * Returns 0 on success, -ENOENT if 'old_key' is not present (or h is NULL). */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        /* From here on use the key pointer stored in the map itself: its pointer
         * identity is used below to detect whether removing idx_new shifted the
         * old entry to a different bucket. */
        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        /* The old entry can shift by at most one bucket here. */
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* Cannot collide: any pre-existing entry for 'new_key' was removed above. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1543

1544
/* Removes the entry for 'key', but only if it maps to exactly 'value'
 * (pointer comparison). Returns 'value' if an entry was removed, NULL
 * otherwise. */
void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        if (entry_value(h, bucket_at(h, idx)) != value)
                return NULL;

        remove_entry(h, idx);
        return value;
}
1564

1565
/* Returns the bucket index of the first entry in iteration order, or IDX_NIL
 * when the map is NULL or empty. */
static unsigned find_first_entry(HashmapBase *h) {
        Iterator it = ITERATOR_FIRST;

        if (!h)
                return IDX_NIL;
        if (n_entries(h) == 0)
                return IDX_NIL;

        return hashmap_iterate_entry(h, &it);
}
1573

1574
/* Returns the value of the first entry in iteration order (NULL if the map is
 * empty), optionally storing its key in *ret_key and optionally removing the
 * entry. */
void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        /* Read key and value before a possible removal, since removal may
         * shuffle entries around. */
        struct hashmap_base_entry *e = bucket_at(h, idx);
        void *key = (void*) e->key;
        void *value = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return value;
}
1598

1599
/* Returns the number of entries; NULL counts as an empty map. */
unsigned _hashmap_size(HashmapBase *h) {
        return h ? n_entries(h) : 0;
}
1605

1606
/* Returns the number of allocated buckets; NULL counts as zero buckets. */
unsigned _hashmap_buckets(HashmapBase *h) {
        return h ? n_buckets(h) : 0;
}
1612

1613
/* Copies all entries of 'other' into 'h'. Keys already present in 'h' keep
 * their existing value (the -EEXIST from hashmap_put() is ignored). */
int _hashmap_merge(Hashmap *h, Hashmap *other) {
        unsigned idx;
        Iterator i;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *e = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, e->b.key, e->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
1630

1631
/* Adds every element of 'other' to 's'. Unlike _hashmap_merge(), any negative
 * return of set_put() is propagated (set_put() reports an existing element as
 * 0, not as an error). */
int set_merge(Set *s, Set *other) {
        unsigned idx;
        Iterator i;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                int r = set_put(s, set_bucket_at(other, idx)->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
1648

1649
/* Pre-allocates buckets for 'entries_add' additional entries. Returns 0 on
 * success (the 1 that resize_buckets() may return for "resized" is folded into
 * 0), negative errno on failure. */
int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        return r < 0 ? r : 0;
}
1660

1661
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                /* Key already in h: skip, the entry stays in 'other'. */
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                /* Sets store no separate value; plain/ordered maps do. */
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                /* Cannot fail: buckets were reserved up front. */
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                /* Drop from 'other' only after the entry landed in 'h'. */
                remove_entry(other, idx);
        }

        return 0;
}
1711

1712
/* Moves the single entry for 'key' from 'other' into 'h'.
 *
 * Returns 0 on success, -EEXIST if 'h' already contains the key, -ENOENT if
 * 'other' is NULL or does not contain it, other negative errno on allocation
 * failure. */
int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        /* Sets store no separate value; plain/ordered maps do. */
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        /* Remove from 'other' only once the insertion into 'h' succeeded. */
        remove_entry(other, idx);
        return 0;
}
1748

1749
/* Returns a shallow copy of 'h' (keys/values are shared, not duplicated), or
 * NULL on allocation failure. */
HashmapBase* _hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type);
        if (!copy)
                return NULL;

        switch (h->type) {

        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;

        default:
                assert_not_reached();
        }

        if (r >= 0)
                return copy;

        return _hashmap_free(copy);
}
1776

1777
/* Collects all values of 'h' into a newly allocated NULL-terminated array.
 * A NULL map yields an empty strv. Returns NULL on allocation failure. */
char** _hashmap_get_strv(HashmapBase *h) {
        Iterator i;
        unsigned idx, n = 0;
        char **sv;

        if (!h)
                return new0(char*, 1);

        sv = new(char*, n_entries(h) + 1);
        if (!sv)
                return NULL;

        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
1796

1797
/* Like set_get_strv(), but invalidates the set on success: ownership of the
 * strings moves to the returned strv and *s is freed and set to NULL. */
char** set_to_strv(Set **s) {
        char **strv, **p;

        assert(s);

        strv = new(char*, set_size(*s) + 1);
        if (!strv)
                return NULL;

        /* Drain the set; the loop also writes the terminating NULL. */
        p = strv;
        while ((*p = set_steal_first(*s)))
                p++;

        assert(set_isempty(*s));
        *s = set_free(*s);
        return strv;
}
1813

1814
/* Returns the value of the entry following 'key' in insertion order, or NULL
 * if 'key' is absent or is the last entry. */
void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        if (!h)
                return NULL;

        idx = bucket_scan(h, bucket_hash(h, key), key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;

        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
1831

1832
/* Adds 'value' to the set, taking ownership: if the element is already present
 * or insertion fails, 'value' is freed. Returns set_put()'s result. */
int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r > 0)
                return r; /* ownership transferred to the set */

        free(value);
        return r;
}
1844

1845
/* Inserts copies of the strings 'k' and 'v' into *h, allocating the hashmap
 * with 'hash_ops' first if necessary. 'v' may be NULL.
 *
 * Returns > 0 if a new entry was created, 0 if the same key/value pair was
 * already present, -EEXIST if the key exists with a different value, other
 * negative errno on failure. */
int hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v) {
        int r;

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        _cleanup_free_ char *kdup = NULL, *vdup = NULL;

        kdup = strdup(k);
        if (!kdup)
                return -ENOMEM;

        if (v) {
                vdup = strdup(v);
                if (!vdup)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, kdup, vdup);
        if (r < 0) {
                /* Key exists with an equal value: treat as success (idempotent
                 * insert); the duplicates are freed by the cleanup handlers. */
                if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
                        return 0;
                return r;
        }

        /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot be */
        assert(vdup == NULL || r > 0);
        if (r > 0)
                kdup = vdup = NULL; /* ownership passed to the hashmap */

        return r;
}
1878

1879
/* Adds a copy of the first 'n' bytes of 'p' (the whole string if n ==
 * SIZE_MAX) to *s, allocating the set with 'hash_ops' if needed. Returns > 0
 * if newly added, 0 if already present, negative errno on failure. */
int set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n) {
        char *copy;
        int r;

        assert(s);
        assert(p);

        r = _set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        if (n == SIZE_MAX) {
                /* Full-string case: do a cheap containment check first, so we
                 * can skip the strdup() entirely. */
                if (set_contains(*s, (char*) p))
                        return 0;

                copy = strdup(p);
        } else
                copy = strndup(p, n);
        if (!copy)
                return -ENOMEM;

        return set_consume(*s, copy);
}
1902

1903
/* Adds copies of all strings in 'l' to *s. Returns the number of elements
 * actually added (duplicates count as 0), or negative errno on failure. */
int set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l) {
        int count = 0;

        assert(s);

        STRV_FOREACH(i, l) {
                int r = set_put_strndup_full(s, hash_ops, *i, SIZE_MAX);
                if (r < 0)
                        return r;

                count += r;
        }

        return count;
}
1918

1919
/* Splits 'v' at 'separators' (per extract_first_word() with 'flags') and adds
 * each word to the set, transferring ownership. Returns 0 when the input is
 * exhausted, negative errno on failure. */
int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = ASSERT_PTR(v);

        assert(s);

        for (;;) {
                char *word;
                int r;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
1937

1938
/* expand the cachemem if needed, return true if newly (re)activated. */
1939
static int cachemem_maintain(CacheMem *mem, size_t size) {
413,568✔
1940
        assert(mem);
413,568✔
1941

1942
        if (!GREEDY_REALLOC(mem->ptr, size)) {
413,568✔
1943
                if (size > 0)
×
1944
                        return -ENOMEM;
1945
        }
1946

1947
        if (!mem->active) {
413,568✔
1948
                mem->active = true;
2,294✔
1949
                return true;
2,294✔
1950
        }
1951

1952
        return false;
1953
}
1954

1955
/* Hands out pointers to internal arrays holding all keys/values of the cached
 * hashmap, rebuilding the arrays only when needed: when the hashmap was
 * modified (dirty flag) or when a previously inactive array is requested
 * again. Passing NULL for res_keys/res_values deactivates the respective
 * array. Returns 0 on success, -ENOMEM on allocation failure. */
int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        size_t size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                /* > 0 means newly activated: the array needs a full rebuild. */
                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        /* The hashmap changed since the last call: refresh every active array
         * and clear the dirty flag. */
        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
2019

2020
/* Frees the cache and its key/value arrays; NULL-safe. Always returns NULL. */
IteratedCache* iterated_cache_free(IteratedCache *cache) {
        if (!cache)
                return NULL;

        free(cache->keys.ptr);
        free(cache->values.ptr);

        return mfree(cache);
}
2028

2029
/* Joins all strings of the set into a newly allocated string, with 'separator'
 * between elements (and, if wrap_with_separator, also before the first and
 * after the last one). Empty elements are skipped. For an empty set *ret is
 * set to NULL. Returns 0 on success, -ENOMEM on allocation failure.
 *
 * Note: iteration order of a set is unspecified, so is the order of the joined
 * elements. */
int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
        _cleanup_free_ char *str = NULL;
        size_t separator_len, len = 0;
        const char *value;
        bool first;

        assert(ret);

        if (set_isempty(s)) {
                *ret = NULL;
                return 0;
        }

        separator_len = strlen_ptr(separator);

        /* Wrapping with an empty separator is a no-op. */
        if (separator_len == 0)
                wrap_with_separator = false;

        first = !wrap_with_separator;

        SET_FOREACH(value, s) {
                size_t l = strlen_ptr(value);

                if (l == 0)
                        continue;

                /* Reserve room for this element, a preceding separator (unless
                 * first), a potential trailing wrap separator, and the NUL. */
                if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
                        return -ENOMEM;

                if (separator_len > 0 && !first) {
                        memcpy(str + len, separator, separator_len);
                        len += separator_len;
                }

                memcpy(str + len, value, l);
                len += l;
                first = false;
        }

        /* If every element was the empty string, nothing has been allocated
         * yet and 'str' is still NULL. Make sure we have a buffer for the
         * optional wrap separator and the NUL terminator before writing. */
        if (!GREEDY_REALLOC(str, len + (wrap_with_separator ? separator_len : 0) + 1))
                return -ENOMEM;

        if (wrap_with_separator) {
                memcpy(str + len, separator, separator_len);
                len += separator_len;
        }

        str[len] = '\0';

        *ret = TAKE_PTR(str);
        return 0;
}
2078

2079
/* Checks whether the two sets contain exactly the same entries, i.e. each
 * entry of 'a' is in 'b' and vice versa. */
bool set_equal(Set *a, Set *b) {
        void *p;

        if (a == b)
                return true;

        /* An empty set equals exactly another empty set. */
        if (set_isempty(a))
                return set_isempty(b);
        if (set_isempty(b))
                return false;

        /* Cheap check that hopefully catches a lot of inequality cases already. */
        if (set_size(a) != set_size(b))
                return false;

        SET_FOREACH(p, a)
                if (!set_contains(b, p))
                        return false;

        /* With identical hash ops, equal size plus a ⊆ b already implies
         * equality, no need to check the reverse direction. */
        if (a->b.hash_ops == b->b.hash_ops)
                return true;

        SET_FOREACH(p, b)
                if (!set_contains(a, p))
                        return false;

        return true;
}
2110

2111
/* Returns true if any pattern in the set matches 'needle'. Any failure of
 * fnmatch() is treated as FNM_NOMATCH, i.e. as a non-matching pattern. */
static bool set_fnmatch_one(Set *patterns, const char *needle) {
        const char *pattern;

        assert(needle);

        SET_FOREACH(pattern, patterns)
                if (fnmatch(pattern, needle, 0) == 0)
                        return true;

        return false;
}
2124

2125
/* Returns true if 'needle' matches no exclude pattern and either the include
 * set is empty (= everything included) or some include pattern matches. */
bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
        assert(needle);

        if (set_fnmatch_one(exclude_patterns, needle))
                return false;

        return set_isempty(include_patterns) ||
                set_fnmatch_one(include_patterns, needle);
}
2136

2137
static int hashmap_entry_compare(
543,628✔
2138
                struct hashmap_base_entry * const *a,
2139
                struct hashmap_base_entry * const *b,
2140
                compare_func_t compare) {
2141

2142
        assert(a && *a);
543,628✔
2143
        assert(b && *b);
543,628✔
2144
        assert(compare);
543,628✔
2145

2146
        return compare((*a)->key, (*b)->key);
543,628✔
2147
}
2148

2149
/* Collects pointers to all entries of 'h' into a newly allocated array, sorted
 * by key with the hashmap's compare function. Stores the array in *ret and the
 * entry count in *ret_n; for an empty/NULL map *ret is NULL and *ret_n is 0.
 * Returns 0 on success, -ENOMEM on allocation failure. */
static int _hashmap_dump_entries_sorted(
                HashmapBase *h,
                void ***ret,
                size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        Iterator iter;
        unsigned idx;
        size_t n = 0;

        assert(ret);
        assert(ret_n);

        if (_hashmap_size(h) == 0) {
                *ret = NULL;
                *ret_n = 0;
                return 0;
        }

        /* We append one more element than needed so that the resulting array can be used as a strv. We
         * don't count this entry in the returned size. */
        entries = new(void*, _hashmap_size(h) + 1);
        if (!entries)
                return -ENOMEM;

        HASHMAP_FOREACH_IDX(idx, h, iter)
                entries[n++] = bucket_at(h, idx);

        assert(n == _hashmap_size(h));
        entries[n] = NULL;

        typesafe_qsort_r((struct hashmap_base_entry**) entries, n,
                         hashmap_entry_compare, h->hash_ops->compare);

        *ret = TAKE_PTR(entries);
        *ret_n = n;
        return 0;
}
2186

2187
/* Returns all keys of 'h' sorted by the compare function, as a newly allocated
 * NULL-terminated array in *ret (and the count in *ret_n, if non-NULL). */
int _hashmap_dump_keys_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Turn the entry pointers into key pointers, reusing the array in place. */
        for (size_t j = 0; j < n; j++)
                entries[j] = (void*) ((struct hashmap_base_entry*) entries[j])->key;

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}
2205

2206
/* Returns all values of 'h' sorted by key, as a newly allocated NULL-terminated
 * array in *ret (and the count in *ret_n, if non-NULL). */
int _hashmap_dump_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Turn the entry pointers into value pointers, reusing the array in place. */
        for (size_t j = 0; j < n; j++)
                entries[j] = entry_value(h, (struct hashmap_base_entry*) entries[j]);

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc