
systemd / systemd / 25409762285
05 May 2026 08:45PM UTC coverage: 72.658% (-0.02%) from 72.674%
Push via github, committed by web-flow: Couple of coverity fixes (#41951)

0 of 11 new or added lines in 2 files covered. (0.0%)
2705 existing lines in 63 files now uncovered.
326249 of 449021 relevant lines covered (72.66%)
1212712.0 hits per line

Source file: /src/basic/hashmap.c (97.65% covered)
/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <fnmatch.h>
#include <pthread.h>
#include <unistd.h>
#if HAVE_VALGRIND_VALGRIND_H
#  include <valgrind/valgrind.h>
#endif

#include "alloc-util.h"
#include "extract-word.h"
#include "hashmap.h"
#include "log.h"
#include "logarithm.h"
#include "memory-util.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "sort-util.h"
#include "string-util.h"
#include "strv.h"

#if ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
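
/* [Editorial illustration, not part of the original file] A small worked
 * example of the scheme above. Assume 8 buckets and hypothetical keys with
 * optimal buckets A->0, B->1, C->1, D->2, inserted in that order ("DIB" is
 * each entry's distance from its optimal bucket, in parentheses):
 *
 *   bucket:  0     1     2     3     4
 *   entry:   A(0)  B(0)  C(1)  D(1)  -
 *
 * Now insert E with optimal bucket 1. Linear probing starts at bucket 1 with
 * distance 0. B (DIB 0) and C (DIB 1) are not poorer than E at those points,
 * but at bucket 3 E's distance (2) exceeds D's DIB (1), so E takes bucket 3
 * and the displaced D continues probing, landing in bucket 4 with DIB 2:
 *
 *   bucket:  0     1     2     3     4
 *   entry:   A(0)  B(0)  C(1)  E(2)  D(2)
 *
 * The "rich" entry close to home (D) gave way to the "poor" one (E), which
 * is what keeps the displacement distribution flat. */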

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE            5U
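
/* [Editorial note, not part of the original file] Worked numbers for the
 * relation above: INV_KEEP_FREE = 5 means a maximum load factor of
 * 1 - 1/5 = 0.8, so a table holding n entries needs
 * m = n + n/(INV_KEEP_FREE - 1) buckets, e.g. n = 8 -> m = 8 + 8/4 = 10,
 * leaving one fifth of the buckets free. resize_buckets() below applies
 * exactly this formula. */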

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
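
/* [Editorial note, not part of the original file] How the encoding above
 * plays out in bucket_set_dib()/bucket_calculate_dib() below: a DIB of,
 * say, 3 is stored verbatim as raw byte 3; a (pathological) DIB of 400 is
 * clamped to DIB_RAW_OVERFLOW and the true distance is recomputed from the
 * key's hash on demand; raw 0xff marks a free bucket, reported to callers
 * as the sentinel DIB_FREE. */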

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while (hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         *              or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
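
/* [Editorial note, not part of the original file] Worked example of
 * DIRECT_BUCKETS() using the 64-bit sizes quoted in direct_storage above:
 * sizeof(struct direct_storage) = 39, and a set_entry (one pointer) plus
 * its dib_raw_t byte is 8 + 1 = 9, so DIRECT_BUCKETS(struct set_entry) =
 * 39 / 9 = 4, satisfying both assertions: at least one direct bucket, and
 * few enough to fit the 3-bit n_direct_entries counter. */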

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

void hashmap_trim_pools(void) {
        int r;

        /* The pool is only allocated by the main thread, but the memory can be passed to other
         * threads. Let's clean up if we are the main thread and no other threads are live. */

        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
        if (getpid() != gettid())
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");

        r = get_process_threads(0);
        if (r < 0)
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
        if (r != 1)
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");

        mempool_trim(&hashmap_pool);
        mempool_trim(&ordered_hashmap_pool);
}

#if HAVE_VALGRIND_VALGRIND_H
_destructor_ static void cleanup_pools(void) {
        /* Be nice to valgrind */
        if (RUNNING_ON_VALGRIND)
                hashmap_trim_pools();
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void* storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t* hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached();
}

static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached();
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
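
/* [Editorial illustration, not part of the original file] Backward shift
 * deletion as performed by base_remove_entry() above, on a hypothetical run
 * of buckets (DIB in parentheses):
 *
 *   before:  A(0)  B(1)  C(1)  D(2)  E(0)      <- remove B
 *
 * The stop bucket ("right") is E, the first bucket that is free or has
 * DIB == 0. C and D each shift one bucket to the left with their DIB
 * decremented, and the vacated bucket is marked free:
 *
 *   after:   A(0)  C(0)  D(1)  free  E(0)
 *
 * No tombstone is left behind, so later lookups still terminate as soon as
 * the probed distance exceeds an entry's DIB. */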

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}
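
/* [Editorial illustration, not part of the original file] A minimal sketch
 * of the iteration contract implemented above, written against the internal
 * entry point in this file; real callers use the HASHMAP_FOREACH wrappers
 * from hashmap.h instead. The function name is hypothetical: */
static void dump_all(Hashmap *h) {
        Iterator i = ITERATOR_FIRST;  /* iterators must start from ITERATOR_FIRST */
        const void *key;
        void *value;

        /* _hashmap_iterate() returns false (and NULLs the out-parameters)
         * once the map is exhausted. Removing the *current* entry during
         * the walk is allowed; the backward-shift detection above keeps the
         * iterator consistent. */
        while (_hashmap_iterate(HASHMAP_BASE(h), &i, &value, &key))
                log_debug("key=%p value=%p", key, value);
}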

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static void shared_hash_key_initialize(void) {
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
}

static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];

        bool use_pool = mempool_enabled && mempool_enabled();  /* mempool_enabled is a weak symbol */

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        static pthread_once_t once = PTHREAD_ONCE_INIT;
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
        return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN);
}

OrderedHashmap *ordered_hashmap_new(const struct hash_ops *hash_ops) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED);
}

Set *set_new(const struct hash_ops *hash_ops) {
        return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type) {
        HashmapBase *q;

        assert(h);

        if (*h) {
                assert((*h)->hash_ops == (hash_ops ?: &trivial_hash_ops));
                return 0;
        }

        q = hashmap_base_new(hash_ops, type);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 1;
}

int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN);
}

int ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED);
}

int set_ensure_allocated(Set **s, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET);
}

int hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_put(*h, key, value);
}
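
/* [Editorial illustration, not part of the original file] Typical use of the
 * ensure-put pattern above: a NULL Hashmap* is a valid empty map, and the
 * map is allocated lazily on first insertion. string_hash_ops comes from
 * hash-funcs.h; the function and its arguments are hypothetical: */
static int remember_name(Hashmap **names, const char *name, void *data) {
        /* Returns 1 if newly inserted, 0 if this exact key/value pair was
         * already present, -EEXIST if the key maps to a different value,
         * or -ENOMEM (see hashmap_put() below). */
        return hashmap_ensure_put(names, &string_hash_ops, name, data);
}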

int ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}

int ordered_hashmap_ensure_replace(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_replace(*h, key, value);
}

int hashmap_ensure_replace(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_replace(*h, key, value);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase* _hashmap_free(HashmapBase *h) {
        if (h) {
                _hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

void _hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->hash_ops->free_key || h->hash_ops->free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (h->hash_ops->free_key)
                                h->hash_ops->free_key(k);

                        if (h->hash_ops->free_value)
                                h->hash_ops->free_value(v);
                }
        }

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert_se(n_rehashed == n_entries(h));

        return 1;
}
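
/* [Editorial note, not part of the original file] A worked pass through the
 * sizing arithmetic above, assuming a plain Hashmap on 64-bit (entry_size
 * 16, dib_raw_t 1): growing to new_n_entries = 60 gives
 * new_n_buckets = 60 + 60/4 = 75, needing 75 * 17 = 1275 bytes, so
 * new_shift = log2u_round_up(1275) = 11 and 2048 bytes are allocated,
 * which in turn hold n_buckets = 2048/17 = 120. Right after the resize the
 * load factor is therefore only 0.5, comfortably under the 0.8 maximum. */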
1221

1222
/*
1223
 * Finds an entry with a matching key
1224
 * Returns: index of the found entry, or IDX_NIL if not found.
1225
 */
1226
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
206,453,188✔
1227
        struct hashmap_base_entry *e;
206,453,188✔
1228
        unsigned dib, distance;
206,453,188✔
1229
        dib_raw_t *dibs = dib_raw_ptr(h);
206,453,188✔
1230

1231
        assert(idx < n_buckets(h));
206,453,188✔
1232

1233
        for (distance = 0; ; distance++) {
119,716,746✔
1234
                if (dibs[idx] == DIB_RAW_FREE)
326,169,934✔
1235
                        return IDX_NIL;
1236

1237
                dib = bucket_calculate_dib(h, idx, dibs[idx]);
248,954,751✔
1238

1239
                if (dib < distance)
248,954,751✔
1240
                        return IDX_NIL;
1241
                if (dib == distance) {
212,564,035✔
1242
                        e = bucket_at(h, idx);
163,982,157✔
1243
                        if (h->hash_ops->compare(e->key, key) == 0)
163,982,157✔
1244
                                return idx;
1245
                }
1246

1247
                idx = next_idx(h, idx);
119,716,746✔
1248
        }
1249
}
1250
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1251

1252
int hashmap_put(Hashmap *h, const void *key, void *value) {
15,914,841✔
1253
        struct swap_entries swap;
15,914,841✔
1254
        struct plain_hashmap_entry *e;
15,914,841✔
1255
        unsigned hash, idx;
15,914,841✔
1256

1257
        assert(h);
15,914,841✔
1258

1259
        hash = bucket_hash(h, key);
15,914,841✔
1260
        idx = bucket_scan(h, hash, key);
15,914,841✔
1261
        if (idx != IDX_NIL) {
15,914,841✔
1262
                e = plain_bucket_at(h, idx);
267,481✔
1263
                if (e->value == value)
267,481✔
1264
                        return 0;
15,914,841✔
1265
                return -EEXIST;
35,133✔
1266
        }
1267

1268
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
15,647,360✔
1269
        e->b.key = key;
15,647,360✔
1270
        e->value = value;
15,647,360✔
1271
        return hashmap_put_boldly(h, hash, &swap, true);
15,647,360✔
1272
}
1273

1274
int set_put(Set *s, const void *key) {
7,183,058✔
1275
        struct swap_entries swap;
7,183,058✔
1276
        struct hashmap_base_entry *e;
7,183,058✔
1277
        unsigned hash, idx;
7,183,058✔
1278

1279
        assert(s);
7,183,058✔
1280

1281
        hash = bucket_hash(s, key);
7,183,058✔
1282
        idx = bucket_scan(s, hash, key);
7,183,058✔
1283
        if (idx != IDX_NIL)
7,183,058✔
1284
                return 0;
7,183,058✔
1285

1286
        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
7,165,571✔
1287
        e->key = key;
7,165,571✔
1288
        return hashmap_put_boldly(s, hash, &swap, true);
7,165,571✔
1289
}
1290

1291
int set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key) {
5,690,293✔
1292
        int r;
5,690,293✔
1293

1294
        assert(s);
5,690,293✔
1295

1296
        r = set_ensure_allocated(s, hash_ops);
5,690,293✔
1297
        if (r < 0)
5,690,293✔
1298
                return r;
1299

1300
        return set_put(*s, key);
5,690,293✔
1301
}
1302

1303
int set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key) {
        int r;

        r = set_ensure_put(s, hash_ops, key);
        if (r <= 0) {
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}

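/* Unlike hashmap_put(), this overwrites an existing entry: both the stored
 * key pointer and the value are updated in place. hashmap_update() below is
 * the stricter variant that refuses to create a new entry. */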
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * which would break the assumptions we make for iteration. So
                 * count this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

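/* Generic lookup backend shared by all map flavours; the type-safe lookup
 * wrappers in hashmap.h funnel into this. A NULL map is treated as empty. */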
void* _hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void* hashmap_get2(Hashmap *h, const void *key, void **ret) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (ret)
                *ret = (void*) e->b.key;

        return e->value;
}

bool _hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void* _hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void* hashmap_remove2(Hashmap *h, const void *key, void **ret) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (ret)
                        *ret = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (ret)
                        *ret = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (ret)
                *ret = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        if (entry_value(h, e) != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        void *key, *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        e = bucket_at(h, idx);
        key = (void*) e->key;
        data = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return data;
}

unsigned _hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned _hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}

int _hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on allocation failure, in which case nothing has been moved.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the move and having to
         * roll back or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase* _hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached();
        }

        if (r < 0)
                return _hashmap_free(copy);

        return copy;
}

char** _hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        if (!h)
                return new0(char*, 1);

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

char** set_to_strv(Set **s) {
        assert(s);

        /* This is similar to set_get_strv(), but invalidates the set on success. */

        char **v = new(char*, set_size(*s) + 1);
        if (!v)
                return NULL;

        for (char **p = v; (*p = set_steal_first(*s)); p++)
                ;

        assert(set_isempty(*s));
        *s = set_free(*s);
        return v;
}

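/* Looks up the entry following the given key in iteration (i.e. insertion)
 * order and returns its value, or NULL if the key is not present or is the
 * last entry. */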
void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

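/* Like set_put(), but takes ownership: if the value is already in the set, or
 * on failure, it is freed, so the caller may unconditionally forget about the
 * pointer. A sketch of the typical call pattern:
 *
 *         char *w = strdup(word);
 *         if (!w)
 *                 return -ENOMEM;
 *         r = set_consume(s, w);    // w is invalid from here on
 *         if (r < 0)
 *                 return r;
 */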
int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v) {
        int r;

        assert(h);

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        _cleanup_free_ char *kdup = NULL, *vdup = NULL;

        kdup = strdup(k);
        if (!kdup)
                return -ENOMEM;

        if (v) {
                vdup = strdup(v);
                if (!vdup)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, kdup, vdup);
        if (r < 0) {
                if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
                        return 0;
                return r;
        }

        /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot happen. */
        assert(vdup == NULL || r > 0);
        if (r > 0)
                kdup = vdup = NULL;

        return r;
}

int set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n) {
        char *c;
        int r;

        assert(s);
        assert(p);

        r = set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        if (n == SIZE_MAX) {
                if (set_contains(*s, (char*) p))
                        return 0;

                c = strdup(p);
        } else
                c = strndup(p, n);
        if (!c)
                return -ENOMEM;

        return set_consume(*s, c);
}

int set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l) {
        int n = 0, r;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strndup_full(s, hash_ops, *i, SIZE_MAX);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = ASSERT_PTR(v);
        int r;

        assert(s);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}

/* Expand the cachemem if needed; returns true if it was newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, size_t size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

27,108,265✔
1966
        bool sync_keys = false, sync_values = false;
27,108,265✔
1967
        size_t size;
27,108,265✔
1968
        int r;
27,108,265✔
1969

1970
        assert(cache);
27,108,265✔
1971
        assert(cache->hashmap);
27,108,265✔
1972

1973
        size = n_entries(cache->hashmap);
27,108,265✔
1974

1975
        if (res_keys) {
27,108,265✔
1976
                r = cachemem_maintain(&cache->keys, size);
112✔
1977
                if (r < 0)
112✔
1978
                        return r;
1979

1980
                sync_keys = r;
112✔
1981
        } else
1982
                cache->keys.active = false;
27,108,153✔
1983

1984
        if (res_values) {
27,108,265✔
1985
                r = cachemem_maintain(&cache->values, size);
27,108,265✔
1986
                if (r < 0)
27,108,265✔
1987
                        return r;
1988

1989
                sync_values = r;
27,108,265✔
1990
        } else
1991
                cache->values.active = false;
×
1992

1993
        if (cache->hashmap->dirty) {
27,108,265✔
1994
                if (cache->keys.active)
2,936✔
1995
                        sync_keys = true;
111✔
1996
                if (cache->values.active)
2,936✔
1997
                        sync_values = true;
2,936✔
1998

1999
                cache->hashmap->dirty = false;
2,936✔
2000
        }
2001

2002
        if (sync_keys || sync_values) {
27,108,265✔
2003
                unsigned i, idx;
2,940✔
2004
                Iterator iter;
2,940✔
2005

2006
                i = 0;
2,940✔
2007
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
508,576✔
2008
                        struct hashmap_base_entry *e;
505,636✔
2009

2010
                        e = bucket_at(cache->hashmap, idx);
505,636✔
2011

2012
                        if (sync_keys)
505,636✔
2013
                                cache->keys.ptr[i] = e->key;
491,500✔
2014
                        if (sync_values)
505,636✔
2015
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
505,636✔
2016
                        i++;
505,636✔
2017
                }
2018
        }
2019

2020
        if (res_keys)
27,108,265✔
2021
                *res_keys = cache->keys.ptr;
112✔
2022
        if (res_values)
27,108,265✔
2023
                *res_values = cache->values.ptr;
27,108,265✔
2024
        if (res_n_entries)
27,108,265✔
2025
                *res_n_entries = size;
27,108,265✔
2026

2027
        return 0;
2028
}
2029

2030
IteratedCache* iterated_cache_free(IteratedCache *cache) {
7,455✔
2031
        if (cache) {
7,455✔
2032
                free(cache->keys.ptr);
7,455✔
2033
                free(cache->values.ptr);
7,455✔
2034
        }
2035

2036
        return mfree(cache);
7,455✔
2037
}
2038

2039
int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
        _cleanup_free_ char *str = NULL;
        size_t separator_len, len = 0;
        const char *value;
        bool first;

        assert(ret);

        if (set_isempty(s)) {
                *ret = NULL;
                return 0;
        }

        separator_len = strlen_ptr(separator);

        if (separator_len == 0)
                wrap_with_separator = false;

        first = !wrap_with_separator;

        SET_FOREACH(value, s) {
                size_t l = strlen_ptr(value);

                if (l == 0)
                        continue;

                if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
                        return -ENOMEM;

                if (separator_len > 0 && !first) {
                        memcpy(str + len, separator, separator_len);
                        len += separator_len;
                }

                memcpy(str + len, value, l);
                len += l;
                first = false;
        }

        if (wrap_with_separator) {
                memcpy(str + len, separator, separator_len);
                len += separator_len;
        }

        str[len] = '\0';

        *ret = TAKE_PTR(str);
        return 0;
}

bool set_equal(Set *a, Set *b) {
        void *p;

        /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the
         * same entries. */

        if (a == b)
                return true;

        if (set_isempty(a) && set_isempty(b))
                return true;

        if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
                                         * already */
                return false;

        SET_FOREACH(p, a)
                if (!set_contains(b, p))
                        return false;

        /* If we have the same hashops, then we don't need to check things backwards given we compared the
         * size and that all of a is in b. */
        if (a->b.hash_ops == b->b.hash_ops)
                return true;

        SET_FOREACH(p, b)
                if (!set_contains(a, p))
                        return false;

        return true;
}

static bool set_fnmatch_one(Set *patterns, const char *needle) {
        const char *p;

        assert(needle);

        /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as a non-matching
         * pattern. */

        SET_FOREACH(p, patterns)
                if (fnmatch(p, needle, 0) == 0)
                        return true;

        return false;
}

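/* Standard include/exclude filtering: the needle must not match any exclude
 * pattern and must match at least one include pattern, where an empty include
 * set counts as "match everything". */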
bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
        assert(needle);

        if (set_fnmatch_one(exclude_patterns, needle))
                return false;

        if (set_isempty(include_patterns))
                return true;

        return set_fnmatch_one(include_patterns, needle);
}

static int hashmap_entry_compare(
                struct hashmap_base_entry * const *a,
                struct hashmap_base_entry * const *b,
                compare_func_t compare) {

        assert(a && *a);
        assert(b && *b);
        assert(compare);

        return compare((*a)->key, (*b)->key);
}

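/* Collects pointers to all entries into a NULL-terminated array and sorts it
 * by key with the compare function from the map's hash_ops. Shared backend of
 * the two dump helpers below, which rewrite the array in place to hold only
 * the keys or only the values. */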
static int _hashmap_dump_entries_sorted(
                HashmapBase *h,
                void ***ret,
                size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        Iterator iter;
        unsigned idx;
        size_t n = 0;

        assert(ret);
        assert(ret_n);

        if (_hashmap_size(h) == 0) {
                *ret = NULL;
                *ret_n = 0;
                return 0;
        }

        /* We append one more element than needed so that the resulting array can be used as a strv. We
         * don't count this entry in the returned size. */
        entries = new(void*, _hashmap_size(h) + 1);
        if (!entries)
                return -ENOMEM;

        HASHMAP_FOREACH_IDX(idx, h, iter)
                entries[n++] = bucket_at(h, idx);

        assert(n == _hashmap_size(h));
        entries[n] = NULL;

        typesafe_qsort_r((struct hashmap_base_entry**) entries, n,
                         hashmap_entry_compare, h->hash_ops->compare);

        *ret = TAKE_PTR(entries);
        *ret_n = n;
        return 0;
}

int _hashmap_dump_keys_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        assert(ret);

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array. */
        FOREACH_ARRAY(e, entries, n)
                *e = (void*) (*(struct hashmap_base_entry**) e)->key;

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}

int _hashmap_dump_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        assert(ret);

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array. */
        FOREACH_ARRAY(e, entries, n)
                *e = entry_value(h, *(struct hashmap_base_entry**) e);

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}