• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 14766779411

30 Apr 2025 04:55PM UTC coverage: 72.225% (-0.06%) from 72.282%
14766779411

push

github

web-flow
wait-online: handle varlink connection errors while waiting for DNS (#37283)

Currently, if systemd-networkd-wait-online is started with --dns, and
systemd-resolved is not running, it will exit with an error right away.
Similarly, if systemd-resolved is restarted while waiting for DNS
configuration, systemd-networkd-wait-online will not attempt to
re-connect, and will potentially never see subsequent DNS
configurations.

Improve this by adding socket units for the systemd-resolved varlink
servers, and re-establish the connection in systemd-networkd-wait-online
when we receive `SD_VARLINK_ERROR_DISCONNECTED`.

8 of 16 new or added lines in 2 files covered. (50.0%)

5825 existing lines in 217 files now uncovered.

297168 of 411450 relevant lines covered (72.22%)

695892.62 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.64
/src/basic/hashmap.c
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#include <errno.h>
4
#include <fnmatch.h>
5
#include <pthread.h>
6
#include <stdint.h>
7
#include <stdlib.h>
8
#if HAVE_VALGRIND_VALGRIND_H
9
#  include <valgrind/valgrind.h>
10
#endif
11

12
#include "alloc-util.h"
13
#include "fileio.h"
14
#include "hashmap.h"
15
#include "log.h"
16
#include "logarithm.h"
17
#include "macro.h"
18
#include "memory-util.h"
19
#include "mempool.h"
20
#include "missing_syscall.h"
21
#include "process-util.h"
22
#include "random-util.h"
23
#include "set.h"
24
#include "siphash24.h"
25
#include "sort-util.h"
26
#include "string-util.h"
27
#include "strv.h"
28

29
#if ENABLE_DEBUG_HASHMAP
30
#include "list.h"
31
#endif
32

33
/*
34
 * Implementation of hashmaps.
35
 * Addressing: open
36
 *   - uses less RAM compared to closed addressing (chaining), because
37
 *     our entries are small (especially in Sets, which tend to contain
38
 *     the majority of entries in systemd).
39
 * Collision resolution: Robin Hood
40
 *   - tends to equalize displacement of entries from their optimal buckets.
41
 * Probe sequence: linear
42
 *   - though theoretically worse than random probing/uniform hashing/double
43
 *     hashing, it is good for cache locality.
44
 *
45
 * References:
46
 * Celis, P. 1986. Robin Hood Hashing.
47
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
48
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
49
 * - The results are derived for random probing. Suggests deletion with
50
 *   tombstones and two mean-centered search methods. None of that works
51
 *   well for linear probing.
52
 *
53
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
54
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
55
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
56
 * http://www.math.uu.se/~svante/papers/sj157.pdf
57
 * - Applies to Robin Hood with linear probing. Contains remarks on
58
 *   the unsuitability of mean-centered search with linear probing.
59
 *
60
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
61
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
62
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
63
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
64
 *   in a successful search), and Janson writes about displacement. C = d + 1.
65
 *
66
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
67
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
68
 * - Explanation of backward shift deletion with pictures.
69
 *
70
 * Khuong, P. 2013. The Other Robin Hood Hashing.
71
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
72
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
73
 */
74

75
/*
76
 * XXX Ideas for improvement:
77
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
78
 * http://blog.booking.com/hardening-perls-hash-function.html
79
 */
80

81
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE            5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        /* Bucket indexes forming the insertion-order doubly-linked list;
         * IDX_NIL marks either end of the list. */
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

/* "Bucket is free" sentinel in the unpacked (unsigned) DIB representation. */
#define DIB_FREE UINT_MAX
135

136
#if ENABLE_DEBUG_HASHMAP
137
struct hashmap_debug_info {
138
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
139
        unsigned max_entries;  /* high watermark of n_entries */
140

141
        /* who allocated this hashmap */
142
        int line;
143
        const char *file;
144
        const char *func;
145

146
        /* fields to detect modification while iterating */
147
        unsigned put_count;    /* counts puts into the hashmap */
148
        unsigned rem_count;    /* counts removals from hashmap */
149
        unsigned last_rem_idx; /* remembers last removal index */
150
};
151

152
/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
153
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
154
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
155
#endif
156

157
/* Discriminates the concrete flavour stored in HashmapBase.type. */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

/* Heap-allocated storage, used once a table outgrows direct storage. */
struct _packed_ indirect_storage {
        void *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while (hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

/* In-object storage for small tables, overlaid on the same union slot as
 * indirect_storage. */
struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         *              or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};
184

185
/* Number of buckets of the given entry type that fit into direct storage,
 * accounting for one DIB byte per bucket. */
#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
199

200
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        /* Small tables live inline in "direct"; larger ones move to heap
         * storage described by "indirect". has_indirect selects the member. */
        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};
221

222
/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        /* Bucket indexes of the first/last entry in insertion order;
         * IDX_NIL when the map is empty. */
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

/* One cached flat array (keys or values) belonging to an IteratedCache. */
typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));
253

254
/* Per-type metadata: object and entry sizes, which mempool to allocate
 * from, and how many buckets fit into direct storage. */
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                /* Sets share the plain-hashmap pool, see assert_cc() above. */
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
281

282
void hashmap_trim_pools(void) {
2✔
283
        int r;
2✔
284

285
        /* The pool is only allocated by the main thread, but the memory can be passed to other
286
         * threads. Let's clean up if we are the main thread and no other threads are live. */
287

288
        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
289
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
290
        if (getpid() != gettid())
2✔
291
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");
×
292

293
        r = get_process_threads(0);
2✔
294
        if (r < 0)
2✔
295
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
×
296
        if (r != 1)
2✔
UNCOV
297
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");
×
298

299
        mempool_trim(&hashmap_pool);
2✔
300
        mempool_trim(&ordered_hashmap_pool);
2✔
301
}
302

303
#if HAVE_VALGRIND_VALGRIND_H
304
_destructor_ static void cleanup_pools(void) {
305
        /* Be nice to valgrind */
306
        if (RUNNING_ON_VALGRIND)
307
                hashmap_trim_pools();
308
}
309
#endif
310

311
/* Current bucket count, regardless of whether storage is direct or indirect. */
static unsigned n_buckets(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.n_buckets;

        return hashmap_type_info[h->type].n_direct_buckets;
}
315

316
/* Current entry count, regardless of whether storage is direct or indirect. */
static unsigned n_entries(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.n_entries;

        return h->n_direct_entries;
}
320

321
/* Increments the entry counter of whichever storage is active. */
static void n_entries_inc(HashmapBase *h) {
        if (!h->has_indirect) {
                h->n_direct_entries++;
                return;
        }

        h->indirect.n_entries++;
}
327

328
/* Decrements the entry counter of whichever storage is active. */
static void n_entries_dec(HashmapBase *h) {
        if (!h->has_indirect) {
                h->n_direct_entries--;
                return;
        }

        h->indirect.n_entries--;
}
334

335
/* Base address of the bucket/DIB storage area. */
static void* storage_ptr(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.storage;

        return h->direct.storage;
}
339

340
/* The siphash key for this table: its own key once indirect, otherwise the
 * process-wide shared key used by all direct-storage tables. */
static uint8_t* hash_key(HashmapBase *h) {
        if (h->has_indirect)
                return h->indirect.hash_key;

        return shared_hash_key;
}
344

345
/* Maps key p to a bucket index by running the type's hash op through
 * siphash24 with this table's key, then reducing modulo the bucket count. */
static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;

        siphash24_init(&state, hash_key(h));
        h->hash_ops->hash(p, &state);

        return (unsigned) (siphash24_finalize(&state) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
358

359
/* Marks the hashmap as modified since the last iterated_cache_get(). */
static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
363

364
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
3,231,738✔
365
        static uint8_t current[HASH_KEY_SIZE];
3,231,738✔
366
        static bool current_initialized = false;
3,231,738✔
367

368
        /* Returns a hash function key to use. In order to keep things
369
         * fast we will not generate a new key each time we allocate a
370
         * new hash table. Instead, we'll just reuse the most recently
371
         * generated one, except if we never generated one or when we
372
         * are rehashing an entire hash table because we reached a
373
         * fill level */
374

375
        if (!current_initialized || !reuse_is_ok) {
3,231,738✔
376
                random_bytes(current, sizeof(current));
1,993,515✔
377
                current_initialized = true;
1,993,515✔
378
        }
379

380
        memcpy(hash_key, current, sizeof(current));
3,231,738✔
381
}
3,231,738✔
382

383
/* Returns the entry stored in bucket idx. The per-bucket stride depends on
 * the concrete hashmap type's entry size. */
static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}
388

389
/* Type-specific convenience casts around bucket_at(). Safe because every
 * entry struct begins with a hashmap_base_entry. */
static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}
400

401
/* Returns the swap-space slot for one of the virtual indexes
 * (IDX_PUT, IDX_TMP); see the _IDX_SWAP_* definitions above. */
static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}
404

405
/* Returns a pointer to the bucket at index idx.
406
 * Understands real indexes and swap indexes, hence "_virtual". */
407
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
469,580,636✔
408
                                                    unsigned idx) {
409
        if (idx < _IDX_SWAP_BEGIN)
469,580,636✔
410
                return bucket_at(h, idx);
290,566,772✔
411

412
        if (idx < _IDX_SWAP_END)
179,013,864✔
413
                return &bucket_at_swap(swap, idx)->p.b;
179,013,864✔
414

415
        assert_not_reached();
×
416
}
417

418
/* The raw DIB array lives immediately after the bucket array in storage. */
static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        uint8_t *base = (uint8_t*) storage_ptr(h);

        return (dib_raw_t*) (base + hashmap_type_info[h->type].entry_size * n_buckets(h));
}
422

423
/* Forward distance from bucket "from" to bucket "idx", wrapping around the
 * end of the table. */
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        if (idx >= from)
                return idx - from;

        return n_buckets(h) + idx - from;
}
427

428
/* Unpacks the raw one-byte DIB of bucket idx into the full unsigned value,
 * recomputing it from the key's hash in the (rare) overflow case. */
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned home;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        home = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, home);
}
450

451
/* Stores dib into the raw byte-sized DIB array, saturating at
 * DIB_RAW_OVERFLOW; DIB_FREE maps to the DIB_RAW_FREE sentinel. */
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_t raw;

        if (dib == DIB_FREE)
                raw = DIB_RAW_FREE;
        else
                raw = MIN(dib, DIB_RAW_OVERFLOW);

        dib_raw_ptr(h)[idx] = raw;
}
454

455
/* Returns the index of the first occupied bucket at or after idx, or
 * IDX_NIL if every remaining bucket is free. */
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        const dib_raw_t *dibs = dib_raw_ptr(h);
        unsigned limit = n_buckets(h);

        while (idx < limit) {
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;
                idx++;
        }

        return IDX_NIL;
}
466

467
/* Zeroes the entry at idx and marks its bucket free in the DIB array. */
static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
471

472
/* Copies the entry in bucket "from" into bucket "to" (either may be a
 * virtual swap bucket). For ordered hashmaps it also repairs the
 * insertion-order list: the neighbours' links and, if needed, the list
 * head/tail are redirected to the new location. The "from" bucket is NOT
 * cleared here; callers do that separately. */
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                /* Point the successor's back-link at the new bucket. */
                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                /* Point the predecessor's forward-link at the new bucket. */
                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                /* Fix up list head/tail if the moved entry was at either end. */
                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
507

508
/* Next bucket index, wrapping around the end of the table. */
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        unsigned n = n_buckets(h);

        return (idx + 1U) % n;
}
511

512
/* Previous bucket index, wrapping around the start of the table. */
static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        unsigned n = n_buckets(h);

        return (n + idx - 1U) % n;
}
515

516
/* Extracts the "value" of an entry: the stored value pointer for plain and
 * ordered hashmaps, the key itself for sets. */
static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        default:
                assert_not_reached();
        }
}
530

531
/* Removes the entry at bucket idx using Robin Hood backward-shift deletion:
 * every entry between idx and the next "stop" bucket (free, or optimally
 * placed with DIB == 0) is shifted one bucket backwards, so no tombstones
 * are ever needed. For ordered hashmaps the entry is first unlinked from
 * the insertion-order list. */
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                /* Unlink from the insertion-order list; update tail/head when
                 * the removed entry was the last/first element. */
                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                /* Each shifted entry lands one bucket closer to its home
                 * bucket, so its DIB decreases by exactly one. */
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
585

586
/* Advances iterator i over an ordered hashmap, following the
 * insertion-order linked list. Returns the bucket index of the entry the
 * iterator now points at, or IDX_NIL when iteration is finished. */
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        /* Freshly initialized iterator on an empty list. */
        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Record where to continue, keyed by pointer so the backward-shift
         * check above can work on the next call. */
        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
636

637
/* Advances iterator i over a hashmap/set in internal (bucket) order.
 * Returns the bucket index of the entry the iterator now points at, or
 * IDX_NIL when iteration is finished. */
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        /* Cache the skip so later iterations/steals start here. */
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Record the next occupied bucket and its key pointer so the
         * backward-shift check above can work on the next call. */
        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
691

692
/* Advances the iterator one step, dispatching to insertion-order iteration
 * for ordered hashmaps and internal bucket order otherwise. A NULL hashmap
 * iterates as empty. In debug builds also verifies that the table was not
 * modified mid-iteration (other than removing the current entry). */
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
717

718
/* One step of the public iteration API: advance 'i' and store the current
 * entry's value and key into *value and *key (either out-pointer may be
 * NULL if the caller does not want it). Returns false — and NULLs the
 * out-parameters — once there are no more entries. */
bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                /* Exhausted (or h == NULL): clear outputs so callers see NULL. */
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}
742

743
/* Iterate over all used bucket indices of hashmap 'h': 'i' is the Iterator
 * state, 'idx' receives the current bucket index on each pass. */
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
747

748
/* Allocate an IteratedCache bound to 'h' and mark the hashmap as cached.
 * At most one cache per hashmap is supported. Returns NULL on OOM, or —
 * defensively, when asserts are compiled out — if a cache already exists. */
IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached) /* defensive re-check for builds without asserts */
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}
766

767
/* Wipe the in-object (direct) storage: zero all bucket entries, then set
 * every DIB byte to DIB_RAW_INIT, i.e. mark all buckets free. Only valid
 * while the map has not been upgraded to indirect storage. */
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        /* mempset() returns the address right past the zeroed entry area,
         * which is where the DIB array starts. */
        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
6,795,320✔
776

777
/* One-time initializer (run via pthread_once()) for the hash key shared by
 * all hashmaps that are still in direct storage. */
static void shared_hash_key_initialize(void) {
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
}
72,458✔
780

781
/* Allocate and minimally initialize a hashmap of the given type. The object
 * comes from the per-type mempool when mempools are enabled, otherwise from
 * the heap. Falls back to trivial_hash_ops if no hash_ops are given.
 * Returns NULL on OOM. */
static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type  HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];

        bool use_pool = mempool_enabled && mempool_enabled();  /* mempool_enabled is a weak symbol */

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool; /* remembered so the matching free path is used */
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                /* Ordered maps additionally keep an insertion-order list. */
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        /* Lazily generate the hash key shared by all direct-storage maps. */
        static pthread_once_t once = PTHREAD_ONCE_INIT;
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}
816

817
/* Allocate a plain (unordered) Hashmap; NULL on OOM. */
Hashmap *_hashmap_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN  HASHMAP_DEBUG_PASS_ARGS);
}
820

821
/* Allocate an OrderedHashmap (iterates in insertion order); NULL on OOM. */
OrderedHashmap *_ordered_hashmap_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED  HASHMAP_DEBUG_PASS_ARGS);
}
824

825
/* Allocate a Set (keys only, no values); NULL on OOM. */
Set *_set_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET  HASHMAP_DEBUG_PASS_ARGS);
}
828

829
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
30,773,335✔
830
                                         enum HashmapType type  HASHMAP_DEBUG_PARAMS) {
831
        HashmapBase *q;
30,773,335✔
832

833
        assert(h);
30,773,335✔
834

835
        if (*h)
30,773,335✔
836
                return 0;
837

838
        q = hashmap_base_new(hash_ops, type  HASHMAP_DEBUG_PASS_ARGS);
2,892,789✔
839
        if (!q)
2,892,789✔
840
                return -ENOMEM;
841

842
        *h = q;
2,892,789✔
843
        return 1;
2,892,789✔
844
}
845

846
/* Ensure *h points at an allocated plain Hashmap; see hashmap_base_ensure_allocated(). */
int _hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN  HASHMAP_DEBUG_PASS_ARGS);
}
849

850
/* Ensure *h points at an allocated OrderedHashmap; see hashmap_base_ensure_allocated(). */
int _ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED  HASHMAP_DEBUG_PASS_ARGS);
}
853

854
/* Ensure *s points at an allocated Set; see hashmap_base_ensure_allocated(). */
int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET  HASHMAP_DEBUG_PASS_ARGS);
}
857

858
/* Allocate the map on first use, then insert the key/value pair.
 * Forwards hashmap_put()'s return convention; -ENOMEM if allocation fails. */
int _hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value  HASHMAP_DEBUG_PARAMS) {
        int r = _hashmap_ensure_allocated(h, hash_ops  HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return hashmap_put(*h, key, value);
}
867

868
/* Allocate the ordered map on first use, then insert the key/value pair.
 * Forwards ordered_hashmap_put()'s return convention; -ENOMEM on OOM. */
int _ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value  HASHMAP_DEBUG_PARAMS) {
        int r = _ordered_hashmap_ensure_allocated(h, hash_ops  HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}
877

878
/* Allocate the ordered map on first use, then insert-or-overwrite the pair.
 * Forwards ordered_hashmap_replace()'s return convention; -ENOMEM on OOM. */
int _ordered_hashmap_ensure_replace(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value  HASHMAP_DEBUG_PARAMS) {
        int r = _ordered_hashmap_ensure_allocated(h, hash_ops  HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return ordered_hashmap_replace(*h, key, value);
}
887

888
/* Allocate the map on first use, then insert-or-overwrite the pair.
 * Forwards hashmap_replace()'s return convention; -ENOMEM on OOM. */
int _hashmap_ensure_replace(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value  HASHMAP_DEBUG_PARAMS) {
        int r = _hashmap_ensure_allocated(h, hash_ops  HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return hashmap_replace(*h, key, value);
}
897

898
/* Release the hashmap object itself. The map must already be empty and back
 * in direct-storage mode, i.e. _hashmap_clear() must have run first. */
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}
3,370,183✔
915

916
/* Destroy the hashmap, running any hash_ops destructors on the remaining
 * entries. NULL is a no-op. Always returns NULL, enabling the common
 * `h = _hashmap_free(h);` idiom. */
HashmapBase* _hashmap_free(HashmapBase *h) {
        if (!h)
                return NULL;

        _hashmap_clear(h);
        hashmap_free_no_clear(h);

        return NULL;
}
924

925
/* Remove all entries from 'h', invoking the hash_ops key/value destructors
 * where defined, and return the map to its initial direct-storage state.
 * NULL is a no-op. */
void _hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->hash_ops->free_key || h->hash_ops->free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        /* 'true' removes the entry before we destroy it. */
                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (h->hash_ops->free_key)
                                h->hash_ops->free_key(k);

                        if (h->hash_ops->free_value)
                                h->hash_ops->free_value(v);
                }
        }

        if (h->has_indirect) {
                /* Drop the heap-allocated bucket array; fall back to direct storage. */
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                /* Also reset the insertion-order list. */
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}
964

965
static int resize_buckets(HashmapBase *h, unsigned entries_add);
966

967
/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        /* 'distance' is the DIB (distance from initial bucket) of the entry
         * currently being placed; it grows by one per probe step. */
        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        /* Free or to-be-rehashed bucket: we can claim it. */
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                /* Hand the displaced old occupant back to the
                                 * caller in IDX_PUT for rehashing. */
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        /* Continue placing the evicted entry from its own DIB. */
                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
1023

1024
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                /* A resize changes the hash key, so recompute the target bucket. */
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                /* Append the new entry to the insertion-order list. The links
                 * are written as the sentinel IDX_PUT here; presumably they are
                 * rewritten to the final bucket index when the swap entry is
                 * moved into place — NOTE(review): confirm in bucket_move_entry(). */
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        /* Must not displace anything: the key is new and space is guaranteed. */
        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
1082
/* Typed convenience wrapper around hashmap_base_put_boldly(). */
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1084

1085
/*
 * Grows the bucket array (in place) so that at least 'entries_add' more
 * entries fit, upgrading from direct to indirect storage if necessary and
 * rehashing all existing entries with a fresh hash key.
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        /* Round the byte size up to a power of two. */
        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        /* The old DIB array still sits right after the old entry area. */
        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        /* Every entry must have been rehashed exactly once. */
        assert_se(n_rehashed == n_entries(h));

        return 1;
}
1223

1224
/*
 * Finds an entry with a matching key, probing linearly from bucket 'idx'
 * (the key's initial bucket).
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                /* Robin Hood invariant: entries are ordered by DIB, so once we
                 * pass a bucket whose DIB is smaller than our probe distance,
                 * the key cannot be stored any further along. */
                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        /* Same distance: compare keys to confirm a match. */
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
1252
/* Typed convenience wrapper around base_bucket_scan(). */
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1253

1254
/* Insert a key/value pair.
 * Returns 1 if inserted, 0 if the exact same pair is already present,
 * -EEXIST if the key exists with a different value, -ENOMEM on resize
 * failure. */
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        /* Stage the new entry in the swap area and let put_boldly place it. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
1275

1276
/* Add a key to the set.
 * Returns 1 if added, 0 if already present, -ENOMEM on resize failure. */
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        /* Stage the new entry in the swap area and let put_boldly place it. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
1292

1293
/* Allocate the set on first use, then add 'key'.
 * Forwards set_put()'s return convention; -ENOMEM if allocation fails. */
int _set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key  HASHMAP_DEBUG_PARAMS) {
        int r = _set_ensure_allocated(s, hash_ops  HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return set_put(*s, key);
}
1302

1303
/* Like _set_ensure_put(), but takes ownership of 'key': if the key was not
 * stored (already present, or an error occurred) it is freed here, using the
 * hash_ops key destructor when one is defined. Returns set_put()'s result. */
int _set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key  HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _set_ensure_put(s, hash_ops, key  HASHMAP_DEBUG_PASS_ARGS);
        if (r <= 0) {
                /* Not stored: release the caller's copy. */
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}
1316

1317
/* Insert or overwrite a key/value pair.
 * Returns 0 if an existing entry was updated (key pointer and value are
 * overwritten), 1 if a new entry was inserted, -ENOMEM on resize failure. */
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        /* Not present yet: insert as a fresh entry. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
1350

1351
/* Update the value of an existing entry; the stored key pointer is kept.
 * Returns 0 on success, -ENOENT if 'key' is not in the map. */
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}
1368

1369
/* Look up 'key' and return the associated value (or the key itself for
 * sets — entry_value() decides). Returns NULL if the map is NULL or the
 * key is absent. */
void* _hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}
1384

1385
/* Like hashmap_get(), but additionally returns the stored key pointer in
 * *key2 (which may differ from 'key' even when they compare equal).
 * Returns NULL if the map is NULL or the key is absent. */
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}
1403

1404
/* Membership test: true if 'key' is present in 'h'. A NULL map contains
 * nothing. */
bool _hashmap_contains(HashmapBase *h, const void *key) {
        if (!h)
                return false;

        return bucket_scan(h, bucket_hash(h, key), key) != IDX_NIL;
}
1413

1414
/* Remove 'key' from the map and return the value it mapped to (or the key
 * itself for sets). Returns NULL if the map is NULL or the key is absent.
 * Ownership of the returned value passes to the caller; no destructors run. */
void* _hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        /* Grab the value before remove_entry() recycles the bucket. */
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
1433

1434
/* Like hashmap_remove(), but additionally hands the stored key pointer back
 * in *rkey so the caller can free it. *rkey is NULLed when nothing was
 * removed. */
void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        /* Capture value and stored key before the bucket is recycled. */
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
1462

1463
/* Removes the entry under 'old_key' and re-inserts 'value' under 'new_key'.
 * Returns -ENOENT if 'old_key' is not present, -EEXIST if 'new_key' already is.
 * On error the map is left unmodified. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* The removal above freed a slot, so this put cannot fail. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1489

1490
/* Set analogue of hashmap_remove_and_put(): replaces 'old_key' with 'new_key'.
 * Returns -ENOENT if 'old_key' is missing, -EEXIST if 'new_key' is already present. */
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        /* The removal above freed a slot, so this put cannot fail. */
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
1515

1516
/* Like hashmap_remove_and_put(), but if 'new_key' already exists its entry is
 * displaced (removed) instead of failing with -EEXIST. Returns -ENOENT if
 * 'old_key' is not present. */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        /* Remember the stored key pointer so we can detect if the entry moved. */
        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* Both conflicting entries are gone, so this put cannot fail. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1551

1552
/* Removes the entry for 'key' only if its stored value pointer equals 'value'.
 * Returns 'value' on success, NULL if the key is absent or maps to a different value. */
void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        if (entry_value(h, e) != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}
1572

1573
/* Returns the bucket index of the first entry in iteration order, or IDX_NIL
 * if the map is NULL or empty. */
static unsigned find_first_entry(HashmapBase *h) {
        Iterator it = ITERATOR_FIRST;

        if (!h)
                return IDX_NIL;
        if (n_entries(h) == 0)
                return IDX_NIL;

        return hashmap_iterate_entry(h, &it);
}
1581

1582
/* Returns the value (and optionally the key via 'ret_key') of the first entry in
 * iteration order. If 'remove' is true the entry is also deleted from the map.
 * Returns NULL (and *ret_key = NULL) if the map is NULL or empty. */
void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        void *key, *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        e = bucket_at(h, idx);
        key = (void*) e->key;
        data = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return data;
}
1606

1607
/* Returns the number of entries; a NULL map has size 0. */
unsigned _hashmap_size(HashmapBase *h) {
        return h ? n_entries(h) : 0;
}
1613

1614
/* Returns the current bucket count; a NULL map has 0 buckets. */
unsigned _hashmap_buckets(HashmapBase *h) {
        return h ? n_buckets(h) : 0;
}
1620

1621
/* Copies every entry of 'other' into 'h'. Keys already present in 'h' are
 * silently skipped (-EEXIST from hashmap_put() is not an error here). */
int _hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
1638

1639
/* Adds every element of 'other' into 's'. Unlike _hashmap_merge(), set_put()
 * returns 0 (not -EEXIST) for duplicates, so any negative return is fatal. */
int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
1656

1657
/* Pre-grows the bucket array so that 'entries_add' further insertions cannot
 * fail with -ENOMEM. Returns 0 on success, negative errno on allocation failure. */
int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        assert(h);

        int r = resize_buckets(h, entries_add);
        return r < 0 ? r : 0;
}
1668

1669
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                /* Key already present in h: leave the entry in 'other'. */
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                /* Sets store no separate value; copy it only for plain/ordered maps. */
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                /* Cannot fail thanks to the resize_buckets() above. */
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
1719

1720
/* Moves the single entry for 'key' from 'other' into 'h'.
 * Returns 0 on success, -EEXIST if 'h' already has the key,
 * -ENOENT if 'other' is NULL or does not contain it, -ENOMEM on alloc failure. */
int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        /* Sets store no separate value; copy it only for plain/ordered maps. */
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        /* may_resize=true: the destination may need to grow, which can fail. */
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        /* Only remove from the source once the insertion has succeeded. */
        remove_entry(other, idx);
        return 0;
}
1756

1757
/* Creates a shallow copy of 'h' (keys and values are shared, not duplicated).
 * Returns NULL on allocation failure. */
HashmapBase* _hashmap_copy(HashmapBase *h  HASHMAP_DEBUG_PARAMS) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type  HASHMAP_DEBUG_PASS_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached();
        }

        if (r < 0)
                return _hashmap_free(copy);

        return copy;
}
1784

1785
/* Returns a NULL-terminated array of the map's values (not copies — the
 * pointers are shared with the map). A NULL map yields an empty strv.
 * Returns NULL on allocation failure. Caller frees the array only. */
char** _hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        if (!h)
                return new0(char*, 1);

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
1804

1805
/* Converts the set into a NULL-terminated strv, transferring ownership of the
 * elements to the array, and frees the set (setting *s to NULL).
 * Returns NULL on allocation failure, in which case the set is untouched. */
char** set_to_strv(Set **s) {
        assert(s);

        /* This is similar to set_get_strv(), but invalidates the set on success. */

        char **v = new(char*, set_size(*s) + 1);
        if (!v)
                return NULL;

        /* set_steal_first() returns NULL when the set is drained, terminating both
         * the loop and the strv. */
        for (char **p = v; (*p = set_steal_first(*s)); p++)
                ;

        assert(set_isempty(*s));
        *s = set_free(*s);
        return v;
}
1821

1822
/* Returns the value of the entry that follows 'key' in insertion order,
 * or NULL if 'key' is absent or is the last entry. */
void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
1839

1840
/* Inserts 'value' into the set, taking ownership: if the insertion fails or the
 * value is already present, 'value' is freed. Returns set_put()'s result. */
int set_consume(Set *s, void *value) {
        assert(s);
        assert(value);

        int r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}
1852

1853
/* Inserts a strdup()'d copy of key 'k' mapping to a strdup()'d copy of 'v'
 * (or NULL if 'v' is NULL), allocating the map on first use.
 * Returns > 0 if a new entry was created, 0 if an identical mapping already
 * existed, -EEXIST if the key maps to a different value, or -ENOMEM. */
int _hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v  HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _hashmap_ensure_allocated(h, hash_ops  HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        _cleanup_free_ char *kdup = NULL, *vdup = NULL;

        kdup = strdup(k);
        if (!kdup)
                return -ENOMEM;

        if (v) {
                vdup = strdup(v);
                if (!vdup)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, kdup, vdup);
        if (r < 0) {
                /* An identical mapping already present is not an error. */
                if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
                        return 0;
                return r;
        }

        /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot be */
        assert(vdup == NULL || r > 0);
        if (r > 0)
                kdup = vdup = NULL; /* ownership transferred to the map */

        return r;
}
1886

1887
/* Inserts a copy of (at most the first 'n' bytes of) string 'p' into the set,
 * allocating the set on first use. With n == SIZE_MAX the full string is used
 * and a cheap membership check avoids a pointless strdup().
 * Returns > 0 if newly added, 0 if already present, negative errno on failure. */
int _set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n  HASHMAP_DEBUG_PARAMS) {
        char *c;
        int r;

        assert(s);
        assert(p);

        r = _set_ensure_allocated(s, hash_ops  HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        if (n == SIZE_MAX) {
                if (set_contains(*s, (char*) p))
                        return 0;

                c = strdup(p);
        } else
                c = strndup(p, n);
        if (!c)
                return -ENOMEM;

        /* set_consume() frees 'c' if it turns out to be a duplicate. */
        return set_consume(*s, c);
}
1910

1911
/* Inserts a copy of every string in the strv 'l' into the set, allocating the
 * set on first use. Returns the number of newly added entries, or negative
 * errno on the first failure. */
int _set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l  HASHMAP_DEBUG_PARAMS) {
        int count = 0;

        assert(s);

        STRV_FOREACH(p, l) {
                int r = _set_put_strndup_full(s, hash_ops, *p, SIZE_MAX  HASHMAP_DEBUG_PASS_ARGS);
                if (r < 0)
                        return r;

                count += r;
        }

        return count;
}
1926

1927
/* Splits 'v' into words using 'separators' and extract_first_word() semantics
 * ('flags' controls quoting/escaping), inserting each word into the set.
 * The set takes ownership of each extracted word. Returns 0 when the input is
 * exhausted, negative errno on extraction or insertion failure. */
int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = ASSERT_PTR(v);
        int r;

        assert(s);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                /* r == 0: no more words; r < 0: extraction error. */
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
1945

1946
/* expand the cachemem if needed, return true if newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, size_t size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, size)) {
                /* GREEDY_REALLOC() with size 0 may legitimately return NULL;
                 * only a failed non-empty allocation is an error. */
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}
1962

1963
/* Returns cached flat arrays of the hashmap's keys and/or values, refreshing
 * them only when the underlying map changed (its 'dirty' flag) or when a cache
 * side was just (re)activated. Passing NULL for res_keys/res_values deactivates
 * the corresponding cache side. The returned pointers stay owned by the cache. */
int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        size_t size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                /* r > 0 means the key cache was just activated and must be filled. */
                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        /* A dirty map invalidates whichever cache sides are active. */
        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
2027

2028
/* Frees the cache together with its key/value arrays. Accepts NULL. Always returns NULL. */
IteratedCache* iterated_cache_free(IteratedCache *cache) {
        if (!cache)
                return NULL;

        free(cache->keys.ptr);
        free(cache->values.ptr);
        return mfree(cache);
}
2036

2037
/* Joins all strings in the set into one newly allocated string, separated by
 * 'separator'. If 'wrap_with_separator' is true (and the separator is non-empty)
 * the separator is also prepended and appended. Empty-string entries are
 * skipped. *ret is set to NULL for an empty set. Returns 0 or -ENOMEM. */
int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
        _cleanup_free_ char *str = NULL;
        size_t separator_len, len = 0;
        const char *value;
        bool first;

        assert(ret);

        if (set_isempty(s)) {
                *ret = NULL;
                return 0;
        }

        separator_len = strlen_ptr(separator);

        /* Wrapping with an empty separator would be a no-op. */
        if (separator_len == 0)
                wrap_with_separator = false;

        /* When wrapping, a leading separator is emitted before the first entry. */
        first = !wrap_with_separator;

        SET_FOREACH(value, s) {
                size_t l = strlen_ptr(value);

                if (l == 0)
                        continue;

                if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
                        return -ENOMEM;

                if (separator_len > 0 && !first) {
                        memcpy(str + len, separator, separator_len);
                        len += separator_len;
                }

                memcpy(str + len, value, l);
                len += l;
                first = false;
        }

        /* If every entry was the empty string, nothing was allocated above and
         * 'str' is still NULL; make sure the trailing writes below have a buffer
         * instead of dereferencing NULL. */
        if (!GREEDY_REALLOC(str, len + (wrap_with_separator ? separator_len : 0) + 1))
                return -ENOMEM;

        if (wrap_with_separator) {
                memcpy(str + len, separator, separator_len);
                len += separator_len;
        }

        str[len] = '\0';

        *ret = TAKE_PTR(str);
        return 0;
}
2086

2087
bool set_equal(Set *a, Set *b) {
        void *p;

        /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
         * entries */

        if (a == b)
                return true;

        if (set_isempty(a) && set_isempty(b))
                return true;

        if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
                                         * already */
                return false;

        SET_FOREACH(p, a)
                if (!set_contains(b, p))
                        return false;

        /* If we have the same hashops, then we don't need to check things backwards given we compared the
         * size and that all of a is in b. */
        if (a->b.hash_ops == b->b.hash_ops)
                return true;

        /* Different hash ops may treat distinct pointers as equal, so the sizes
         * matching does not imply a bijection; verify the reverse direction too. */
        SET_FOREACH(p, b)
                if (!set_contains(a, p))
                        return false;

        return true;
}
2118

2119
/* Returns true if 'needle' matches at least one fnmatch() pattern in the set.
 * An empty/NULL pattern set matches nothing. */
static bool set_fnmatch_one(Set *patterns, const char *needle) {
        const char *pattern;

        assert(needle);

        /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as non-matching pattern */

        SET_FOREACH(pattern, patterns) {
                int k = fnmatch(pattern, needle, 0);
                if (k == 0)
                        return true;
        }

        return false;
}
2132

2133
/* Returns true if 'needle' is accepted: it must not match any exclude pattern,
 * and must match at least one include pattern (an empty include set accepts all). */
bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
        assert(needle);

        if (set_fnmatch_one(exclude_patterns, needle))
                return false;

        return set_isempty(include_patterns) ||
               set_fnmatch_one(include_patterns, needle);
}
2144

2145
/* qsort_r-style comparator for arrays of entry pointers: delegates to the
 * user-supplied key compare function passed as userdata. */
static int hashmap_entry_compare(
                struct hashmap_base_entry * const *a,
                struct hashmap_base_entry * const *b,
                compare_func_t compare) {

        assert(a && *a);
        assert(b && *b);
        assert(compare);

        return compare((*a)->key, (*b)->key);
}
2156

2157
/* Collects pointers to all entries into a NULL-terminated array sorted by key
 * (using the map's compare function). *ret/(ret_n) receive the array and its
 * count; an empty/NULL map yields NULL and 0. Returns 0 or -ENOMEM. */
static int _hashmap_dump_entries_sorted(
                HashmapBase *h,
                void ***ret,
                size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        Iterator iter;
        unsigned idx;
        size_t n = 0;

        assert(ret);
        assert(ret_n);

        if (_hashmap_size(h) == 0) {
                *ret = NULL;
                *ret_n = 0;
                return 0;
        }

        /* We append one more element than needed so that the resulting array can be used as a strv. We
         * don't count this entry in the returned size. */
        entries = new(void*, _hashmap_size(h) + 1);
        if (!entries)
                return -ENOMEM;

        HASHMAP_FOREACH_IDX(idx, h, iter)
                entries[n++] = bucket_at(h, idx);

        assert(n == _hashmap_size(h));
        entries[n] = NULL;

        typesafe_qsort_r((struct hashmap_base_entry**) entries, n,
                         hashmap_entry_compare, h->hash_ops->compare);

        *ret = TAKE_PTR(entries);
        *ret_n = n;
        return 0;
}
2194

2195
/* Returns the map's keys as a NULL-terminated array sorted by the map's compare
 * function. The key pointers are shared with the map; caller frees the array only. */
int _hashmap_dump_keys_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array: replace each entry pointer by its key in place. */
        FOREACH_ARRAY(e, entries, n)
                *e = (void*) (*(struct hashmap_base_entry**) e)->key;

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}
2213

2214
/* Returns the map's values as a NULL-terminated array, ordered by sorted key.
 * The value pointers are shared with the map; caller frees the array only. */
int _hashmap_dump_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array: replace each entry pointer by its value in place. */
        FOREACH_ARRAY(e, entries, n)
                *e = entry_value(h, *(struct hashmap_base_entry**) e);

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc