
c-ares / c-ares / build 12832224650

17 Jan 2025 03:40PM UTC coverage: 91.487% (-0.007%) from 91.494%
push / github / web-flow

Fix inet pton (#944)

With a partial address specification (is this really intended to be supported?), the remaining bits were not zeroed out.

Signed-off-by: Nikolaos Chatzikonstantinou (@createyourpersonalaccount)
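To illustrate what the fix addresses, here is a minimal, self-contained sketch of parsing a partial IPv4 address (a hypothetical parse_partial_ipv4(), not the c-ares code changed by this commit): octets the caller leaves unspecified must be explicitly zeroed, otherwise the destination buffer keeps whatever stale bytes it already held.

/* Illustration only (hypothetical helper, not the c-ares code changed by
 * this commit): parse a partial IPv4 address such as "10.20/16" and make
 * sure the unspecified trailing octets come out as zero instead of
 * inheriting stale bytes from the destination buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns the prefix length, or -1 on oversized input. */
static int parse_partial_ipv4(const char *src, unsigned char dst[4])
{
  char  buf[32];
  char *slash;
  char *tok;
  int   bits   = -1;
  int   noctet = 0;

  if (strlen(src) >= sizeof(buf)) {
    return -1;
  }
  strcpy(buf, src);

  slash = strchr(buf, '/');
  if (slash != NULL) {
    *slash = 0;
    bits   = atoi(slash + 1);
  }

  /* The important part: clear the whole address first so octets the caller
   * did not specify are zero, not leftover data. */
  memset(dst, 0, 4);

  for (tok = strtok(buf, "."); tok != NULL && noctet < 4;
       tok = strtok(NULL, ".")) {
    dst[noctet++] = (unsigned char)atoi(tok);
  }

  return (bits >= 0) ? bits : noctet * 8;
}

int main(void)
{
  unsigned char addr[4] = { 0xAA, 0xBB, 0xCC, 0xDD }; /* stale contents */
  int           bits    = parse_partial_ipv4("10.20/16", addr);

  /* Prints "/16 -> 10.20.0.0"; without the memset the last two octets
   * would remain 0xCC and 0xDD. */
  printf("/%d -> %u.%u.%u.%u\n", bits, addr[0], addr[1], addr[2], addr[3]);
  return 0;
}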

4 of 4 new or added lines in 2 files covered. (100.0%)

3 existing lines in 2 files now uncovered.

22408 of 24493 relevant lines covered (91.49%)

11881.53 hits per line

Source File: /src/lib/dsa/ares_htable.c (97.16% covered)
/* MIT License
 *
 * Copyright (c) 2023 Brad House
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * SPDX-License-Identifier: MIT
 */
#include "ares_private.h"
#include "ares_llist.h"
#include "ares_htable.h"

#define ARES__HTABLE_MAX_BUCKETS    (1U << 24)
#define ARES__HTABLE_MIN_BUCKETS    (1U << 4)
#define ARES__HTABLE_EXPAND_PERCENT 75

struct ares_htable {
  ares_htable_hashfunc_t    hash;
  ares_htable_bucket_key_t  bucket_key;
  ares_htable_bucket_free_t bucket_free;
  ares_htable_key_eq_t      key_eq;
  unsigned int              seed;
  unsigned int              size;
  size_t                    num_keys;
  size_t                    num_collisions;
  /* NOTE: if we converted buckets into ares_slist_t we could guarantee on
   *       hash collisions we would have O(log n) worst case insert and search
   *       performance.  (We'd also need to make key_eq into a key_cmp to
   *       support sort).  That said, risk with a random hash seed is near zero,
   *       and ares_slist_t is heavier weight, so I think using ares_llist_t
   *       is an overall win. */
  ares_llist_t            **buckets;
};

static unsigned int ares_htable_generate_seed(ares_htable_t *htable)
{
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
  /* Seed needs to be static for fuzzing */
  return 0;
#else
  unsigned int seed = 0;
  time_t       t    = time(NULL);

  /* Mix stack address, heap address, and time to generate a random seed, it
   * doesn't have to be super secure, just quick.  Likelihood of a hash
   * collision attack is very low with a small amount of effort */
  seed |= (unsigned int)((size_t)htable & 0xFFFFFFFF);
  seed |= (unsigned int)((size_t)&seed & 0xFFFFFFFF);
  seed |= (unsigned int)(((ares_uint64_t)t) & 0xFFFFFFFF);
  return seed;
#endif
}

static void ares_htable_buckets_destroy(ares_llist_t **buckets,
                                        unsigned int   size,
                                        ares_bool_t    destroy_vals)
{
  unsigned int i;

  if (buckets == NULL) {
    return;
  }

  for (i = 0; i < size; i++) {
    if (buckets[i] == NULL) {
      continue;
    }

    if (!destroy_vals) {
      ares_llist_replace_destructor(buckets[i], NULL);
    }

    ares_llist_destroy(buckets[i]);
  }

  ares_free(buckets);
}

void ares_htable_destroy(ares_htable_t *htable)
{
  if (htable == NULL) {
    return;
  }
  ares_htable_buckets_destroy(htable->buckets, htable->size, ARES_TRUE);
  ares_free(htable);
}

ares_htable_t *ares_htable_create(ares_htable_hashfunc_t    hash_func,
                                  ares_htable_bucket_key_t  bucket_key,
                                  ares_htable_bucket_free_t bucket_free,
                                  ares_htable_key_eq_t      key_eq)
{
  ares_htable_t *htable = NULL;

  if (hash_func == NULL || bucket_key == NULL || bucket_free == NULL ||
      key_eq == NULL) {
    goto fail;
  }

  htable = ares_malloc_zero(sizeof(*htable));
  if (htable == NULL) {
    goto fail;
  }

  htable->hash        = hash_func;
  htable->bucket_key  = bucket_key;
  htable->bucket_free = bucket_free;
  htable->key_eq      = key_eq;
  htable->seed        = ares_htable_generate_seed(htable);
  htable->size        = ARES__HTABLE_MIN_BUCKETS;
  htable->buckets = ares_malloc_zero(sizeof(*htable->buckets) * htable->size);

  if (htable->buckets == NULL) {
    goto fail;
  }

  return htable;

fail:
  ares_htable_destroy(htable);
  return NULL;
}

const void **ares_htable_all_buckets(const ares_htable_t *htable, size_t *num)
{
  const void **out = NULL;
  size_t       cnt = 0;
  size_t       i;

  if (htable == NULL || num == NULL) {
    return NULL; /* LCOV_EXCL_LINE */
  }

  *num = 0;

  out = ares_malloc_zero(sizeof(*out) * htable->num_keys);
  if (out == NULL) {
    return NULL; /* LCOV_EXCL_LINE */
  }

  for (i = 0; i < htable->size; i++) {
    ares_llist_node_t *node;
    for (node = ares_llist_node_first(htable->buckets[i]); node != NULL;
         node = ares_llist_node_next(node)) {
      out[cnt++] = ares_llist_node_val(node);
    }
  }

  *num = cnt;
  return out;
}

/*! Grabs the Hashtable index from the key and length.  The h index is
 *  the hash of the function reduced to the size of the bucket list.
 *  We are doing "hash & (size - 1)" since we are guaranteeing a power of
 *  2 for size. This is equivalent to "hash % size", but should be more
 * efficient */
#define HASH_IDX(h, key) h->hash(key, h->seed) & (h->size - 1)

static ares_llist_node_t *ares_htable_find(const ares_htable_t *htable,
                                           unsigned int idx, const void *key)
{
  ares_llist_node_t *node = NULL;

  for (node = ares_llist_node_first(htable->buckets[idx]); node != NULL;
       node = ares_llist_node_next(node)) {
    if (htable->key_eq(key, htable->bucket_key(ares_llist_node_val(node)))) {
      break;
    }
  }

  return node;
}

static ares_bool_t ares_htable_expand(ares_htable_t *htable)
{
  ares_llist_t **buckets  = NULL;
  unsigned int   old_size = htable->size;
  size_t         i;
  ares_llist_t **prealloc_llist     = NULL;
  size_t         prealloc_llist_len = 0;
  ares_bool_t    rv                 = ARES_FALSE;

  /* Not a failure, just won't expand */
  if (old_size == ARES__HTABLE_MAX_BUCKETS) {
    return ARES_TRUE; /* LCOV_EXCL_LINE */
  }

  htable->size <<= 1;

  /* We must pre-allocate all memory we'll need before moving entries to the
   * new hash array.  Otherwise if there's a memory allocation failure in the
   * middle, we wouldn't be able to recover. */
  buckets = ares_malloc_zero(sizeof(*buckets) * htable->size);
  if (buckets == NULL) {
    goto done; /* LCOV_EXCL_LINE */
  }

  /* The maximum number of new llists we'll need is the number of collisions
   * that were recorded */
  prealloc_llist_len = htable->num_collisions;
  if (prealloc_llist_len) {
    prealloc_llist =
      ares_malloc_zero(sizeof(*prealloc_llist) * prealloc_llist_len);
    if (prealloc_llist == NULL) {
      goto done; /* LCOV_EXCL_LINE */
    }
  }
  for (i = 0; i < prealloc_llist_len; i++) {
    prealloc_llist[i] = ares_llist_create(htable->bucket_free);
    if (prealloc_llist[i] == NULL) {
      goto done;
    }
  }

  /* Iterate across all buckets and move the entries to the new buckets */
  htable->num_collisions = 0;
  for (i = 0; i < old_size; i++) {
    ares_llist_node_t *node;

    /* Nothing in this bucket */
    if (htable->buckets[i] == NULL) {
      continue;
    }

    /* Fast path optimization (most likely case), there is likely only a single
     * entry in both the source and destination, check for this to confirm and
     * if so, just move the bucket over */
    if (ares_llist_len(htable->buckets[i]) == 1) {
      const void *val = ares_llist_first_val(htable->buckets[i]);
      size_t      idx = HASH_IDX(htable, htable->bucket_key(val));

      if (buckets[idx] == NULL) {
        /* Swap! */
        buckets[idx]       = htable->buckets[i];
        htable->buckets[i] = NULL;
        continue;
      }
    }

    /* Slow path, collisions */
    while ((node = ares_llist_node_first(htable->buckets[i])) != NULL) {
      const void *val = ares_llist_node_val(node);
      size_t      idx = HASH_IDX(htable, htable->bucket_key(val));

      /* Try fast path again as maybe we popped one collision off and the
       * next we can reuse the llist parent */
      if (buckets[idx] == NULL && ares_llist_len(htable->buckets[i]) == 1) {
        /* Swap! */
        buckets[idx]       = htable->buckets[i];
        htable->buckets[i] = NULL;
        break;
      }

      /* Grab one off our preallocated list */
      if (buckets[idx] == NULL) {
        /* Silence static analysis, this isn't possible but it doesn't know */
        if (prealloc_llist == NULL || prealloc_llist_len == 0) {
          goto done; /* LCOV_EXCL_LINE */
        }
        buckets[idx] = prealloc_llist[prealloc_llist_len - 1];
        prealloc_llist_len--;
      } else {
        /* Collision occurred since the bucket wasn't empty */
        htable->num_collisions++;
      }

      ares_llist_node_mvparent_first(node, buckets[idx]);
    }

    /* Abandoned bucket, destroy */
    if (htable->buckets[i] != NULL) {
      ares_llist_destroy(htable->buckets[i]);
      htable->buckets[i] = NULL;
    }
  }

  /* We have guaranteed all the buckets have either been moved or destroyed,
   * so we just call ares_free() on the array and swap out the pointer */
  ares_free(htable->buckets);
  htable->buckets = buckets;
  buckets         = NULL;
  rv              = ARES_TRUE;

done:
  ares_free(buckets);
  /* destroy any unused preallocated buckets */
  ares_htable_buckets_destroy(prealloc_llist, (unsigned int)prealloc_llist_len,
                              ARES_FALSE);

  /* On failure, we need to restore the htable size */
  if (rv != ARES_TRUE) {
    htable->size = old_size; /* LCOV_EXCL_LINE */
  }

  return rv;
}

ares_bool_t ares_htable_insert(ares_htable_t *htable, void *bucket)
{
  unsigned int       idx  = 0;
  ares_llist_node_t *node = NULL;
  const void        *key  = NULL;

  if (htable == NULL || bucket == NULL) {
    return ARES_FALSE;
  }


  key = htable->bucket_key(bucket);
  idx = HASH_IDX(htable, key);

  /* See if we have a matching bucket already, if so, replace it */
  node = ares_htable_find(htable, idx, key);
  if (node != NULL) {
    ares_llist_node_replace(node, bucket);
    return ARES_TRUE;
  }

  /* Check to see if we should rehash because likelihood of collisions has
   * increased beyond our threshold */
  if (htable->num_keys + 1 >
      (htable->size * ARES__HTABLE_EXPAND_PERCENT) / 100) {
    if (!ares_htable_expand(htable)) {
      return ARES_FALSE; /* LCOV_EXCL_LINE */
    }
    /* If we expanded, need to calculate a new index */
    idx = HASH_IDX(htable, key);
  }

  /* We lazily allocate the linked list */
  if (htable->buckets[idx] == NULL) {
    htable->buckets[idx] = ares_llist_create(htable->bucket_free);
    if (htable->buckets[idx] == NULL) {
      return ARES_FALSE;
    }
  }

  node = ares_llist_insert_first(htable->buckets[idx], bucket);
  if (node == NULL) {
    return ARES_FALSE;
  }

  /* Track collisions for rehash stability */
  if (ares_llist_len(htable->buckets[idx]) > 1) {
    htable->num_collisions++;
  }

  htable->num_keys++;

  return ARES_TRUE;
}

void *ares_htable_get(const ares_htable_t *htable, const void *key)
{
  unsigned int idx;

  if (htable == NULL || key == NULL) {
    return NULL;
  }

  idx = HASH_IDX(htable, key);

  return ares_llist_node_val(ares_htable_find(htable, idx, key));
}

ares_bool_t ares_htable_remove(ares_htable_t *htable, const void *key)
{
  ares_llist_node_t *node;
  unsigned int       idx;

  if (htable == NULL || key == NULL) {
    return ARES_FALSE;
  }

  idx  = HASH_IDX(htable, key);
  node = ares_htable_find(htable, idx, key);
  if (node == NULL) {
    return ARES_FALSE;
  }

  htable->num_keys--;

  /* Reduce collisions */
  if (ares_llist_len(ares_llist_node_parent(node)) > 1) {
    htable->num_collisions--;
  }

  ares_llist_node_destroy(node);
  return ARES_TRUE;
}

size_t ares_htable_num_keys(const ares_htable_t *htable)
{
  if (htable == NULL) {
    return 0;
  }
  return htable->num_keys;
}

unsigned int ares_htable_hash_FNV1a(const unsigned char *key, size_t key_len,
                                    unsigned int seed)
{
  unsigned int hv = seed ^ 2166136261U;
  size_t       i;

  for (i = 0; i < key_len; i++) {
    hv ^= (unsigned int)key[i];
    /* hv *= 16777619 (0x01000193) */
    hv += (hv << 1) + (hv << 4) + (hv << 7) + (hv << 8) + (hv << 24);
  }

  return hv;
}

/* Case insensitive version, meant for ASCII strings */
unsigned int ares_htable_hash_FNV1a_casecmp(const unsigned char *key,
                                            size_t key_len, unsigned int seed)
{
  unsigned int hv = seed ^ 2166136261U;
  size_t       i;

  for (i = 0; i < key_len; i++) {
    hv ^= (unsigned int)ares_tolower(key[i]);
    /* hv *= 16777619 (0x01000193) */
    hv += (hv << 1) + (hv << 4) + (hv << 7) + (hv << 8) + (hv << 24);
  }

  return hv;
}
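Two details in this file are easy to sanity-check in isolation: HASH_IDX reduces a hash with "hash & (size - 1)", which matches "hash % size" only because the bucket count is kept a power of two, and both FNV-1a routines replace the multiply by the FNV prime 16777619 (0x01000193) with a shift-and-add chain. The following standalone sketch (fnv1a_ref and fnv1a_shift are illustrative names, not c-ares functions) confirms both equivalences:

/* Standalone check, not part of c-ares: verifies that the shift-and-add form
 * used in ares_htable_hash_FNV1a matches a plain multiply by the FNV prime,
 * and that masking by (size - 1) matches modulo for power-of-two sizes. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Reference FNV-1a: multiply by the 32-bit FNV prime directly */
static unsigned int fnv1a_ref(const unsigned char *key, size_t key_len,
                              unsigned int seed)
{
  unsigned int hv = seed ^ 2166136261U;
  size_t       i;
  for (i = 0; i < key_len; i++) {
    hv ^= (unsigned int)key[i];
    hv *= 16777619U; /* 0x01000193 */
  }
  return hv;
}

/* Same loop with the shift-and-add chain from ares_htable.c: 0x01000193 has
 * bits 0, 1, 4, 7, 8 and 24 set, so hv * 0x01000193 expands to
 * hv + (hv<<1) + (hv<<4) + (hv<<7) + (hv<<8) + (hv<<24); unsigned overflow
 * wraps identically in both forms, so they agree for any width of unsigned. */
static unsigned int fnv1a_shift(const unsigned char *key, size_t key_len,
                                unsigned int seed)
{
  unsigned int hv = seed ^ 2166136261U;
  size_t       i;
  for (i = 0; i < key_len; i++) {
    hv ^= (unsigned int)key[i];
    hv += (hv << 1) + (hv << 4) + (hv << 7) + (hv << 8) + (hv << 24);
  }
  return hv;
}

int main(void)
{
  const unsigned char key[] = "example.com";
  unsigned int        seed  = 0x5eedU;
  unsigned int        h     = fnv1a_ref(key, strlen((const char *)key), seed);
  unsigned int        size;

  assert(h == fnv1a_shift(key, strlen((const char *)key), seed));

  /* Bucket reduction: for power-of-two sizes, mask and modulo agree */
  for (size = 1U << 4; size <= (1U << 24); size <<= 1) {
    assert((h & (size - 1)) == (h % size));
  }

  printf("hash=0x%08x bucket(16)=%u\n", h, h & ((1U << 4) - 1));
  return 0;
}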