• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

c-ares / c-ares / #53

27 Sep 2023 12:43PM UTC coverage: 88.055% (+0.02%) from 88.04%
#53

push

travis-ci

bradh352
fix #206 by allowing NULL to be passed to socket function callbacks to use defaults

21 of 21 new or added lines in 1 file covered. (100.0%)

5057 of 5743 relevant lines covered (88.06%)

11801.46 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

80.6
/src/lib/ares__htable.c
1
/* Copyright (C) 2023 by Brad House
2
 *
3
 * Permission to use, copy, modify, and distribute this
4
 * software and its documentation for any purpose and without
5
 * fee is hereby granted, provided that the above copyright
6
 * notice appear in all copies and that both that copyright
7
 * notice and this permission notice appear in supporting
8
 * documentation, and that the name of M.I.T. not be used in
9
 * advertising or publicity pertaining to distribution of the
10
 * software without specific, written prior permission.
11
 * M.I.T. makes no representations about the suitability of
12
 * this software for any purpose.  It is provided "as is"
13
 * without express or implied warranty.
14
 *
15
 * SPDX-License-Identifier: MIT
16
 */
17
#include "ares_setup.h"
18
#include "ares.h"
19
#include "ares_private.h"
20
#include "ares__llist.h"
21
#include "ares__htable.h"
22

23
/* Bucket counts are always powers of 2 so the hash can be reduced to an
 * index with a mask instead of a modulo (see HASH_IDX). */
#define ARES__HTABLE_MAX_BUCKETS (1U<<24)
#define ARES__HTABLE_MIN_BUCKETS (1U<<4)
/* Expand (double) the bucket array once the number of keys exceeds this
 * percentage of the bucket count. */
#define ARES__HTABLE_EXPAND_PERCENT 75
26

27
/*! Hash table state.  Collisions are handled by chaining: each bucket is a
 *  lazily-allocated linked list of stored values. */
struct ares__htable {
  ares__htable_hashfunc_t    hash;        /*!< Hash function for keys */
  ares__htable_bucket_key_t  bucket_key;  /*!< Extracts the key from a stored value */
  ares__htable_bucket_free_t bucket_free; /*!< Destructor for stored values */
  ares__htable_key_eq_t      key_eq;      /*!< Key equality predicate */
  unsigned int               seed;        /*!< Random seed mixed into the hash */
  unsigned int               size;        /*!< Bucket count, always a power of 2 */
  size_t                     num_keys;    /*!< Number of entries currently stored */
  /* NOTE: if we converted buckets into ares__slist_t we could guarantee on
   *       hash collisions we would have O(log n) worst case insert and search
   *       performance.  (We'd also need to make key_eq into a key_cmp to
   *       support sort).  That said, risk with a random hash seed is near zero,
   *       and ares__slist_t is heavier weight so I think using ares__llist_t
   *       is an overall win. */
  ares__llist_t            **buckets;
};
43

44

45
/*! Generate a per-table hash seed so hash values are not predictable to an
 *  attacker attempting to force collisions.
 *
 *  \param[in] htable  table being initialized (its heap address is one of
 *                     the entropy sources)
 *  \return seed value
 */
static unsigned int ares__htable_generate_seed(ares__htable_t *htable)
{
  unsigned int seed = 0;

  /* Mix stack address, heap address, and time to generate a random seed, it
   * doesn't have to be super secure, just quick.  Likelihood of a hash
   * collision attack is very low with a small amount of effort.
   *
   * FIX: the sources are combined with XOR rather than OR.  OR can only set
   * bits, never clear them, so an OR-combined seed rapidly saturates toward
   * all-ones and loses most of the entropy the individual sources provide. */
  seed ^= (unsigned int)((size_t)htable & 0xFFFFFFFF);
  seed ^= (unsigned int)((size_t)&seed & 0xFFFFFFFF);
  seed ^= (unsigned int)time(NULL) & 0xFFFFFFFF;
  return seed;
}
57

58
/*! Tear down an array of collision-chain buckets.
 *
 *  \param[in] buckets       bucket array, may be NULL (no-op)
 *  \param[in] size          number of entries in the bucket array
 *  \param[in] destroy_vals  when non-zero, stored values are destroyed too;
 *                           when zero, only the list structures are freed
 *                           (used during rehash, where the values live on in
 *                           a replacement bucket array)
 */
static void ares__htable_buckets_destroy(ares__llist_t **buckets,
                                         unsigned int size,
                                         unsigned char destroy_vals)
{
  unsigned int idx;

  if (buckets == NULL)
    return;

  for (idx = 0; idx < size; idx++) {
    ares__llist_t *chain = buckets[idx];

    if (chain == NULL)
      continue;

    /* Stripping the destructor lets the list be freed without touching the
     * values it holds */
    if (!destroy_vals)
      ares__llist_replace_destructor(chain, NULL);

    ares__llist_destroy(chain);
  }

  ares_free(buckets);
}
79

80

81
/*! Destroy a hash table and every value stored in it.  NULL-tolerant. */
void ares__htable_destroy(ares__htable_t *htable)
{
  if (htable == NULL)
    return;

  /* The table owns its entries, so destroy_vals=1 runs their destructors */
  ares__htable_buckets_destroy(htable->buckets, htable->size, 1);
  ares_free(htable);
}
88

89

90
ares__htable_t *ares__htable_create(ares__htable_hashfunc_t    hash_func,
625✔
91
                                    ares__htable_bucket_key_t  bucket_key,
92
                                    ares__htable_bucket_free_t bucket_free,
93
                                    ares__htable_key_eq_t      key_eq)
94
{
95
  ares__htable_t *htable = NULL;
625✔
96

97
  if (hash_func == NULL || bucket_key == NULL || bucket_free == NULL ||
625✔
98
      key_eq == NULL) {
99
    goto fail;
×
100
  }
101

102
  htable = ares_malloc(sizeof(*htable));
625✔
103
  if (htable == NULL)
625✔
104
    goto fail;
7✔
105

106
  memset(htable, 0, sizeof(*htable));
618✔
107

108
  htable->hash        = hash_func;
618✔
109
  htable->bucket_key  = bucket_key;
618✔
110
  htable->bucket_free = bucket_free;
618✔
111
  htable->key_eq      = key_eq;
618✔
112
  htable->seed        = ares__htable_generate_seed(htable);
618✔
113
  htable->size        = ARES__HTABLE_MIN_BUCKETS;
618✔
114
  htable->buckets     = ares_malloc(sizeof(*htable->buckets) * htable->size);
618✔
115

116
  if (htable->buckets == NULL)
618✔
117
    goto fail;
5✔
118

119
  memset(htable->buckets, 0, sizeof(*htable->buckets) * htable->size);
613✔
120

121
  return htable;
613✔
122

123
fail:
12✔
124
  ares__htable_destroy(htable);
12✔
125
  return NULL;
12✔
126
}
127

128

129
/*! Grabs the Hashtable index from the key and length.  The h index is
 *  the hash of the function reduced to the size of the bucket list.
 *  We are doing "hash & (size - 1)" since we are guaranteeing a power of
 *  2 for size. This is equivalent to "hash % size", but should be more
 *  efficient.
 *
 *  FIX: the macro arguments and the full expansion are now parenthesized.
 *  The unparenthesized form would bind incorrectly if the expansion ever
 *  appeared next to a higher-precedence operator (e.g. `HASH_IDX(h,k) == 0`
 *  would parse as `h->hash(...) & ((h->size - 1) == 0)`). */
#define HASH_IDX(h, key) ((h)->hash((key), (h)->seed) & ((h)->size - 1))
135

136
/*! Scan one bucket's collision chain for a matching key.
 *
 *  \param[in] htable  initialized table
 *  \param[in] idx     bucket index (must already be reduced via HASH_IDX)
 *  \param[in] key     key to look up
 *  \return list node holding the matching value, or NULL if not present
 */
static ares__llist_node_t *ares__htable_find(ares__htable_t *htable,
                                             unsigned int idx,
                                             const void *key)
{
  ares__llist_node_t *cur;

  /* An empty bucket (NULL list) yields no first node, so the loop simply
   * never executes and NULL falls out */
  for (cur = ares__llist_node_first(htable->buckets[idx]); cur != NULL;
       cur = ares__llist_node_next(cur)) {
    const void *entry_key = htable->bucket_key(ares__llist_node_val(cur));

    if (htable->key_eq(key, entry_key)) {
      return cur;
    }
  }

  return NULL;
}
152

153

154
/*! Double the bucket count and redistribute every stored value into a new
 *  bucket array.
 *
 *  \param[in,out] htable  table to expand
 *  \return 1 on success (including the already-at-maximum case where no
 *          expansion occurs), 0 on allocation failure, in which case the
 *          table is left exactly as it was.
 */
static unsigned int ares__htable_expand(ares__htable_t *htable)
{
  ares__llist_t **buckets  = NULL;
  unsigned int    old_size = htable->size;
  size_t          i;

  /* Not a failure, just won't expand */
  if (old_size == ARES__HTABLE_MAX_BUCKETS)
    return 1;

  /* Doubled up-front because HASH_IDX below must mask with the NEW size;
   * restored on the fail path */
  htable->size <<= 1;

  /* We must do this in 2 passes as we want it to be non-destructive in case
   * there is a memory allocation failure.  So we will actually use more
   * memory doing it this way, but at least we might be able to gracefully
   * recover */
  buckets = ares_malloc(sizeof(*buckets) * htable->size);
  if (buckets == NULL)
    goto fail;

  memset(buckets, 0, sizeof(*buckets) * htable->size);

  for (i=0; i<old_size; i++) {
    ares__llist_node_t *node;
    for (node = ares__llist_node_first(htable->buckets[i]);
         node != NULL;
         node = ares__llist_node_next(node)) {

      void  *val = ares__llist_node_val(node);
      size_t idx = HASH_IDX(htable, htable->bucket_key(val));

      /* Collision chains in the new array are allocated lazily, same as in
       * ares__htable_insert() */
      if (buckets[idx] == NULL) {
        buckets[idx] = ares__llist_create(htable->bucket_free);
        if (buckets[idx] == NULL)
          goto fail;
      }

      if (ares__llist_insert_first(buckets[idx], val) == NULL) {
        goto fail;
      }

    }
  }

  /* Swap out buckets.  destroy_vals=0: the values now live in the new
   * bucket array and must survive the teardown of the old lists */
  ares__htable_buckets_destroy(htable->buckets, old_size, 0);
  htable->buckets = buckets;
  return 1;

fail:
  /* The partially-built new array references the same values as the old
   * (still intact) one, so it too must be destroyed without touching the
   * values */
  ares__htable_buckets_destroy(buckets, htable->size, 0);
  htable->size = old_size;

  return 0;
}
209

210

211
/*! Insert a value into the table, replacing any existing value with the
 *  same key.
 *
 *  \param[in,out] htable  initialized table
 *  \param[in]     bucket  value to store; ownership transfers to the table
 *  \return 1 on success, 0 on invalid parameter or allocation failure
 */
unsigned int ares__htable_insert(ares__htable_t *htable, void *bucket)
{
  const void         *key    = NULL;
  unsigned int        idx    = 0;
  ares__llist_node_t *match  = NULL;
  size_t              thresh = 0;

  if (htable == NULL || bucket == NULL)
    return 0;

  key = htable->bucket_key(bucket);
  idx = HASH_IDX(htable, key);

  /* An existing entry with the same key is replaced in place; num_keys is
   * unchanged in that case */
  match = ares__htable_find(htable, idx, key);
  if (match != NULL) {
    ares__llist_node_replace(match, bucket);
    return 1;
  }

  /* Rehash once the load factor crosses the expansion threshold, before
   * collisions become likely */
  thresh = (htable->size * ARES__HTABLE_EXPAND_PERCENT) / 100;
  if (htable->num_keys + 1 > thresh) {
    if (!ares__htable_expand(htable)) {
      return 0;
    }
    /* Bucket count changed, so the index must be recomputed */
    idx = HASH_IDX(htable, key);
  }

  /* Collision chains are allocated lazily on first use */
  if (htable->buckets[idx] == NULL) {
    htable->buckets[idx] = ares__llist_create(htable->bucket_free);
    if (htable->buckets[idx] == NULL)
      return 0;
  }

  if (ares__llist_insert_first(htable->buckets[idx], bucket) == NULL)
    return 0;

  htable->num_keys++;
  return 1;
}
256

257
  
258
/*! Look up a stored value by key.
 *
 *  \param[in] htable  initialized table
 *  \param[in] key     key to look up
 *  \return stored value, or NULL if not found / invalid parameters
 */
void *ares__htable_get(ares__htable_t *htable, const void *key)
{
  ares__llist_node_t *node = NULL;

  if (htable == NULL || key == NULL)
    return NULL;

  node = ares__htable_find(htable, HASH_IDX(htable, key), key);

  /* ares__llist_node_val() is presumably NULL-tolerant (the original passed
   * a possibly-NULL find result straight through), mapping "not found" to
   * NULL */
  return ares__llist_node_val(node);
}
269

270

271
/*! Remove the value stored under a key, destroying it with the table's
 *  bucket destructor.
 *
 *  \param[in,out] htable  initialized table
 *  \param[in]     key     key to remove
 *  \return 1 if an entry was removed, 0 otherwise
 */
unsigned int ares__htable_remove(ares__htable_t *htable, const void *key)
{
  ares__llist_node_t *node = NULL;

  if (htable == NULL || key == NULL)
    return 0;

  node = ares__htable_find(htable, HASH_IDX(htable, key), key);
  if (node == NULL)
    return 0;

  htable->num_keys--;
  /* Destroying the list node runs the bucket destructor on the value */
  ares__llist_node_destroy(node);
  return 1;
}
288

289
/*! Number of entries currently stored; 0 for a NULL table. */
size_t ares__htable_num_keys(ares__htable_t *htable)
{
  return (htable == NULL) ? 0 : htable->num_keys;
}
295

296
/*! 32-bit FNV-1a hash.
 *
 *  The caller-supplied seed is used in place of the standard offset basis
 *  (2166136261U); randomizing it per table makes hash values unpredictable
 *  and defends against crafted-collision attacks.
 *
 *  \param[in] key      bytes to hash
 *  \param[in] key_len  number of bytes
 *  \param[in] seed     initial hash state (offset basis)
 *  \return hash value
 */
unsigned int ares__htable_hash_FNV1a(const unsigned char *key, size_t key_len,
                                     unsigned int seed)
{
  unsigned int         hv  = seed;
  const unsigned char *end = key + key_len;

  while (key < end) {
    hv ^= (unsigned int)*key++;
    hv *= 16777619U; /* FNV prime, 0x01000193 */
  }

  return hv;
}
311

312
/* tolower() is locale-specific.  Use a lookup table fast conversion that only
 * operates on ASCII.  Entries 0x41-0x5A ('A'-'Z') map to 0x61-0x7A
 * ('a'-'z'); every other byte value maps to itself. */
static const unsigned char ares__tolower_lookup[] = {
  0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
  0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
  0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
  0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
  0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,
  0x28,0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F,
  0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,
  0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,
  /* 'A'-'Z' (0x41-0x5A) remapped to 'a'-'z' */
  0x40,0x61,0x62,0x63,0x64,0x65,0x66,0x67,
  0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,
  0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,
  0x78,0x79,0x7A,0x5B,0x5C,0x5D,0x5E,0x5F,
  0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67,
  0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,
  0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,
  0x78,0x79,0x7A,0x7B,0x7C,0x7D,0x7E,0x7F,
  0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,
  0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F,
  0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,
  0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F,
  0xA0,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,
  0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF,
  0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,
  0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF,
  0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,
  0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF,
  0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,
  0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,
  0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,
  0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,
  0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,
  0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF
};
348

349

350
/* Case insensitive version, meant for ASCII strings */
351
unsigned int ares__htable_hash_FNV1a_casecmp(const unsigned char *key, size_t key_len,
×
352
                                             unsigned int seed)
353
{
354
  /* recommended seed is 2166136261U, but we don't want collisions */
355
  unsigned int         hv   = seed;
×
356
  size_t               i;
357

358
  for (i = 0; i < key_len; i++) {
×
359
    hv ^= (unsigned int)ares__tolower_lookup[key[i]];
×
360
    /* hv *= 0x01000193 */
361
    hv += (hv<<1) + (hv<<4) + (hv<<7) + (hv<<8) + (hv<<24);
×
362
  }
363

364
  return hv;
×
365
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc