• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

proftpd / proftpd / 14552756858

19 Apr 2025 08:53PM UTC coverage: 93.03% (+0.4%) from 92.667%
14552756858

push

github

51358 of 55206 relevant lines covered (93.03%)

213.67 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

93.3
/src/pool.c
1
/*
2
 * ProFTPD - FTP server daemon
3
 * Copyright (c) 1997, 1998 Public Flood Software
4
 * Copyright (c) 1999, 2000 MacGyver aka Habeeb J. Dihu <macgyver@tos.net>
5
 * Copyright (c) 2001-2025 The ProFTPD Project team
6
 *
7
 * This program is free software; you can redistribute it and/or modify
8
 * it under the terms of the GNU General Public License as published by
9
 * the Free Software Foundation; either version 2 of the License, or
10
 * (at your option) any later version.
11
 *
12
 * This program is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
20
 *
21
 * As a special exemption, Public Flood Software/MacGyver aka Habeeb J. Dihu
22
 * and other respective copyright holders give permission to link this program
23
 * with OpenSSL, and distribute the resulting executable, without including
24
 * the source code for OpenSSL in the source distribution.
25
 */
26

27
/* Resource allocation code */
28

29
#include "conf.h"
30

31
/* Manage free storage blocks */
32

33
/* Union of the types with the strictest alignment requirements on typical
 * platforms; its size defines the allocator's alignment granularity.
 */
union align {
  char *cp;
  void (*f)(void);
  long l;
  FILE *fp;
  double d;
};

/* Size of one aligned allocation unit ("click"); all pool allocations are
 * rounded up to a multiple of this.
 */
#define CLICK_SZ (sizeof(union align))
42

43
/* Header placed at the front of every allocated block.  The union with
 * `align` (plus explicit padding on LP64 platforms) keeps the memory that
 * immediately follows the header suitably aligned for any type.
 */
union block_hdr {
  union align a;

  /* Padding */
#if defined(_LP64) || defined(__LP64__)
  char pad[32];
#endif

  /* Actual header */
  struct {
    void *endp;               /* One past the last usable byte of the block */
    union block_hdr *next;    /* Next block in a pool's chain/free list */
    void *first_avail;        /* Start of the not-yet-allocated region */
  } h;
};
58

59
/* Head of the global list of blocks available for reuse; blocks are returned
 * here by free_blocks() and handed back out by new_block().
 */
static union block_hdr *block_freelist = NULL;

/* Statistics */
static unsigned int stat_malloc = 0;        /* incr when malloc required */
static unsigned int stat_freehit = 0;        /* incr when freelist used */

/* Trace channel used for pool diagnostics. */
static const char *trace_channel = "pool";

/* Debug flags (see pr_pool_debug_set_flags()) */
static int debug_flags = 0;
69

70
#ifdef PR_USE_DEVEL
/* Format a diagnostic message and write it to stderr; used as the debugf
 * callback when dumping pool state on out-of-memory (devel builds only).
 */
static void oom_printf(const char *fmt, ...) {
  char msgbuf[PR_TUNABLE_BUFFER_SIZE];
  va_list args;

  memset(msgbuf, '\0', sizeof(msgbuf));

  va_start(args, fmt);
  pr_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
  va_end(args);

  /* Guarantee NUL-termination regardless of formatting result. */
  msgbuf[sizeof(msgbuf)-1] = '\0';
  fprintf(stderr, "%s\n", msgbuf);
}
#endif /* PR_USE_DEVEL */
85

86
/* Lowest level memory allocation functions
87
 */
88

89
/* Handle allocation failure: log at ALERT level, optionally dump the pool
 * hierarchy (devel builds with the OOM-dump flag set), then terminate.
 * Never returns.
 */
static void null_alloc(void) {
  pr_log_pri(PR_LOG_ALERT, "Out of memory!");
#ifdef PR_USE_DEVEL
  if (debug_flags & PR_POOL_DEBUG_FL_OOM_DUMP_POOLS) {
    pr_pool_debug_memory(oom_printf);
  }
#endif

  exit(1);
}
99

100
/* malloc(3) wrapper that never returns NULL: on failure (or on a zero-length
 * request, which indicates a caller bug), null_alloc() terminates the process.
 */
static void *smalloc(size_t size) {
  void *ptr;

  /* Reject zero-length requests outright.  malloc(0) behavior is not
   * dependable across platforms: it may return NULL or a unique pointer.
   * Either way, asking for zero bytes here means the caller is doing
   * something it should not be doing.
   */
  if (size == 0) {
    null_alloc();
  }

  ptr = malloc(size);
  if (ptr == NULL) {
    null_alloc();
  }

  return ptr;
}
121

122
/* Grab a completely new block from the system pool.  Relies on malloc()
123
 * to return truly aligned memory.
124
 */
125
static union block_hdr *malloc_block(size_t size) {
11,605✔
126
  union block_hdr *blok =
11,605✔
127
    (union block_hdr *) smalloc(size + sizeof(union block_hdr));
11,605✔
128

129
  blok->h.next = NULL;
11,605✔
130
  blok->h.first_avail = (char *) (blok + 1);
11,605✔
131
  blok->h.endp = size + (char *) blok->h.first_avail;
11,605✔
132

133
  return blok;
11,605✔
134
}
135

136
/* Debug-build sanity check: scan the free list for `blok`; finding it there
 * means a block is being freed twice, which is treated as fatal.
 * No-op in non-devel builds.
 */
static void chk_on_blk_list(union block_hdr *blok, union block_hdr *free_blk,
    const char *pool_tag) {

#ifdef PR_USE_DEVEL
  /* Debug code */

  for (; free_blk != NULL; free_blk = free_blk->h.next) {
    if (free_blk == blok) {
      pr_log_pri(PR_LOG_WARNING, "fatal: DEBUG: Attempt to free already free "
       "block in pool '%s'", pool_tag ? pool_tag : "<unnamed>");
      exit(1);
    }
  }
#endif /* PR_USE_DEVEL */
}
154

155
/* Free a chain of blocks -- _must_ call with alarms blocked. */

static void free_blocks(union block_hdr *blok, const char *pool_tag) {
  /* Puts new blocks at head of block list, point next pointer of
   * last block in chain to free blocks we already had.
   */

  union block_hdr *old_free_list = block_freelist;

  if (blok == NULL) {
    /* Don't free an empty pool. */
    return;
  }

  /* The donated chain becomes the new head of the free list; the old free
   * list is re-attached at the chain's tail below.
   */
  block_freelist = blok;

  /* Adjust first_avail pointers */

  while (blok->h.next) {
    /* Double-free sanity check (devel builds), then reset the block to
     * fully-available: first_avail points just past the header.
     */
    chk_on_blk_list(blok, old_free_list, pool_tag);
    blok->h.first_avail = (char *) (blok + 1);
    blok = blok->h.next;
  }

  /* Last block of the chain: reset it too, then splice the old free list
   * onto its tail.
   */
  chk_on_blk_list(blok, old_free_list, pool_tag);
  blok->h.first_avail = (char *) (blok + 1);
  blok->h.next = old_free_list;
}
183

184
/* Get a new block, from the free list if possible, otherwise malloc a new
185
 * one.  minsz is the requested size of the block to be allocated.
186
 * If exact is TRUE, then minsz is the exact size of the allocated block;
187
 * otherwise, the allocated size will be rounded up from minsz to the nearest
188
 * multiple of BLOCK_MINFREE.
189
 *
190
 * Important: BLOCK ALARMS BEFORE CALLING
191
 */
192

193
static union block_hdr *new_block(int minsz, int exact) {
17,217✔
194
  union block_hdr **lastptr = &block_freelist;
17,217✔
195
  union block_hdr *blok = block_freelist;
17,217✔
196

197
  if (!exact) {
17,217✔
198
    minsz = 1 + ((minsz - 1) / BLOCK_MINFREE);
16,307✔
199
    minsz *= BLOCK_MINFREE;
16,307✔
200
  }
201

202
  /* Check if we have anything of the requested size on our free list first...
203
   */
204
  while (blok) {
17,390✔
205
    if (minsz <= ((char *) blok->h.endp - (char *) blok->h.first_avail)) {
5,785✔
206
      *lastptr = blok->h.next;
5,612✔
207
      blok->h.next = NULL;
5,612✔
208

209
      stat_freehit++;
5,612✔
210
      return blok;
5,612✔
211
    }
212

213
    lastptr = &blok->h.next;
173✔
214
    blok = blok->h.next;
173✔
215
  }
216

217
  /* Nope...damn.  Have to malloc() a new one. */
218
  stat_malloc++;
11,605✔
219
  return malloc_block(minsz);
11,605✔
220
}
221

222
/* Forward declarations; the cleanup record type and its runner are defined
 * near the end of this file.
 */
struct cleanup;

static void run_cleanups(struct cleanup *c);
225

226
/* Pool internal and management */
227

228
struct pool_rec {
  union block_hdr *first;       /* First block owned by this pool (holds the
                                 * pool header itself) */
  union block_hdr *last;        /* Most recently added block; allocations are
                                 * attempted here first */
  struct cleanup *cleanups;     /* Callbacks run when the pool is cleared or
                                 * destroyed */
  struct pool_rec *sub_pools;   /* Head of this pool's child list */
  struct pool_rec *sub_next;    /* Next sibling in the parent's child list */
  struct pool_rec *sub_prev;    /* Previous sibling */
  struct pool_rec *parent;      /* Parent pool; NULL for a root pool */
  char *free_first_avail;       /* Saved first_avail of the first block, used
                                 * to reset it in clear_pool() */
  const char *tag;              /* Human-readable label for debugging */
};
239

240
pool *permanent_pool = NULL;
pool *global_config_pool = NULL;

/* Each pool structure is allocated in the start of its own first block,
 * so there is a need to know how many bytes that is (once properly
 * aligned).
 */

#define POOL_HDR_CLICKS (1 + ((sizeof(struct pool_rec) - 1) / CLICK_SZ))
#define POOL_HDR_BYTES (POOL_HDR_CLICKS * CLICK_SZ)
250

251
static unsigned long blocks_in_block_list(union block_hdr *blok) {
252
  unsigned long count = 0;
253

254
  while (blok) {
171✔
255
    count++;
99✔
256
    blok = blok->h.next;
99✔
257
  }
258

259
  return count;
63✔
260
}
261

262
static unsigned long bytes_in_block_list(union block_hdr *blok) {
63✔
263
  unsigned long size = 0;
63✔
264

265
  while (blok) {
171✔
266
    size += ((char *) blok->h.endp - (char *) (blok + 1));
99✔
267
    blok = blok->h.next;
99✔
268
  }
269

270
  return size;
271
}
272

273
/* Count the subpools of `p`, recursively (children, grandchildren, ...). */
static unsigned int subpools_in_pool(pool *p) {
  unsigned int n = 0;
  pool *sub;

  /* One for each direct child, plus that child's own descendants. */
  for (sub = p->sub_pools; sub != NULL; sub = sub->sub_next) {
    n += subpools_in_pool(sub) + 1;
  }

  return n;
}
288

289
/* Visit all pools, starting with the top-level permanent pool, walking the
290
 * hierarchy.
291
 */
292
static unsigned long visit_pools(pool *p, unsigned long level,
46✔
293
    void (*visit)(const pr_pool_info_t *, void *), void *user_data) {
294
  unsigned long total_bytes = 0;
46✔
295

296
  if (p == NULL) {
46✔
297
    return 0;
298
  }
299

300
  for (; p; p = p->sub_next) {
109✔
301
    unsigned long byte_count = 0, block_count = 0;
63✔
302
    unsigned int subpool_count = 0;
63✔
303
    pr_pool_info_t pinfo;
63✔
304

305
    byte_count = bytes_in_block_list(p->first);
211✔
306
    block_count = blocks_in_block_list(p->first);
63✔
307
    subpool_count = subpools_in_pool(p);
63✔
308

309
    total_bytes += byte_count;
63✔
310

311
    memset(&pinfo, 0, sizeof(pinfo));
63✔
312
    pinfo.have_pool_info = TRUE;
63✔
313
    pinfo.tag = p->tag;
63✔
314
    pinfo.ptr = p;
63✔
315
    pinfo.byte_count = byte_count;
63✔
316
    pinfo.block_count = block_count;
63✔
317
    pinfo.subpool_count = subpool_count;
63✔
318
    pinfo.level = level;
63✔
319

320
    visit(&pinfo, user_data);
63✔
321

322
    /* Recurse */
323
    if (p->sub_pools) {
63✔
324
      total_bytes += visit_pools(p->sub_pools, level + 1, visit, user_data);
35✔
325
    }
326
  }
327

328
  return total_bytes;
329
}
330

331
static void pool_printf(const char *fmt, ...) {
64✔
332
  char buf[PR_TUNABLE_BUFFER_SIZE];
64✔
333
  va_list msg;
64✔
334

335
  memset(buf, '\0', sizeof(buf));
64✔
336

337
  va_start(msg, fmt);
64✔
338
  pr_vsnprintf(buf, sizeof(buf), fmt, msg);
64✔
339
  va_end(msg);
64✔
340

341
  buf[sizeof(buf)-1] = '\0';
64✔
342
  pr_trace_msg(trace_channel, 5, "%s", buf);
64✔
343
}
64✔
344

345
/* Visitor callback used by pr_pool_debug_memory(): renders each record into
 * human-readable lines via the debugf function passed as user_data.
 */
static void pool_visitf(const pr_pool_info_t *pinfo, void *user_data) {
  void (*debugf)(const char *, ...) = user_data;

  if (pinfo->have_pool_info) {

    /* The emitted message is:
     *
     *  <pool-tag> [pool-ptr] (n B, m L, r P)
     *
     * where n is the number of bytes (B), m is the number of allocated blocks
     * in the pool list (L), and r is the number of sub-pools (P).
     */

    if (pinfo->level == 0) {
      debugf("%s [%p] (%lu B, %lu L, %u P)",
        pinfo->tag ? pinfo->tag : "<unnamed>", pinfo->ptr,
        pinfo->byte_count, pinfo->block_count, pinfo->subpool_count);

    } else {
      /* Indent nested pools by 3 spaces per level below the first. */
      char indent_text[80] = "";

      if (pinfo->level > 1) {
        memset(indent_text, ' ', sizeof(indent_text)-1);

        if ((pinfo->level - 1) * 3 >= sizeof(indent_text)) {
          /* Deeper than the buffer can represent: use maximum indent. */
          indent_text[sizeof(indent_text)-1] = 0;

        } else {
          /* Truncate the run of spaces at 3 * (level - 1). */
          indent_text[(pinfo->level - 1) * 3] = '\0';
        }
      }

      debugf("%s + %s [%p] (%lu B, %lu L, %u P)", indent_text,
        pinfo->tag ? pinfo->tag : "<unnamed>", pinfo->ptr,
        pinfo->byte_count, pinfo->block_count, pinfo->subpool_count);
    }
  }

  if (pinfo->have_freelist_info) {
    debugf("Free block list: %lu bytes", pinfo->freelist_byte_count);
  }

  if (pinfo->have_total_info) {
    debugf("Total %lu bytes allocated", pinfo->total_byte_count);
    debugf("%lu blocks allocated", pinfo->total_blocks_allocated);
    debugf("%lu blocks reused", pinfo->total_blocks_reused);
  }
}
393

394
void pr_pool_debug_memory(void (*debugf)(const char *, ...)) {
6✔
395
  if (debugf == NULL) {
6✔
396
    debugf = pool_printf;
6✔
397
  }
398

399
  debugf("Memory pool allocation:");
6✔
400
  pr_pool_debug_memory2(pool_visitf, debugf);
6✔
401
}
6✔
402

403
void pr_pool_debug_memory2(void (*visit)(const pr_pool_info_t *, void *),
12✔
404
    void *user_data) {
405
  unsigned long freelist_byte_count = 0, freelist_block_count = 0,
12✔
406
    total_byte_count = 0;
12✔
407
  pr_pool_info_t pinfo;
12✔
408

409
  if (visit == NULL) {
12✔
410
    return;
1✔
411
  }
412

413
  /* Per pool */
414
  total_byte_count = visit_pools(permanent_pool, 0, visit, user_data);
11✔
415

416
  /* Free list */
417
  if (block_freelist) {
11✔
418
    freelist_byte_count = bytes_in_block_list(block_freelist);
23✔
419
    freelist_block_count = blocks_in_block_list(block_freelist);
11✔
420
  }
421

422
  memset(&pinfo, 0, sizeof(pinfo));
11✔
423
  pinfo.have_freelist_info = TRUE;
11✔
424
  pinfo.freelist_byte_count = freelist_byte_count;
11✔
425
  pinfo.freelist_block_count = freelist_block_count;
11✔
426

427
  visit(&pinfo, user_data);
11✔
428

429
  /* Totals */
430
  memset(&pinfo, 0, sizeof(pinfo));
11✔
431
  pinfo.have_total_info = TRUE;
11✔
432
  pinfo.total_byte_count = total_byte_count;
11✔
433
  pinfo.total_blocks_allocated = stat_malloc;
11✔
434
  pinfo.total_blocks_reused = stat_freehit;
11✔
435

436
  visit(&pinfo, user_data);
11✔
437
}
438

439
/* Set the pool debugging flags (e.g. PR_POOL_DEBUG_FL_OOM_DUMP_POOLS).
 * Returns 0 on success, or -1 with errno set to EINVAL for negative flags.
 */
int pr_pool_debug_set_flags(int flags) {
  if (flags < 0) {
    errno = EINVAL;
    return -1;
  }

  debug_flags = flags;
  return 0;
}
448

449
/* Attach a human-readable tag to a pool, for use in debug output.  The tag
 * string is NOT copied; the caller must keep it alive.  NULL pool or tag
 * is silently ignored.
 */
void pr_pool_tag(pool *p, const char *tag) {
  if (p == NULL ||
      tag == NULL) {
    return;
  }

  p->tag = tag;
}
457

458
/* Return the tag of a pool, or NULL (errno EINVAL) for a NULL pool.  Note
 * that an untagged pool also yields NULL, with errno unchanged.
 */
const char *pr_pool_get_tag(pool *p) {
  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  return p->tag;
}
466

467
/* Release the entire free block list */
468
static void pool_release_free_block_list(void) {
16✔
469
  union block_hdr *blok = NULL, *next = NULL;
16✔
470

471
  pr_alarms_block();
16✔
472

473
  for (blok = block_freelist; blok; blok = next) {
166✔
474
    next = blok->h.next;
150✔
475
    free(blok);
150✔
476
  }
477
  block_freelist = NULL;
16✔
478

479
  pr_alarms_unblock();
16✔
480
}
16✔
481

482
struct pool_rec *make_sub_pool(struct pool_rec *p) {
12,784✔
483
  union block_hdr *blok;
12,784✔
484
  pool *new_pool;
12,784✔
485

486
  pr_alarms_block();
12,784✔
487

488
  blok = new_block(0, FALSE);
12,784✔
489

490
  new_pool = (pool *) blok->h.first_avail;
12,784✔
491
  blok->h.first_avail = POOL_HDR_BYTES + (char *) blok->h.first_avail;
12,784✔
492

493
  memset(new_pool, 0, sizeof(struct pool_rec));
12,784✔
494
  new_pool->free_first_avail = blok->h.first_avail;
12,784✔
495
  new_pool->first = new_pool->last = blok;
12,784✔
496

497
  if (p != NULL) {
12,784✔
498
    new_pool->parent = p;
11,783✔
499
    new_pool->sub_next = p->sub_pools;
11,783✔
500

501
    if (new_pool->sub_next != NULL) {
11,783✔
502
      new_pool->sub_next->sub_prev = new_pool;
8,588✔
503
    }
504

505
    p->sub_pools = new_pool;
11,783✔
506
  }
507

508
  pr_alarms_unblock();
12,784✔
509

510
  return new_pool;
12,784✔
511
}
512

513
struct pool_rec *pr_pool_create_sz(struct pool_rec *p, size_t sz) {
908✔
514
  union block_hdr *blok;
908✔
515
  pool *new_pool;
908✔
516

517
  pr_alarms_block();
908✔
518

519
  blok = new_block(sz + POOL_HDR_BYTES, TRUE);
908✔
520

521
  new_pool = (pool *) blok->h.first_avail;
908✔
522
  blok->h.first_avail = POOL_HDR_BYTES + (char *) blok->h.first_avail;
908✔
523

524
  memset(new_pool, 0, sizeof(struct pool_rec));
908✔
525
  new_pool->free_first_avail = blok->h.first_avail;
908✔
526
  new_pool->first = new_pool->last = blok;
908✔
527

528
  if (p != NULL) {
908✔
529
    new_pool->parent = p;
902✔
530
    new_pool->sub_next = p->sub_pools;
902✔
531

532
    if (new_pool->sub_next != NULL) {
902✔
533
      new_pool->sub_next->sub_prev = new_pool;
63✔
534
    }
535

536
    p->sub_pools = new_pool;
902✔
537
  }
538

539
  pr_alarms_unblock();
908✔
540

541
  return new_pool;
908✔
542
}
543

544
/* Initialize the pool system by creating the base permanent_pool. */
545

546
/* Initialize the pool system by creating (and tagging) the base
 * permanent_pool; idempotent.
 */
void init_pools(void) {
  if (permanent_pool == NULL) {
    permanent_pool = make_sub_pool(NULL);
  }

  pr_pool_tag(permanent_pool, "permanent_pool");
}
553

554
/* Tear down the pool system: destroy the permanent pool (and thus all of
 * its descendants), then return all free-listed blocks to the system.
 */
void free_pools(void) {
  destroy_pool(permanent_pool);
  permanent_pool = NULL;
  pool_release_free_block_list();
}
559

560
/* Reset a pool to its freshly-created state: run cleanups, destroy all
 * subpools, and release every block except the first (which holds the pool
 * header itself).  The ordering below matters: cleanups may reference
 * subpool memory, and subpools must go before their backing blocks.
 */
static void clear_pool(struct pool_rec *p) {

  /* Sanity check. */
  if (p == NULL) {
    return;
  }

  pr_alarms_block();

  /* Run through any cleanups. */
  run_cleanups(p->cleanups);
  p->cleanups = NULL;

  /* Destroy subpools.  destroy_pool() unlinks each child from p->sub_pools,
   * so this loop terminates when the list is empty.
   */
  while (p->sub_pools != NULL) {
    destroy_pool(p->sub_pools);
  }

  p->sub_pools = NULL;

  /* Release every block after the first back onto the free list. */
  free_blocks(p->first->h.next, p->tag);
  p->first->h.next = NULL;

  /* Rewind the first block's allocation point to just past the header. */
  p->last = p->first;
  p->first->h.first_avail = p->free_first_avail;

  p->tag = NULL;
  pr_alarms_unblock();
}
589

590
/* Destroy a pool: unlink it from its parent's subpool list, run its
 * cleanups, destroy its subpools, and return all of its blocks (including
 * the first, which holds the pool header) to the free list.  NULL is a
 * no-op.
 */
void destroy_pool(pool *p) {
  if (p == NULL) {
    return;
  }

  pr_alarms_block();

  /* Unlink p from its parent's doubly-linked subpool list. */
  if (p->parent != NULL) {
    if (p->parent->sub_pools == p) {
      p->parent->sub_pools = p->sub_next;
    }

    if (p->sub_prev != NULL) {
      p->sub_prev->sub_next = p->sub_next;
    }

    if (p->sub_next != NULL) {
      p->sub_next->sub_prev = p->sub_prev;
    }
  }

  /* clear_pool() frees everything but the first block (and NULLs p->tag);
   * the final free_blocks() releases that first block too, after which
   * p itself must no longer be touched.
   */
  clear_pool(p);
  free_blocks(p->first, p->tag);

  pr_alarms_unblock();

#if defined(PR_DEVEL_NO_POOL_FREELIST)
  /* If configured explicitly to do so, call free(3) on the freelist after
   * a pool is destroyed.  This can be useful for tracking down use-after-free
   * and other memory issues using libraries such as dmalloc.
   */
  pool_release_free_block_list();
#endif /* PR_DEVEL_NO_POOL_FREELIST */
}
624

625
/* Allocation interface...
626
 */
627

628
/* Allocate `reqsz` bytes from pool `p`, rounded up to CLICK_SZ alignment.
 * When the pool's last block lacks room, a new block is appended (sized
 * exactly when `exact` is TRUE, else rounded up to BLOCK_MINFREE).
 * Returns NULL with errno EINVAL for a NULL/corrupt pool or a zero-length
 * request.
 *
 * Fix: the zero-length check is now performed FIRST.  Previously,
 * `nclicks = 1 + ((reqsz - 1) / CLICK_SZ)` was computed before the check,
 * so reqsz == 0 wrapped (size_t)-1 into a huge bogus size that was then
 * used in pointer arithmetic (first_avail + sz) before the early return.
 */
static void *alloc_pool(struct pool_rec *p, size_t reqsz, int exact) {
  size_t nclicks, sz;
  union block_hdr *blok;
  char *first_avail, *new_first_avail;

  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  if (reqsz == 0) {
    /* Don't try to allocate memory of zero length.
     *
     * This should NOT happen normally; if it does, by returning NULL we
     * almost guarantee a null pointer dereference.
     */
    errno = EINVAL;
    return NULL;
  }

  /* Round up requested size to an even number of aligned units */
  nclicks = 1 + ((reqsz - 1) / CLICK_SZ);
  sz = nclicks * CLICK_SZ;

  /* For performance, see if space is available in the most recently
   * allocated block.
   */
  blok = p->last;
  if (blok == NULL) {
    errno = EINVAL;
    return NULL;
  }

  first_avail = blok->h.first_avail;
  new_first_avail = first_avail + sz;

  if (new_first_avail <= (char *) blok->h.endp) {
    blok->h.first_avail = new_first_avail;
    return (void *) first_avail;
  }

  /* Need a new block that's big enough. */
  pr_alarms_block();

  blok = new_block(sz, exact);
  p->last->h.next = blok;
  p->last = blok;

  first_avail = blok->h.first_avail;
  blok->h.first_avail = sz + (char *) blok->h.first_avail;

  pr_alarms_unblock();
  return (void *) first_avail;
}
682

683
/* Allocate sz bytes from pool p; block growth is rounded up to
 * BLOCK_MINFREE granularity (exact == FALSE).  Memory is NOT zeroed.
 */
void *palloc(struct pool_rec *p, size_t sz) {
  return alloc_pool(p, sz, FALSE);
}
686

687
/* Allocate sz bytes from pool p; any new block is sized exactly
 * (exact == TRUE) rather than rounded up.  Memory is NOT zeroed.
 */
void *pallocsz(struct pool_rec *p, size_t sz) {
  return alloc_pool(p, sz, TRUE);
}
690

691
/* Allocate sz bytes from pool p and zero them.  Returns NULL with errno set
 * (EINVAL) for a NULL pool or a zero-length request.
 *
 * Fix: check the palloc() result before memset(3).  palloc() returns NULL
 * for sz == 0, and memset(NULL, '\0', 0) is undefined behavior.
 */
void *pcalloc(struct pool_rec *p, size_t sz) {
  void *res;

  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = palloc(p, sz);
  if (res == NULL) {
    /* Propagate the failure (errno already set by palloc). */
    return NULL;
  }

  memset(res, '\0', sz);
  return res;
}
704

705
/* Allocate exactly sz bytes from pool p and zero them.  Returns NULL with
 * errno set (EINVAL) for a NULL pool or a zero-length request.
 *
 * Fix: check the pallocsz() result before memset(3).  pallocsz() returns
 * NULL for sz == 0, and memset(NULL, '\0', 0) is undefined behavior.
 */
void *pcallocsz(struct pool_rec *p, size_t sz) {
  void *res;

  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = pallocsz(p, sz);
  if (res == NULL) {
    /* Propagate the failure (errno already set by pallocsz). */
    return NULL;
  }

  memset(res, '\0', sz);
  return res;
}
718

719
/* Array functions */
720

721
/* Create a growable array in pool p with capacity for `nelts` elements of
 * `elt_size` bytes each (at least one element is always reserved).  Returns
 * NULL with errno EINVAL for a NULL pool or zero element size.
 */
array_header *make_array(pool *p, unsigned int nelts, size_t elt_size) {
  array_header *arr;

  if (p == NULL ||
      elt_size == 0) {
    errno = EINVAL;
    return NULL;
  }

  arr = palloc(p, sizeof(array_header));

  /* Always reserve room for at least one element. */
  if (nelts < 1) {
    nelts = 1;
  }

  arr->elts = pcalloc(p, nelts * elt_size);
  arr->pool = p;
  arr->elt_size = elt_size;
  arr->nelts = 0;
  arr->nalloc = nelts;

  return arr;
}
744

745
/* Empty an array, replacing its storage with a freshly zeroed region of the
 * same capacity from the array's pool.  NULL is a no-op.
 */
void clear_array(array_header *arr) {
  if (arr == NULL) {
    return;
  }

  /* The old storage is abandoned to the pool; pools have no per-object
   * free.
   */
  arr->elts = pcalloc(arr->pool, arr->nalloc * arr->elt_size);
  arr->nelts = 0;
}
753

754
/* Append one (uninitialized) element slot to the array, doubling the backing
 * storage when full, and return a pointer to the new slot.  Returns NULL
 * with errno EINVAL for a NULL array.
 */
void *push_array(array_header *arr) {
  if (arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  if (arr->nelts == arr->nalloc) {
    /* Full: allocate double the capacity and copy the existing elements. */
    char *grown = pcalloc(arr->pool, arr->nalloc * arr->elt_size * 2);

    memcpy(grown, arr->elts, arr->nalloc * arr->elt_size);
    arr->elts = grown;
    arr->nalloc *= 2;
  }

  arr->nelts++;
  return ((char *) arr->elts) + (arr->elt_size * (arr->nelts - 1));
}
771

772
/* Append all of src's elements to dst (which must use the same element
 * size), growing dst's storage by doubling as needed.  Returns 0 on
 * success, -1 with errno EINVAL when either array is NULL.
 */
int array_cat2(array_header *dst, const array_header *src) {
  size_t elt_size;

  if (dst == NULL ||
      src == NULL) {
    errno = EINVAL;
    return -1;
  }

  elt_size = dst->elt_size;

  if (dst->nelts + src->nelts > dst->nalloc) {
    size_t capacity;
    char *storage;

    /* Double the capacity until both element sets fit. */
    capacity = dst->nalloc * 2;
    if (capacity == 0) {
      capacity = 1;
    }

    while ((dst->nelts + src->nelts) > capacity) {
      capacity *= 2;
    }

    storage = pcalloc(dst->pool, elt_size * capacity);
    memcpy(storage, dst->elts, dst->nalloc * elt_size);

    dst->elts = storage;
    dst->nalloc = capacity;
  }

  /* Copy src's elements in after dst's existing elements. */
  memcpy(((char *) dst->elts) + (dst->nelts * elt_size), (char *) src->elts,
         elt_size * src->nelts);
  dst->nelts += src->nelts;

  return 0;
}
809

810
/* Backwards-compatible wrapper around array_cat2(), discarding its return
 * value.
 */
void array_cat(array_header *dst, const array_header *src) {
  (void) array_cat2(dst, src);
}
813

814
/* Create a deep copy (element storage included) of `arr` in pool p.
 * Returns NULL with errno EINVAL when either argument is NULL.  Note that
 * for pointer elements, only the pointers themselves are copied; see
 * copy_array_str() for duplicating string contents.
 */
array_header *copy_array(pool *p, const array_header *arr) {
  array_header *dup;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  dup = make_array(p, arr->nalloc, arr->elt_size);

  if (arr->nelts > 0) {
    memcpy(dup->elts, arr->elts, arr->elt_size * arr->nelts);
  }

  dup->nelts = arr->nelts;
  return dup;
}
832

833
/* copy an array that is assumed to consist solely of strings */
834
/* Copy an array that is assumed to consist solely of strings, duplicating
 * each string into the destination pool.  Returns NULL with errno EINVAL
 * when either argument is NULL.
 */
array_header *copy_array_str(pool *p, const array_header *arr) {
  register unsigned int i;
  array_header *dup;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  dup = copy_array(p, arr);

  /* copy_array() copied only the pointers; replace each with a string
   * duplicated from pool p.
   */
  for (i = 0; i < arr->nelts; i++) {
    ((char **) dup->elts)[i] = pstrdup(p, ((char **) dup->elts)[i]);
  }

  return dup;
}
852

853
/* Create a shallow copy of `arr`: the new header shares the source's element
 * storage, but nalloc is set equal to nelts so the very next push_array()
 * copies the elements into fresh storage (copy-on-grow).  Returns NULL with
 * errno EINVAL when either argument is NULL.
 */
array_header *copy_array_hdr(pool *p, const array_header *arr) {
  array_header *hdr;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  hdr = palloc(p, sizeof(array_header));

  hdr->elts = arr->elts;
  hdr->pool = p;
  hdr->elt_size = arr->elt_size;
  hdr->nelts = arr->nelts;

  /* Force overflow on push */
  hdr->nalloc = arr->nelts;

  return hdr;
}
872

873
/* Return a new array in pool p consisting of `first`'s elements followed by
 * `second`'s.  `first`'s storage is shared until array_cat() triggers the
 * forced copy (see copy_array_hdr()).  Returns NULL with errno EINVAL when
 * any argument is NULL.
 */
array_header *append_arrays(pool *p, const array_header *first,
    const array_header *second) {
  array_header *res;

  if (p == NULL ||
      first == NULL ||
      second == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = copy_array_hdr(p, first);
  array_cat(res, second);

  return res;
}
889

890
/* Generic cleanups */
891

892
/* A single cleanup registration: a callback plus its opaque argument,
 * linked into the owning pool's singly-linked cleanup list.
 */
typedef struct cleanup {
  void *user_data;
  void (*cleanup_cb)(void *);
  struct cleanup *next;

} cleanup_t;
898

899
/* Register `cleanup_cb` to be invoked with `user_data` when pool p is
 * cleared or destroyed.  NULL pool is silently ignored.  The cleanup record
 * is allocated from p itself, so it is released along with the pool.
 */
void register_cleanup2(pool *p, void *user_data, void (*cleanup_cb)(void*)) {
  cleanup_t *cl;

  if (p == NULL) {
    return;
  }

  cl = pcalloc(p, sizeof(cleanup_t));
  cl->user_data = user_data;
  cl->cleanup_cb = cleanup_cb;

  /* Push onto the head of the pool's cleanup list; cleanups thus run in
   * reverse registration order.
   */
  cl->next = p->cleanups;
  p->cleanups = cl;
}
914

915
/* Backwards-compatible wrapper around register_cleanup2(); the
 * child_cleanup_cb argument is accepted but ignored.
 */
void register_cleanup(pool *p, void *user_data, void (*plain_cleanup_cb)(void*),
    void (*child_cleanup_cb)(void *)) {
  (void) child_cleanup_cb;
  register_cleanup2(p, user_data, plain_cleanup_cb);
}
920

921
/* Remove the first cleanup registered on pool p matching `user_data` and
 * `cleanup_cb` (a NULL cleanup_cb matches any callback).  NULL pool is
 * silently ignored; the unlinked record stays in the pool's memory.
 */
void unregister_cleanup(pool *p, void *user_data, void (*cleanup_cb)(void *)) {
  cleanup_t *iter, **prev_next;

  if (p == NULL) {
    return;
  }

  prev_next = &p->cleanups;

  for (iter = p->cleanups; iter != NULL; iter = iter->next) {
    if (iter->user_data == user_data &&
        (iter->cleanup_cb == cleanup_cb || cleanup_cb == NULL)) {

      /* Unlink this record by pointing the previous link past it. */
      *prev_next = iter->next;
      break;
    }

    prev_next = &iter->next;
  }
}
946

947
/* Invoke every cleanup callback on the list, in list (reverse-registration)
 * order; entries with a NULL callback are skipped.
 */
static void run_cleanups(cleanup_t *c) {
  for (; c != NULL; c = c->next) {
    if (c->cleanup_cb != NULL) {
      (*c->cleanup_cb)(c->user_data);
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc