• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 15057632786

15 May 2025 09:01PM UTC coverage: 72.267% (+0.02%) from 72.244%
15057632786

push

github

bluca
man: document how to hook stuff into system wakeup

Fixes: #6364

298523 of 413084 relevant lines covered (72.27%)

738132.88 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

93.62
/src/basic/alloc-util.h
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2
#pragma once
3

4
#include <alloca.h>
5
#include <malloc.h>
6
#include <stddef.h>
7
#include <string.h>
8

9
#include "assert-util.h"
10
#include "cleanup-util.h"
11
#include "macro.h"
12
#include "memory-util.h"
13

14
#if HAS_FEATURE_MEMORY_SANITIZER
15
#  include <sanitizer/msan_interface.h>
16
#endif
17

18
/* If for some reason more than 4M are allocated on the stack, let's abort immediately. It's better than
 * proceeding and smashing the stack limits. Note that by default RLIMIT_STACK is 8M on Linux. */
#define ALLOCA_MAX (4U*1024U*1024U)

/* Allocate an array of 'n' objects of type 't' on the heap, with overflow-checked multiplication. */
#define new(t, n) ((t*) malloc_multiply(n, sizeof(t)))

/* Like new(), but zero-initialized; requests at least one byte so success is distinguishable from OOM. */
#define new0(t, n) ((t*) calloc((n) ?: 1, sizeof(t)))

/* alloca() with a sanity limit: aborts if more than ALLOCA_MAX bytes are requested, and allocates at
 * least one byte so the returned pointer is always valid/unique.
 *
 * Note: the original definition carried a stray trailing backslash after the closing "})", which
 * silently spliced the following source line into the macro — dropped here. */
#define alloca_safe(n)                                                  \
        ({                                                              \
                size_t _nn_ = (n);                                      \
                assert(_nn_ <= ALLOCA_MAX);                             \
                alloca(_nn_ == 0 ? 1 : _nn_);                           \
        })
32

33
/* Stack-allocate an array of 'n' objects of type 't', with overflow-checked multiplication
 * (assert_se() aborts on overflow) and the ALLOCA_MAX limit from alloca_safe(). */
#define newa(t, n)                                                      \
        ({                                                              \
                size_t _sz_ = (n);                                      \
                assert_se(MUL_ASSIGN_SAFE(&_sz_, sizeof(t)));           \
                (t*) alloca_safe(_sz_);                                 \
        })

/* Like newa(), but the returned memory is zeroed. */
#define newa0(t, n)                                                     \
        ({                                                              \
                size_t _sz_ = (n);                                      \
                assert_se(MUL_ASSIGN_SAFE(&_sz_, sizeof(t)));           \
                (t*) alloca0(_sz_);                                     \
        })
46

47
/* Heap-duplicate 'n' objects of type 't' starting at 'p', overflow-checked. */
#define newdup(t, p, n) ((t*) memdup_multiply(p, n, sizeof(t)))

/* Like newdup(), but the copy gets one extra zero byte appended (e.g. for string-ish data). */
#define newdup_suffix0(t, p, n) ((t*) memdup_suffix0_multiply(p, n, sizeof(t)))

/* malloc() returning zeroed memory; requests at least one byte so n == 0 still yields non-NULL. */
#define malloc0(n) (calloc(1, (n) ?: 1))

/* Free 'a' and move 'b' into it, clearing 'b'. */
#define free_and_replace(a, b)                  \
        free_and_replace_full(a, b, free)
55

56
void* memdup(const void *p, size_t l) _alloc_(2);
57
void* memdup_suffix0(const void *p, size_t l); /* We can't use _alloc_() here, since we return a buffer one byte larger than the specified size */
58

59
/* Duplicate the 'l' bytes at 'p' on the stack (via alloca_safe(), so capped at ALLOCA_MAX).
 * Fix vs. original: macro arguments 'p' and 'l' are now parenthesized at every use, so
 * expression arguments cannot change meaning inside the expansion (CERT PRE01-C). */
#define memdupa(p, l)                           \
        ({                                      \
                void *_q_;                      \
                size_t _l_ = (l);               \
                _q_ = alloca_safe(_l_);         \
                memcpy_safe(_q_, (p), _l_);     \
        })

/* Like memdupa(), but appends one zero byte after the copy.
 * NOTE(review): '_l_ + 1' is not overflow-checked; for l == SIZE_MAX it would wrap to 0 and the
 * suffix store would be wildly out of bounds. alloca_safe()'s ALLOCA_MAX assert catches all large
 * values well before that in practice. */
#define memdupa_suffix0(p, l)                   \
        ({                                      \
                void *_q_;                      \
                size_t _l_ = (l);               \
                _q_ = alloca_safe(_l_ + 1);     \
                ((uint8_t*) _q_)[_l_] = 0;      \
                memcpy_safe(_q_, (p), _l_);     \
        })
75

76
static inline void unsetp(void *p) {
        /* Trivial "destructor" for _cleanup_ use: clears the pointed-to pointer
         * without freeing whatever it referenced. */
        void **target = p;

        *target = NULL;
}
82

83
static inline void freep(void *p) {
        /* Destructor backing _cleanup_free_: frees *p and resets it to NULL. */
        void **pp = (void**) p;

        *pp = mfree(*pp);
}

#define _cleanup_free_ _cleanup_(freep)
88

89
static inline bool size_multiply_overflow(size_t size, size_t need) {
        /* True if the product size * need would not fit in a size_t (safe for need == 0). */
        bool overflows = need != 0 && size > (SIZE_MAX / need);

        return _unlikely_(overflows);
}
92

93
_malloc_ _alloc_(1, 2) static inline void *malloc_multiply(size_t need, size_t size) {
        /* malloc() with overflow-checked multiplication; NULL on overflow. A zero-sized
         * request is bumped to one byte so success still returns a non-NULL pointer. */
        if (size_multiply_overflow(size, need))
                return NULL;

        size_t total = size * need;
        return malloc(total ?: 1);
}
99

100
_alloc_(2, 3) static inline void *memdup_multiply(const void *p, size_t need, size_t size) {
        /* Heap-duplicate 'need' items of 'size' bytes from 'p'; NULL if the
         * product would overflow. Caller owns (and frees) the result. */
        if (size_multiply_overflow(size, need))
                return NULL;

        size_t total = size * need;
        return memdup(p, total);
}
106

107
/* Note that we can't decorate this function with _alloc_() since the returned memory area is one byte larger
 * than the product of its parameters. */
static inline void *memdup_suffix0_multiply(const void *p, size_t need, size_t size) {
        /* Like memdup_multiply(), but the duplicate gets one extra zero byte appended. */
        if (size_multiply_overflow(size, need))
                return NULL;

        size_t total = size * need;
        return memdup_suffix0(p, total);
}
115

116
static inline size_t GREEDY_ALLOC_ROUND_UP(size_t l) {
        /* Round an allocation size up to a reasonable, likely larger value — intended for
         * allocation loops that grow repeatedly (strv_extend() and friends).
         *
         * Unlike GREEDY_REALLOC() this operates on a single size value and simply rounds
         * up to the next power of two, so no extra counter is needed.
         *
         * ALIGN_POWER2() is used directly for its benefits: size_t type-safety, sane
         * handling of very small (<= 2) values and safe handling of values > SSIZE_MAX. */

        if (l <= 2)
                return 2; /* Never allocate less than 2 of something. */

        size_t rounded = ALIGN_POWER2(l);
        if (rounded == 0) /* rounding overflowed, keep the requested size as-is */
                return l;

        return rounded;
}
138

139
void* greedy_realloc(void **p, size_t need, size_t size);
140
void* greedy_realloc0(void **p, size_t need, size_t size);
141
void* greedy_realloc_append(void **p, size_t *n_p, const void *from, size_t n_from, size_t size);
142

143
/* Grow the typed pointer lvalue 'array' so it can hold at least 'need' elements. */
#define GREEDY_REALLOC(array, need)                                     \
        greedy_realloc((void**) &(array), (need), sizeof((array)[0]))

/* Same as GREEDY_REALLOC(), but newly grown space is zero-initialized. */
#define GREEDY_REALLOC0(array, need)                                    \
        greedy_realloc0((void**) &(array), (need), sizeof((array)[0]))

/* Append 'n_from' elements from 'from' to 'array' (current count in 'n_array'), growing as
 * needed. The typed temporary gives us a type-compatibility check between 'from' and 'array'. */
#define GREEDY_REALLOC_APPEND(array, n_array, from, n_from)             \
        ({                                                              \
                const typeof(*(array)) *_src_ = (from);                 \
                greedy_realloc_append((void**) &(array), &(n_array), _src_, (n_from), sizeof((array)[0])); \
        })
154

155
/* Stack-allocate 'n' zeroed bytes (via alloca_safe(), so capped at ALLOCA_MAX).
 * Fix vs. original: the 'n' and 'size' arguments below are now parenthesized at every
 * use so expression arguments cannot re-associate inside the expansion (CERT PRE01-C). */
#define alloca0(n)                                      \
        ({                                              \
                char *_new_;                            \
                size_t _len_ = (n);                     \
                _new_ = alloca_safe(_len_);             \
                memset(_new_, 0, _len_);                \
        })

/* It's not clear what alignment glibc/gcc alloca() guarantee, hence provide a guaranteed safe version.
 * NOTE(review): 'align' must be a power of two for the mask arithmetic to be correct. */
#define alloca_align(size, align)                                       \
        ({                                                              \
                void *_ptr_;                                            \
                size_t _mask_ = (align) - 1;                            \
                size_t _size_ = (size);                                 \
                _ptr_ = alloca_safe(_size_ + _mask_);                   \
                (void*)(((uintptr_t)_ptr_ + _mask_) & ~_mask_);         \
        })

/* Like alloca_align(), but the memory is zeroed. */
#define alloca0_align(size, align)                                      \
        ({                                                              \
                void *_new_;                                            \
                size_t _xsize_ = (size);                                \
                _new_ = alloca_align(_xsize_, (align));                 \
                memset(_new_, 0, _xsize_);                              \
        })
180

181
#if HAS_FEATURE_MEMORY_SANITIZER
182
#  define msan_unpoison(r, s) __msan_unpoison(r, s)
183
#else
184
#  define msan_unpoison(r, s)
185
#endif
186

187
/* Dummy allocator to tell the compiler that the new size of p is newsize. The implementation returns the
188
 * pointer as is; the only reason for its existence is as a conduit for the _alloc_ attribute.  This must not
189
 * be inlined (hence a non-static function with _noinline_ because LTO otherwise tries to inline it) because
190
 * gcc then loses the attributes on the function.
191
 * See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96503 */
192
void *expand_to_usable(void *p, size_t newsize) _alloc_(2) _returns_nonnull_ _noinline_;
193

194
static inline size_t malloc_sizeof_safe(void **xp) {
        /* Returns the usable size of the allocation *xp points to (possibly larger than
         * what was requested), and updates *xp through expand_to_usable() so the compiler
         * knows the new size. Returns 0 for a NULL pointer (or NULL slot). */
        if (_unlikely_(!xp || !*xp))
                return 0;

        size_t usable = malloc_usable_size(*xp);
        *xp = expand_to_usable(*xp, usable);
        /* expand_to_usable() is _returns_nonnull_, but GCC built with ubsan misses the
         * attribute, so make non-NULL-ness doubly clear.
         * See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79265 */
        if (!*xp)
                assert_not_reached();
        return usable;
}
207

208
/* Number of usable bytes in a malloc()ed region, per malloc_usable_size() — which may exceed the
 * size actually requested. Touching that extra memory violates the C object model unless the
 * compiler is told about it, so the macro routes the pointer through the dummy allocator
 * expand_to_usable() to 'bless' the full usable size. The __builtin_constant_p() branch keeps
 * constant expressions (e.g. NULL literals) from being address-taken. Beware that
 * malloc_usable_size() could in principle vary over an object's lifetime; glibc currently
 * doesn't do that. */
#define MALLOC_SIZEOF_SAFE(x) \
        malloc_sizeof_safe((void**) &__builtin_choose_expr(__builtin_constant_p(x), (void*) { NULL }, (x)))

/* ELEMENTSOF() for malloc()ed memory: typesafely yields how many items of x's pointee type fit in
 * the block. Evaluates to VOID_0 (a compile-time poison) when x is not a pointer-to-complete-type. */
#define MALLOC_ELEMENTSOF(x) \
        (__builtin_choose_expr(                                         \
                __builtin_types_compatible_p(typeof(x), typeof(&*(x))), \
                MALLOC_SIZEOF_SAFE(x)/sizeof((x)[0]),                   \
                VOID_0))
224

225
/* Free every element of the array, NULLing each slot; the array itself is not freed. */
static inline void free_many(void **p, size_t n) {
        assert(p || n == 0);

        for (size_t i = 0; i < n; i++)
                p[i] = mfree(p[i]);
}
232

233
/* free_many() for char** arrays — C has no implicit char** → void** conversion,
 * hence this explicitly-cast, typed shim. */
static inline void free_many_charp(char **c, size_t n) {
        free_many((void**) c, n);
}
×
237

238
_alloc_(2) static inline void *realloc0(void *p, size_t new_size) {
        /* Like realloc(), but any bytes appended beyond the old usable size are zeroed.
         * Sizes are measured via MALLOC_SIZEOF_SAFE(), i.e. the allocator's actual usable
         * sizes, not the caller-requested ones. Returns NULL (leaving p valid) on OOM. */
        size_t old_usable = MALLOC_SIZEOF_SAFE(p);

        void *q = realloc(p, new_size);
        if (!q)
                return NULL;

        size_t new_usable = MALLOC_SIZEOF_SAFE(q); /* what we actually got */
        if (new_usable > old_usable)
                memset((uint8_t*) q + old_usable, 0, new_usable - old_usable);

        return q;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc