• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 15263807472

26 May 2025 08:53PM UTC coverage: 72.046% (-0.002%) from 72.048%
15263807472

push

github

yuwata
src/core/manager.c: log preset activity on first boot

This gives us a little more information about what units were enabled
or disabled on that first boot and will be useful for OS developers
tracking down the source of unit state.

An example with this enabled looks like:

```
NET: Registered PF_VSOCK protocol family
systemd[1]: Applying preset policy.
systemd[1]: Unit /etc/systemd/system/dnsmasq.service is masked, ignoring.
systemd[1]: Unit /etc/systemd/system/systemd-repart.service is masked, ignoring.
systemd[1]: Removed '/etc/systemd/system/sockets.target.wants/systemd-resolved-monitor.socket'.
systemd[1]: Removed '/etc/systemd/system/sockets.target.wants/systemd-resolved-varlink.socket'.
systemd[1]: Created symlink '/etc/systemd/system/multi-user.target.wants/var-mnt-workdir.mount' → '/etc/systemd/system/var-mnt-workdir.mount'.
systemd[1]: Created symlink '/etc/systemd/system/multi-user.target.wants/var-mnt-workdir\x2dtmp.mount' → '/etc/systemd/system/var-mnt-workdir\x2dtmp.mount'.
systemd[1]: Created symlink '/etc/systemd/system/afterburn-sshkeys.target.requires/afterburn-sshkeys@core.service' → '/usr/lib/systemd/system/afterburn-sshkeys@.service'.
systemd[1]: Created symlink '/etc/systemd/system/sockets.target.wants/systemd-resolved-varlink.socket' → '/usr/lib/systemd/system/systemd-resolved-varlink.socket'.
systemd[1]: Created symlink '/etc/systemd/system/sockets.target.wants/systemd-resolved-monitor.socket' → '/usr/lib/systemd/system/systemd-resolved-monitor.socket'.
systemd[1]: Populated /etc with preset unit settings.
```

Considering this logging only happens on first boot and not on every boot, I think
the extra information is worth the extra verbosity in the logs for that
one boot.

5 of 6 new or added lines in 1 file covered. (83.33%)

5463 existing lines in 165 files now uncovered.

299151 of 415222 relevant lines covered (72.05%)

702386.45 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.56
/src/basic/alloc-util.h
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2
#pragma once
3

4
#include <stdlib.h>
5

6
#include "forward.h"
7
#include "memory-util.h"
8

9
#if HAS_FEATURE_MEMORY_SANITIZER
10
#  include <sanitizer/msan_interface.h>
11
#endif
12

13
/* If for some reason more than 4M are allocated on the stack, let's abort immediately. It's better than
 * proceeding and smashing the stack limits. Note that by default RLIMIT_STACK is 8M on Linux. */
#define ALLOCA_MAX (4U*1024U*1024U)

/* Typed heap allocation: 'n' items of type 't', with overflow-checked multiplication. */
#define new(t, n) ((t*) malloc_multiply(n, sizeof(t)))

/* Like new(), but zero-initialized. calloc() checks the multiplication for overflow itself; the
 * '?: 1' ensures a unique, non-NULL pointer is returned even for n == 0. */
#define new0(t, n) ((t*) calloc((n) ?: 1, sizeof(t)))

/* alloca() with a sanity limit: refuse absurdly large stack allocations outright, and allocate at
 * least one byte so the returned pointer is always unique.
 *
 * Note: the original definition carried a stray trailing backslash after the closing '})', which
 * silently spliced the following (blank) line into the macro — dropped here. */
#define alloca_safe(n)                                                  \
        ({                                                              \
                size_t _nn_ = (n);                                      \
                assert(_nn_ <= ALLOCA_MAX);                             \
                alloca(_nn_ == 0 ? 1 : _nn_);                           \
        })

/* Typed stack allocation: 'n' items of type 't', with overflow-checked multiplication. */
#define newa(t, n)                                                      \
        ({                                                              \
                size_t _n_ = (n);                                       \
                assert_se(MUL_ASSIGN_SAFE(&_n_, sizeof(t)));            \
                (t*) alloca_safe(_n_);                                  \
        })

/* Like newa(), but zero-initialized. */
#define newa0(t, n)                                                     \
        ({                                                              \
                size_t _n_ = (n);                                       \
                assert_se(MUL_ASSIGN_SAFE(&_n_, sizeof(t)));            \
                (t*) alloca0(_n_);                                      \
        })

/* Typed heap duplication of an array of 'n' items of type 't'. */
#define newdup(t, p, n) ((t*) memdup_multiply(p, n, sizeof(t)))

/* Like newdup(), but the copy gets one extra NUL byte appended (see memdup_suffix0()). */
#define newdup_suffix0(t, p, n) ((t*) memdup_suffix0_multiply(p, n, sizeof(t)))

/* Zero-initialized allocation of 'n' bytes; n == 0 still returns a unique, non-NULL pointer. */
#define malloc0(n) (calloc(1, (n) ?: 1))

#define free_and_replace(a, b)                  \
        free_and_replace_full(a, b, free)
50

51
/* Duplicate the 'l' bytes at 'p' into a freshly allocated buffer; defined out of line. */
void* memdup(const void *p, size_t l) _alloc_(2);
/* Like memdup(), but the returned buffer is one byte larger and NUL-terminated. */
void* memdup_suffix0(const void *p, size_t l); /* We can't use _alloc_() here, since we return a buffer one byte larger than the specified size */

/* Duplicate 'l' bytes of 'p' onto the stack. The statement expression evaluates to the result of
 * memcpy_safe() — presumably the destination pointer, see memory-util.h. NOTE(review): confirm
 * memcpy_safe() returns the destination like memcpy() does. */
#define memdupa(p, l)                           \
        ({                                      \
                void *_q_;                      \
                size_t _l_ = l;                 \
                _q_ = alloca_safe(_l_);         \
                memcpy_safe(_q_, p, _l_);       \
        })

/* Like memdupa(), but allocates one extra byte and NUL-terminates the copy. */
#define memdupa_suffix0(p, l)                   \
        ({                                      \
                void *_q_;                      \
                size_t _l_ = l;                 \
                _q_ = alloca_safe(_l_ + 1);     \
                ((uint8_t*) _q_)[_l_] = 0;      \
                memcpy_safe(_q_, p, _l_);       \
        })
70

71
static inline void unsetp(void *p) {
        /* A trivial "destructor" for cases where a _cleanup_ handler should merely unset a pointer
         * (of whatever type) rather than free anything. */
        void **pp = (void**) p;
        *pp = NULL;
}
77

78
static inline void freep(void *p) {
        /* _cleanup_ destructor: frees the pointer that 'p' points to and resets it to NULL
         * (mfree() frees its argument and returns NULL). */
        void **pp = (void**) p;
        *pp = mfree(*pp);
}

#define _cleanup_free_ _cleanup_(freep)
83

84
static inline bool size_multiply_overflow(size_t size, size_t need) {
        /* Returns true if size * need would overflow a size_t. */
        if (need == 0)
                return false;

        return _unlikely_(size > (SIZE_MAX / need));
}
87

88
/* malloc() an array of 'need' elements of 'size' bytes each, returning NULL instead of overflowing
 * the byte-count multiplication. A zero total still yields a unique, non-NULL allocation. */
_malloc_ _alloc_(1, 2) static inline void *malloc_multiply(size_t need, size_t size) {
        if (size_multiply_overflow(size, need))
                return NULL;

        size_t total = need * size;
        return malloc(total > 0 ? total : 1);
}
94

95
/* Duplicate an array of 'need' elements of 'size' bytes each via memdup(), guarding the size
 * multiplication against overflow. */
_alloc_(2, 3) static inline void *memdup_multiply(const void *p, size_t need, size_t size) {
        if (size_multiply_overflow(size, need))
                return NULL;

        return memdup(p, need * size);
}
101

102
/* Like memdup_multiply(), but copies via memdup_suffix0(), i.e. with one extra NUL byte appended.
 * Note that we can't decorate this function with _alloc_() since the returned memory area is one
 * byte larger than the product of its parameters. */
static inline void *memdup_suffix0_multiply(const void *p, size_t need, size_t size) {
        if (size_multiply_overflow(size, need))
                return NULL;

        return memdup_suffix0(p, need * size);
}
110

111
static inline size_t GREEDY_ALLOC_ROUND_UP(size_t l) {
        /* Round an allocation size up to some reasonable, likely larger value. Intended for call
         * sites that repeatedly grow an allocation (e.g. strv_extend() and similar), so that growth
         * is amortized.
         *
         * Unlike GREEDY_REALLOC() this operates on a single size value only and simply rounds up to
         * the next power of two, hence no extra counter needs to be carried around.
         *
         * ALIGN_POWER2() is used directly since it is type-safe for size_t, handles very small
         * (i.e. <= 2) values sanely, and is safe for very large (i.e. > SSIZE_MAX) values. */

        if (l <= 2)
                return 2; /* Never hand out less than 2 of something. */

        size_t rounded = ALIGN_POWER2(l);
        if (rounded == 0)
                return l; /* Rounding up overflowed; fall back to the exact request. */

        return rounded;
}
133

134
/* Grow the buffer *p so it can hold at least 'need' objects of 'size' bytes each, over-allocating
 * ("greedily") to amortize repeated extension; defined out of line. */
void* greedy_realloc(void **p, size_t need, size_t size);
/* Like greedy_realloc() — presumably zero-initializing appended space; confirm in alloc-util.c. */
void* greedy_realloc0(void **p, size_t need, size_t size);
/* Append 'n_from' items from 'from' to the array *p of *n_p items; defined out of line. */
void* greedy_realloc_append(void **p, size_t *n_p, const void *from, size_t n_from, size_t size);

/* Type-safe wrappers deriving the element size from the array pointer itself. */
#define GREEDY_REALLOC(array, need)                                     \
        greedy_realloc((void**) &(array), (need), sizeof((array)[0]))

#define GREEDY_REALLOC0(array, need)                                    \
        greedy_realloc0((void**) &(array), (need), sizeof((array)[0]))

/* The _from_ temporary forces 'from' to be assignment-compatible with the array's element type. */
#define GREEDY_REALLOC_APPEND(array, n_array, from, n_from)             \
        ({                                                              \
                const typeof(*(array)) *_from_ = (from);                \
                greedy_realloc_append((void**) &(array), &(n_array), _from_, (n_from), sizeof((array)[0])); \
        })
149

150
/* Like alloca_safe(), but zeroes the buffer. The statement expression evaluates to memset()'s
 * return value, i.e. the buffer itself (as char*). */
#define alloca0(n)                                      \
        ({                                              \
                char *_new_;                            \
                size_t _len_ = n;                       \
                _new_ = alloca_safe(_len_);             \
                memset(_new_, 0, _len_);                \
        })

/* It's not clear what alignment glibc/gcc alloca() guarantee, hence provide a guaranteed safe version */
/* Over-allocates by (align - 1) bytes, then rounds the pointer up to the next multiple of 'align'.
 * NOTE(review): the mask arithmetic assumes 'align' is a power of two — confirm at call sites. */
#define alloca_align(size, align)                                       \
        ({                                                              \
                void *_ptr_;                                            \
                size_t _mask_ = (align) - 1;                            \
                size_t _size_ = size;                                   \
                _ptr_ = alloca_safe(_size_ + _mask_);                   \
                (void*)(((uintptr_t)_ptr_ + _mask_) & ~_mask_);         \
        })

/* Combination of alloca_align() and zeroing. */
#define alloca0_align(size, align)                                      \
        ({                                                              \
                void *_new_;                                            \
                size_t _xsize_ = (size);                                \
                _new_ = alloca_align(_xsize_, (align));                 \
                memset(_new_, 0, _xsize_);                              \
        })

/* Expands to a real MSan call only when built with MemorySanitizer; a no-op otherwise. */
#if HAS_FEATURE_MEMORY_SANITIZER
#  define msan_unpoison(r, s) __msan_unpoison(r, s)
#else
#  define msan_unpoison(r, s)
#endif
181

182
/* Dummy allocator to tell the compiler that the new size of p is newsize. The implementation returns the
 * pointer as is; the only reason for its existence is as a conduit for the _alloc_ attribute.  This must not
 * be inlined (hence a non-static function with _noinline_ because LTO otherwise tries to inline it) because
 * gcc then loses the attributes on the function.
 * See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96503 */
void *expand_to_usable(void *p, size_t newsize) _alloc_(2) _returns_nonnull_ _noinline_;

/* Returns the usable size of the allocation *xp points to; defined out of line. */
size_t malloc_sizeof_safe(void **xp);

/* This returns the number of usable bytes in a malloc()ed region as per malloc_usable_size(), which may
 * return a value larger than the size that was actually allocated. Access to that additional memory is
 * discouraged because it violates the C standard; a compiler cannot see that this as valid. To help the
 * compiler out, the MALLOC_SIZEOF_SAFE macro 'allocates' the usable size using a dummy allocator function
 * expand_to_usable. There is a possibility of malloc_usable_size() returning different values during the
 * lifetime of an object, which may cause problems, but the glibc allocator does not do that at the moment. */
#define MALLOC_SIZEOF_SAFE(x) \
        malloc_sizeof_safe((void**) &__builtin_choose_expr(__builtin_constant_p(x), (void*) { NULL }, (x)))

/* Inspired by ELEMENTSOF() but operates on malloc()'ed memory areas: typesafely returns the number of items
 * that fit into the specified memory block */
/* The __builtin_types_compatible_p() check only accepts expressions of pointer type (typeof(x) must
 * equal typeof(&*(x))); anything else evaluates to VOID_0 — presumably a void-typed placeholder
 * defined in the project headers, making misuse a compile-time error. */
#define MALLOC_ELEMENTSOF(x) \
        (__builtin_choose_expr(                                         \
                __builtin_types_compatible_p(typeof(x), typeof(&*(x))), \
                MALLOC_SIZEOF_SAFE(x)/sizeof((x)[0]),                   \
                VOID_0))
207

208
/* Free every element of the array. */
209
static inline void free_many(void **p, size_t n) {
69,398✔
210
        assert(p || n == 0);
69,398✔
211

212
        FOREACH_ARRAY(i, p, n)
1,099,481✔
213
                *i = mfree(*i);
1,030,083✔
214
}
69,398✔
215

216
/* Typesafe wrapper for char** rather than void**. Unfortunately C won't implicitly cast this. */
static inline void free_many_charp(char **c, size_t n) {
        void **generic = (void**) c;
        free_many(generic, n);
}
6,977✔
220

221
/* Like realloc(), but guarantees that any bytes appended beyond the previously usable size read as
 * zero. On failure NULL is returned and the original allocation stays valid, as with realloc(). */
_alloc_(2) static inline void *realloc0(void *p, size_t new_size) {
        size_t old_size = MALLOC_SIZEOF_SAFE(p);

        void *q = realloc(p, new_size);
        if (!q)
                return NULL;

        /* The allocator may have granted more than requested; zero everything up to what is
         * actually usable now. */
        new_size = MALLOC_SIZEOF_SAFE(q);
        if (new_size > old_size)
                memset((uint8_t*) q + old_size, 0, new_size - old_size);

        return q;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc