• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 15288324789

27 May 2025 07:40PM UTC coverage: 71.981% (-0.07%) from 72.046%
15288324789

push

github

yuwata
timedate: print better errors when systemd-timesyncd.service unavailable

If the error is a common bus error indicating the service is not
available, print a more user-friendly message indicating so.

0 of 7 new or added lines in 1 file covered. (0.0%)

3467 existing lines in 62 files now uncovered.

299170 of 415625 relevant lines covered (71.98%)

704053.27 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

59.47
/src/shared/bpf-program.c
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#include <fcntl.h>
4
#include <linux/bpf.h>
5
#include <linux/bpf_insn.h>
6
#include <unistd.h>
7

8
#include "alloc-util.h"
9
#include "bpf-program.h"
10
#include "errno-util.h"
11
#include "escape.h"
12
#include "extract-word.h"
13
#include "fd-util.h"
14
#include "fdset.h"
15
#include "log.h"
16
#include "memory-util.h"
17
#include "missing_syscall.h"
18
#include "parse-util.h"
19
#include "path-util.h"
20
#include "serialize.h"
21
#include "set.h"
22
#include "string-table.h"
23
#include "string-util.h"
24

25
/* Human-readable names for the cgroup BPF attach types used when (de)serializing
 * program attachments. Indexed by the kernel's enum bpf_attach_type; attach types
 * not listed here resolve to NULL, i.e. have no string representation. */
static const char *const bpf_cgroup_attach_type_table[__MAX_BPF_ATTACH_TYPE] = {
        [BPF_CGROUP_INET_INGRESS] =     "ingress",
        [BPF_CGROUP_INET_EGRESS] =      "egress",
        [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
        [BPF_CGROUP_SOCK_OPS] =         "sock_ops",
        [BPF_CGROUP_DEVICE] =           "device",
        [BPF_CGROUP_INET4_BIND] =       "bind4",
        [BPF_CGROUP_INET6_BIND] =       "bind6",
        [BPF_CGROUP_INET4_CONNECT] =    "connect4",
        [BPF_CGROUP_INET6_CONNECT] =    "connect6",
        [BPF_CGROUP_INET4_POST_BIND] =  "post_bind4",
        [BPF_CGROUP_INET6_POST_BIND] =  "post_bind6",
        [BPF_CGROUP_UDP4_SENDMSG] =     "sendmsg4",
        [BPF_CGROUP_UDP6_SENDMSG] =     "sendmsg6",
        [BPF_CGROUP_SYSCTL] =           "sysctl",
        [BPF_CGROUP_UDP4_RECVMSG] =     "recvmsg4",
        [BPF_CGROUP_UDP6_RECVMSG] =     "recvmsg6",
        [BPF_CGROUP_GETSOCKOPT] =       "getsockopt",
        [BPF_CGROUP_SETSOCKOPT] =       "setsockopt",
};

/* Generates bpf_cgroup_attach_type_to_string() and bpf_cgroup_attach_type_from_string(). */
DEFINE_STRING_TABLE_LOOKUP(bpf_cgroup_attach_type, int);

/* Hash ops for sets of BPFProgram*, keyed by pointer identity; destroying a key
 * frees the program (and thereby detaches it, see bpf_program_free()). */
DEFINE_HASH_OPS_WITH_KEY_DESTRUCTOR(bpf_program_hash_ops, void, trivial_hash_func, trivial_compare_func, bpf_program_free);
×
49

50
/* Probes whether BPF cgroup programs can actually be used on this system.
 * Returns true (> 0) if supported, or a negative errno-style error otherwise.
 * The result is cached in a static variable, hence only the first call does any
 * real work; subsequent calls return the cached verdict. */
int bpf_program_supported(void) {
        static int cached = 0; /* 0 = not determined yet; otherwise the cached result */
        int r;

        if (cached != 0)
                return cached;

        /* Currently, we only use the following three types:
         * - BPF_PROG_TYPE_CGROUP_SKB, supported since kernel v4.10 (0e33661de493db325435d565a4a722120ae4cbf3),
         * - BPF_PROG_TYPE_CGROUP_DEVICE, supported since kernel v4.15 (ebc614f687369f9df99828572b1d85a7c2de3d92),
         * - BPF_PROG_TYPE_CGROUP_SOCK_ADDR, supported since kernel v4.17 (4fbac77d2d092b475dda9eea66da674369665427).
         * As our baseline on the kernel is v5.4, it is enough to check if one BPF program can be created and loaded. */

        _cleanup_(bpf_program_freep) BPFProgram *program = NULL;
        r = bpf_program_new(BPF_PROG_TYPE_CGROUP_SKB, /* prog_name = */ NULL, &program);
        if (r < 0)
                return cached = log_debug_errno(r, "Can't allocate CGROUP SKB BPF program, assuming BPF is not supported: %m");

        /* The smallest possible program: set R0 = 1 ("accept") and exit. */
        static const struct bpf_insn trivial[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN()
        };
        r = bpf_program_add_instructions(program, trivial, ELEMENTSOF(trivial));
        if (r < 0)
                return cached = log_debug_errno(r, "Can't add trivial instructions to CGROUP SKB BPF program, assuming BPF is not supported: %m");

        r = bpf_program_load_kernel(program, /* log_buf = */ NULL, /* log_size = */ 0);
        if (r < 0)
                return cached = log_debug_errno(r, "Can't load kernel CGROUP SKB BPF program, assuming BPF is not supported: %m");

        /* Unfortunately the kernel allows us to create BPF_PROG_TYPE_CGROUP_SKB (maybe also other types)
         * programs even when CONFIG_CGROUP_BPF is turned off at kernel compilation time. This sucks of course:
         * why does it allow us to create a cgroup BPF program if we can't do a thing with it later?
         *
         * We detect this case by issuing the BPF_PROG_DETACH bpf() call with invalid file descriptors: if
         * CONFIG_CGROUP_BPF is turned off, then the call will fail early with EINVAL. If it is turned on the
         * parameters are validated however, and that'll fail with EBADF then.
         *
         * The check seems also important when we are running with sanitizers. With sanitizers (at least with
         * LLVM v20), the following check and other bpf() calls fails even if the kernel supports BPF. To
         * avoid unexpected fail when running with sanitizers, let's explicitly check if bpf() syscall works. */

        /* Clang and GCC (>=15) do not 0-pad with structured initialization, causing the kernel to reject the
         * bpf_attr as invalid. See: https://github.com/torvalds/linux/blob/v5.9/kernel/bpf/syscall.c#L65
         * Hence, we cannot use structured initialization here, and need to clear the structure with zero
         * explicitly before use. */
        union bpf_attr attr;
        zero(attr);
        attr.attach_type = BPF_CGROUP_INET_EGRESS; /* since kernel v4.10 (0e33661de493db325435d565a4a722120ae4cbf3) */
        attr.target_fd = -EBADF;
        attr.attach_bpf_fd = -EBADF;

        if (bpf(BPF_PROG_DETACH, &attr, sizeof(attr)) < 0) {
                if (errno == EBADF) /* YAY! */
                        return cached = true;

                return cached = log_debug_errno(errno, "Didn't get EBADF from invalid BPF_PROG_DETACH call: %m");
        }

        return cached = log_debug_errno(SYNTHETIC_ERRNO(EBADE),
                                        "Wut? Kernel accepted our invalid BPF_PROG_DETACH call? Something is weird, assuming BPF is broken and hence not supported.");
}
112

113
/* Frees a BPF program object, first detaching it from its cgroup if it is
 * currently attached. Accepts NULL (no-op) and always returns NULL, so callers
 * can write `p = bpf_program_free(p);`. */
BPFProgram *bpf_program_free(BPFProgram *p) {
        if (!p)
                return NULL;
        /* Unfortunately, the kernel currently doesn't implicitly detach BPF programs from their cgroups when the last
         * fd to the BPF program is closed. This has nasty side-effects since this means that abnormally terminated
         * programs that attached one of their BPF programs to a cgroup will leave this program pinned for good with
         * zero chance of recovery, until the cgroup is removed. This is particularly problematic if the cgroup in
         * question is the root cgroup (or any other cgroup belonging to a service that cannot be restarted during
         * operation, such as dbus), as the memory for the BPF program can only be reclaimed through a reboot. To
         * counter this, we track closely to which cgroup a program was attached to and will detach it on our own
         * whenever we close the BPF fd. */
        (void) bpf_program_cgroup_detach(p); /* best-effort; fails with -EUNATCH if never attached */

        safe_close(p->kernel_fd);
        free(p->prog_name);
        free(p->instructions);
        free(p->attached_path);

        return mfree(p);
}
133

134
 /* struct bpf_prog_info info must be initialized since its value is both input and output
135
  * for BPF_OBJ_GET_INFO_BY_FD syscall. */
UNCOV
136
static int bpf_program_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, uint32_t info_len) {
×
UNCOV
137
        union bpf_attr attr;
×
138

139
        /* Explicitly memset to zero since some compilers may produce non-zero-initialized padding when
140
         * structured initialization is used.
141
         * Refer to https://github.com/systemd/systemd/issues/18164
142
         */
UNCOV
143
        zero(attr);
×
UNCOV
144
        attr.info.bpf_fd = prog_fd;
×
UNCOV
145
        attr.info.info_len = info_len;
×
UNCOV
146
        attr.info.info = PTR_TO_UINT64(info);
×
147

UNCOV
148
        return RET_NERRNO(bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)));
×
149
}
150

151
/* Allocates a new BPF program object of the given type, without loading it into
 * the kernel yet. prog_name is optional; if specified it must be shorter than
 * BPF_OBJ_NAME_LEN and is handed to the kernel at load time for debugging
 * purposes. On success, returns 0 and stores the new program in *ret; on
 * failure returns -ENAMETOOLONG or -ENOMEM. */
int bpf_program_new(uint32_t prog_type, const char *prog_name, BPFProgram **ret) {
        _cleanup_(bpf_program_freep) BPFProgram *p = NULL;
        _cleanup_free_ char *name = NULL;

        assert(ret); /* dereferenced unconditionally below; matches bpf_program_new_from_bpffs_path() */

        if (prog_name) {
                if (strlen(prog_name) >= BPF_OBJ_NAME_LEN)
                        return -ENAMETOOLONG;

                name = strdup(prog_name);
                if (!name)
                        return -ENOMEM;
        }

        p = new(BPFProgram, 1);
        if (!p)
                return -ENOMEM;

        *p = (BPFProgram) {
                .prog_type = prog_type,
                .kernel_fd = -EBADF, /* not uploaded to the kernel yet */
                .prog_name = TAKE_PTR(name),
        };

        *ret = TAKE_PTR(p);

        return 0;
}
178

179
/* Instantiates a BPFProgram from an object pinned at 'path' on a bpffs, and
 * determines its actual program type by querying the kernel. On success stores
 * the new program in *ret and returns 0, otherwise a negative errno. */
int bpf_program_new_from_bpffs_path(const char *path, BPFProgram **ret) {
        _cleanup_(bpf_program_freep) BPFProgram *prog = NULL;
        struct bpf_prog_info info = {};
        int r;

        assert(path);
        assert(ret);

        prog = new(BPFProgram, 1);
        if (!prog)
                return -ENOMEM;

        *prog = (BPFProgram) {
                .prog_type = BPF_PROG_TYPE_UNSPEC, /* fixed up below once known */
                .kernel_fd = -EBADF,
        };

        /* First acquire an fd to the pinned object... */
        r = bpf_program_load_from_bpf_fs(prog, path);
        if (r < 0)
                return r;

        /* ...then ask the kernel what kind of program it actually is. */
        r = bpf_program_get_info_by_fd(prog->kernel_fd, &info, sizeof(info));
        if (r < 0)
                return r;

        prog->prog_type = info.type;

        *ret = TAKE_PTR(prog);
        return 0;
}
209

210
/* Appends 'count' instructions to the (not yet loaded) program. Fails with
 * -EBUSY once the program has been uploaded to the kernel, since it can no
 * longer be modified then. */
int bpf_program_add_instructions(BPFProgram *p, const struct bpf_insn *instructions, size_t count) {

        assert(p);
        assert(instructions || count == 0);

        if (p->kernel_fd >= 0) /* don't allow modification after we uploaded things to the kernel */
                return -EBUSY;

        if (!GREEDY_REALLOC(p->instructions, p->n_instructions + count))
                return -ENOMEM;

        /* memcpy() with a NULL source pointer is UB even for zero bytes (C11 §7.24.2.1),
         * hence skip the copy entirely when there's nothing to append. */
        if (count > 0)
                memcpy(p->instructions + p->n_instructions, instructions, sizeof(struct bpf_insn) * count);
        p->n_instructions += count;

        return 0;
}
225

226
/* Uploads the assembled program to the kernel via BPF_PROG_LOAD. Idempotent:
 * if the program was already loaded this is a NOP (the optional verifier log
 * buffer is cleared so callers never read stale data). Returns 0 on success,
 * negative errno on failure. */
int bpf_program_load_kernel(BPFProgram *p, char *log_buf, size_t log_size) {
        union bpf_attr attr;

        assert(p);

        if (p->kernel_fd >= 0) { /* make this idempotent */
                memzero(log_buf, log_size);
                return 0;
        }

        /* Clang (and GCC >= 15) won't 0-pad with structured initialization, and the
         * kernel rejects a bpf_attr with non-zero padding, see:
         * https://github.com/torvalds/linux/blob/v5.9/kernel/bpf/syscall.c#L65
         * Hence clear the union explicitly rather than using an initializer. */
        zero(attr);
        attr.prog_type = p->prog_type;
        attr.insn_cnt = p->n_instructions;
        attr.insns = PTR_TO_UINT64(p->instructions);
        attr.license = PTR_TO_UINT64("GPL");
        attr.log_buf = PTR_TO_UINT64(log_buf);
        attr.log_size = log_size;
        attr.log_level = !!log_buf; /* only ask the verifier for a log if we have somewhere to put it */
        if (p->prog_name)
                strncpy(attr.prog_name, p->prog_name, BPF_OBJ_NAME_LEN - 1);

        p->kernel_fd = bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
        if (p->kernel_fd < 0)
                return -errno;

        return 0;
}
257

UNCOV
258
/* Fills in p->kernel_fd from an object pinned at 'path' on a bpffs, via
 * BPF_OBJ_GET. Fails with -EBUSY if the program already carries a kernel fd. */
int bpf_program_load_from_bpf_fs(BPFProgram *p, const char *path) {
        union bpf_attr attr;

        assert(p);

        /* Refuse to clobber an already assembled or loaded program. */
        if (p->kernel_fd >= 0)
                return -EBUSY;

        /* Clear explicitly: structured initialization may leave padding
         * uninitialized, which the kernel rejects as invalid. */
        zero(attr);
        attr.pathname = PTR_TO_UINT64(path);

        p->kernel_fd = bpf(BPF_OBJ_GET, &attr, sizeof(attr));
        if (p->kernel_fd < 0)
                return -errno;

        return 0;
}
275

276
/* Attaches the program to the cgroup at 'path' with the given attach type and
 * flags (one of 0, BPF_F_ALLOW_OVERRIDE, BPF_F_ALLOW_MULTI). Loads the program
 * into the kernel first if that hasn't happened yet, and records the attachment
 * so that bpf_program_cgroup_detach()/bpf_program_free() can undo it later.
 * Only a single attachment per program is tracked; attaching an already
 * attached program with different parameters fails with -EBUSY. */
int bpf_program_cgroup_attach(BPFProgram *p, int type, const char *path, uint32_t flags) {
        _cleanup_free_ char *copy = NULL;
        _cleanup_close_ int fd = -EBADF;
        union bpf_attr attr;
        int r;

        assert(p);
        assert(type >= 0);
        assert(path);

        if (!IN_SET(flags, 0, BPF_F_ALLOW_OVERRIDE, BPF_F_ALLOW_MULTI))
                return -EINVAL;

        /* We need to track which cgroup the program is attached to, and we can only track one attachment,
         * hence let's refuse this early. */
        if (p->attached_path) {
                if (!path_equal(p->attached_path, path))
                        return -EBUSY;
                if (p->attached_type != type)
                        return -EBUSY;
                if (p->attached_flags != flags)
                        return -EBUSY;

                /* Here's a shortcut: if we previously attached this program already, then we don't have to do so
                 * again. Well, with one exception: if we are in BPF_F_ALLOW_OVERRIDE mode then someone else might have
                 * replaced our program since the last time, hence let's reattach it again, just to be safe. In flags
                 * == 0 mode this is not an issue since nobody else can replace our program in that case, and in flags
                 * == BPF_F_ALLOW_MULTI mode any other's program would be installed in addition to ours hence ours
                 * would remain in effect. */
                if (flags != BPF_F_ALLOW_OVERRIDE)
                        return 0;
        }

        /* Ensure we have a kernel object for this. */
        r = bpf_program_load_kernel(p, NULL, 0);
        if (r < 0)
                return r;

        /* Copy the path up front so that recording the attachment below cannot fail anymore. */
        copy = strdup(path);
        if (!copy)
                return -ENOMEM;

        fd = open(path, O_DIRECTORY|O_RDONLY|O_CLOEXEC);
        if (fd < 0)
                return -errno;

        /* Cleared explicitly rather than initialized, so that compiler-generated padding is zeroed too. */
        zero(attr);
        attr.attach_type = type;
        attr.target_fd = fd;
        attr.attach_bpf_fd = p->kernel_fd;
        attr.attach_flags = flags;

        if (bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0)
                return -errno;

        /* Only record the attachment after the kernel accepted it. */
        free_and_replace(p->attached_path, copy);
        p->attached_type = type;
        p->attached_flags = flags;

        return 0;
}
337

338
/* Detaches the program from the cgroup recorded by a previous
 * bpf_program_cgroup_attach(). Returns -EUNATCH if the program is not
 * currently attached anywhere. A cgroup that has vanished in the meantime
 * counts as success, since its removal detached the program implicitly. */
int bpf_program_cgroup_detach(BPFProgram *p) {
        _cleanup_close_ int fd = -EBADF;

        assert(p);

        if (!p->attached_path)
                return -EUNATCH;

        fd = open(p->attached_path, O_DIRECTORY|O_RDONLY|O_CLOEXEC);
        if (fd < 0 && errno != ENOENT)
                return -errno;

        if (fd >= 0) {
                /* The cgroup still exists, hence detach explicitly. */
                union bpf_attr attr;

                zero(attr); /* explicit clear, so padding is zeroed too */
                attr.attach_type = p->attached_type;
                attr.target_fd = fd;
                attr.attach_bpf_fd = p->kernel_fd;

                if (bpf(BPF_PROG_DETACH, &attr, sizeof(attr)) < 0)
                        return -errno;
        }
        /* Otherwise (ENOENT) the cgroup is gone and took the attachment with it;
         * nothing to complain about. */

        p->attached_path = mfree(p->attached_path);

        return 0;
}
370

UNCOV
371
int bpf_map_new(
×
372
                const char *name,
373
                enum bpf_map_type type,
374
                size_t key_size,
375
                size_t value_size,
376
                size_t max_entries,
377
                uint32_t flags) {
378

379
        union bpf_attr attr;
×
UNCOV
380
        const char *n = name;
×
381

382
        zero(attr);
×
383
        attr.map_type = type;
×
UNCOV
384
        attr.key_size = key_size;
×
385
        attr.value_size = value_size;
×
386
        attr.max_entries = max_entries;
×
387
        attr.map_flags = flags;
×
388

389
        /* The map name is primarily informational for debugging purposes, and typically too short
390
         * to carry the full unit name, hence we employ a trivial lossy escaping to make it fit
391
         * (truncation + only alphanumerical, "." and "_" are allowed as per
392
         * https://docs.kernel.org/bpf/maps.html#usage-notes) */
393
        for (size_t i = 0; i < sizeof(attr.map_name) - 1 && *n; i++, n++)
×
394
                attr.map_name[i] = strchr(ALPHANUMERICAL ".", *n) ? *n : '_';
×
395

396
        return RET_NERRNO(bpf(BPF_MAP_CREATE, &attr, sizeof(attr)));
×
397
}
398

399
/* Stores 'value' under 'key' in the BPF map referenced by fd. Returns 0 on
 * success, negative errno on failure. */
int bpf_map_update_element(int fd, const void *key, void *value) {
        union bpf_attr attr;

        zero(attr); /* explicit clear, so compiler-generated padding is zeroed too */
        attr.key = PTR_TO_UINT64(key);
        attr.value = PTR_TO_UINT64(value);
        attr.map_fd = fd;

        return RET_NERRNO(bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)));
}
409

UNCOV
410
/* Looks up 'key' in the BPF map referenced by fd and writes the result to
 * 'value'. Returns 0 on success, negative errno (e.g. -ENOENT) on failure. */
int bpf_map_lookup_element(int fd, const void *key, void *value) {
        union bpf_attr attr;

        zero(attr); /* explicit clear, so compiler-generated padding is zeroed too */
        attr.key = PTR_TO_UINT64(key);
        attr.value = PTR_TO_UINT64(value);
        attr.map_fd = fd;

        return RET_NERRNO(bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
}
420

421
/* Pins the program referenced by prog_fd at 'bpffs_path', so that it stays
 * alive independently of any process holding an fd to it. Returns 0 on
 * success, negative errno on failure. */
int bpf_program_pin(int prog_fd, const char *bpffs_path) {
        union bpf_attr attr;

        zero(attr); /* explicit clear, so compiler-generated padding is zeroed too */
        attr.bpf_fd = prog_fd;
        attr.pathname = PTR_TO_UINT64((void *) bpffs_path);

        return RET_NERRNO(bpf(BPF_OBJ_PIN, &attr, sizeof(attr)));
}
430

UNCOV
431
/* Resolves the kernel-assigned program ID for the BPF program behind prog_fd.
 * On success stores the ID in *ret_id and returns 0, otherwise a negative
 * errno. */
int bpf_program_get_id_by_fd(int prog_fd, uint32_t *ret_id) {
        struct bpf_prog_info info = {};
        int r;

        assert(ret_id);

        r = bpf_program_get_info_by_fd(prog_fd, &info, sizeof(info));
        if (r < 0)
                return r;

        *ret_id = info.id;

        return 0;
} /* the stray ';' after this brace was an empty file-scope declaration (invalid in strict C) — dropped */
445

446
/* Serializes the cgroup attachment of program 'p' to 'f' under 'key', in the
 * format "<fd> <attach-type> <escaped-cgroup-path>", duplicating the kernel fd
 * into 'fds' so it survives re-execution. A NULL or unattached program is
 * silently skipped (returns 0). On success, ownership of the attachment passes
 * to the serialization (see comment at the bottom). */
int bpf_program_serialize_attachment(
                FILE *f,
                FDSet *fds,
                const char *key,
                BPFProgram *p) {

        _cleanup_free_ char *escaped = NULL;
        int copy, r;

        if (!p || !p->attached_path)
                return 0;

        /* An attached program necessarily has been loaded into the kernel. */
        assert(p->kernel_fd >= 0);

        /* C-escape the path so embedded whitespace etc. survives the round-trip. */
        escaped = cescape(p->attached_path);
        if (!escaped)
                return -ENOMEM;

        copy = fdset_put_dup(fds, p->kernel_fd);
        if (copy < 0)
                return log_error_errno(copy, "Failed to add BPF kernel fd to serialize: %m");

        r = serialize_item_format(
                        f,
                        key,
                        "%i %s %s",
                        copy,
                        bpf_cgroup_attach_type_to_string(p->attached_type),
                        escaped);
        if (r < 0)
                return r;

        /* After serialization, let's forget the fact that this program is attached. The attachment — if you
         * so will — is now 'owned' by the serialization, and not us anymore. Why does that matter? Because
         * of BPF's less-than-ideal lifecycle handling: to detach a program from a cgroup we have to
         * explicitly do so, it's not done implicitly on close(). Now, since we are serializing here we don't
         * want the program to be detached while freeing things, so that the attachment can be retained after
         * deserializing again. bpf_program_free() implicitly detaches things, if attached_path is non-NULL,
         * hence we set it to NULL here. */

        p->attached_path = mfree(p->attached_path);
        return 0;
}
489

490
/* Serializes the cgroup attachment of every program in 'set' under 'key',
 * stopping at — and propagating — the first failure. */
int bpf_program_serialize_attachment_set(FILE *f, FDSet *fds, const char *key, Set *set) {
        BPFProgram *prog;

        SET_FOREACH(prog, set) {
                int r = bpf_program_serialize_attachment(f, fds, key, prog);
                if (r < 0)
                        return r;
        }

        return 0;
}
502

503
/* Deserializes one program attachment previously written by
 * bpf_program_serialize_attachment(). 'v' has the format
 * "<fd> <attach-type> <escaped-cgroup-path>"; the fd is removed from 'fds' and
 * adopted by the new program. On success any previous program in *bpfp is
 * freed and replaced. */
int bpf_program_deserialize_attachment(const char *v, FDSet *fds, BPFProgram **bpfp) {
        _cleanup_free_ char *sfd = NULL, *sat = NULL, *unescaped = NULL;
        _cleanup_(bpf_program_freep) BPFProgram *p = NULL;
        _cleanup_close_ int fd = -EBADF;
        ssize_t l;
        int ifd, at, r;

        assert(v);
        assert(bpfp);

        /* Extract first word: the fd number */
        r = extract_first_word(&v, &sfd, NULL, 0);
        if (r < 0)
                return r;
        if (r == 0)
                return -EINVAL;

        ifd = parse_fd(sfd);
        if (ifd < 0)
                /* Previously this returned 'r', which is positive (> 0) at this point,
                 * silently reporting success on a malformed fd. Propagate the parse error. */
                return ifd;

        /* Extract second word: the attach type */
        r = extract_first_word(&v, &sat, NULL, 0);
        if (r < 0)
                return r;
        if (r == 0)
                return -EINVAL;

        at = bpf_cgroup_attach_type_from_string(sat);
        if (at < 0)
                return at;

        /* The rest is the path */
        if (isempty(v))
                return -EINVAL;

        l = cunescape(v, 0, &unescaped);
        if (l < 0)
                return l;

        /* Take ownership of the fd from the fd set. */
        fd = fdset_remove(fds, ifd);
        if (fd < 0)
                return fd;

        p = new(BPFProgram, 1);
        if (!p)
                return -ENOMEM;

        *p = (BPFProgram) {
                .kernel_fd = TAKE_FD(fd),
                .prog_type = BPF_PROG_TYPE_UNSPEC,
                .attached_path = TAKE_PTR(unescaped),
                .attached_type = at,
        };

        if (*bpfp)
                bpf_program_free(*bpfp);

        *bpfp = TAKE_PTR(p);
        return 0;
}
564

UNCOV
565
/* Like bpf_program_deserialize_attachment(), but adds the resulting program to
 * the set *bpfsetp (allocating the set if necessary) instead of replacing a
 * single program pointer. */
int bpf_program_deserialize_attachment_set(const char *v, FDSet *fds, Set **bpfsetp) {
        BPFProgram *prog = NULL;
        int r;

        assert(v);
        assert(bpfsetp);

        r = bpf_program_deserialize_attachment(v, fds, &prog);
        if (r < 0)
                return r;

        /* The set takes ownership; set_ensure_consume() frees 'prog' on failure. */
        r = set_ensure_consume(bpfsetp, &bpf_program_hash_ops, prog);
        if (r < 0)
                return r;

        return 0;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc