• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 14481768866

15 Apr 2025 11:30PM UTC coverage: 72.112% (+0.08%) from 72.031%
14481768866

push

github

yuwata
docs: fix mkosi section for Environment= setting

297023 of 411890 relevant lines covered (72.11%)

685615.48 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

81.55
/src/shared/cgroup-setup.c
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#include <unistd.h>
4

5
#include "cgroup-setup.h"
6
#include "cgroup-util.h"
7
#include "errno-util.h"
8
#include "fd-util.h"
9
#include "fileio.h"
10
#include "fs-util.h"
11
#include "missing_magic.h"
12
#include "mkdir.h"
13
#include "parse-util.h"
14
#include "path-util.h"
15
#include "process-util.h"
16
#include "recurse-dir.h"
17
#include "stdio-util.h"
18
#include "string-util.h"
19
#include "user-util.h"
20

21
int cg_weight_parse(const char *s, uint64_t *ret) {
531✔
22
        uint64_t u;
531✔
23
        int r;
531✔
24

25
        assert(s);
531✔
26
        assert(ret);
531✔
27

28
        if (isempty(s)) {
531✔
29
                *ret = CGROUP_WEIGHT_INVALID;
×
30
                return 0;
×
31
        }
32

33
        r = safe_atou64(s, &u);
531✔
34
        if (r < 0)
531✔
35
                return r;
36

37
        if (u < CGROUP_WEIGHT_MIN || u > CGROUP_WEIGHT_MAX)
531✔
38
                return -ERANGE;
39

40
        *ret = u;
531✔
41
        return 0;
531✔
42
}
43

44
int cg_cpu_weight_parse(const char *s, uint64_t *ret) {
529✔
45
        assert(s);
529✔
46
        assert(ret);
529✔
47

48
        if (streq(s, "idle"))
529✔
49
                return *ret = CGROUP_WEIGHT_IDLE;
×
50

51
        return cg_weight_parse(s, ret);
529✔
52
}
53

54
static int trim_cb(
116,940✔
55
                RecurseDirEvent event,
56
                const char *path,
57
                int dir_fd,
58
                int inode_fd,
59
                const struct dirent *de,
60
                const struct statx *sx,
61
                void *userdata) {
62

63
        /* Failures to delete inner cgroup we ignore (but debug log in case error code is unexpected) */
64
        if (event == RECURSE_DIR_LEAVE &&
116,940✔
65
            de->d_type == DT_DIR &&
592✔
66
            unlinkat(dir_fd, de->d_name, AT_REMOVEDIR) < 0 &&
296✔
67
            !IN_SET(errno, ENOENT, ENOTEMPTY, EBUSY))
236✔
68
                log_debug_errno(errno, "Failed to trim inner cgroup %s, ignoring: %m", path);
1✔
69

70
        return RECURSE_DIR_CONTINUE;
116,940✔
71
}
72

73
/* Removes the cgroup @path together with all (empty) subcgroups below it. Failures to remove inner
 * subcgroups are ignored (see trim_cb()); failure to remove the top-level cgroup itself is
 * propagated, but only if @delete_root is true and @path is not the root cgroup.
 * Returns 0 on success (a non-existing cgroup counts as success), negative errno otherwise. */
int cg_trim(const char *path, bool delete_root) {
        _cleanup_free_ char *fs = NULL;
        int r;

        /* Translate the cgroup path into a file system path below the cgroupfs mount point. */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, path, NULL, &fs);
        if (r < 0)
                return r;

        /* Depth-first traversal: trim_cb() rmdir()s each subdirectory on the way out. */
        r = recurse_dir_at(
                        AT_FDCWD,
                        fs,
                        /* statx_mask = */ 0,
                        /* n_depth_max = */ UINT_MAX,
                        RECURSE_DIR_ENSURE_TYPE,
                        trim_cb,
                        /* userdata = */ NULL);
        if (r == -ENOENT) /* non-existing is the ultimate trimming, hence no error */
                r = 0;
        else if (r < 0)
                log_debug_errno(r, "Failed to trim subcgroups of '%s': %m", path);

        /* If we shall delete the top-level cgroup, then propagate the failure to do so (except if it is
         * already gone anyway). Also, let's debug log about this failure, except if the error code is an
         * expected one. */
        if (delete_root && !empty_or_root(path) &&
            rmdir(fs) < 0 && errno != ENOENT) {
                if (!IN_SET(errno, ENOTEMPTY, EBUSY))
                        log_debug_errno(errno, "Failed to trim cgroup '%s': %m", path);
                RET_GATHER(r, -errno); /* keep the first error, or this one if none was recorded yet */
        }

        return r;
}
106

107
/* Create a cgroup in the hierarchy of controller.
108
 * Returns 0 if the group already existed, 1 on success, negative otherwise.
109
 */
110
int cg_create(const char *path) {
4,392✔
111
        _cleanup_free_ char *fs = NULL;
4,392✔
112
        int r;
4,392✔
113

114
        r = cg_get_path_and_check(SYSTEMD_CGROUP_CONTROLLER, path, NULL, &fs);
4,392✔
115
        if (r < 0)
4,392✔
116
                return r;
117

118
        r = mkdir_parents(fs, 0755);
4,392✔
119
        if (r < 0)
4,392✔
120
                return r;
121

122
        r = RET_NERRNO(mkdir(fs, 0755));
4,392✔
123
        if (r == -EEXIST)
1,490✔
124
                return 0;
125
        if (r < 0)
2,902✔
126
                return r;
×
127

128
        return 1;
129
}
130

131
int cg_attach(const char *path, pid_t pid) {
11,913✔
132
        _cleanup_free_ char *fs = NULL;
11,913✔
133
        char c[DECIMAL_STR_MAX(pid_t) + 2];
11,913✔
134
        int r;
11,913✔
135

136
        assert(path);
11,913✔
137
        assert(pid >= 0);
11,913✔
138

139
        r = cg_get_path_and_check(SYSTEMD_CGROUP_CONTROLLER, path, "cgroup.procs", &fs);
11,913✔
140
        if (r < 0)
11,913✔
141
                return r;
142

143
        if (pid == 0)
11,913✔
144
                pid = getpid_cached();
11,753✔
145

146
        xsprintf(c, PID_FMT "\n", pid);
11,913✔
147

148
        r = write_string_file(fs, c, WRITE_STRING_FILE_DISABLE_BUFFER);
11,913✔
149
        if (r == -EOPNOTSUPP && cg_is_threaded(path) > 0)
11,913✔
150
                /* When the threaded mode is used, we cannot read/write the file. Let's return recognizable error. */
151
                return -EUCLEAN;
152
        if (r < 0)
11,913✔
153
                return r;
3✔
154

155
        return 0;
156
}
157

158
int cg_fd_attach(int fd, pid_t pid) {
7✔
159
        char c[DECIMAL_STR_MAX(pid_t) + 2];
7✔
160

161
        assert(fd >= 0);
7✔
162
        assert(pid >= 0);
7✔
163

164
        if (pid == 0)
7✔
165
                pid = getpid_cached();
×
166

167
        xsprintf(c, PID_FMT "\n", pid);
7✔
168

169
        return write_string_file_at(fd, "cgroup.procs", c, WRITE_STRING_FILE_DISABLE_BUFFER);
7✔
170
}
171

172
/* Convenience wrapper: creates the cgroup @path (if missing) and moves @pid into it.
 * Returns cg_create()'s result (0 if the group existed, 1 if newly created) on success,
 * negative errno on failure. Note: the cgroup is NOT removed again if attaching fails. */
int cg_create_and_attach(const char *path, pid_t pid) {
        int r;

        assert(pid >= 0);

        r = cg_create(path);
        if (r < 0)
                return r;

        int q = cg_attach(path, pid);
        if (q < 0)
                return q;

        /* Propagate cg_create()'s "did we create it?" result, not cg_attach()'s. */
        return r;
}
189

190
int cg_set_access(
662✔
191
                const char *path,
192
                uid_t uid,
193
                gid_t gid) {
194

195
        static const struct {
662✔
196
                const char *name;
197
                bool fatal;
198
        } attributes[] = {
199
                { "cgroup.procs",           true  },
200
                { "cgroup.subtree_control", true  },
201
                { "cgroup.threads",         false },
202
                { "memory.oom.group",       false },
203
                { "memory.reclaim",         false },
204
        };
205

206
        _cleanup_free_ char *fs = NULL;
662✔
207
        int r;
662✔
208

209
        assert(path);
662✔
210

211
        if (uid == UID_INVALID && gid == GID_INVALID)
662✔
212
                return 0;
213

214
        /* Configure access to the cgroup itself */
215
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, path, NULL, &fs);
185✔
216
        if (r < 0)
185✔
217
                return r;
218

219
        r = chmod_and_chown(fs, 0755, uid, gid);
185✔
220
        if (r < 0)
185✔
221
                return r;
222

223
        /* Configure access to the cgroup's attributes */
224
        FOREACH_ELEMENT(i, attributes) {
1,110✔
225
                _cleanup_free_ char *a = path_join(fs, i->name);
1,850✔
226
                if (!a)
925✔
227
                        return -ENOMEM;
228

229
                r = chmod_and_chown(a, 0644, uid, gid);
925✔
230
                if (r < 0) {
925✔
231
                        if (i->fatal)
×
232
                                return r;
233

234
                        log_debug_errno(r, "Failed to set access on cgroup %s, ignoring: %m", a);
925✔
235
                }
236
        }
237

238
        return 0;
239
}
240

241
/* Passed as userdata through recurse_dir() by cg_set_access_recursive(). */
struct access_callback_data {
        uid_t uid;  /* owner to assign to each visited inode */
        gid_t gid;  /* group to assign to each visited inode */
        int error;  /* 0, or the first chown failure collected by access_callback() (<= 0) */
};
246

247
static int access_callback(
3,387✔
248
                RecurseDirEvent event,
249
                const char *path,
250
                int dir_fd,
251
                int inode_fd,
252
                const struct dirent *de,
253
                const struct statx *sx,
254
                void *userdata) {
255

256
        if (!IN_SET(event, RECURSE_DIR_ENTER, RECURSE_DIR_ENTRY))
3,387✔
257
                return RECURSE_DIR_CONTINUE;
258

259
        struct access_callback_data *d = ASSERT_PTR(userdata);
3,213✔
260

261
        assert(path);
3,213✔
262
        assert(inode_fd >= 0);
3,213✔
263

264
        if (fchownat(inode_fd, "", d->uid, d->gid, AT_EMPTY_PATH) < 0)
3,213✔
265
                RET_GATHER(d->error, log_debug_errno(errno, "Failed to change ownership of '%s', ignoring: %m", path));
×
266

267
        return RECURSE_DIR_CONTINUE;
268
}
269

270
int cg_set_access_recursive(
320✔
271
                const char *path,
272
                uid_t uid,
273
                gid_t gid) {
274

275
        _cleanup_close_ int fd = -EBADF;
320✔
276
        _cleanup_free_ char *fs = NULL;
320✔
277
        int r;
320✔
278

279
        assert(path);
320✔
280

281
        /* A recursive version of cg_set_access(). But note that this one changes ownership of *all* files,
282
         * not just the allowlist that cg_set_access() uses. Use cg_set_access() on the cgroup you want to
283
         * delegate, and cg_set_access_recursive() for any subcgroups you might want to create below it. */
284

285
        if (!uid_is_valid(uid) && !gid_is_valid(gid))
466✔
286
                return 0;
287

288
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, path, NULL, &fs);
174✔
289
        if (r < 0)
174✔
290
                return r;
291

292
        fd = open(fs, O_DIRECTORY|O_CLOEXEC);
174✔
293
        if (fd < 0)
174✔
294
                return -errno;
×
295

296
        struct access_callback_data d = {
174✔
297
                .uid = uid,
298
                .gid = gid,
299
        };
300

301
        r = recurse_dir(fd,
174✔
302
                        fs,
303
                        /* statx_mask= */ 0,
304
                        /* n_depth_max= */ UINT_MAX,
305
                        RECURSE_DIR_SAME_MOUNT|RECURSE_DIR_INODE_FD|RECURSE_DIR_TOPLEVEL,
306
                        access_callback,
307
                        &d);
308
        if (r < 0)
174✔
309
                return r;
310

311
        assert(d.error <= 0);
174✔
312
        return d.error;
313
}
314

315
/* Moves all processes from cgroup @from to cgroup @to. Iterates until a full pass finds no process
 * left to move, since new processes may appear (or be forked) while we migrate. Returns > 0 if at
 * least one process was moved, 0 if none was, negative errno on failure; partial failures are
 * gathered and reported after the loop finishes. */
int cg_migrate(
                const char *from,
                const char *to,
                CGroupFlags flags) {

        _cleanup_set_free_ Set *s = NULL; /* PIDs already migrated, to avoid re-processing them */
        bool done;
        int r, ret = 0;

        assert(from);
        assert(to);

        do {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid;

                done = true;

                r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, from, &f);
                if (r < 0)
                        return RET_GATHER(ret, r);

                while ((r = cg_read_pid(f, &pid, flags)) > 0) {
                        /* Throw an error if unmappable PIDs are in output, we can't migrate those. */
                        if (pid == 0)
                                return -EREMOTE;

                        /* This might do weird stuff if we aren't a single-threaded program. However, we
                         * luckily know we are. */
                        if (FLAGS_SET(flags, CGROUP_IGNORE_SELF) && pid == getpid_cached())
                                continue;

                        /* Already moved this one in an earlier pass. */
                        if (set_contains(s, PID_TO_PTR(pid)))
                                continue;

                        /* Kernel threads cannot be migrated to a different cgroup. */
                        if (pid_is_kernel_thread(pid) > 0)
                                continue;

                        r = cg_attach(to, pid);
                        if (r < 0) {
                                /* The process may have exited in the meantime (-ESRCH), which is fine. */
                                if (r != -ESRCH)
                                        RET_GATHER(ret, r);
                        } else if (ret == 0)
                                ret = 1; /* record that we moved at least one process */

                        done = false; /* something changed, do another pass */

                        r = set_ensure_put(&s, /* hash_ops = */ NULL, PID_TO_PTR(pid));
                        if (r < 0)
                                return RET_GATHER(ret, r);
                }
                if (r < 0)
                        return RET_GATHER(ret, r);
        } while (!done);

        return ret;
}
372

373
/* Enables/disables cgroup v2 controllers for the children of cgroup @p by writing "+name"/"-name"
 * tokens to its cgroup.subtree_control file. @supported limits which controllers are touched at
 * all; @mask selects which of those shall be enabled (the rest are disabled). On return,
 * *@ret_result_mask (if non-NULL) contains the set of controllers believed enabled afterwards.
 * Individual write failures are debug-logged but do not abort the loop. */
int cg_enable(
                CGroupMask supported,
                CGroupMask mask,
                const char *p,
                CGroupMask *ret_result_mask) {

        _cleanup_fclose_ FILE *f = NULL;
        _cleanup_free_ char *fs = NULL;
        CGroupController c;
        CGroupMask ret = 0;
        int r;

        assert(p);

        /* No controllers supported at all? Then the result is trivially empty. */
        if (supported == 0) {
                if (ret_result_mask)
                        *ret_result_mask = 0;
                return 0;
        }

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, p, "cgroup.subtree_control", &fs);
        if (r < 0)
                return r;

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                const char *n;

                /* Skip controllers that don't exist in the v2 hierarchy. */
                if (!FLAGS_SET(CGROUP_MASK_V2, bit))
                        continue;

                if (!FLAGS_SET(supported, bit))
                        continue;

                n = cgroup_controller_to_string(c);
                {
                        /* Room for the +/- prefix, the controller name and the NUL terminator. */
                        char s[1 + strlen(n) + 1];

                        s[0] = FLAGS_SET(mask, bit) ? '+' : '-';
                        strcpy(s + 1, n);

                        /* Open the control file lazily, once we know there is something to write. */
                        if (!f) {
                                f = fopen(fs, "we");
                                if (!f)
                                        return log_debug_errno(errno, "Failed to open cgroup.subtree_control file of %s: %m", p);
                        }

                        r = write_string_stream(f, s, WRITE_STRING_FILE_DISABLE_BUFFER);
                        if (r < 0) {
                                log_debug_errno(r, "Failed to %s controller %s for %s (%s): %m",
                                                FLAGS_SET(mask, bit) ? "enable" : "disable", n, p, fs);
                                clearerr(f); /* reset the error flag so subsequent writes can proceed */

                                /* If we can't turn off a controller, leave it on in the reported resulting mask. This
                                 * happens for example when we attempt to turn off a controller up in the tree that is
                                 * used down in the tree. */
                                if (!FLAGS_SET(mask, bit) && r == -EBUSY) /* You might wonder why we check for EBUSY
                                                                           * only here, and not follow the same logic
                                                                           * for other errors such as EINVAL or
                                                                           * EOPNOTSUPP or anything else. That's
                                                                           * because EBUSY indicates that the
                                                                           * controllers is currently enabled and
                                                                           * cannot be disabled because something down
                                                                           * the hierarchy is still using it. Any other
                                                                           * error most likely means something like "I
                                                                           * never heard of this controller" or
                                                                           * similar. In the former case it's hence
                                                                           * safe to assume the controller is still on
                                                                           * after the failed operation, while in the
                                                                           * latter case it's safer to assume the
                                                                           * controller is unknown and hence certainly
                                                                           * not enabled. */
                                        ret |= bit;
                        } else {
                                /* Otherwise, if we managed to turn on a controller, set the bit reflecting that. */
                                if (FLAGS_SET(mask, bit))
                                        ret |= bit;
                        }
                }
        }

        /* Let's return the precise set of controllers now enabled for the cgroup. */
        if (ret_result_mask)
                *ret_result_mask = ret;

        return 0;
}
460

461
int cg_has_legacy(void) {
685✔
462
        struct statfs fs;
685✔
463

464
        /* Checks if any legacy controller/hierarchy is mounted. */
465

466
        if (statfs("/sys/fs/cgroup/", &fs) < 0) {
685✔
467
                if (errno == ENOENT) /* sysfs not mounted? */
×
468
                        return false;
685✔
469

470
                return log_error_errno(errno, "Failed to statfs /sys/fs/cgroup/: %m");
×
471
        }
472

473
        if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC) ||
685✔
474
            is_fs_type(&fs, SYSFS_MAGIC)) /* not mounted yet */
×
475
                return false;
476

477
        if (is_fs_type(&fs, TMPFS_MAGIC)) {
×
478
                log_info("Found tmpfs on /sys/fs/cgroup/, assuming legacy hierarchy.");
×
479
                return true;
×
480
        }
481

482
        return log_error_errno(SYNTHETIC_ERRNO(ENOMEDIUM),
×
483
                               "Unknown filesystem type %llx mounted on /sys/fs/cgroup/.",
484
                               (unsigned long long) fs.f_type);
485
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc