• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 20495933613

24 Dec 2025 06:25PM UTC coverage: 72.64% (-0.06%) from 72.701%
20495933613

push

github

YHNdnzj
man: document version for BindNetworkInterface instead of using ignore list

The ignore list is for older stuff, all new interfaces must be documented
with a version.

Follow-up for c1c787651

309829 of 426528 relevant lines covered (72.64%)

1135837.86 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

66.67
/src/basic/cgroup-util.h
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2
#pragma once
3

4
#include "basic-forward.h"
5

6
#define SYSTEMD_CGROUP_CONTROLLER "_systemd"

/* An enum of well known cgroup controllers */
/* NB: each entry's ordinal defines its bit in CGroupMask (via CGROUP_CONTROLLER_TO_MASK()),
 * so the relative order of entries is significant — append, don't reorder. */
typedef enum CGroupController {
        /* Original cgroup controllers */
        CGROUP_CONTROLLER_CPU,
        CGROUP_CONTROLLER_CPUACCT,    /* v1 only */
        CGROUP_CONTROLLER_CPUSET,     /* v2 only */
        CGROUP_CONTROLLER_IO,         /* v2 only */
        CGROUP_CONTROLLER_BLKIO,      /* v1 only */
        CGROUP_CONTROLLER_MEMORY,
        CGROUP_CONTROLLER_DEVICES,    /* v1 only */
        CGROUP_CONTROLLER_PIDS,

        /* BPF-based pseudo-controllers, v2 only */
        CGROUP_CONTROLLER_BPF_FIREWALL,
        CGROUP_CONTROLLER_BPF_DEVICES,
        CGROUP_CONTROLLER_BPF_FOREIGN,
        CGROUP_CONTROLLER_BPF_SOCKET_BIND,
        CGROUP_CONTROLLER_BPF_RESTRICT_NETWORK_INTERFACES,
        CGROUP_CONTROLLER_BPF_BIND_NETWORK_INTERFACE,
        /* The BPF hook implementing RestrictFileSystems= is not defined here.
         * It's applied as late as possible in exec_invoke() so we don't block
         * our own unit setup code. */

        _CGROUP_CONTROLLER_MAX,
        _CGROUP_CONTROLLER_INVALID = -EINVAL,
} CGroupController;
34

35
/* Map a CGroupController enum value to its bit in a CGroupMask. */
#define CGROUP_CONTROLLER_TO_MASK(c) (1U << (c))

/* A bit mask of well known cgroup controllers */
typedef enum CGroupMask {
        /* One bit per CGroupController, in the same order as the enum above. */
        CGROUP_MASK_CPU = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_CPU),
        CGROUP_MASK_CPUACCT = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_CPUACCT),
        CGROUP_MASK_CPUSET = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_CPUSET),
        CGROUP_MASK_IO = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_IO),
        CGROUP_MASK_BLKIO = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BLKIO),
        CGROUP_MASK_MEMORY = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_MEMORY),
        CGROUP_MASK_DEVICES = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_DEVICES),
        CGROUP_MASK_PIDS = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_PIDS),
        CGROUP_MASK_BPF_FIREWALL = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_FIREWALL),
        CGROUP_MASK_BPF_DEVICES = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_DEVICES),
        CGROUP_MASK_BPF_FOREIGN = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_FOREIGN),
        CGROUP_MASK_BPF_SOCKET_BIND = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_SOCKET_BIND),
        CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_RESTRICT_NETWORK_INTERFACES),
        CGROUP_MASK_BPF_BIND_NETWORK_INTERFACE = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_BIND_NETWORK_INTERFACE),

        /* All real cgroup v1 controllers */
        CGROUP_MASK_V1 = CGROUP_MASK_CPU|CGROUP_MASK_CPUACCT|CGROUP_MASK_BLKIO|CGROUP_MASK_MEMORY|CGROUP_MASK_DEVICES|CGROUP_MASK_PIDS,

        /* All real cgroup v2 controllers */
        CGROUP_MASK_V2 = CGROUP_MASK_CPU|CGROUP_MASK_CPUSET|CGROUP_MASK_IO|CGROUP_MASK_MEMORY|CGROUP_MASK_PIDS,

        /* All controllers we want to delegate in case of Delegate=yes. Which are pretty much the v2 controllers only, as delegation on v1 is not safe, and bpf stuff isn't a real controller */
        CGROUP_MASK_DELEGATE = CGROUP_MASK_V2,

        /* All cgroup v2 BPF pseudo-controllers */
        CGROUP_MASK_BPF = CGROUP_MASK_BPF_FIREWALL|CGROUP_MASK_BPF_DEVICES|CGROUP_MASK_BPF_FOREIGN|CGROUP_MASK_BPF_SOCKET_BIND|CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES|CGROUP_MASK_BPF_BIND_NETWORK_INTERFACE,

        /* Every bit below _CGROUP_CONTROLLER_MAX set, i.e. the union of all masks above. */
        _CGROUP_MASK_ALL = CGROUP_CONTROLLER_TO_MASK(_CGROUP_CONTROLLER_MAX) - 1,
} CGroupMask;
68

69
/* Special values for all weight knobs on unified hierarchy */
#define CGROUP_WEIGHT_INVALID UINT64_MAX
#define CGROUP_WEIGHT_IDLE UINT64_C(0)
#define CGROUP_WEIGHT_MIN UINT64_C(1)
#define CGROUP_WEIGHT_MAX UINT64_C(10000)
#define CGROUP_WEIGHT_DEFAULT UINT64_C(100)

#define CGROUP_LIMIT_MIN UINT64_C(0)
#define CGROUP_LIMIT_MAX UINT64_MAX

/* Returns true if 'x' is either the "unset" marker (CGROUP_WEIGHT_INVALID) or lies
 * within [CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX]. Note that CGROUP_WEIGHT_IDLE (0)
 * does not pass this check. */
static inline bool CGROUP_WEIGHT_IS_OK(uint64_t x) {
        if (x == CGROUP_WEIGHT_INVALID)
                return true;

        return x >= CGROUP_WEIGHT_MIN && x <= CGROUP_WEIGHT_MAX;
}
84

85
/* IO limits on unified hierarchy */
typedef enum CGroupIOLimitType {
        /* These ordinals index cgroup_io_limit_defaults[] (declared below). */
        CGROUP_IO_RBPS_MAX,
        CGROUP_IO_WBPS_MAX,
        CGROUP_IO_RIOPS_MAX,
        CGROUP_IO_WIOPS_MAX,

        _CGROUP_IO_LIMIT_TYPE_MAX,
        _CGROUP_IO_LIMIT_TYPE_INVALID = -EINVAL,
} CGroupIOLimitType;
95

96
extern const uint64_t cgroup_io_limit_defaults[_CGROUP_IO_LIMIT_TYPE_MAX];
97

98
const char* cgroup_io_limit_type_to_string(CGroupIOLimitType t) _const_;
99
CGroupIOLimitType cgroup_io_limit_type_from_string(const char *s) _pure_;
100
void cgroup_io_limits_list(void);
101

102
/* Special values for the io.bfq.weight attribute */
103
#define CGROUP_BFQ_WEIGHT_INVALID UINT64_MAX
104
#define CGROUP_BFQ_WEIGHT_MIN UINT64_C(1)
105
#define CGROUP_BFQ_WEIGHT_MAX UINT64_C(1000)
106
#define CGROUP_BFQ_WEIGHT_DEFAULT UINT64_C(100)
107

108
/* Convert the normal io.weight value to io.bfq.weight */
109
static inline uint64_t BFQ_WEIGHT(uint64_t io_weight) {
×
110
        return
×
111
            io_weight <= CGROUP_WEIGHT_DEFAULT ?
112
            CGROUP_BFQ_WEIGHT_DEFAULT - (CGROUP_WEIGHT_DEFAULT - io_weight) * (CGROUP_BFQ_WEIGHT_DEFAULT - CGROUP_BFQ_WEIGHT_MIN) / (CGROUP_WEIGHT_DEFAULT - CGROUP_WEIGHT_MIN) :
×
113
            CGROUP_BFQ_WEIGHT_DEFAULT + (io_weight - CGROUP_WEIGHT_DEFAULT) * (CGROUP_BFQ_WEIGHT_MAX - CGROUP_BFQ_WEIGHT_DEFAULT) / (CGROUP_WEIGHT_MAX - CGROUP_WEIGHT_DEFAULT);
×
114
}
115

116
/*
117
 * General rules:
118
 *
119
 * We require absolute cgroup paths. When returning, we will always
120
 * generate paths with multiple adjacent / removed.
121
 */
122

123
int cg_is_available(void);
124

125
int cg_path_open(const char *path);
126
int cg_cgroupid_open(int cgroupfs_fd, uint64_t id);
127

128
int cg_path_from_cgroupid(int cgroupfs_fd, uint64_t id, char **ret);
129
int cg_get_cgroupid_at(int dfd, const char *path, uint64_t *ret);
130
/* Like cg_get_cgroupid_at(), but resolves 'path' relative to the current working directory. */
static inline int cg_path_get_cgroupid(const char *path, uint64_t *ret) {
        return cg_get_cgroupid_at(AT_FDCWD, path, ret);
}
133
/* Like cg_get_cgroupid_at(), but takes the cgroup directly as an fd (path is NULL). */
static inline int cg_fd_get_cgroupid(int fd, uint64_t *ret) {
        return cg_get_cgroupid_at(fd, NULL, ret);
}
136

137
/* Flags accepted by cg_read_pid()/cg_read_pidref() and the cg_kill*() helpers below. */
typedef enum CGroupFlags {
        CGROUP_SIGCONT            = 1 << 0,
        CGROUP_IGNORE_SELF        = 1 << 1,
        CGROUP_DONT_SKIP_UNMAPPED = 1 << 2,
} CGroupFlags;
142

143
int cg_enumerate_processes(const char *path, FILE **ret);
144
int cg_read_pid(FILE *f, pid_t *ret, CGroupFlags flags);
145
int cg_read_pidref(FILE *f, PidRef *ret, CGroupFlags flags);
146

147
int cg_enumerate_subgroups(const char *path, DIR **ret);
148
int cg_read_subgroup(DIR *d, char **ret);
149

150
typedef int (*cg_kill_log_func_t)(const PidRef *pid, int sig, void *userdata);
151

152
int cg_kill(const char *path, int sig, CGroupFlags flags, Set *killed_pids, cg_kill_log_func_t log_kill, void *userdata);
153
int cg_kill_kernel_sigkill(const char *path);
154
int cg_kill_recursive(const char *path, int sig, CGroupFlags flags, Set *killed_pids, cg_kill_log_func_t log_kill, void *userdata);
155

156
int cg_split_spec(const char *spec, char **ret_controller, char **ret_path);
157

158
int cg_get_path(const char *path, const char *suffix, char **ret);
159

160
int cg_pid_get_path(pid_t pid, char **ret);
161
int cg_pidref_get_path(const PidRef *pidref, char **ret);
162

163
int cg_is_threaded(const char *path);
164

165
int cg_is_delegated(const char *path);
166
int cg_is_delegated_fd(int fd);
167

168
int cg_has_coredump_receive(const char *path);
169

170
int cg_set_attribute(const char *path, const char *attribute, const char *value);
171
int cg_get_attribute(const char *path, const char *attribute, char **ret);
172
int cg_get_attribute_as_uint64(const char *path, const char *attribute, uint64_t *ret);
173
int cg_get_attribute_as_bool(const char *path, const char *attribute);
174

175
int cg_get_keyed_attribute(const char *path, const char *attribute, char * const *keys, char **values);
176

177
int cg_get_owner(const char *path, uid_t *ret_uid);
178

179
int cg_set_xattr(const char *path, const char *name, const void *value, size_t size, int flags);
180
int cg_get_xattr(const char *path, const char *name, char **ret, size_t *ret_size);
181
/* Returns negative on error, and 0 or 1 on success for the bool value */
182
int cg_get_xattr_bool(const char *path, const char *name);
183
int cg_remove_xattr(const char *path, const char *name);
184

185
int cg_is_empty(const char *path);
186

187
int cg_get_root_path(char **path);
188

189
int cg_path_get_session(const char *path, char **ret_session);
190
int cg_path_get_owner_uid(const char *path, uid_t *ret_uid);
191
int cg_path_get_unit_full(const char *path, char **ret_unit, char **ret_subgroup);
192
/* Like cg_path_get_unit_full(), but discards the subgroup. */
static inline int cg_path_get_unit(const char *path, char **ret_unit) {
        return cg_path_get_unit_full(path, ret_unit, NULL);
}
195
int cg_path_get_unit_path(const char *path, char **ret_unit);
196
int cg_path_get_user_unit_full(const char *path, char **ret_unit, char **ret_subgroup);
197
/* Like cg_path_get_user_unit_full(), but discards the subgroup. */
static inline int cg_path_get_user_unit(const char *path, char **ret_unit) {
        return cg_path_get_user_unit_full(path, ret_unit, NULL);
}
200
int cg_path_get_machine_name(const char *path, char **ret_machine);
201
int cg_path_get_slice(const char *path, char **ret_slice);
202
int cg_path_get_user_slice(const char *path, char **ret_slice);
203

204
int cg_shift_path(const char *cgroup, const char *cached_root, const char **ret_shifted);
205
int cg_pid_get_path_shifted(pid_t pid, const char *cached_root, char **ret_cgroup);
206

207
int cg_pid_get_session(pid_t pid, char **ret_session);
208
int cg_pidref_get_session(const PidRef *pidref, char **ret);
209
int cg_pid_get_owner_uid(pid_t pid, uid_t *ret_uid);
210
int cg_pidref_get_owner_uid(const PidRef *pidref, uid_t *ret);
211
int cg_pid_get_unit_full(pid_t pid, char **ret_unit, char **ret_subgroup);
212
/* Like cg_pid_get_unit_full(), but discards the subgroup. */
static inline int cg_pid_get_unit(pid_t pid, char **ret_unit) {
        return cg_pid_get_unit_full(pid, ret_unit, NULL);
}
215
int cg_pidref_get_unit_full(const PidRef *pidref, char **ret_unit, char **ret_subgroup);
216
/* Like cg_pidref_get_unit_full(), but discards the subgroup. */
static inline int cg_pidref_get_unit(const PidRef *pidref, char **ret_unit) {
        return cg_pidref_get_unit_full(pidref, ret_unit, NULL);
}
219
int cg_pid_get_user_unit_full(pid_t pid, char **ret_unit, char **ret_subgroup);
220
/* Like cg_pid_get_user_unit_full(), but discards the subgroup. */
static inline int cg_pid_get_user_unit(pid_t pid, char **ret_unit) {
        return cg_pid_get_user_unit_full(pid, ret_unit, NULL);
}
223
int cg_pidref_get_user_unit_full(const PidRef *pidref, char **ret_unit, char **ret_subgroup);
224
/* Like cg_pidref_get_user_unit_full(), but discards the subgroup. */
static inline int cg_pidref_get_user_unit(const PidRef *pidref, char **ret_unit) {
        return cg_pidref_get_user_unit_full(pidref, ret_unit, NULL);
}
227
int cg_pid_get_machine_name(pid_t pid, char **ret_machine);
228
int cg_pid_get_slice(pid_t pid, char **ret_slice);
229
int cg_pid_get_user_slice(pid_t pid, char **ret_slice);
230

231
int cg_path_decode_unit(const char *cgroup, char **ret_unit);
232

233
bool cg_needs_escape(const char *p) _pure_;
234
int cg_escape(const char *p, char **ret);
235
char* cg_unescape(const char *p) _pure_;
236

237
int cg_slice_to_path(const char *unit, char **ret);
238

239
int cg_mask_supported(CGroupMask *ret);
240
int cg_mask_supported_subtree(const char *root, CGroupMask *ret);
241
int cg_mask_from_string(const char *s, CGroupMask *ret);
242
int cg_mask_to_string(CGroupMask mask, char **ret);
243

244
bool cg_kill_supported(void);
245

246
const char* cgroup_controller_to_string(CGroupController c) _const_;
247
CGroupController cgroup_controller_from_string(const char *s) _pure_;
248

249
/* Mode values for the managed OOM logic; string conversion helpers are declared below.
 * NOTE(review): presumably these correspond to the ManagedOOM*= unit settings — confirm
 * against the callers of managed_oom_mode_from_string(). */
typedef enum ManagedOOMMode {
        MANAGED_OOM_AUTO,
        MANAGED_OOM_KILL,
        _MANAGED_OOM_MODE_MAX,
        _MANAGED_OOM_MODE_INVALID = -EINVAL,
} ManagedOOMMode;
255

256
const char* managed_oom_mode_to_string(ManagedOOMMode m) _const_;
257
ManagedOOMMode managed_oom_mode_from_string(const char *s) _pure_;
258

259
/* Per-unit preference values for the managed OOM logic; string conversion helpers below.
 * NOTE(review): the values are assigned explicitly, which suggests they are part of a
 * serialized/IPC protocol — confirm before renumbering. */
typedef enum ManagedOOMPreference {
        MANAGED_OOM_PREFERENCE_NONE = 0,
        MANAGED_OOM_PREFERENCE_AVOID = 1,
        MANAGED_OOM_PREFERENCE_OMIT = 2,
        _MANAGED_OOM_PREFERENCE_MAX,
        _MANAGED_OOM_PREFERENCE_INVALID = -EINVAL,
} ManagedOOMPreference;
266

267
const char* managed_oom_preference_to_string(ManagedOOMPreference a) _const_;
268
ManagedOOMPreference managed_oom_preference_from_string(const char *s) _pure_;
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc