• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 25084703852

28 Apr 2026 09:34PM UTC coverage: 71.849% (-0.02%) from 71.865%
25084703852

push

github

daandemeyer
ci: Reduce noise from claude-review workflow

322528 of 448894 relevant lines covered (71.85%)

1177215.84 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

89.77
/src/basic/uid-range.c
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#include <sched.h>
4
#include <string.h>
5

6
#include "alloc-util.h"
7
#include "errno-util.h"
8
#include "fd-util.h"
9
#include "format-util.h"
10
#include "namespace-util.h"
11
#include "path-util.h"
12
#include "pidref.h"
13
#include "process-util.h"
14
#include "sort-util.h"
15
#include "stat-util.h"
16
#include "uid-range.h"
17
#include "user-util.h"
18

19
UIDRange *uid_range_free(UIDRange *range) {
        /* Frees a UIDRange and its entry array. Accepts NULL; always returns NULL,
         * so callers can write "r = uid_range_free(r);". */

        if (range) {
                free(range->entries);
                free(range);
        }

        return NULL;
}
26

27
/* Returns true if the two entries overlap *or* are directly adjacent, i.e. if they can be merged
 * into a single entry. (Note the non-strict comparisons: a->start == b->start + b->nr counts as
 * "intersecting" on purpose, so that coalescing joins touching ranges.) Callers guarantee that
 * start + nr cannot overflow, see uid_range_add_internal(). */
static bool uid_range_entry_intersect(const UIDRangeEntry *a, const UIDRangeEntry *b) {
        assert(a);
        assert(b);

        return a->start <= b->start + b->nr && a->start + a->nr >= b->start;
}
33

34
static int uid_range_entry_compare(const UIDRangeEntry *a, const UIDRangeEntry *b) {
298✔
35
        int r;
298✔
36

37
        assert(a);
298✔
38
        assert(b);
298✔
39

40
        r = CMP(a->start, b->start);
298✔
41
        if (r != 0)
141✔
42
                return r;
278✔
43

44
        return CMP(a->nr, b->nr);
20✔
45
}
46

47
/* Sorts the entries and merges all overlapping or adjacent ones in place, so that afterwards the
 * entries are disjoint, non-adjacent and sorted ascending by start (possibly with a reduced entry
 * count). Never allocates, hence cannot fail. */
static void uid_range_coalesce(UIDRange *range) {
        assert(range);

        if (range->n_entries <= 0)
                return;

        typesafe_qsort(range->entries, range->n_entries, uid_range_entry_compare);

        for (size_t i = 0; i < range->n_entries; i++) {
                UIDRangeEntry *x = range->entries + i;

                for (size_t j = i + 1; j < range->n_entries; j++) {
                        UIDRangeEntry *y = range->entries + j;
                        uid_t begin, end;

                        /* Entries are sorted: once we find one that doesn't touch x, no later one
                         * will either. */
                        if (!uid_range_entry_intersect(x, y))
                                break;

                        begin = MIN(x->start, y->start);

                        /* Silence static analyzers, overflow is prevented by uid_range_add_internal() */
                        assert(x->start <= UINT32_MAX - x->nr);
                        assert(y->start <= UINT32_MAX - y->nr);
                        end = MAX(x->start + x->nr, y->start + y->nr);

                        /* Absorb y into x... */
                        x->start = begin;
                        x->nr = end - begin;

                        /* ...and drop y by shifting the array tail down by one slot. */
                        if (range->n_entries > j + 1)
                                memmove(y, y + 1, sizeof(UIDRangeEntry) * (range->n_entries - j - 1));

                        /* Silence static analyzers, n_entries > 0 since j < n_entries holds in the loop condition */
                        assert(range->n_entries > 0);
                        range->n_entries--;

                        /* Silence static analyzers, j cannot be 0 here since it starts at i + 1, i.e. >= 1 */
                        assert(j > 0);
                        j--; /* re-examine index j, which now holds the element we just shifted down */
                }
        }
}
88

89
/* Adds the UID range [start, start + nr) to *range, allocating the UIDRange object first if
 * *range is still NULL. If 'coalesce' is true the entries are sorted and merged afterwards.
 * Returns 0 on success (nr == 0 is a NOP), -ERANGE if the range would exceed the 32bit UID space,
 * -ENOMEM on allocation failure. On failure a pre-existing *range is left in place (though its
 * entry array may already have been grown). */
int uid_range_add_internal(UIDRange **range, uid_t start, uid_t nr, bool coalesce) {
        _cleanup_(uid_range_freep) UIDRange *range_new = NULL;
        UIDRange *p;

        assert(range);

        if (nr <= 0)
                return 0;

        if (start > UINT32_MAX - nr) /* overflow check */
                return -ERANGE;

        if (*range)
                p = *range;
        else {
                /* No object yet? Allocate one; owned by us (via the cleanup handler) until we hand
                 * it over to the caller below. */
                range_new = new0(UIDRange, 1);
                if (!range_new)
                        return -ENOMEM;

                p = range_new;
        }

        if (!GREEDY_REALLOC(p->entries, p->n_entries + 1))
                return -ENOMEM;

        p->entries[p->n_entries++] = (UIDRangeEntry) {
                .start = start,
                .nr = nr,
        };

        if (coalesce)
                uid_range_coalesce(p);

        /* Success: transfer ownership of a freshly allocated object to the caller. */
        TAKE_PTR(range_new);
        *range = p;

        return 0;
}
127

128
/* Parses a UID range in string form (e.g. "1000-1999" or a single UID) and adds it to *range.
 * Returns 0 on success, a negative errno-style error on parse or allocation failure. */
int uid_range_add_str_full(UIDRange **range, const char *s, bool coalesce) {
        uid_t lo, hi;
        int r;

        assert(range);
        assert(s);

        r = parse_uid_range(s, &lo, &hi);
        if (r < 0)
                return r;

        /* parse_uid_range() returns an inclusive [lo, hi] interval, convert to start + count. */
        return uid_range_add_internal(range, lo, hi - lo + 1, coalesce);
}
141

142
/* Finds the highest UID contained in the range that is strictly below *uid and stores it back
 * into *uid. Returns 1 on success, -EBUSY if no lower UID is available. Assumes the entries are
 * sorted ascending (as maintained by uid_range_coalesce() — TODO confirm all callers coalesce). */
int uid_range_next_lower(const UIDRange *range, uid_t *uid) {
        uid_t closest = UID_INVALID, candidate;

        assert(range);
        assert(uid);

        if (*uid == 0)
                return -EBUSY;

        candidate = *uid - 1;

        for (size_t i = 0; i < range->n_entries; i++) {
                uid_t begin, end;

                begin = range->entries[i].start;
                end = range->entries[i].start + range->entries[i].nr - 1; /* inclusive upper bound */

                /* candidate falls inside this entry? Then it is the answer. */
                if (candidate >= begin && candidate <= end) {
                        *uid = candidate;
                        return 1;
                }

                /* Entry lies entirely below the candidate. Since entries are sorted ascending, the
                 * last such 'end' we record is the closest UID below the candidate. */
                if (end < candidate)
                        closest = end;
        }

        if (closest == UID_INVALID)
                return -EBUSY;

        *uid = closest;
        return 1;
}
174

175
bool uid_range_covers(const UIDRange *range, uid_t start, uid_t nr) {
237✔
176
        if (nr == 0) /* empty range? always covered... */
237✔
177
                return true;
178

179
        if (start > UINT32_MAX - nr) /* range overflows? definitely not covered... */
236✔
180
                return false;
181

182
        if (!range)
233✔
183
                return false;
184

185
        FOREACH_ARRAY(i, range->entries, range->n_entries)
249✔
186
                if (start >= i->start &&
237✔
187
                    start + nr <= i->start + i->nr)
231✔
188
                        return true;
189

190
        return false;
191
}
192

193
/* Reads one line of a /proc/…/uid_map (or gid_map) style file, i.e. three whitespace-separated
 * numbers "base shift range". Any of the return parameters may be NULL if the caller doesn't care.
 * Returns 0 on success, -ENOMSG (or the errno from the stream) at EOF, -EBADMSG on a malformed
 * line or an empty range. */
int uid_map_read_one(FILE *f, uid_t *ret_base, uid_t *ret_shift, uid_t *ret_range) {
        uid_t uid_base, uid_shift, uid_range;
        int r;

        assert(f);

        errno = 0; /* so that errno_or_else() below sees a clean slate */
        r = fscanf(f, UID_FMT " " UID_FMT " " UID_FMT "\n", &uid_base, &uid_shift, &uid_range);
        if (r == EOF)
                return errno_or_else(ENOMSG);
        assert(r >= 0); /* fscanf() returns either EOF (< 0) or the number of items matched */
        if (r != 3) /* partial match → malformed line */
                return -EBADMSG;
        if (uid_range <= 0) /* the kernel never writes empty ranges; reject them */
                return -EBADMSG;

        if (ret_base)
                *ret_base = uid_base;
        if (ret_shift)
                *ret_shift = uid_shift;
        if (ret_range)
                *ret_range = uid_range;

        return 0;
}
218

219
unsigned uid_range_size(const UIDRange *range) {
7✔
220
        if (!range)
7✔
221
                return 0;
222

223
        unsigned n = 0;
6✔
224

225
        FOREACH_ARRAY(e, range->entries, range->n_entries)
16✔
226
                n += e->nr;
10✔
227

228
        return n;
229
}
230

231
bool uid_range_is_empty(const UIDRange *range) {
556✔
232

233
        if (!range)
556✔
234
                return true;
235

236
        FOREACH_ARRAY(e, range->entries, range->n_entries)
554✔
237
                if (e->nr > 0)
195✔
238
                        return false;
239

240
        return true;
241
}
242

243
/* Loads a uid_map/gid_map style file into a freshly allocated UIDRange and returns it in *ret.
 * Returns 0 on success; -EOPNOTSUPP/-ENOSYS when /proc/ files are missing (see below); other
 * negative errno-style errors on read or allocation failure. */
int uid_range_load_userns_full(const char *path, UIDRangeUsernsMode mode, bool coalesce, UIDRange **ret) {
        _cleanup_(uid_range_freep) UIDRange *range = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int r;

        /* If 'path' is NULL loads the UID range of the userns namespace we run. Otherwise load the data from
         * the specified file (which can be either uid_map or gid_map, in case caller needs to deal with GID
         * maps).
         *
         * To simplify things this will modify the passed array in case of later failure. */

        assert(mode >= 0);
        assert(mode < _UID_RANGE_USERNS_MODE_MAX);
        assert(ret);

        if (!path)
                path = IN_SET(mode, UID_RANGE_USERNS_INSIDE, UID_RANGE_USERNS_OUTSIDE) ? "/proc/self/uid_map" : "/proc/self/gid_map";

        f = fopen(path, "re");
        if (!f) {
                r = -errno;

                /* Distinguish "kernel lacks userns support" (-EOPNOTSUPP) from "/proc/ not
                 * mounted" (-ENOSYS) when the map file is missing. */
                if (r == -ENOENT && path_startswith(path, "/proc/"))
                        return proc_mounted() > 0 ? -EOPNOTSUPP : -ENOSYS;

                return r;
        }

        range = new0(UIDRange, 1);
        if (!range)
                return -ENOMEM;

        for (;;) {
                uid_t uid_base, uid_shift, uid_range;

                r = uid_map_read_one(f, &uid_base, &uid_shift, &uid_range);
                if (r == -ENOMSG) /* clean EOF */
                        break;
                if (r < 0)
                        return r;

                /* The "inside" view is the 1st column of the map file, the "outside" view the 2nd. */
                r = uid_range_add_internal(
                                &range,
                                IN_SET(mode, UID_RANGE_USERNS_INSIDE, GID_RANGE_USERNS_INSIDE) ? uid_base : uid_shift,
                                uid_range,
                                /* coalesce= */ false);
                if (r < 0)
                        return r;
        }

        if (coalesce)
                uid_range_coalesce(range);

        *ret = TAKE_PTR(range);
        return 0;
}
299

300
/* Like uid_range_load_userns_full(), but loads the map of the user namespace referenced by
 * 'userns_fd'. If that happens to be our own namespace we read our own map directly; otherwise we
 * pin a process inside the namespace and read its /proc/PID/{uid,gid}_map. The pinned process is
 * SIGKILLed and reaped when we return (via the pidref cleanup handler). */
int uid_range_load_userns_by_fd_full(int userns_fd, UIDRangeUsernsMode mode, bool coalesce, UIDRange **ret) {
        _cleanup_(pidref_done_sigkill_wait) PidRef pidref = PIDREF_NULL;
        int r;

        assert(userns_fd >= 0);
        assert(mode >= 0);
        assert(mode < _UID_RANGE_USERNS_MODE_MAX);
        assert(ret);

        r = is_our_namespace(userns_fd, NAMESPACE_USER);
        if (r < 0)
                return r;
        if (r > 0) /* our own namespace → take the /proc/self/ shortcut */
                return uid_range_load_userns_full(/* path= */ NULL, mode, coalesce, ret);

        r = userns_enter_and_pin(userns_fd, &pidref);
        if (r < 0)
                return r;

        const char *p = procfs_file_alloca(
                        pidref.pid,
                        IN_SET(mode, UID_RANGE_USERNS_INSIDE, UID_RANGE_USERNS_OUTSIDE) ? "uid_map" : "gid_map");

        return uid_range_load_userns_full(p, mode, coalesce, ret);
}
325

326
bool uid_range_overlaps(const UIDRange *range, uid_t start, uid_t nr) {
×
327

328
        if (!range)
×
329
                return false;
330

331
        /* Avoid overflow */
332
        if (start > UINT32_MAX - nr)
×
333
                nr = UINT32_MAX - start;
×
334

335
        if (nr == 0)
×
336
                return false;
337

338
        FOREACH_ARRAY(entry, range->entries, range->n_entries)
×
339
                if (start < entry->start + entry->nr &&
×
340
                    start + nr >= entry->start)
×
341
                        return true;
342

343
        return false;
344
}
345

346
/* Restricts the range to the closed interval [min, max], trimming or dropping entries as
 * necessary. Operates in place and never allocates. Returns 0 on success, -EINVAL if min > max. */
int uid_range_clip(UIDRange *range, uid_t min, uid_t max) {
        assert(range);

        if (min > max)
                return -EINVAL;

        size_t t = 0; /* write index: entries [0, t) are the already-clipped survivors */
        FOREACH_ARRAY(e, range->entries, range->n_entries) {
                uid_t entry_end = e->start + e->nr; /* one past the last UID in entry */

                /* Skip entries completely outside [min, max] */
                if (entry_end <= min || e->start > max)
                        continue;

                /* Trim the entry to fit within [min, max] */
                uid_t new_start = MAX(e->start, min);
                /* entry_end is exclusive, avoid overflow when max == UINT32_MAX */
                uid_t new_end = entry_end <= max ? entry_end : max + 1;
                assert(new_end > new_start);

                range->entries[t++] = (UIDRangeEntry) {
                        .start = new_start,
                        .nr = new_end - new_start,
                };
        }

        range->n_entries = t;

        return 0;
}
376

377
/* Splits the range into fixed-size buckets. Returns 0 on success, -ENOMEM if the entry array
 * needs to grow and allocation fails. */
int uid_range_partition(UIDRange *range, uid_t size) {
        assert(range);
        assert(size > 0);

        /* Partitions the UID range entries into buckets of the given size. Any entry larger than the given
         * size will be partitioned into multiple entries, each of the given size. Any leftover UIDs in the
         * entry are dropped. Any entries smaller than the given size are also dropped. */

        /* Count how many entries we'll need after partitioning */
        size_t n_new_entries = 0;
        FOREACH_ARRAY(e, range->entries, range->n_entries)
                n_new_entries += e->nr / size;

        if (n_new_entries == 0) {
                range->n_entries = 0;
                return 0;
        }

        if (n_new_entries > range->n_entries && !GREEDY_REALLOC(range->entries, n_new_entries))
                return -ENOMEM;

        /* Work backwards to avoid overwriting entries we still need to read */
        size_t t = n_new_entries; /* write index, counting down; ends at 0 once all buckets are placed */
        for (size_t i = range->n_entries; i > 0; i--) {
                UIDRangeEntry *e = range->entries + i - 1;
                unsigned n_parts = e->nr / size;

                /* Emit this entry's buckets highest-first, so the final array stays sorted if the
                 * input was sorted. */
                for (unsigned j = n_parts; j > 0; j--)
                        range->entries[--t] = (UIDRangeEntry) {
                                .start = e->start + (j - 1) * size,
                                .nr = size,
                        };
        }

        range->n_entries = n_new_entries;

        return 0;
}
415

416
/* Duplicates the given range into *ret. A NULL range duplicates to NULL. Returns 0 on success,
 * -ENOMEM on allocation failure. */
int uid_range_copy(const UIDRange *range, UIDRange **ret) {
        assert(ret);

        if (!range) {
                *ret = NULL;
                return 0;
        }

        _cleanup_(uid_range_freep) UIDRange *c = new0(UIDRange, 1);
        if (!c)
                return -ENOMEM;

        if (range->n_entries > 0) {
                /* Deep-copy the entry array */
                c->entries = newdup(UIDRangeEntry, range->entries, range->n_entries);
                if (!c->entries)
                        return -ENOMEM;

                c->n_entries = range->n_entries;
        }

        *ret = TAKE_PTR(c);
        return 0;
}
439

440
/* Removes the UIDs [start, start + size) from the range, trimming, splitting or dropping entries
 * as necessary. Returns 0 on success, -ENOMEM if splitting an entry requires growing the array.
 * NOTE(review): 'start + size' may wrap for start close to UINT32_MAX — presumably callers only
 * pass ranges validated via uid_range_add_internal(); confirm. */
int uid_range_remove(UIDRange *range, uid_t start, uid_t size) {
        assert(range);

        if (size == 0)
                return 0;

        uid_t end = start + size; /* one past the last UID to remove */

        for (size_t i = 0; i < range->n_entries; i++) {
                UIDRangeEntry *e = range->entries + i;
                uid_t entry_end = e->start + e->nr;

                /* No overlap */
                if (entry_end <= start || e->start >= end)
                        continue;

                /* Check if this removal splits the entry into two parts */
                if (e->start < start && entry_end > end) {
                        /* Need to split: grow the array first */
                        if (!GREEDY_REALLOC(range->entries, range->n_entries + 1))
                                return -ENOMEM;

                        /* Re-fetch pointer after potential realloc */
                        e = range->entries + i;
                        entry_end = e->start + e->nr;

                        /* Shift everything after this entry to make room */
                        memmove(range->entries + i + 2, range->entries + i + 1,
                                (range->n_entries - i - 1) * sizeof(UIDRangeEntry));
                        range->n_entries++;

                        /* First part: before the removed range */
                        range->entries[i] = (UIDRangeEntry) {
                                .start = e->start,
                                .nr = start - e->start,
                        };

                        /* Second part: after the removed range */
                        range->entries[i + 1] = (UIDRangeEntry) {
                                .start = end,
                                .nr = entry_end - end,
                        };

                        /* Skip the newly inserted entry */
                        i++;
                        continue;
                }

                /* Removal covers the entire entry */
                if (start <= e->start && end >= entry_end) {
                        memmove(e, e + 1, (range->n_entries - i - 1) * sizeof(UIDRangeEntry));
                        range->n_entries--;
                        i--; /* the loop increment makes us re-examine index i, which now holds the next entry */
                        continue;
                }

                /* Removal trims the start of the entry */
                if (start <= e->start && end > e->start) {
                        e->nr = entry_end - end;
                        e->start = end;
                        continue;
                }

                /* Removal trims the end of the entry */
                if (start < entry_end && end >= entry_end) {
                        e->nr = start - e->start;
                        continue;
                }
        }

        return 0;
}
512

513
/* Maps 'uid' through a pair of parallel UID ranges. Returns 0 on success (storing the mapped UID
 * in *ret), -ESRCH if 'uid' is not contained in the 'outside' range. Both ranges must have the
 * same number of entries with pairwise equal lengths (asserted below). */
int uid_range_translate(const UIDRange *outside, const UIDRange *inside, uid_t uid, uid_t *ret) {
        assert(uid_range_entries(outside) == uid_range_entries(inside));
        assert(ret);

        /* Given two UID ranges that represent the outside UID range of a user namespace (the 2nd and 3rd
         * columns in /proc/xxx/uid_map) and the inside UID range of a user namespace (the 1st and 3rd
         * columns in /proc/xxx/uid_map), translates the given UID from the outside range to the inside
         * range. For example, given the following UID range:
         *
         * 0 1000 1
         *
         * calling uid_range_translate(outside, inside, 1000) will return 0 as the output UID. Alternatively,
         * calling uid_range_translate(inside, outside, 0) will return 1000 as the output UID.
         */

        /* Verify the two ranges are structurally parallel, entry by entry. */
        for (size_t i = 0; i < uid_range_entries(outside); i++)
                assert(outside->entries[i].nr == inside->entries[i].nr);

        for (size_t i = 0; i < uid_range_entries(outside); i++) {
                const UIDRangeEntry *e = outside->entries + i;

                if (uid < e->start || uid >= e->start + e->nr)
                        continue;

                /* Found the containing entry: apply the same offset within the paired entry. */
                *ret = inside->entries[i].start + uid - e->start;
                return 0;
        }

        return -ESRCH;
}
543

544
/* Translates 'uid' from the outside view of the user namespace referenced by 'userns_fd' to the
 * inside view, by loading both halves of its map and mapping through them. */
int uid_range_translate_userns_fd(int userns_fd, UIDRangeUsernsMode mode, uid_t uid, uid_t *ret) {
        int r;

        assert(userns_fd >= 0);
        assert(IN_SET(mode, UID_RANGE_USERNS_OUTSIDE, GID_RANGE_USERNS_OUTSIDE));

        _cleanup_(uid_range_freep) UIDRange *outside = NULL, *inside = NULL;

        r = uid_range_load_userns_by_fd_full(userns_fd, mode, /* coalesce= */ false, &outside);
        if (r < 0)
                return r;

        /* Now load the matching inside view of the same kind of map (UID vs. GID). */
        UIDRangeUsernsMode inside_mode =
                mode == UID_RANGE_USERNS_OUTSIDE ? UID_RANGE_USERNS_INSIDE : GID_RANGE_USERNS_INSIDE;

        r = uid_range_load_userns_by_fd_full(userns_fd, inside_mode, /* coalesce= */ false, &inside);
        if (r < 0)
                return r;

        return uid_range_translate(outside, inside, uid, ret);
}
564

565
bool uid_range_equal(const UIDRange *a, const UIDRange *b) {
6✔
566
        if (a == b)
6✔
567
                return true;
568

569
        if (!a || !b)
6✔
570
                return false;
571

572
        if (a->n_entries != b->n_entries)
5✔
573
                return false;
574

575
        for (size_t i = 0; i < a->n_entries; i++) {
8✔
576
                if (a->entries[i].start != b->entries[i].start)
5✔
577
                        return false;
578
                if (a->entries[i].nr != b->entries[i].nr)
5✔
579
                        return false;
580
        }
581

582
        return true;
583
}
584

585
/* Searches the uid_map (or gid_map) of the given process for the line that maps UID/GID 0 inside
 * the namespace, returning the corresponding outside UID in *ret. Returns 0 on success, -ENOMSG
 * (via uid_map_read_one()) if no such line exists, -ENOPKG/-ENOSYS if the map file is missing
 * while /proc/ is (not) mounted, other negative errno-style errors otherwise. */
int uid_map_search_root(pid_t pid, UIDRangeUsernsMode mode, uid_t *ret) {
        int r;

        assert(pid_is_valid(pid));
        assert(IN_SET(mode, UID_RANGE_USERNS_OUTSIDE, GID_RANGE_USERNS_OUTSIDE));

        const char *p = procfs_file_alloca(pid, mode == UID_RANGE_USERNS_OUTSIDE ? "uid_map" : "gid_map");
        _cleanup_fclose_ FILE *f = fopen(p, "re");
        if (!f) {
                if (errno != ENOENT)
                        return -errno;

                r = proc_mounted();
                if (r < 0)
                        return -ENOENT; /* original error, if we can't determine /proc/ state */

                return r ? -ENOPKG : -ENOSYS;
        }

        /* Scan the map line by line; uid_map_read_one() terminates the loop with -ENOMSG at EOF. */
        for (;;) {
                uid_t uid_base = UID_INVALID, uid_shift = UID_INVALID;

                r = uid_map_read_one(f, &uid_base, &uid_shift, /* ret_range= */ NULL);
                if (r < 0)
                        return r;

                if (uid_base == 0) {
                        if (ret)
                                *ret = uid_shift;
                        return 0;
                }
        }
}
618

619
uid_t uid_range_base(const UIDRange *range) {
8✔
620

621
        /* Returns the lowest UID in the range (notw that elements are sorted, hence we just need to look at
622
         * the first one that is populated. */
623

624
        if (uid_range_is_empty(range))
8✔
625
                return UID_INVALID;
626

627
        FOREACH_ARRAY(e, range->entries, range->n_entries)
8✔
628
                if (e->nr > 0)
8✔
629
                        return e->start;
8✔
630

631
        return UID_INVALID;
632
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc