• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

systemd / systemd / 14458263136

14 Apr 2025 06:41PM UTC coverage: 72.031% (+0.001%) from 72.03%
14458263136

push

github

yuwata
test: drop error conditions for old kernels (<3.2)

Now our baseline on the kernel is 5.4.

2 of 4 new or added lines in 1 file covered. (50.0%)

1428 existing lines in 44 files now uncovered.

297292 of 412726 relevant lines covered (72.03%)

683119.61 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

67.6
/src/shared/loop-util.c
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#if HAVE_VALGRIND_MEMCHECK_H
4
#include <valgrind/memcheck.h>
5
#endif
6

7
#include <errno.h>
8
#include <fcntl.h>
9
#include <linux/blkpg.h>
10
#include <linux/loop.h>
11
#include <sys/file.h>
12
#include <sys/ioctl.h>
13
#include <unistd.h>
14

15
#include "sd-device.h"
16

17
#include "alloc-util.h"
18
#include "blockdev-util.h"
19
#include "data-fd-util.h"
20
#include "device-util.h"
21
#include "devnum-util.h"
22
#include "dissect-image.h"
23
#include "env-util.h"
24
#include "errno-util.h"
25
#include "fd-util.h"
26
#include "fs-util.h"
27
#include "fileio.h"
28
#include "loop-util.h"
29
#include "missing_fs.h"
30
#include "parse-util.h"
31
#include "path-util.h"
32
#include "random-util.h"
33
#include "stat-util.h"
34
#include "stdio-util.h"
35
#include "string-util.h"
36
#include "tmpfile-util.h"
37

38
/* _cleanup_ handler: detach the backing file from a loop device fd and close it.
 * Errors from both operations are deliberately ignored (best-effort teardown). */
static void cleanup_clear_loop_close(int *fd) {
        if (*fd >= 0) {
                (void) ioctl(*fd, LOOP_CLR_FD);
                (void) safe_close(*fd);
        }
}
45

46
static int loop_is_bound(int fd) {
1,902✔
47
        struct loop_info64 info;
1,902✔
48

49
        if (ioctl(ASSERT_FD(fd), LOOP_GET_STATUS64, &info) < 0) {
1,902✔
50
                if (errno == ENXIO)
1,902✔
51
                        return false; /* not bound! */
1,902✔
52

53
                return -errno;
×
54
        }
55

56
        return true; /* bound! */
57
}
58

59
static int open_lock_fd(int primary_fd, int operation) {
2,019✔
60
        _cleanup_close_ int lock_fd = -EBADF;
2,019✔
61

62
        assert(IN_SET(operation & ~LOCK_NB, LOCK_SH, LOCK_EX));
2,019✔
63

64
        lock_fd = fd_reopen(ASSERT_FD(primary_fd), O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
2,019✔
65
        if (lock_fd < 0)
2,019✔
66
                return lock_fd;
67

68
        if (flock(lock_fd, operation) < 0)
2,019✔
69
                return -errno;
×
70

71
        return TAKE_FD(lock_fd);
72
}
73

74
/* If direct IO was requested in 'c', verify that the kernel actually enabled it
 * on the configured loop device. Returns 0 on success (or when direct IO was not
 * requested), -ENOANO (synthetic) when the kernel silently dropped the flag, so
 * that callers can retry without LO_FLAGS_DIRECT_IO, or a negative errno on
 * ioctl failure. */
static int loop_configure_verify_direct_io(int fd, const struct loop_config *c) {
        assert(fd >= 0);
        assert(c);

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                struct loop_info64 info;

                if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0)
                        return log_debug_errno(errno, "Failed to issue LOOP_GET_STATUS64: %m");

#if HAVE_VALGRIND_MEMCHECK_H
                /* The kernel fills 'info', but valgrind cannot see that; mark it defined. */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

                /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
                 * device to the logical block size of the underlying file system. Since there was no nice
                 * way to query the value, we are not bothering to do this however. On newer kernels the
                 * block size is propagated automatically and does not require intervention from us. We'll
                 * check here if enabling direct IO worked, to make this easily debuggable however.
                 *
                 * (Should anyone really care and actually wants direct IO on old kernels: it might be worth
                 * enabling direct IO with iteratively larger block sizes until it eventually works.)
                 *
                 * On older kernels (e.g.: 5.10) when this is attempted on a file stored on a dm-crypt
                 * backed partition the kernel will start returning I/O errors when accessing the mounted
                 * loop device, so return a recognizable error that causes the operation to be started
                 * from scratch without the LO_FLAGS_DIRECT_IO flag. */
                if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
                        return log_debug_errno(
                                        SYNTHETIC_ERRNO(ENOANO),
                                        "Could not enable direct IO mode, retrying in buffered IO mode.");
        }

        return 0;
}
109

110
/* Verifies that a LOOP_CONFIGURE ioctl actually applied everything requested in
 * 'c'. Various kernels (notably vanilla 5.8) accept LOOP_CONFIGURE but silently
 * ignore some fields. Returns > 0 if everything was honoured, 0 if the kernel's
 * LOOP_CONFIGURE is broken (caller should fall back to LOOP_SET_STATUS64),
 * or a negative errno on failure. */
static int loop_configure_verify(int fd, const struct loop_config *c) {
        bool broken = false;
        int r;

        assert(fd >= 0);
        assert(c);

        if (c->block_size != 0) {
                uint32_t ssz;

                r = blockdev_get_sector_size(fd, &ssz);
                if (r < 0)
                        return r;

                if (ssz != c->block_size) {
                        log_debug("LOOP_CONFIGURE didn't honour requested block size %" PRIu32 ", got %" PRIu32 " instead. Ignoring.", c->block_size, ssz);
                        broken = true;
                }
        }

        if (c->info.lo_sizelimit != 0) {
                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the
                 * block device. If it's used, let's immediately check if it had the desired
                 * effect hence. And if not use classic LOOP_SET_STATUS64. */
                uint64_t z;

                r = blockdev_get_device_size(fd, &z);
                if (r < 0)
                        return r;

                if (z != c->info.lo_sizelimit) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .info.lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag
                 * into the block device. Let's hence verify if things work correctly here
                 * before returning. */

                r = blockdev_partscan_enabled_fd(fd);
                if (r < 0)
                        return r;
                if (r == 0) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        /* Direct IO verification failures are fatal here (-ENOANO), unlike the
         * checks above which merely mark the configure path as broken. */
        r = loop_configure_verify_direct_io(fd, c);
        if (r < 0)
                return r;

        return !broken;
}
166

167
/* Fallback configuration path for kernels whose LOOP_CONFIGURE is unsupported or
 * broken: applies 'c' via the older LOOP_SET_STATUS64 / LOOP_SET_BLOCK_SIZE /
 * LOOP_SET_DIRECT_IO ioctls. The backing fd must already have been attached with
 * LOOP_SET_FD. Returns 0 on success, a negative errno on failure. */
static int loop_configure_fallback(int fd, const struct loop_config *c) {
        struct loop_info64 info_copy;
        int r;

        assert(fd >= 0);
        assert(c);

        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
         * them out. */
        info_copy = c->info;
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the info.lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait 50ms on
         * EAGAIN and retry. The former should be an efficient mechanism to avoid we have to wait 50ms
         * needlessly if we are just racing against udev. The latter is protection against all other cases,
         * i.e. peers that do not take the BSD lock. */

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
                        break;

                if (errno != EAGAIN || ++n_attempts >= 64)
                        return log_debug_errno(errno, "Failed to configure loopback block device: %m");

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see */
                (void) usleep_safe(UINT64_C(10) * USEC_PER_MSEC +
                              random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        /* If a block size is requested then try to configure it. If that doesn't work, ignore errors, but
         * afterwards, let's validate what is in effect, and if it doesn't match what we want, fail */
        if (c->block_size != 0) {
                uint32_t ssz;

                if (ioctl(fd, LOOP_SET_BLOCK_SIZE, (unsigned long) c->block_size) < 0)
                        log_debug_errno(errno, "Failed to set sector size, ignoring: %m");

                r = blockdev_get_sector_size(fd, &ssz);
                if (r < 0)
                        return log_debug_errno(r, "Failed to read sector size: %m");
                if (ssz != c->block_size)
                        return log_debug_errno(SYNTHETIC_ERRNO(EIO), "Sector size of loopback device doesn't match what we requested, refusing.");
        }

        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via an explicit ioctl. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO))
                if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
                        log_debug_errno(errno, "Failed to enable direct IO mode, ignoring: %m");

        /* Setting direct IO above is best-effort, but verify the outcome so callers can retry buffered. */
        return loop_configure_verify_direct_io(fd, c);
}
224

225
/* Opens /dev/loop<nr>, takes the BSD lock on it, attaches and configures the
 * backing file described by 'c', and on success returns a new LoopDevice in
 * *ret with the requested final lock state applied (lock_op).
 *
 * Error returns callers are expected to retry on (with a fresh device number):
 *   -EBUSY    device already bound, or LOOP_CONFIGURE proved broken mid-flight
 *   -EUCLEAN  stale partition devices were removed; try again
 *   -ENOANO   direct IO could not be enabled (propagated from verification)
 * Other negative errnos are fatal. */
static int loop_configure(
                int nr,
                int open_flags,
                int lock_op,
                const struct loop_config *c,
                LoopDevice **ret) {

        /* Once LOOP_CONFIGURE is detected as unsupported/broken, remember it
         * process-wide and use the LOOP_SET_FD + fallback path from then on. */
        static bool loop_configure_broken = false;

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -EBADF; /* This must be declared before lock_fd. */
        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL;
        uint64_t diskseq = 0;
        dev_t devno;
        int r;

        assert(nr >= 0);
        assert(c);
        assert(ret);

        if (asprintf(&node, "/dev/loop%i", nr) < 0)
                return log_oom_debug();

        r = sd_device_new_from_devname(&dev, node);
        if (r < 0)
                return log_debug_errno(r, "Failed to create sd_device object for \"%s\": %m", node);

        r = sd_device_get_devnum(dev, &devno);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devnum: %m");

        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return log_device_debug_errno(dev, fd, "Failed to open device: %m");

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
         * automatically release the lock, after we are done. */
        lock_fd = open_lock_fd(fd, LOCK_EX);
        if (lock_fd < 0)
                return log_device_debug_errno(dev, lock_fd, "Failed to acquire lock: %m");

        log_device_debug(dev, "Acquired exclusive lock.");

        /* Let's see if backing file is really unattached. Someone may already attach a backing file without
         * taking BSD lock. */
        r = loop_is_bound(fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to check if the loopback block device is bound: %m");
        if (r > 0)
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EBUSY),
                                              "The loopback block device is already bound, ignoring.");

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated for it. Let's then
         * manually remove the partitions via BLKPG, and tell the caller we did that via EUCLEAN, so they try
         * again. */
        r = block_device_remove_all_partitions(dev, fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to remove partitions on the loopback block device: %m");
        if (r > 0)
                /* Removed all partitions. Let's report this to the caller, to try again, and count this as
                 * an attempt. */
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EUCLEAN),
                                              "Removed partitions on the loopback block device.");

        if (!loop_configure_broken) {
                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other errors. */
                        if (!ERRNO_IS_IOCTL_NOT_SUPPORTED(errno))
                                return log_device_debug_errno(dev, errno, "ioctl(LOOP_CONFIGURE) failed: %m");

                        loop_configure_broken = true;
                } else {
                        /* Configured successfully; from here on cleanup must also detach (LOOP_CLR_FD). */
                        loop_with_fd = TAKE_FD(fd);

                        r = loop_configure_verify(loop_with_fd, c);
                        if (r < 0)
                                return log_device_debug_errno(dev, r, "Failed to verify if loopback block device is correctly configured: %m");
                        if (r == 0) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                loop_configure_broken = true;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right-away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                return -EBUSY;
                        }
                }
        }

        if (loop_configure_broken) {
                if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                        return log_device_debug_errno(dev, errno, "ioctl(LOOP_SET_FD) failed: %m");

                loop_with_fd = TAKE_FD(fd);

                r = loop_configure_fallback(loop_with_fd, c);
                if (r < 0)
                        return r;
        }

        r = fd_get_diskseq(loop_with_fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return log_device_debug_errno(dev, r, "Failed to get diskseq: %m");

        /* We hold LOCK_EX from above; adjust to whatever the caller asked for. */
        switch (lock_op & ~LOCK_NB) {
        case LOCK_EX: /* Already in effect */
                break;
        case LOCK_SH: /* Downgrade */
                if (flock(lock_fd, lock_op) < 0)
                        return log_device_debug_errno(dev, errno, "Failed to downgrade lock level: %m");
                break;
        case LOCK_UN: /* Release */
                lock_fd = safe_close(lock_fd);
                break;
        default:
                assert_not_reached();
        }

        uint64_t device_size;
        r = blockdev_get_device_size(loop_with_fd, &device_size);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get loopback device size: %m");

        LoopDevice *d = new(LoopDevice, 1);
        if (!d)
                return log_oom_debug();

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(loop_with_fd),
                .lock_fd = TAKE_FD(lock_fd),
                .node = TAKE_PTR(node),
                .nr = nr,
                .devno = devno,
                .dev = TAKE_PTR(dev),
                .diskseq = diskseq,
                .sector_size = c->block_size,
                .device_size = device_size,
                .created = true,
        };

        *ret = TAKE_PTR(d);
        return 0;
}
380

381
/* Core implementation behind loop_device_make() and friends: wraps 'fd' (a
 * regular file or block device, possibly reopened with O_DIRECT) in a loopback
 * block device covering [offset, offset+size), with the given sector size and
 * loop flags, and returns the resulting LoopDevice in *ret.
 *
 * If 'fd' is already a block device and the whole of it is requested, no loop
 * device is created; the device is opened directly instead. 'path' may be NULL,
 * in which case the backing file path is resolved from the fd.
 *
 * sector_size semantics: 0 → default 512; UINT32_MAX → probe from the backing
 * block device or from a GPT label in the image. */
static int loop_device_make_internal(
                const char *path,
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(loop_device_unrefp) LoopDevice *d = NULL;
        _cleanup_close_ int reopened_fd = -EBADF, control = -EBADF;
        _cleanup_free_ char *backing_file = NULL;
        struct loop_config config;
        int r, f_flags;
        struct stat st;

        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(ASSERT_FD(fd), &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (offset == 0 && IN_SET(size, 0, UINT64_MAX))
                        /* If this is already a block device and we are supposed to cover the whole of it
                         * then store an fd to the original open device node — and do not actually create an
                         * unnecessary loopback device for it. */
                        return loop_device_open_from_fd(fd, open_flags, lock_op, ret);
        } else {
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        /* Record an absolute path to the backing file, for diagnostics and bookkeeping. */
        if (path) {
                r = path_make_absolute_cwd(path, &backing_file);
                if (r < 0)
                        return r;

                path_simplify(backing_file);
        } else {
                r = fd_get_path(fd, &backing_file);
                if (r < 0)
                        return r;
        }

        f_flags = fcntl(fd, F_GETFL);
        if (f_flags < 0)
                return -errno;

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
                 *
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT derived
                 * from that automatically. */

                reopened_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
                if (reopened_fd < 0) {
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
                                return log_debug_errno(reopened_fd, "Failed to reopen file descriptor without O_DIRECT: %m");

                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
                        log_debug_errno(reopened_fd, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
                } else
                        fd = reopened_fd; /* From now on, operate on our new O_DIRECT fd */
        }

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        if (sector_size == 0)
                /* If no sector size is specified, default to the classic default */
                sector_size = 512;
        else if (sector_size == UINT32_MAX) {

                if (S_ISBLK(st.st_mode))
                        /* If the sector size is specified as UINT32_MAX we'll propagate the sector size of
                         * the underlying block device. */
                        r = blockdev_get_sector_size(fd, &sector_size);
                else {
                        _cleanup_close_ int non_direct_io_fd = -EBADF;
                        int probe_fd;

                        assert(S_ISREG(st.st_mode));

                        /* If sector size is specified as UINT32_MAX, we'll try to probe the right sector
                         * size of the image in question by looking for the GPT partition header at various
                         * offsets. This of course only works if the image already has a disk label.
                         *
                         * So here we actually want to read the file contents ourselves. This is quite likely
                         * not going to work if we managed to enable O_DIRECT, because in such a case there
                         * are some pretty strict alignment requirements to offset, size and target, but
                         * there's no way to query what alignment specifically is actually required. Hence,
                         * let's avoid the mess, and temporarily open an fd without O_DIRECT for the probing
                         * logic. */

                        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO)) {
                                non_direct_io_fd = fd_reopen(fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
                                if (non_direct_io_fd < 0)
                                        return non_direct_io_fd;

                                probe_fd = non_direct_io_fd;
                        } else
                                probe_fd = fd;

                        r = probe_sector_size(probe_fd, &sector_size);
                }
                if (r < 0)
                        return r;
        }

        config = (struct loop_config) {
                .fd = fd,
                .block_size = sector_size,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                usec_t usec;
                int nr;

                /* Let's take a lock on the control device first. On a busy system, where many programs
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
                 * necessary, it just means it's less likely we have to iterate through this loop again and
                 * again if our own code races against our own code.
                 *
                 * Note: our lock protocol is to take the /dev/loop-control lock first, and the block device
                 * lock second, if both are taken, and always in this order, to avoid ABBA locking issues. */
                if (flock(control, LOCK_EX) < 0)
                        return -errno;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                r = loop_configure(nr, open_flags, lock_op, &config, &d);
                if (r >= 0)
                        break;

                /* -ENODEV or friends: Somebody might've gotten the same number from the kernel, used the
                 * device, and called LOOP_CTL_REMOVE on it. Let's retry with a new number.
                 * -EBUSY: a file descriptor is already bound to the loopback block device.
                 * -EUCLEAN: some left-over partition devices that were cleaned up.
                 * -ENOANO: we tried to use LO_FLAGS_DIRECT_IO but the kernel rejected it. */
                if (!ERRNO_IS_DEVICE_ABSENT(r) && !IN_SET(r, -EBUSY, -EUCLEAN, -ENOANO))
                        return r;

                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
                 * control device */
                if (flock(control, LOCK_UN) < 0)
                        return -errno;

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                /* If we failed to enable direct IO mode, let's retry without it. We restart the process as
                 * on some combination of kernel version and storage filesystem, the kernel is very unhappy
                 * about a failed DIRECT_IO enablement and throws I/O errors. */
                if (r == -ENOANO && FLAGS_SET(config.info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                        config.info.lo_flags &= ~LO_FLAGS_DIRECT_IO;
                        open_flags &= ~O_DIRECT;

                        int non_direct_io_fd = fd_reopen(config.fd, O_CLOEXEC|O_NONBLOCK|open_flags);
                        if (non_direct_io_fd < 0)
                                return log_debug_errno(
                                                non_direct_io_fd,
                                                "Failed to reopen file descriptor without O_DIRECT: %m");

                        safe_close(reopened_fd);
                        fd = config.fd = /* For cleanups */ reopened_fd = non_direct_io_fd;
                }

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                usec = random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
                                        UINT64_C(240) * USEC_PER_MSEC * n_attempts/64);
                log_debug("Trying again after %s.", FORMAT_TIMESPAN(usec, USEC_PER_MSEC));
                (void) usleep_safe(usec);
        }

        d->backing_file = TAKE_PTR(backing_file);
        d->backing_inode = st.st_ino;
        d->backing_devno = st.st_dev;

        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
                  d->node,
                  major(d->devno), minor(d->devno),
                  d->nr,
                  d->diskseq);

        *ret = TAKE_PTR(d);
        return 0;
}
591

592
/* Applies the $SYSTEMD_LOOP_DIRECT_IO environment override to the given loop
 * flags: direct IO is on by default and disabled only when the variable is
 * explicitly set to a false value. */
static uint32_t loop_flags_mangle(uint32_t loop_flags) {
        int enabled = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
        if (enabled < 0 && enabled != -ENXIO)
                log_debug_errno(enabled, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");

        /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off.
         * (Unset or unparsable → negative → treated as "on".) */
        return UPDATE_FLAG(loop_flags, LO_FLAGS_DIRECT_IO, enabled != 0);
}
601

602
int loop_device_make(
102✔
603
                int fd,
604
                int open_flags,
605
                uint64_t offset,
606
                uint64_t size,
607
                uint32_t sector_size,
608
                uint32_t loop_flags,
609
                int lock_op,
610
                LoopDevice **ret) {
611

612
        assert(fd >= 0);
102✔
613
        assert(ret);
102✔
614

615
        return loop_device_make_internal(
102✔
616
                        NULL,
617
                        fd,
618
                        open_flags,
619
                        offset,
620
                        size,
621
                        sector_size,
622
                        loop_flags_mangle(loop_flags),
623
                        lock_op,
624
                        ret);
625
}
626

627
/* Opens the file/block device at dir_fd+path and allocates a loop device backed by it.
 *
 * open_flags < 0 means "O_RDWR if possible, O_RDONLY as fallback"; otherwise it must be O_RDWR or
 * O_RDONLY exactly. The function attempts up to four opens: [O_DIRECT+rdwr, rdwr, O_DIRECT+ro, ro],
 * falling back on O_DIRECT failure (not all file systems support it) and on permission/read-only
 * failure (only when the caller allowed the read-only downgrade).
 *
 * Returns 0 on success with *ret set, negative errno-style error otherwise. */
int loop_device_make_by_path_at(
                int dir_fd,
                const char *path,
                int open_flags,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        int r, basic_flags, direct_flags, rdwr_flags;
        _cleanup_close_ int fd = -EBADF;
        bool direct = false;

        assert(dir_fd >= 0 || dir_fd == AT_FDCWD);
        assert(path);
        assert(ret);
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));

        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
         * read-only if we cannot. */

        loop_flags = loop_flags_mangle(loop_flags);

        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
         * non-O_DIRECT mode automatically, if it fails. */

        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;

        fd = xopenat(dir_fd, path, basic_flags|direct_flags|rdwr_flags);
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
                fd = xopenat(dir_fd, path, basic_flags|rdwr_flags);
        else
                /* Note: when the open failed and O_DIRECT wasn't requested this assigns false, which is
                 * harmless since 'direct' already is false. */
                direct = direct_flags != 0;
        if (fd < 0) {
                r = fd;

                /* Retry read-only? Only if the caller allowed it (open_flags < 0) and the failure looks
                 * like a write-permission problem. */
                if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS))
                        return r;

                fd = xopenat(dir_fd, path, basic_flags|direct_flags|O_RDONLY);
                if (fd < 0 && direct_flags != 0) /* as above */
                        fd = xopenat(dir_fd, path, basic_flags|O_RDONLY);
                else
                        direct = direct_flags != 0;
                if (fd < 0)
                        return r; /* Propagate original error */

                open_flags = O_RDONLY;
        } else if (open_flags < 0)
                open_flags = O_RDWR;

        log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
                  path,
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
                  direct ? "enabled" : "disabled",
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");

        return loop_device_make_internal(
                        dir_fd == AT_FDCWD ? path : NULL, /* record path only if resolvable later */
                        fd,
                        open_flags,
                        /* offset = */ 0,
                        /* size = */ 0,
                        sector_size,
                        loop_flags,
                        lock_op,
                        ret);
}
699

700
int loop_device_make_by_path_memory(
1✔
701
                const char *path,
702
                int open_flags,
703
                uint32_t sector_size,
704
                uint32_t loop_flags,
705
                int lock_op,
706
                LoopDevice **ret) {
707

708
        _cleanup_close_ int fd = -EBADF, mfd = -EBADF;
1✔
709
        _cleanup_free_ char *fn = NULL;
1✔
710
        struct stat st;
1✔
711
        int r;
1✔
712

713
        assert(path);
1✔
714
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
1✔
715
        assert(ret);
1✔
716

717
        loop_flags &= ~LO_FLAGS_DIRECT_IO; /* memfds don't support O_DIRECT, hence LO_FLAGS_DIRECT_IO can't be used either */
1✔
718

719
        fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|O_RDONLY);
1✔
720
        if (fd < 0)
1✔
UNCOV
721
                return -errno;
×
722

723
        if (fstat(fd, &st) < 0)
1✔
UNCOV
724
                return -errno;
×
725

726
        if (!S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode))
1✔
727
                return -EBADF;
728

729
        r = path_extract_filename(path, &fn);
1✔
730
        if (r < 0)
1✔
731
                return r;
732

733
        mfd = memfd_clone_fd(fd, fn, open_flags|O_CLOEXEC);
1✔
734
        if (mfd < 0)
1✔
735
                return mfd;
736

737
        fd = safe_close(fd); /* Let's close the original early */
1✔
738

739
        return loop_device_make_internal(NULL, mfd, open_flags, 0, 0, sector_size, loop_flags, lock_op, ret);
1✔
740
}
741

742
/* Destructor invoked from the unref logic: releases the lock fd, and — unless the device is foreign or
 * was relinquished — synchronously detaches the loop device and removes it via /dev/loop-control.
 * Always returns NULL so callers can write 'd = loop_device_free(d)'. The ordering below is part of the
 * locking protocol and must not be rearranged. */
static LoopDevice* loop_device_free(LoopDevice *d) {
        _cleanup_close_ int control = -EBADF;
        int r;

        if (!d)
                return NULL;

        /* Release any lock we might have on the device first. We want to open+lock the /dev/loop-control
         * device below, but our lock protocol says that if both control and block device locks are taken,
         * the control lock needs to be taken first, the block device lock second — in order to avoid ABBA
         * locking issues. Moreover, we want to issue LOOP_CLR_FD on the block device further down, and that
         * would fail if we had another fd open to the device. */
        d->lock_fd = safe_close(d->lock_fd);

        /* Let's open the control device early, and lock it, so that we can release our block device and
         * delete it in a synchronized fashion, and allocators won't needlessly see the block device as free
         * while we are about to delete it. */
        if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_debug_errno(errno, "Failed to open loop control device, cannot remove loop device '%s', ignoring: %m", strna(d->node));
                else if (flock(control, LOCK_EX) < 0)
                        log_debug_errno(errno, "Failed to lock loop control device, ignoring: %m");
        }

        /* Then let's release the loopback block device */
        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                        /* We are supposed to clear the loopback device. Let's do this synchronously: lock
                         * the device, manually remove all partitions and then clear it. This should ensure
                         * udev doesn't concurrently access the devices, and we can be reasonably sure that
                         * once we are done here the device is cleared and all its partition children
                         * removed. Note that we lock our primary device fd here (and not a separate locking
                         * fd, as we do during allocation, since we want to keep the lock all the way through
                         * the LOOP_CLR_FD, but that call would fail if we had more than one fd open.) */

                        if (flock(d->fd, LOCK_EX) < 0)
                                log_debug_errno(errno, "Failed to lock loop block device, ignoring: %m");

                        r = block_device_remove_all_partitions(d->dev, d->fd);
                        if (r < 0)
                                log_debug_errno(r, "Failed to remove partitions of loopback block device, ignoring: %m");

                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device, ignoring: %m");
                }

                safe_close(d->fd);
        }

        /* Now that the block device is released, let's also try to remove it. The kernel may briefly
         * report EBUSY right after LOOP_CLR_FD, hence retry with exponentially growing delays. */
        if (control >= 0) {
                useconds_t delay = 5 * USEC_PER_MSEC;  /* A total delay of 5090 ms between 39 attempts,
                                                        * (4*5 + 5*10 + 5*20 + … + 3*640) = 5090. */

                for (unsigned attempt = 1;; attempt++) {
                        if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                break;
                        if (errno != EBUSY || attempt > 38) {
                                log_debug_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                break;
                        }
                        if (attempt % 5 == 0) {
                                log_debug("Device is still busy after %u attempts…", attempt);
                                delay *= 2;
                        }

                        (void) usleep_safe(delay);
                }
        }

        free(d->node);
        sd_device_unref(d->dev);
        free(d->backing_file);
        return mfree(d);
}
822

823
/* Generates the public loop_device_ref()/loop_device_unref() pair on top of loop_device_free(). */
DEFINE_TRIVIAL_REF_UNREF_FUNC(LoopDevice, loop_device, loop_device_free);
5,960✔
824

825
/* Marks the loop device so that loop_device_free() will not detach/remove it. */
void loop_device_relinquish(LoopDevice *d) {
        assert(d);

        /* Don't attempt to clean up the loop device anymore from this point on. Leave the clean-ing up to the kernel
         * itself, using the loop device "auto-clear" logic we already turned on when creating the device. */

        d->relinquished = true;
}
141✔
833

834
/* Reverses loop_device_relinquish(): this object becomes responsible for clean-up again. */
void loop_device_unrelinquish(LoopDevice *d) {
        assert(d);
        d->relinquished = false;
}
20✔
838

839
/* Builds a LoopDevice object around an already-existing block device (which may or may not actually be a
 * loopback device — if LOOP_GET_STATUS64 fails, nr stays -1 and no backing info is recorded). The device
 * is reopened from the sd_device object, optionally locked per lock_op, and marked 'relinquished' since
 * we didn't create it and hence must not destroy it. Returns 0 with *ret set, or a negative errno. */
int loop_device_open(
                sd_device *dev,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL, *backing_file = NULL;
        dev_t devnum, backing_devno = 0;
        struct loop_info64 info;
        ino_t backing_inode = 0;
        uint64_t diskseq = 0;
        LoopDevice *d;
        const char *s;
        int r, nr = -1;

        assert(dev);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        /* Even if fd is provided through the argument in loop_device_open_from_fd(), we reopen the inode
         * here, instead of keeping just a dup() clone of it around, since we want to ensure that the
         * O_DIRECT flag of the handle we keep is off, we have our own file index, and have the right
         * read/write mode in effect. */
        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return fd;

        if ((lock_op & ~LOCK_NB) != LOCK_UN) {
                lock_fd = open_lock_fd(fd, lock_op);
                if (lock_fd < 0)
                        return lock_fd;
        }

        /* If this is really a loop device, collect its number and backing file identity. Failure here is
         * not fatal: we may have been handed some other kind of block device. */
        if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif
                nr = info.lo_number;

                if (sd_device_get_sysattr_value(dev, "loop/backing_file", &s) >= 0) {
                        backing_file = strdup(s);
                        if (!backing_file)
                                return -ENOMEM;
                }

                backing_devno = info.lo_device;
                backing_inode = info.lo_inode;
        }

        /* -EOPNOTSUPP is tolerated: presumably older kernels without diskseq support — diskseq stays 0 */
        r = fd_get_diskseq(fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        uint32_t sector_size;
        r = blockdev_get_sector_size(fd, &sector_size);
        if (r < 0)
                return r;

        uint64_t device_size;
        r = blockdev_get_device_size(fd, &device_size);
        if (r < 0)
                return r;

        r = sd_device_get_devnum(dev, &devnum);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &s);
        if (r < 0)
                return r;

        node = strdup(s);
        if (!node)
                return -ENOMEM;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(fd),
                .lock_fd = TAKE_FD(lock_fd),
                .nr = nr,
                .node = TAKE_PTR(node),
                .dev = sd_device_ref(dev),
                .backing_file = TAKE_PTR(backing_file),
                .backing_inode = backing_inode,
                .backing_devno = backing_devno,
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
                .devno = devnum,
                .diskseq = diskseq,
                .sector_size = sector_size,
                .device_size = device_size,
                .created = false,
        };

        *ret = d;
        return 0;
}
941

942
int loop_device_open_from_fd(
2✔
943
                int fd,
944
                int open_flags,
945
                int lock_op,
946
                LoopDevice **ret) {
947

948
        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
2✔
949
        int r;
2✔
950

951
        r = block_device_new_from_fd(ASSERT_FD(fd), 0, &dev);
2✔
952
        if (r < 0)
2✔
953
                return r;
954

955
        return loop_device_open(dev, open_flags, lock_op, ret);
2✔
956
}
957

UNCOV
958
int loop_device_open_from_path(
×
959
                const char *path,
960
                int open_flags,
961
                int lock_op,
962
                LoopDevice **ret) {
963

UNCOV
964
        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
×
UNCOV
965
        int r;
×
966

UNCOV
967
        assert(path);
×
968

UNCOV
969
        r = block_device_new_from_path(path, 0, &dev);
×
UNCOV
970
        if (r < 0)
×
971
                return r;
972

973
        return loop_device_open(dev, open_flags, lock_op, ret);
×
974
}
975

UNCOV
976
/* Resizes/moves the partition that partition_fd refers to, via BLKPG on the parent ("whole") block
 * device. offset/size of UINT64_MAX mean "leave unchanged". Returns -ENOTTY if the fd is not a
 * partition at all, 0 if nothing needs changing, otherwise the result of the BLKPG call. */
static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
        char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
        _cleanup_free_ char *buffer = NULL;
        uint64_t current_offset, current_size, partno;
        _cleanup_close_ int whole_fd = -EBADF;
        struct stat st;
        dev_t devno;
        int r;

        /* Resizes the partition the loopback device refer to (assuming it refers to one instead of an actual
         * loopback device), and changes the offset, if needed. This is a fancy wrapper around
         * BLKPG_RESIZE_PARTITION. */

        if (fstat(ASSERT_FD(partition_fd), &st) < 0)
                return -errno;

        assert(S_ISBLK(st.st_mode));

        /* Partition number, from /sys/dev/block/<maj>:<min>/partition */
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/partition", DEVNUM_FORMAT_VAL(st.st_rdev));
        r = read_one_line_file(sysfs, &buffer);
        if (r == -ENOENT) /* not a partition, cannot resize */
                return -ENOTTY;
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &partno);
        if (r < 0)
                return r;

        /* Current start offset, reported by sysfs in 512-byte sectors */
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/start", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &current_offset);
        if (r < 0)
                return r;
        if (current_offset > UINT64_MAX/512U) /* would overflow when converting to bytes */
                return -EINVAL;
        current_offset *= 512U;

        r = blockdev_get_device_size(partition_fd, &current_size);
        if (r < 0)
                return r;

        /* Nothing requested, or already in the requested state? Then we are done. */
        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (current_size == size && current_offset == offset)
                return 0;

        /* Devno of the parent ("whole") block device, from /sys/dev/block/<maj>:<min>/../dev */
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/../dev", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = parse_devnum(buffer, &devno);
        if (r < 0)
                return r;

        whole_fd = r = device_open_from_devnum(S_IFBLK, devno, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY, NULL);
        if (r < 0)
                return r;

        return block_device_resize_partition(
                        whole_fd,
                        partno,
                        offset == UINT64_MAX ? current_offset : offset,
                        size == UINT64_MAX ? current_size : size);
}
1046

1047
/* Adjusts the offset and/or size limit of the loop device (or, if this object actually wraps a
 * partition rather than a loop device, of that partition). UINT64_MAX for either parameter means
 * "leave unchanged". Returns 0 on success or when nothing needs changing, negative errno otherwise. */
int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;

        assert(d);
        assert(d->fd >= 0);

        /* Changes the offset/start of the loop device relative to the beginning of the underlying file or
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
         * try to adjust the partition offsets instead.
         *
         * If either offset or size is UINT64_MAX we won't change that parameter. */

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        /* Short-cut: nothing requested, or already in the requested state */
        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0;

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
}
1082

1083
/* Applies a BSD flock() operation to the loop device's dedicated lock fd. LOCK_UN closes the lock fd;
 * LOCK_SH/LOCK_EX (optionally ORed with LOCK_NB) acquire or convert the lock, opening the lock fd first
 * if none exists yet. Returns 0 on success, negative errno on failure. */
int loop_device_flock(LoopDevice *d, int operation) {
        assert(IN_SET(operation & ~LOCK_NB, LOCK_UN, LOCK_SH, LOCK_EX));
        assert(d);

        int requested = operation & ~LOCK_NB;

        /* Unlocking is implemented by simply closing the lock fd */
        if (requested == LOCK_UN) {
                d->lock_fd = safe_close(d->lock_fd);
                return 0;
        }

        /* No lock fd yet? Create one, locked right away with the requested mode. */
        if (d->lock_fd < 0) {
                d->lock_fd = open_lock_fd(ASSERT_FD(d->fd), operation);
                if (d->lock_fd < 0)
                        return d->lock_fd;

                return 0;
        }

        /* Otherwise convert the lock held on the existing fd */
        return RET_NERRNO(flock(d->lock_fd, operation));
}
1105

1106
/* Flushes pending writes on the loop device to the backing storage. loop_device_unref() syncs
 * implicitly too, but this explicit variant lets callers check the result. Returns 0 on success,
 * negative errno on failure. */
int loop_device_sync(LoopDevice *d) {
        assert(d);

        if (fsync(ASSERT_FD(d->fd)) < 0)
                return -errno;

        return 0;
}
1114

1115
/* Turns the kernel's LO_FLAGS_AUTOCLEAR logic for this loop device on or off. Returns 0 if the flag
 * already had the requested state, 1 if it was changed, negative errno on failure. */
int loop_device_set_autoclear(LoopDevice *d, bool autoclear) {
        struct loop_info64 info;

        assert(d);

        if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64, hence marks 'info' undefined even though the
         * kernel filled it in. Annotate it, as done at the other LOOP_GET_STATUS64 call sites in this
         * file. Remove this once valgrind learns the ioctl. */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        /* Already in the requested state? */
        if (autoclear == FLAGS_SET(info.lo_flags, LO_FLAGS_AUTOCLEAR))
                return 0;

        SET_FLAG(info.lo_flags, LO_FLAGS_AUTOCLEAR, autoclear);

        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
                return -errno;

        return 1;
}
1133

1134
/* Sets the free-form .lo_file_name field of the loop device; NULL clears it. Returns -ENOBUFS if the
 * name does not fit, 0 if it already had the requested value, 1 if it was changed, negative errno on
 * ioctl failure. */
int loop_device_set_filename(LoopDevice *d, const char *name) {
        struct loop_info64 info;

        assert(d);

        /* Sets the .lo_file_name of the loopback device. This is supposed to contain the path to the file
         * backing the block device, but is actually just a free-form string you can pass to the kernel. Most
         * tools that actually care for the backing file path use the sysfs attribute file loop/backing_file
         * which is a kernel generated string, subject to file system namespaces and such.
         *
         * .lo_file_name is useful since userspace can select it freely when creating a loopback block
         * device, and we can use it for /dev/disk/by-loop-ref/ symlinks, and similar, so that apps can
         * recognize their own loopback files. */

        if (name && strlen(name) >= sizeof(info.lo_file_name))
                return -ENOBUFS;

        if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64, hence marks 'info' undefined even though the
         * kernel filled it in. Annotate it, as done at the other LOOP_GET_STATUS64 call sites in this
         * file. Remove this once valgrind learns the ioctl. */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        /* Already set to the requested value (NULL counts as the empty string)? */
        if (strneq((char*) info.lo_file_name, strempty(name), sizeof(info.lo_file_name)))
                return 0;

        if (name) {
                strncpy((char*) info.lo_file_name, name, sizeof(info.lo_file_name)-1);
                info.lo_file_name[sizeof(info.lo_file_name)-1] = 0; /* ensure NUL termination */
        } else
                memzero(info.lo_file_name, sizeof(info.lo_file_name));

        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
                return -errno;

        return 1;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc