
systemd / systemd / 21460395012

29 Jan 2026 12:02AM UTC coverage: 72.47% (-0.3%) from 72.793%
push · github · yuwata
vmspawn: Add image format option to support qcow2

A QEMU qcow2 VM image can be internally sparse and compressed.
Support such images in vmspawn for both the main disk and any extra
disks.
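
For background on the change described above: a qcow2 image is easy to recognize by its header magic, the four bytes 'Q' 'F' 'I' 0xfb at offset 0. The snippet below is an editor's sketch only, not code from this commit or from the file shown further down, and the helper name image_looks_like_qcow2 is made up; it merely illustrates the kind of probe a tool could use before treating a disk image as qcow2.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Editor's sketch: report whether the file behind fd starts with the qcow2 magic. */
static bool image_looks_like_qcow2(int fd) {
        uint8_t magic[4];

        if (pread(fd, magic, sizeof(magic), 0) != (ssize_t) sizeof(magic))
                return false;

        return memcmp(magic, "QFI\xfb", sizeof(magic)) == 0;
}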

0 of 43 new or added lines in 2 files covered. (0.0%)

1696 existing lines in 48 files now uncovered.

309976 of 427731 relevant lines covered (72.47%)

1246714.39 hits per line

Source File
/src/shared/loop-util.c (62.99% covered)
1
/* SPDX-License-Identifier: LGPL-2.1-or-later */
2

3
#if HAVE_VALGRIND_MEMCHECK_H
4
#include <valgrind/memcheck.h>
5
#endif
6

7
#include <fcntl.h>
8
#include <linux/loop.h>
9
#include <sys/file.h>
10
#include <sys/ioctl.h>
11
#include <unistd.h>
12

13
#include "sd-device.h"
14

15
#include "alloc-util.h"
16
#include "blockdev-util.h"
17
#include "data-fd-util.h"
18
#include "device-util.h"
19
#include "devnum-util.h"
20
#include "dissect-image.h"
21
#include "env-util.h"
22
#include "errno-util.h"
23
#include "fd-util.h"
24
#include "fileio.h"
25
#include "fs-util.h"
26
#include "loop-util.h"
27
#include "parse-util.h"
28
#include "path-util.h"
29
#include "random-util.h"
30
#include "stat-util.h"
31
#include "stdio-util.h"
32
#include "string-util.h"
33
#include "time-util.h"
34

35
static void cleanup_clear_loop_close(int *fd) {
1,963✔
36
        if (*fd < 0)
1,963✔
37
                return;
38

39
        (void) ioctl(*fd, LOOP_CLR_FD);
×
40
        (void) safe_close(*fd);
×
41
}
42

43
static int loop_is_bound(int fd) {
1,963✔
44
        struct loop_info64 info;
1,963✔
45

46
        if (ioctl(ASSERT_FD(fd), LOOP_GET_STATUS64, &info) < 0) {
1,963✔
47
                if (errno == ENXIO)
1,963✔
48
                        return false; /* not bound! */
1,963✔
49

50
                return -errno;
×
51
        }
52

53
        return true; /* bound! */
54
}
55

56
static int open_lock_fd(int primary_fd, int operation) {
2,094✔
57
        _cleanup_close_ int lock_fd = -EBADF;
2,094✔
58

59
        assert(IN_SET(operation & ~LOCK_NB, LOCK_SH, LOCK_EX));
2,094✔
60

61
        lock_fd = fd_reopen(ASSERT_FD(primary_fd), O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
2,094✔
62
        if (lock_fd < 0)
2,094✔
63
                return lock_fd;
64

65
        if (flock(lock_fd, operation) < 0)
2,094✔
66
                return -errno;
×
67

68
        return TAKE_FD(lock_fd);
69
}
70

71
static int loop_configure_verify_direct_io(int fd, const struct loop_config *c) {
1,963✔
72
        assert(fd >= 0);
1,963✔
73
        assert(c);
1,963✔
74

75
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO)) {
1,963✔
76
                struct loop_info64 info;
1,962✔
77

78
                if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0)
1,962✔
79
                        return log_debug_errno(errno, "Failed to issue LOOP_GET_STATUS64: %m");
×
80

81
#if HAVE_VALGRIND_MEMCHECK_H
82
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
83
#endif
84

85
                /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
86
                 * device to the logical block size of the underlying file system. Since there was no nice
87
                 * way to query the value, we are not bothering to do this however. On newer kernels the
88
                 * block size is propagated automatically and does not require intervention from us. We'll
89
                 * check here if enabling direct IO worked, to make this easily debuggable however.
90
                 *
91
                 * (Should anyone really care and actually want direct IO on old kernels: it might be worth
92
                 * enabling direct IO with iteratively larger block sizes until it eventually works.)
93
                 *
94
                 * On older kernels (e.g.: 5.10) when this is attempted on a file stored on a dm-crypt
95
                 * backed partition the kernel will start returning I/O errors when accessing the mounted
96
                 * loop device, so return a recognizable error that causes the operation to be started
97
                 * from scratch without the LO_FLAGS_DIRECT_IO flag. */
98
                if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
1,962✔
99
                        return log_debug_errno(
×
100
                                        SYNTHETIC_ERRNO(ENOANO),
101
                                        "Could not enable direct IO mode, retrying in buffered IO mode.");
102
        }
103

104
        return 0;
105
}
106

107
static int loop_configure_verify(int fd, const struct loop_config *c) {
1,963✔
108
        bool broken = false;
1,963✔
109
        int r;
1,963✔
110

111
        assert(fd >= 0);
1,963✔
112
        assert(c);
1,963✔
113

114
        if (c->block_size != 0) {
1,963✔
115
                uint32_t ssz;
1,963✔
116

117
                r = blockdev_get_sector_size(fd, &ssz);
1,963✔
118
                if (r < 0)
1,963✔
119
                        return r;
×
120

121
                if (ssz != c->block_size) {
1,963✔
122
                        log_debug("LOOP_CONFIGURE didn't honour requested block size %" PRIu32 ", got %" PRIu32 " instead. Ignoring.", c->block_size, ssz);
×
123
                        broken = true;
124
                }
125
        }
126

127
        if (c->info.lo_sizelimit != 0) {
1,963✔
128
                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the
129
                 * block device. If it's used, let's hence immediately check whether it had the desired
130
                 * effect, and if not, fall back to the classic LOOP_SET_STATUS64. */
131
                uint64_t z;
92✔
132

133
                r = blockdev_get_device_size(fd, &z);
92✔
134
                if (r < 0)
92✔
135
                        return r;
×
136

137
                if (z != c->info.lo_sizelimit) {
92✔
138
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .info.lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
×
139
                        broken = true;
140
                }
141
        }
142

143
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
1,963✔
144
                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag
145
                 * into the block device. Let's hence verify if things work correctly here
146
                 * before returning. */
147

148
                r = blockdev_partscan_enabled_fd(fd);
1,794✔
149
                if (r < 0)
1,794✔
150
                        return r;
151
                if (r == 0) {
1,794✔
152
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
×
153
                        broken = true;
154
                }
155
        }
156

157
        r = loop_configure_verify_direct_io(fd, c);
1,963✔
158
        if (r < 0)
1,963✔
159
                return r;
160

161
        return !broken;
1,963✔
162
}
163

164
static int loop_configure_fallback(int fd, const struct loop_config *c) {
×
165
        struct loop_info64 info_copy;
×
166
        int r;
×
167

168
        assert(fd >= 0);
×
169
        assert(c);
×
170

171
        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
172
         * them out. */
173
        info_copy = c->info;
×
174
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
×
175

176
        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
177
         * ioctl can return EAGAIN in case we change the info.lo_offset field, if someone else is accessing the
178
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
179
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
180
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
181
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait 50ms on
182
         * EAGAIN and retry. The former should be an efficient mechanism to avoid having to wait 50ms
183
         * needlessly if we are just racing against udev. The latter is protection against all other cases,
184
         * i.e. peers that do not take the BSD lock. */
185

186
        for (unsigned n_attempts = 0;;) {
×
187
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
×
188
                        break;
189

190
                if (errno != EAGAIN || ++n_attempts >= 64)
×
191
                        return log_debug_errno(errno, "Failed to configure loopback block device: %m");
×
192

193
                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
194
                 * failed attempts we see */
195
                (void) usleep_safe(UINT64_C(10) * USEC_PER_MSEC +
×
196
                              random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
×
197
        }
198

199
        /* If a block size is requested then try to configure it. If that doesn't work, ignore errors, but
200
         * afterwards, let's validate what is in effect, and if it doesn't match what we want, fail */
201
        if (c->block_size != 0) {
×
202
                uint32_t ssz;
×
203

204
                if (ioctl(fd, LOOP_SET_BLOCK_SIZE, (unsigned long) c->block_size) < 0)
×
205
                        log_debug_errno(errno, "Failed to set sector size, ignoring: %m");
×
206

207
                r = blockdev_get_sector_size(fd, &ssz);
×
208
                if (r < 0)
×
209
                        return log_debug_errno(r, "Failed to read sector size: %m");
×
210
                if (ssz != c->block_size)
×
211
                        return log_debug_errno(SYNTHETIC_ERRNO(EIO), "Sector size of loopback device doesn't match what we requested, refusing.");
×
212
        }
213

214
        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via an explicit ioctl. */
215
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO))
×
216
                if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
×
217
                        log_debug_errno(errno, "Failed to enable direct IO mode, ignoring: %m");
×
218

219
        return loop_configure_verify_direct_io(fd, c);
×
220
}
221

222
static int loop_configure(
1,963✔
223
                int nr,
224
                int open_flags,
225
                int lock_op,
226
                const struct loop_config *c,
227
                LoopDevice **ret) {
228

229
        static bool loop_configure_broken = false;
1,963✔
230

231
        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
1,963✔
232
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -EBADF; /* This must be declared before lock_fd. */
×
233
        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
3,926✔
234
        _cleanup_free_ char *node = NULL;
1,963✔
235
        uint64_t diskseq = 0;
1,963✔
236
        dev_t devno;
1,963✔
237
        int r;
1,963✔
238

239
        assert(nr >= 0);
1,963✔
240
        assert(c);
1,963✔
241
        assert(ret);
1,963✔
242

243
        if (asprintf(&node, "/dev/loop%i", nr) < 0)
1,963✔
244
                return log_oom_debug();
×
245

246
        r = sd_device_new_from_devname(&dev, node);
1,963✔
247
        if (r < 0)
1,963✔
248
                return log_debug_errno(r, "Failed to create sd_device object for \"%s\": %m", node);
×
249

250
        r = sd_device_get_devnum(dev, &devno);
1,963✔
251
        if (r < 0)
1,963✔
252
                return log_device_debug_errno(dev, r, "Failed to get devnum: %m");
×
253

254
        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
1,963✔
255
        if (fd < 0)
1,963✔
256
                return log_device_debug_errno(dev, fd, "Failed to open device: %m");
×
257

258
        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
259
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
260
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
261
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
262
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
263
         * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
264
         * automatically release the lock, after we are done. */
265
        lock_fd = open_lock_fd(fd, LOCK_EX);
1,963✔
266
        if (lock_fd < 0)
1,963✔
267
                return log_device_debug_errno(dev, lock_fd, "Failed to acquire lock: %m");
×
268

269
        log_device_debug(dev, "Acquired exclusive lock.");
1,990✔
270

271
        /* Let's see if the backing file is really unattached. Someone may have already attached a backing
272
         * file without taking the BSD lock. */
273
        r = loop_is_bound(fd);
1,963✔
274
        if (r < 0)
1,963✔
275
                return log_device_debug_errno(dev, r, "Failed to check if the loopback block device is bound: %m");
×
276
        if (r > 0)
1,963✔
UNCOV
277
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EBUSY),
×
278
                                              "The loopback block device is already bound, ignoring.");
279

280
        /* Let's see if the device is really detached, i.e. currently has no associated partition block
281
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
282
         * superficially is detached but still has partition block devices associated with it. Let's then
283
         * manually remove the partitions via BLKPG, and tell the caller we did that via EUCLEAN, so they try
284
         * again. */
285
        r = block_device_remove_all_partitions(dev, fd);
1,963✔
286
        if (r < 0)
1,963✔
287
                return log_device_debug_errno(dev, r, "Failed to remove partitions on the loopback block device: %m");
×
288
        if (r > 0)
1,963✔
289
                /* Removed all partitions. Let's report this to the caller, to try again, and count this as
290
                 * an attempt. */
291
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EUCLEAN),
×
292
                                              "Removed partitions on the loopback block device.");
293

294
        if (!loop_configure_broken) {
1,963✔
295
                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
1,963✔
296
                        /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other errors. */
297
                        if (!ERRNO_IS_IOCTL_NOT_SUPPORTED(errno))
×
298
                                return log_device_debug_errno(dev, errno, "ioctl(LOOP_CONFIGURE) failed: %m");
×
299

300
                        loop_configure_broken = true;
×
301
                } else {
302
                        loop_with_fd = TAKE_FD(fd);
1,963✔
303

304
                        r = loop_configure_verify(loop_with_fd, c);
1,963✔
305
                        if (r < 0)
1,963✔
306
                                return log_device_debug_errno(dev, r, "Failed to verify if loopback block device is correctly configured: %m");
×
307
                        if (r == 0) {
1,963✔
308
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
309
                                loop_configure_broken = true;
×
310

311
                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
312
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
313
                                 * away it just sets the autoclear flag on the device. This means there's a
314
                                 * good chance we cannot actually reuse the loopback device right-away. Hence
315
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
316
                                 * again with a new, likely unused device. */
317
                                return -EBUSY;
×
318
                        }
319
                }
320
        }
321

322
        if (loop_configure_broken) {
1,963✔
323
                if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
×
324
                        return log_device_debug_errno(dev, errno, "ioctl(LOOP_SET_FD) failed: %m");
×
325

326
                loop_with_fd = TAKE_FD(fd);
×
327

328
                r = loop_configure_fallback(loop_with_fd, c);
×
329
                if (r < 0)
×
330
                        return r;
331
        }
332

333
        r = fd_get_diskseq(loop_with_fd, &diskseq);
1,963✔
334
        if (r < 0 && r != -EOPNOTSUPP)
1,963✔
335
                return log_device_debug_errno(dev, r, "Failed to get diskseq: %m");
×
336

337
        switch (lock_op & ~LOCK_NB) {
1,963✔
338
        case LOCK_EX: /* Already in effect */
339
                break;
340
        case LOCK_SH: /* Downgrade */
1,837✔
341
                if (flock(lock_fd, lock_op) < 0)
1,837✔
342
                        return log_device_debug_errno(dev, errno, "Failed to downgrade lock level: %m");
×
343
                break;
344
        case LOCK_UN: /* Release */
×
345
                lock_fd = safe_close(lock_fd);
×
346
                break;
347
        default:
×
348
                assert_not_reached();
×
349
        }
350

351
        uint64_t device_size;
1,963✔
352
        r = blockdev_get_device_size(loop_with_fd, &device_size);
1,963✔
353
        if (r < 0)
1,963✔
354
                return log_device_debug_errno(dev, r, "Failed to get loopback device size: %m");
×
355

356
        LoopDevice *d = new(LoopDevice, 1);
1,963✔
357
        if (!d)
1,963✔
358
                return log_oom_debug();
×
359

360
        *d = (LoopDevice) {
1,963✔
361
                .n_ref = 1,
362
                .fd = TAKE_FD(loop_with_fd),
1,963✔
363
                .lock_fd = TAKE_FD(lock_fd),
1,963✔
364
                .node = TAKE_PTR(node),
1,963✔
365
                .nr = nr,
366
                .devno = devno,
367
                .dev = TAKE_PTR(dev),
1,963✔
368
                .diskseq = diskseq,
369
                .sector_size = c->block_size,
1,963✔
370
                .device_size = device_size,
371
                .created = true,
372
        };
373

374
        *ret = TAKE_PTR(d);
1,963✔
375
        return 0;
1,963✔
376
}
377

378
static int fd_get_max_discard(int fd, uint64_t *ret) {
×
379
        struct stat st;
×
380
        char sysfs_path[STRLEN("/sys/dev/block/" ":" "/queue/discard_max_bytes") + DECIMAL_STR_MAX(dev_t) * 2 + 1];
×
381
        _cleanup_free_ char *buffer = NULL;
×
382
        int r;
×
383

384
        assert(ret);
×
385

386
        if (fstat(ASSERT_FD(fd), &st) < 0)
×
387
                return -errno;
×
388

389
        if (!S_ISBLK(st.st_mode))
×
390
                return -ENOTBLK;
391

392
        xsprintf(sysfs_path, "/sys/dev/block/" DEVNUM_FORMAT_STR "/queue/discard_max_bytes", DEVNUM_FORMAT_VAL(st.st_rdev));
×
393

394
        r = read_one_line_file(sysfs_path, &buffer);
×
395
        if (r < 0)
×
396
                return r;
397

398
        return safe_atou64(buffer, ret);
×
399
}
400

401
static int fd_set_max_discard(int fd, uint64_t max_discard) {
×
402
        struct stat st;
×
403
        char sysfs_path[STRLEN("/sys/dev/block/" ":" "/queue/discard_max_bytes") + DECIMAL_STR_MAX(dev_t) * 2 + 1];
×
404

405
        if (fstat(ASSERT_FD(fd), &st) < 0)
×
406
                return -errno;
×
407

408
        if (!S_ISBLK(st.st_mode))
×
409
                return -ENOTBLK;
410

411
        xsprintf(sysfs_path, "/sys/dev/block/" DEVNUM_FORMAT_STR "/queue/discard_max_bytes", DEVNUM_FORMAT_VAL(st.st_rdev));
×
412

413
        return write_string_filef(sysfs_path, WRITE_STRING_FILE_DISABLE_BUFFER, "%" PRIu64, max_discard);
×
414
}
415

416
static int loop_device_make_internal(
1,983✔
417
                const char *path,
418
                int fd,
419
                int open_flags,
420
                uint64_t offset,
421
                uint64_t size,
422
                uint32_t sector_size,
423
                uint32_t loop_flags,
424
                int lock_op,
425
                LoopDevice **ret) {
426

427
        _cleanup_(loop_device_unrefp) LoopDevice *d = NULL;
×
428
        _cleanup_close_ int reopened_fd = -EBADF, control = -EBADF;
3,966✔
429
        _cleanup_free_ char *backing_file = NULL;
1,983✔
430
        struct loop_config config;
1,983✔
431
        int r, f_flags;
1,983✔
432
        struct stat st;
1,983✔
433

434
        assert(fd >= 0);
1,983✔
435
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));
1,983✔
436
        assert(ret);
1,983✔
437

438
        f_flags = fcntl(fd, F_GETFL);
1,983✔
439
        if (f_flags < 0)
1,983✔
440
                return -errno;
×
441

442
        if (open_flags < 0) {
1,983✔
443
                /* If open_flags is unset, initialize it from the open fd */
444
                if (FLAGS_SET(f_flags, O_PATH))
×
445
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADFD), "Access mode of image file indicates O_PATH, cannot determine read/write flags.");
×
446

447
                open_flags = f_flags & O_ACCMODE_STRICT;
×
448
                if (!IN_SET(open_flags, O_RDWR, O_RDONLY))
×
449
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADFD), "Access mode of image file is write only (?)");
×
450
        }
451

452
        if (fstat(fd, &st) < 0)
1,983✔
453
                return -errno;
×
454

455
        if (S_ISBLK(st.st_mode)) {
1,983✔
456
                if (offset == 0 && IN_SET(size, 0, UINT64_MAX))
×
457
                        /* If this is already a block device and we are supposed to cover the whole of it
458
                         * then store an fd to the original open device node — and do not actually create an
459
                         * unnecessary loopback device for it. */
460
                        return loop_device_open_from_fd(fd, open_flags, lock_op, ret);
×
461
        } else {
462
                r = stat_verify_regular(&st);
1,983✔
463
                if (r < 0)
1,983✔
464
                        return r;
465
        }
466

467
        if (path) {
1,983✔
468
                r = path_make_absolute_cwd(path, &backing_file);
261✔
469
                if (r < 0)
261✔
470
                        return r;
471

472
                path_simplify(backing_file);
261✔
473
        } else {
474
                r = fd_get_path(fd, &backing_file);
1,722✔
475
                if (r < 0)
1,722✔
476
                        return r;
477
        }
478

479
        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
1,983✔
480
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
481
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
482
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
483
                 *
484
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT derived
485
                 * from that automatically. */
486

487
                reopened_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
126✔
488
                if (reopened_fd < 0) {
126✔
489
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
×
490
                                return log_debug_errno(reopened_fd, "Failed to reopen file descriptor without O_DIRECT: %m");
×
491

492
                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
493
                        log_debug_errno(reopened_fd, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
×
494
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
×
495
                } else
496
                        fd = reopened_fd; /* From now on, operate on our new O_DIRECT fd */
497
        }
498

499
        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
1,983✔
500
        if (control < 0)
1,983✔
501
                return -errno;
20✔
502

503
        if (sector_size == 0)
1,963✔
504
                /* If no sector size is specified, default to the classic default */
505
                sector_size = 512;
×
506
        else if (sector_size == UINT32_MAX) {
1,963✔
507

508
                if (S_ISBLK(st.st_mode))
1,837✔
509
                        /* If the sector size is specified as UINT32_MAX we'll propagate the sector size of
510
                         * the underlying block device. */
511
                        r = blockdev_get_sector_size(fd, &sector_size);
×
512
                else {
513
                        _cleanup_close_ int non_direct_io_fd = -EBADF;
1,983✔
514
                        int probe_fd;
1,837✔
515

516
                        assert(S_ISREG(st.st_mode));
1,837✔
517

518
                        /* If sector size is specified as UINT32_MAX, we'll try to probe the right sector
519
                         * size of the image in question by looking for the GPT partition header at various
520
                         * offsets. This of course only works if the image already has a disk label.
521
                         *
522
                         * So here we actually want to read the file contents ourselves. This is quite likely
523
                         * not going to work if we managed to enable O_DIRECT, because in such a case there
524
                         * are some pretty strict alignment requirements to offset, size and target, but
525
                         * there's no way to query what alignment specifically is actually required. Hence,
526
                         * let's avoid the mess, and temporarily open an fd without O_DIRECT for the probing
527
                         * logic. */
528

529
                        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO)) {
1,837✔
530
                                non_direct_io_fd = fd_reopen(fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
1,836✔
531
                                if (non_direct_io_fd < 0)
1,836✔
532
                                        return non_direct_io_fd;
×
533

534
                                probe_fd = non_direct_io_fd;
535
                        } else
536
                                probe_fd = fd;
537

538
                        r = probe_sector_size(probe_fd, &sector_size);
1,837✔
539
                }
540
                if (r < 0)
1,837✔
541
                        return r;
542
        }
543

544
        config = (struct loop_config) {
3,926✔
545
                .fd = fd,
546
                .block_size = sector_size,
547
                .info = {
548
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
549
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE_STRICT) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
1,963✔
550
                        .lo_offset = offset,
551
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
1,963✔
552
                },
553
        };
554

555
        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
556
         * be gone already, taken by somebody else racing against us. */
557
        for (unsigned n_attempts = 0;;) {
1,963✔
558
                usec_t usec;
1,963✔
559
                int nr;
1,963✔
560

561
                /* Let's take a lock on the control device first. On a busy system, where many programs
562
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
563
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
564
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
565
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
566
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
567
                 * necessary, it just means it's less likely we have to iterate through this loop again and
568
                 * again if our own code races against our own code.
569
                 *
570
                 * Note: our lock protocol is to take the /dev/loop-control lock first, and the block device
571
                 * lock second, if both are taken, and always in this order, to avoid ABBA locking issues. */
572
                if (flock(control, LOCK_EX) < 0)
1,963✔
573
                        return -errno;
×
574

575
                nr = ioctl(control, LOOP_CTL_GET_FREE);
1,963✔
576
                if (nr < 0)
1,963✔
577
                        return -errno;
×
578

579
                r = loop_configure(nr, open_flags, lock_op, &config, &d);
1,963✔
580
                if (r >= 0)
1,963✔
581
                        break;
582

583
                /* -ENODEV or friends: Somebody might've gotten the same number from the kernel, used the
584
                 * device, and called LOOP_CTL_REMOVE on it. Let's retry with a new number.
585
                 * -EBUSY: a file descriptor is already bound to the loopback block device.
586
                 * -EUCLEAN: some left-over partition devices that were cleaned up.
587
                 * -ENOANO: we tried to use LO_FLAGS_DIRECT_IO but the kernel rejected it. */
UNCOV
588
                if (!ERRNO_IS_DEVICE_ABSENT(r) && !IN_SET(r, -EBUSY, -EUCLEAN, -ENOANO))
×
589
                        return r;
590

591
                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
592
                 * control device */
UNCOV
593
                if (flock(control, LOCK_UN) < 0)
×
594
                        return -errno;
×
595

UNCOV
596
                if (++n_attempts >= 64) /* Give up eventually */
×
597
                        return -EBUSY;
598

599
                /* If we failed to enable direct IO mode, let's retry without it. We restart the process as
600
                 * on some combination of kernel version and storage filesystem, the kernel is very unhappy
601
                 * about a failed DIRECT_IO enablement and throws I/O errors. */
UNCOV
602
                if (r == -ENOANO && FLAGS_SET(config.info.lo_flags, LO_FLAGS_DIRECT_IO)) {
×
603
                        config.info.lo_flags &= ~LO_FLAGS_DIRECT_IO;
×
604
                        open_flags &= ~O_DIRECT;
×
605

606
                        int non_direct_io_fd = fd_reopen(config.fd, O_CLOEXEC|O_NONBLOCK|open_flags);
×
607
                        if (non_direct_io_fd < 0)
×
608
                                return log_debug_errno(
×
609
                                                non_direct_io_fd,
610
                                                "Failed to reopen file descriptor without O_DIRECT: %m");
611

612
                        safe_close(reopened_fd);
×
613
                        fd = config.fd = /* For cleanups */ reopened_fd = non_direct_io_fd;
×
614
                }
615

616
                /* Wait some random time, to make collision less likely. Let's pick a random time in the
617
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
UNCOV
618
                usec = random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
×
UNCOV
619
                                        UINT64_C(240) * USEC_PER_MSEC * n_attempts/64);
×
UNCOV
620
                log_debug("Trying again after %s.", FORMAT_TIMESPAN(usec, USEC_PER_MSEC));
×
UNCOV
621
                (void) usleep_safe(usec);
×
622
        }
623

624
        if (S_ISBLK(st.st_mode)) {
1,963✔
625
                /* Propagate backing device's discard byte limit to our loopback block device. We do this in
626
                 * order to avoid that (supposedly quick) discard requests on the loopback device get turned
627
                 * into (likely slow) zero-out requests on backing devices that do not support discarding
628
                 * natively, but do support zero-out. */
629
                uint64_t discard_max_bytes;
×
630

631
                r = fd_get_max_discard(fd, &discard_max_bytes);
×
632
                if (r < 0)
×
633
                        log_debug_errno(r, "Failed to read 'discard_max_bytes' of backing device, ignoring: %m");
×
634
                else {
635
                        r = fd_set_max_discard(d->fd, discard_max_bytes);
×
636
                        if (r < 0)
×
637
                                log_debug_errno(r, "Failed to write 'discard_max_bytes' of loop device, ignoring: %m");
×
638
                }
639
        }
640

641
        d->backing_file = TAKE_PTR(backing_file);
1,963✔
642
        d->backing_inode = st.st_ino;
1,963✔
643
        d->backing_devno = st.st_dev;
1,963✔
644

645
        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
1,963✔
646
                  d->node,
647
                  major(d->devno), minor(d->devno),
648
                  d->nr,
649
                  d->diskseq);
650

651
        *ret = TAKE_PTR(d);
1,963✔
652
        return 0;
1,963✔
653
}
654

655
static uint32_t loop_flags_mangle(uint32_t loop_flags) {
1,983✔
656
        int r;
1,983✔
657

658
        r = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
1,983✔
659
        if (r < 0 && r != -ENXIO)
1,983✔
660
                log_debug_errno(r, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");
×
661

662
        return UPDATE_FLAG(loop_flags, LO_FLAGS_DIRECT_IO, r != 0); /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off. */
1,983✔
663
}
664

665
int loop_device_make(
126✔
666
                int fd,
667
                int open_flags,
668
                uint64_t offset,
669
                uint64_t size,
670
                uint32_t sector_size,
671
                uint32_t loop_flags,
672
                int lock_op,
673
                LoopDevice **ret) {
674

675
        assert(fd >= 0);
126✔
676
        assert(ret);
126✔
677

678
        return loop_device_make_internal(
126✔
679
                        NULL,
680
                        fd,
681
                        open_flags,
682
                        offset,
683
                        size,
684
                        sector_size,
685
                        loop_flags_mangle(loop_flags),
686
                        lock_op,
687
                        ret);
688
}
689

690
int loop_device_make_by_path_at(
1,857✔
691
                int dir_fd,
692
                const char *path,
693
                int open_flags,
694
                uint32_t sector_size,
695
                uint32_t loop_flags,
696
                int lock_op,
697
                LoopDevice **ret) {
698

699
        int r, basic_flags, direct_flags, rdwr_flags;
1,857✔
700
        _cleanup_close_ int fd = -EBADF;
1,857✔
701
        bool direct = false;
1,857✔
702

703
        assert(dir_fd >= 0 || dir_fd == AT_FDCWD);
1,857✔
704
        assert(ret);
1,857✔
705
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));
1,857✔
706

707
        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
708
         * read-only if we cannot. */
709

710
        loop_flags = loop_flags_mangle(loop_flags);
1,857✔
711

712
        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
713
         * non-O_DIRECT mode automatically, if it fails. */
714

715
        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
1,857✔
716
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
1,857✔
717
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;
1,857✔
718

719
        fd = xopenat(dir_fd, path, basic_flags|direct_flags|rdwr_flags);
1,857✔
720
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
1,857✔
721
                fd = xopenat(dir_fd, path, basic_flags|rdwr_flags);
1✔
722
        else
723
                direct = direct_flags != 0;
×
724
        if (fd < 0) {
1,857✔
725
                r = fd;
1✔
726

727
                /* Retry read-only? */
728
                if (open_flags >= 0 || !ERRNO_IS_NEG_FS_WRITE_REFUSED(r))
1,858✔
729
                        return r;
730

731
                fd = xopenat(dir_fd, path, basic_flags|direct_flags|O_RDONLY);
×
732
                if (fd < 0 && direct_flags != 0) /* as above */
×
733
                        fd = xopenat(dir_fd, path, basic_flags|O_RDONLY);
×
734
                else
735
                        direct = direct_flags != 0;
×
736
                if (fd < 0)
×
737
                        return r; /* Propagate original error */
738

739
                open_flags = O_RDONLY;
740
        } else if (open_flags < 0)
1,856✔
741
                open_flags = O_RDWR;
77✔
742

743
        log_debug("Opened %s in %s access mode%s, with O_DIRECT %s%s.",
8,833✔
744
                  path ?: "loop device",
745
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
746
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
747
                  direct ? "enabled" : "disabled",
748
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");
749

750
        return loop_device_make_internal(
3,451✔
751
                        dir_fd == AT_FDCWD ? path : NULL,
752
                        fd,
753
                        open_flags,
754
                        /* offset= */ 0,
755
                        /* size= */ 0,
756
                        sector_size,
757
                        loop_flags,
758
                        lock_op,
759
                        ret);
760
}
761

762
int loop_device_make_by_path_memory(
1✔
763
                const char *path,
764
                int open_flags,
765
                uint32_t sector_size,
766
                uint32_t loop_flags,
767
                int lock_op,
768
                LoopDevice **ret) {
769

770
        _cleanup_close_ int fd = -EBADF, mfd = -EBADF;
1✔
771
        _cleanup_free_ char *fn = NULL;
1✔
772
        struct stat st;
1✔
773
        int r;
1✔
774

775
        assert(path);
1✔
776
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
1✔
777
        assert(ret);
1✔
778

779
        loop_flags &= ~LO_FLAGS_DIRECT_IO; /* memfds don't support O_DIRECT, hence LO_FLAGS_DIRECT_IO can't be used either */
1✔
780

781
        fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|O_RDONLY);
1✔
782
        if (fd < 0)
1✔
783
                return -errno;
×
784

785
        if (fstat(fd, &st) < 0)
1✔
786
                return -errno;
×
787

788
        if (!S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode))
1✔
789
                return -EBADF;
790

791
        r = path_extract_filename(path, &fn);
1✔
792
        if (r < 0)
1✔
793
                return r;
794

795
        mfd = memfd_clone_fd(fd, fn, open_flags|O_CLOEXEC);
1✔
796
        if (mfd < 0)
1✔
797
                return mfd;
798

799
        fd = safe_close(fd); /* Let's close the original early */
1✔
800

801
        return loop_device_make_internal(NULL, mfd, open_flags, 0, 0, sector_size, loop_flags, lock_op, ret);
1✔
802
}
803

804
static LoopDevice* loop_device_free(LoopDevice *d) {
2,005✔
805
        _cleanup_close_ int control = -EBADF;
2,005✔
806
        int r;
2,005✔
807

808
        if (!d)
2,005✔
809
                return NULL;
810

811
        /* Release any lock we might have on the device first. We want to open+lock the /dev/loop-control
812
         * device below, but our lock protocol says that if both control and block device locks are taken,
813
         * the control lock needs to be taken first, the block device lock second — in order to avoid ABBA
814
         * locking issues. Moreover, we want to issue LOOP_CLR_FD on the block device further down, and that
815
         * would fail if we had another fd open to the device. */
816
        d->lock_fd = safe_close(d->lock_fd);
2,005✔
817

818
        /* Let's open the control device early, and lock it, so that we can release our block device and
819
         * delete it in a synchronized fashion, and allocators won't needlessly see the block device as free
820
         * while we are about to delete it. */
821
        if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
2,005✔
822
                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
1,738✔
823
                if (control < 0)
1,738✔
824
                        log_debug_errno(errno, "Failed to open loop control device, cannot remove loop device '%s', ignoring: %m", strna(d->node));
×
825
                else if (flock(control, LOCK_EX) < 0)
1,738✔
826
                        log_debug_errno(errno, "Failed to lock loop control device, ignoring: %m");
×
827
        }
828

829
        /* Then let's release the loopback block device */
830
        if (d->fd >= 0) {
2,005✔
831
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
832
                if (fsync(d->fd) < 0)
2,005✔
833
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");
×
834

835
                if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
2,005✔
836
                        /* We are supposed to clear the loopback device. Let's do this synchronously: lock
837
                         * the device, manually remove all partitions and then clear it. This should ensure
838
                         * udev doesn't concurrently access the devices, and we can be reasonably sure that
839
                         * once we are done here the device is cleared and all its partition children
840
                         * removed. Note that we lock our primary device fd here (and not a separate locking
841
                         * fd, as we do during allocation, since we want to keep the lock all the way through
842
                         * the LOOP_CLR_FD, but that call would fail if we had more than one fd open.) */
843

844
                        if (flock(d->fd, LOCK_EX) < 0)
1,738✔
845
                                log_debug_errno(errno, "Failed to lock loop block device, ignoring: %m");
×
846

847
                        r = block_device_remove_all_partitions(d->dev, d->fd);
1,738✔
848
                        if (r < 0)
1,738✔
849
                                log_debug_errno(r, "Failed to remove partitions of loopback block device, ignoring: %m");
×
850

851
                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
1,738✔
852
                                log_debug_errno(errno, "Failed to clear loop device, ignoring: %m");
×
853
                }
854

855
                safe_close(d->fd);
2,005✔
856
        }
857

858
        /* Now that the block device is released, let's also try to remove it */
859
        if (control >= 0) {
2,005✔
860
                useconds_t delay = 5 * USEC_PER_MSEC;  /* A total delay of 5730 ms between 39 attempts,
                                                        * (4*5 + 5*10 + 5*20 + … + 4*640) = 5730. */
                                                        * (4*5 + 5*10 + 5*20 + … + 3*640) = 5090. */
862

863
                for (unsigned attempt = 1;; attempt++) {
120✔
864
                        if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
1,858✔
865
                                break;
866
                        if (errno != EBUSY || attempt > 38) {
121✔
867
                                log_debug_errno(errno, "Failed to remove device %s: %m", strna(d->node));
1✔
868
                                break;
869
                        }
870
                        if (attempt % 5 == 0) {
120✔
871
                                log_debug("Device is still busy after %u attempts…", attempt);
12✔
872
                                delay *= 2;
12✔
873
                        }
874

875
                        (void) usleep_safe(delay);
120✔
876
                }
877
        }
878

879
        free(d->node);
2,005✔
880
        sd_device_unref(d->dev);
2,005✔
881
        free(d->backing_file);
2,005✔
882
        return mfree(d);
2,005✔
883
}
884

885
DEFINE_TRIVIAL_REF_UNREF_FUNC(LoopDevice, loop_device, loop_device_free);
6,270✔
886

887
void loop_device_relinquish(LoopDevice *d) {
158✔
888
        assert(d);
158✔
889

890
        /* Don't attempt to clean up the loop device anymore from this point on. Leave the cleaning up to the kernel
891
         * itself, using the loop device "auto-clear" logic we already turned on when creating the device. */
892

893
        d->relinquished = true;
158✔
894
}
158✔
895

896
void loop_device_unrelinquish(LoopDevice *d) {
22✔
897
        assert(d);
22✔
898
        d->relinquished = false;
22✔
899
}
22✔
900

901
int loop_device_open(
130✔
902
                sd_device *dev,
903
                int open_flags,
904
                int lock_op,
905
                LoopDevice **ret) {
906

907
        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
130✔
908
        _cleanup_free_ char *node = NULL, *backing_file = NULL;
130✔
909
        dev_t devnum, backing_devno = 0;
130✔
910
        struct loop_info64 info;
130✔
911
        ino_t backing_inode = 0;
130✔
912
        uint64_t diskseq = 0;
130✔
913
        LoopDevice *d;
130✔
914
        const char *s;
130✔
915
        int r, nr = -1;
130✔
916

917
        assert(dev);
130✔
918
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
130✔
919
        assert(ret);
130✔
920

921
        /* Even if fd is provided through the argument in loop_device_open_from_fd(), we reopen the inode
922
         * here, instead of keeping just a dup() clone of it around, since we want to ensure that the
923
         * O_DIRECT flag of the handle we keep is off, we have our own file index, and have the right
924
         * read/write mode in effect. */
925
        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
130✔
926
        if (fd < 0)
130✔
927
                return fd;
928

929
        if ((lock_op & ~LOCK_NB) != LOCK_UN) {
130✔
930
                lock_fd = open_lock_fd(fd, lock_op);
130✔
931
                if (lock_fd < 0)
130✔
932
                        return lock_fd;
933
        }
934

935
        if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0) {
130✔
936
#if HAVE_VALGRIND_MEMCHECK_H
937
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
938
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
939
#endif
940
                nr = info.lo_number;
26✔
941

942
                if (sd_device_get_sysattr_value(dev, "loop/backing_file", &s) >= 0) {
26✔
943
                        backing_file = strdup(s);
17✔
944
                        if (!backing_file)
17✔
945
                                return -ENOMEM;
946
                }
947

948
                backing_devno = info.lo_device;
26✔
949
                backing_inode = info.lo_inode;
26✔
950
        }
951

952
        r = fd_get_diskseq(fd, &diskseq);
130✔
953
        if (r < 0 && r != -EOPNOTSUPP)
130✔
954
                return r;
955

956
        uint32_t sector_size;
130✔
957
        r = blockdev_get_sector_size(fd, &sector_size);
130✔
958
        if (r < 0)
130✔
959
                return r;
960

961
        uint64_t device_size;
130✔
962
        r = blockdev_get_device_size(fd, &device_size);
130✔
963
        if (r < 0)
130✔
964
                return r;
965

966
        r = sd_device_get_devnum(dev, &devnum);
130✔
967
        if (r < 0)
130✔
968
                return r;
969

970
        r = sd_device_get_devname(dev, &s);
130✔
971
        if (r < 0)
130✔
972
                return r;
973

974
        node = strdup(s);
130✔
975
        if (!node)
130✔
976
                return -ENOMEM;
977

978
        d = new(LoopDevice, 1);
130✔
979
        if (!d)
130✔
980
                return -ENOMEM;
981

982
        *d = (LoopDevice) {
260✔
983
                .n_ref = 1,
984
                .fd = TAKE_FD(fd),
130✔
985
                .lock_fd = TAKE_FD(lock_fd),
130✔
986
                .nr = nr,
987
                .node = TAKE_PTR(node),
130✔
988
                .dev = sd_device_ref(dev),
130✔
989
                .backing_file = TAKE_PTR(backing_file),
130✔
990
                .backing_inode = backing_inode,
991
                .backing_devno = backing_devno,
992
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
993
                .devno = devnum,
994
                .diskseq = diskseq,
995
                .sector_size = sector_size,
996
                .device_size = device_size,
997
                .created = false,
998
        };
999

1000
        *ret = d;
130✔
1001
        return 0;
130✔
1002
}
1003

1004
int loop_device_open_from_fd(
2✔
1005
                int fd,
1006
                int open_flags,
1007
                int lock_op,
1008
                LoopDevice **ret) {
1009

1010
        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
2✔
1011
        int r;
2✔
1012

1013
        r = block_device_new_from_fd(ASSERT_FD(fd), 0, &dev);
2✔
1014
        if (r < 0)
2✔
1015
                return r;
1016

1017
        return loop_device_open(dev, open_flags, lock_op, ret);
2✔
1018
}
1019

1020
int loop_device_open_from_path(
×
1021
                const char *path,
1022
                int open_flags,
1023
                int lock_op,
1024
                LoopDevice **ret) {
1025

1026
        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
×
1027
        int r;
×
1028

1029
        assert(path);
×
1030

1031
        r = block_device_new_from_path(path, 0, &dev);
×
1032
        if (r < 0)
×
1033
                return r;
1034

1035
        return loop_device_open(dev, open_flags, lock_op, ret);
×
1036
}
1037

1038
static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
×
1039
        char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
×
1040
        _cleanup_free_ char *buffer = NULL;
×
1041
        uint64_t current_offset, current_size, partno;
×
1042
        _cleanup_close_ int whole_fd = -EBADF;
×
1043
        struct stat st;
×
1044
        dev_t devno;
×
1045
        int r;
×
1046

1047
        /* Resizes the partition the loopback device refer to (assuming it refers to one instead of an actual
1048
         * loopback device), and changes the offset, if needed. This is a fancy wrapper around
1049
         * BLKPG_RESIZE_PARTITION. */
1050

1051
        if (fstat(ASSERT_FD(partition_fd), &st) < 0)
×
1052
                return -errno;
×
1053

1054
        assert(S_ISBLK(st.st_mode));
×
1055

1056
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/partition", DEVNUM_FORMAT_VAL(st.st_rdev));
×
1057
        r = read_one_line_file(sysfs, &buffer);
×
1058
        if (r == -ENOENT) /* not a partition, cannot resize */
×
1059
                return -ENOTTY;
1060
        if (r < 0)
×
1061
                return r;
1062
        r = safe_atou64(buffer, &partno);
×
1063
        if (r < 0)
×
1064
                return r;
1065

1066
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/start", DEVNUM_FORMAT_VAL(st.st_rdev));
×
1067

1068
        buffer = mfree(buffer);
×
1069
        r = read_one_line_file(sysfs, &buffer);
×
1070
        if (r < 0)
×
1071
                return r;
1072
        r = safe_atou64(buffer, &current_offset);
×
1073
        if (r < 0)
×
1074
                return r;
1075
        if (current_offset > UINT64_MAX/512U)
×
1076
                return -EINVAL;
1077
        current_offset *= 512U;
×
1078

1079
        r = blockdev_get_device_size(partition_fd, &current_size);
×
1080
        if (r < 0)
×
1081
                return r;
1082

1083
        if (size == UINT64_MAX && offset == UINT64_MAX)
×
1084
                return 0;
1085
        if (current_size == size && current_offset == offset)
×
1086
                return 0;
1087

1088
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/../dev", DEVNUM_FORMAT_VAL(st.st_rdev));
×
1089

1090
        buffer = mfree(buffer);
×
1091
        r = read_one_line_file(sysfs, &buffer);
×
1092
        if (r < 0)
×
1093
                return r;
1094
        r = parse_devnum(buffer, &devno);
×
1095
        if (r < 0)
×
1096
                return r;
1097

1098
        whole_fd = r = device_open_from_devnum(S_IFBLK, devno, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY, NULL);
×
1099
        if (r < 0)
×
1100
                return r;
1101

1102
        return block_device_resize_partition(
×
1103
                        whole_fd,
1104
                        partno,
1105
                        offset == UINT64_MAX ? current_offset : offset,
1106
                        size == UINT64_MAX ? current_size : size);
1107
}
1108

1109
int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
18✔
1110
        struct loop_info64 info;
18✔
1111

1112
        assert(d);
18✔
1113
        assert(d->fd >= 0);
18✔
1114

1115
        /* Changes the offset/start of the loop device relative to the beginning of the underlying file or
1116
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
1117
         * try to adjust the partition offsets instead.
1118
         *
1119
         * If either offset or size is UINT64_MAX we won't change that parameter. */
1120

1121
        if (d->nr < 0) /* not a loopback device */
18✔
1122
                return resize_partition(d->fd, offset, size);
×
1123

1124
        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
18✔
1125
                return -errno;
×
1126

1127
#if HAVE_VALGRIND_MEMCHECK_H
1128
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
1129
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
1130
#endif
1131

1132
        if ((size == UINT64_MAX || info.lo_sizelimit == size) &&
18✔
1133
            (offset == UINT64_MAX || info.lo_offset == offset))
×
1134
                return 0;
1135

1136
        if (size != UINT64_MAX)
18✔
1137
                info.lo_sizelimit = size;
18✔
1138
        if (offset != UINT64_MAX)
18✔
1139
                info.lo_offset = offset;
×
1140

1141
        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
18✔
1142
}
1143

1144
int loop_device_flock(LoopDevice *d, int operation) {
140✔
1145
        assert(IN_SET(operation & ~LOCK_NB, LOCK_UN, LOCK_SH, LOCK_EX));
140✔
1146
        assert(d);
140✔
1147

1148
        /* When unlocking just close the lock fd */
1149
        if ((operation & ~LOCK_NB) == LOCK_UN) {
140✔
1150
                d->lock_fd = safe_close(d->lock_fd);
138✔
1151
                return 0;
138✔
1152
        }
1153

1154
        /* If we had no lock fd so far, create one and lock it right-away */
1155
        if (d->lock_fd < 0) {
2✔
1156
                d->lock_fd = open_lock_fd(ASSERT_FD(d->fd), operation);
1✔
1157
                if (d->lock_fd < 0)
1✔
1158
                        return d->lock_fd;
1159

1160
                return 0;
1✔
1161
        }
1162

1163
        /* Otherwise change the current lock mode on the existing fd */
1164
        return RET_NERRNO(flock(d->lock_fd, operation));
1✔
1165
}
1166

1167
int loop_device_sync(LoopDevice *d) {
73✔
1168
        assert(d);
73✔
1169

1170
        /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that
1171
         * we can check the return value though. */
1172

1173
        return RET_NERRNO(fsync(ASSERT_FD(d->fd)));
73✔
1174
}
1175

1176
int loop_device_set_autoclear(LoopDevice *d, bool autoclear) {
8✔
1177
        struct loop_info64 info;
8✔
1178

1179
        assert(d);
8✔
1180

1181
        if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0)
8✔
1182
                return -errno;
×
1183

1184
        if (autoclear == FLAGS_SET(info.lo_flags, LO_FLAGS_AUTOCLEAR))
8✔
1185
                return 0;
1186

1187
        SET_FLAG(info.lo_flags, LO_FLAGS_AUTOCLEAR, autoclear);
8✔
1188

1189
        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
8✔
1190
                return -errno;
×
1191

1192
        return 1;
1193
}
1194

1195
int loop_device_set_filename(LoopDevice *d, const char *name) {
4✔
1196
        struct loop_info64 info;
4✔
1197

1198
        assert(d);
4✔
1199

1200
        /* Sets the .lo_file_name of the loopback device. This is supposed to contain the path to the file
1201
         * backing the block device, but is actually just a free-form string you can pass to the kernel. Most
1202
         * tools that actually care for the backing file path use the sysfs attribute file loop/backing_file
1203
         * which is a kernel generated string, subject to file system namespaces and such.
1204
         *
1205
         * .lo_file_name is useful since userspace can select it freely when creating a loopback block
1206
         * device, and we can use it for /dev/disk/by-loop-ref/ symlinks, and similar, so that apps can
1207
         * recognize their own loopback files. */
1208

1209
        if (name && strlen(name) >= sizeof(info.lo_file_name))
4✔
1210
                return -ENOBUFS;
4✔
1211

1212
        if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0)
4✔
1213
                return -errno;
×
1214

1215
        if (strneq((char*) info.lo_file_name, strempty(name), sizeof(info.lo_file_name)))
4✔
1216
                return 0;
1217

1218
        if (name) {
4✔
1219
                strncpy((char*) info.lo_file_name, name, sizeof(info.lo_file_name)-1);
4✔
1220
                info.lo_file_name[sizeof(info.lo_file_name)-1] = 0;
4✔
1221
        } else
1222
                memzero(info.lo_file_name, sizeof(info.lo_file_name));
×
1223

1224
        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
4✔
1225
                return -errno;
×
1226

1227
        return 1;
1228
}
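
For orientation, here is a minimal sketch of how a caller might use the public API implemented in this file. This is an editor's illustration, not part of loop-util.c: the function name attach_image_example is made up, and the LoopDevice declarations (including loop_device_unrefp and the _cleanup_ helpers) are assumed to come from loop-util.h.

#include <fcntl.h>
#include <linux/loop.h>
#include <stdint.h>
#include <sys/file.h>

#include "loop-util.h"

/* Attach an image file as a loopback block device with partition scanning enabled,
 * holding a shared BSD lock on the device for as long as the object is referenced. */
static int attach_image_example(const char *image_path) {
        _cleanup_(loop_device_unrefp) LoopDevice *d = NULL;
        int r;

        r = loop_device_make_by_path_at(
                        AT_FDCWD,
                        image_path,
                        /* open_flags= */ -1,          /* try O_RDWR, fall back to O_RDONLY if refused */
                        /* sector_size= */ UINT32_MAX, /* probe the sector size from the image */
                        LO_FLAGS_PARTSCAN,
                        LOCK_SH,
                        &d);
        if (r < 0)
                return r;

        /* d->node now names the allocated /dev/loopN device and d->fd is an open fd to it.
         * When the last reference is dropped, the cleanup handler detaches the device again. */
        return 0;
}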