# /zfs_autobackup/ZfsDataset.py

import re
from datetime import datetime
import sys
import time

from .ExecuteNode import ExecuteError


class ZfsDataset:
    """A zfs dataset (filesystem/volume/snapshot/clone). Note that a dataset
    doesn't have to actually exist (yet/anymore). Also, most properties are
    cached, both for performance reasons and to allow --test to function
    correctly.
    """

    # illegal properties per dataset type. these will be removed from --set-properties and --filter-properties
    ILLEGAL_PROPERTIES = {
        'filesystem': [],
        'volume': ["canmount"],
    }

    def __init__(self, zfs_node, name, force_exists=None):
        """
        Args:
            :type zfs_node: ZfsNode
            :type name: str
            :type force_exists: bool
        """
        self.zfs_node = zfs_node
        self.name = name  # full name

        # caching
        self.__snapshots = None  # type: None|list[ZfsDataset]
        self.__written_since_ours = None  # type: None|int
        self.__exists_check = None  # type: None|bool
        self.__properties = None  # type: None|dict[str,str]
        self.__recursive_datasets = None  # type: None|list[ZfsDataset]
        self.__datasets = None  # type: None|list[ZfsDataset]

        self.invalidate_cache()
        self.force_exists = force_exists

    def invalidate_cache(self):
        """clear caches"""
        self.force_exists = None
        self.__snapshots = None
        self.__written_since_ours = None
        self.__exists_check = None
        self.__properties = None
        self.__recursive_datasets = None
        self.__datasets = None

    def __repr__(self):
        return "{}: {}".format(self.zfs_node, self.name)

    def __str__(self):
        return self.name

    def __eq__(self, obj):
        if not isinstance(obj, ZfsDataset):
            return False

        return self.name == obj.name

    def verbose(self, txt):
        """
        Args:
            :type txt: str
        """
        self.zfs_node.verbose("{}: {}".format(self.name, txt))

    def error(self, txt):
        """
        Args:
            :type txt: str
        """
        self.zfs_node.error("{}: {}".format(self.name, txt))

    def warning(self, txt):
        """
        Args:
            :type txt: str
        """
        self.zfs_node.warning("{}: {}".format(self.name, txt))

    def debug(self, txt):
        """
        Args:
            :type txt: str
        """
        self.zfs_node.debug("{}: {}".format(self.name, txt))

    def split_path(self):
        """return the path elements as an array"""
        return self.name.split("/")

    def lstrip_path(self, count):
        """return name with first count components stripped

        Args:
            :type count: int
        """
        components = self.split_path()
        if count > len(components):
            raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))

        return "/".join(components[count:])

    def rstrip_path(self, count):
        """return name with last count components stripped

        Args:
            :type count: int
        """
        return "/".join(self.split_path()[:-count])
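
    # Example (illustrative; assumes a dataset named "rpool/data/home"):
    #   split_path()   -> ["rpool", "data", "home"]
    #   lstrip_path(1) -> "data/home"   (e.g. to map a source path onto a target)
    #   rstrip_path(1) -> "rpool/data"  (the zfs path of the parent)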

    @property
    def filesystem_name(self):
        """filesystem part of the name (before the @)"""
        if self.is_snapshot:
            (filesystem, snapshot) = self.name.split("@")
            return filesystem
        else:
            return self.name

    @property
    def snapshot_name(self):
        """snapshot part of the name (after the @)"""
        if not self.is_snapshot:
            raise (Exception("This is not a snapshot"))

        (filesystem, snapshot_name) = self.name.split("@")
        return snapshot_name

    @property
    def is_snapshot(self):
        """true if this dataset is a snapshot"""
        return self.name.find("@") != -1
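
    # Example (illustrative): for a ZfsDataset named "rpool/data@backup-20240101000000",
    # is_snapshot is True, filesystem_name is "rpool/data" and snapshot_name is
    # "backup-20240101000000". For "rpool/data" itself, is_snapshot is False.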

    @property
    def is_excluded(self):
        """true if this dataset is a snapshot and matches the exclude pattern"""
        if not self.is_snapshot:
            return False

        for pattern in self.zfs_node.exclude_snapshot_patterns:
            if pattern.search(self.name) is not None:
                self.debug("Excluded (path matches snapshot exclude pattern)")
                return True

    def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged):
        """determine if dataset should be selected for backup (called from
        ZfsNode)

        Args:
            :type exclude_paths: list[str]
            :type value: str
            :type source: str
            :type inherited: bool
            :type exclude_received: bool
            :type exclude_unchanged: int

            :param value: Value of the zfs property ("false"/"true"/"child"/"parent"/"-")
            :param source: Source of the zfs property ("local"/"received"/"-")
            :param inherited: True if the value/source was inherited from a higher dataset.

        Returns: True : Selected
                 False: Excluded
                 None: No property found
        """

        # sanity checks
        if source not in ["local", "received", "-"]:
            # probably a program error in zfs-autobackup or new feature in zfs
            raise (Exception(
                "{} autobackup-property has illegal source: '{}' (possible BUG)".format(self.name, source)))

        if value not in ["false", "true", "child", "parent", "-"]:
            # user error
            raise (Exception(
                "{} autobackup-property has illegal value: '{}'".format(self.name, value)))

        # not specified, ignore
        if value == "-":
            return None

        # only select children of this dataset, ignore the dataset itself
        if value == "child" and not inherited:
            return False

        # only select the parent itself, no children, ignore
        if value == "parent" and inherited:
            return False

        # manually excluded by property
        if value == "false":
            self.verbose("Excluded")
            return False

        # from here on the dataset is selected by property, now do additional exclusion checks

        # our path starts with one of the excluded paths?
        for exclude_path in exclude_paths:
            # if self.name.startswith(exclude_path):
            if (self.name + "/").startswith(exclude_path + "/"):
                # too noisy for verbose
                self.debug("Excluded (path in exclude list)")
                return False

        if source == "received":
            if exclude_received:
                self.verbose("Excluded (dataset already received)")
                return False

        if not self.is_changed(exclude_unchanged):
            self.verbose("Excluded (by --exclude-unchanged)")
            return False

        self.verbose("Selected")
        return True
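
    # Example (illustrative): after `zfs set autobackup:offsite1=true rpool/data`,
    # is_selected() returns True for rpool/data (value "true", source "local",
    # inherited False) and for its children (inherited True), unless a child is
    # excluded by path, as an already-received dataset, or by --exclude-unchanged.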

    @property
    def parent(self):
        """get zfs-parent of this dataset. for snapshots this means it will get
        the filesystem/volume that it belongs to. otherwise it will return the
        parent according to path

        we cache this so everything in the parent that is cached also stays.

        returns None if there is no parent.
        :rtype: ZfsDataset | None
        """
        if self.is_snapshot:
            return self.zfs_node.get_dataset(self.filesystem_name)
        else:
            stripped = self.rstrip_path(1)
            if stripped:
                return self.zfs_node.get_dataset(stripped)
            else:
                return None

    # NOTE: unused for now
    # def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
    #     """find previous snapshot in this dataset. None if it doesn't exist.
    #
    #     also_other_snapshots: set to true to also return snapshots that were
    #     not created by us. (is_ours)
    #
    #     Args:
    #         :type snapshot: str or ZfsDataset.ZfsDataset
    #         :type also_other_snapshots: bool
    #     """
    #
    #     if self.is_snapshot:
    #         raise (Exception("Please call this on a dataset."))
    #
    #     index = self.find_snapshot_index(snapshot)
    #     while index:
    #         index = index - 1
    #         if also_other_snapshots or self.snapshots[index].is_ours():
    #             return self.snapshots[index]
    #     return None

    def find_next_snapshot(self, snapshot):
        """find the next snapshot in this dataset. None if it doesn't exist

        Args:
            :type snapshot: ZfsDataset
        """

        if self.is_snapshot:
            raise (Exception("Please call this on a dataset."))

        # NOTE: the while-loop returns on its first iteration, so it effectively
        # acts as an "if there is a next snapshot" check.
        index = self.find_snapshot_index(snapshot)
        while index is not None and index < len(self.snapshots) - 1:
            index = index + 1
            return self.snapshots[index]
        return None

    @property
    def exists_check(self):
        """check on disk if it exists"""

        if self.__exists_check is None:
            self.debug("Checking if dataset exists")
            self.__exists_check = (len(self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True,
                                                         valid_exitcodes=[0, 1],
                                                         hide_errors=True)) > 0)

        return self.__exists_check

    @property
    def exists(self):
        """returns True if dataset should exist.
           Use force_exists to force a specific value, if you already know. Useful for performance and test reasons
        """

        if self.force_exists is not None:
            if self.force_exists:
                self.debug("Dataset should exist")
            else:
                self.debug("Dataset should not exist")
            return self.force_exists
        else:
            return self.exists_check

    def create_filesystem(self, parents=False, unmountable=True):
        """create a filesystem

        Args:
            :type parents: bool
            :type unmountable: bool
        """

        # recurse up
        if parents and self.parent and not self.parent.exists:
            self.parent.create_filesystem(parents, unmountable)

        cmd = ["zfs", "create"]

        if unmountable:
            cmd.extend(["-o", "canmount=off"])

        cmd.append(self.name)
        self.zfs_node.run(cmd)

        self.force_exists = True

    def destroy(self, fail_exception=False, deferred=False, verbose=True):
        """destroy the dataset. by default failures are not an exception, so we
        can continue making backups

        Args:
            :type fail_exception: bool
            :type deferred: bool
            :type verbose: bool
        """

        if verbose:
            self.verbose("Destroying")
        else:
            self.debug("Destroying")

        if self.is_snapshot:
            self.release()

        try:
            if deferred and self.is_snapshot:
                self.zfs_node.run(["zfs", "destroy", "-d", self.name])
            else:
                self.zfs_node.run(["zfs", "destroy", self.name])

            self.invalidate_cache()
            self.force_exists = False
            return True
        except ExecuteError:
            if not fail_exception:
                return False
            else:
                raise

    @property
    def properties(self):
        """all zfs properties"""

        if self.__properties is None:

            cmd = [
                "zfs", "get", "-H", "-o", "property,value", "-p", "all", self.name
            ]

            self.debug("Getting zfs properties")

            self.__properties = {}
            for pair in self.zfs_node.run(tab_split=True, cmd=cmd, readonly=True, valid_exitcodes=[0]):
                if len(pair) == 2:
                    self.__properties[pair[0]] = pair[1]

        return self.__properties

    def is_changed(self, min_changed_bytes=1):
        """dataset is changed since ANY latest snapshot?

        Args:
            :type min_changed_bytes: int
        """
        self.debug("Checking if dataset is changed")

        if min_changed_bytes == 0:
            return True

        if int(self.properties['written']) < min_changed_bytes:
            return False
        else:
            return True

    def is_ours(self):
        """return true if this snapshot name belongs to the current backup_name and snapshot formatting"""
        return self.timestamp is not None

    @property
    def holds(self):
        """get list[holds] for dataset"""

        output = self.zfs_node.run(["zfs", "holds", "-H", self.name], valid_exitcodes=[0], tab_split=True,
                                   readonly=True)
        return map(lambda fields: fields[1], output)

    def is_hold(self):
        """did we hold this snapshot?"""
        return self.zfs_node.hold_name in self.holds

    def hold(self):
        """hold dataset"""
        self.debug("holding")
        self.zfs_node.run(["zfs", "hold", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])

    def release(self):
        """release dataset"""
        if self.zfs_node.readonly or self.is_hold():
            self.debug("releasing")
            self.zfs_node.run(["zfs", "release", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])

    @property
    def timestamp(self):
        """get timestamp from snapshot name. Only works for our own snapshots
        with the correct format. Snapshots that are not ours always return None

        :rtype: int|None
        """

        try:
            dt = datetime.strptime(self.snapshot_name, self.zfs_node.snapshot_time_format)
        except ValueError:
            return None

        if sys.version_info[0] >= 3:
            from datetime import timezone
            if self.zfs_node.utc:
                dt = dt.replace(tzinfo=timezone.utc)
            seconds = dt.timestamp()
        else:
            # python2 has no good functions to deal with UTC. Yet the unix timestamp
            # must be in UTC to allow comparison against `time.time()` in other parts
            # of this project (e.g. Thinner.py). If we are handling UTC timestamps,
            # we must adjust for that here.
            if self.zfs_node.utc:
                seconds = (dt - datetime(1970, 1, 1)).total_seconds()
            else:
                seconds = time.mktime(dt.timetuple())
        return seconds
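
    # Example (illustrative; assumes snapshot_time_format is "test-%Y%m%d%H%M%S"):
    # a snapshot named "rpool/data@test-20240924000000" parses to a unix timestamp,
    # while a foreign snapshot like "rpool/data@manual" fails strptime with a
    # ValueError and therefore yields None (making is_ours() False).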

    @property
    def snapshots(self):
        """get all snapshots of this dataset
        :rtype: list[ZfsDataset]
        """

        # cached?
        if self.__snapshots is None:
            self.debug("Getting snapshots")

            cmd = [
                "zfs", "list", "-d", "1", "-r", "-t", "snapshot", "-H", "-o", "name", self.name
            ]

            self.__snapshots = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)

        return self.__snapshots

    def cache_snapshot(self, snapshot):
        """Update our snapshot cache (if we have any)
        Args:
            :type snapshot: ZfsDataset
        """

        if self.__snapshots is not None:
            self.__snapshots.append(snapshot)

    @property
    def our_snapshots(self):
        """get list[snapshots] created by us of this dataset"""
        ret = []

        for snapshot in self.snapshots:
            if snapshot.is_ours():
                ret.append(snapshot)

        return ret

    def find_snapshot_in_list(self, snapshots):
        """return ZfsDataset from the list of snapshots, if it matches the snapshot_name. Otherwise None
        Args:
            :type snapshots: list[ZfsDataset]
            :rtype: ZfsDataset|None
        """

        for snapshot in snapshots:
            if snapshot.snapshot_name == self.snapshot_name:
                return snapshot

        return None

    def find_snapshot(self, snapshot):
        """find snapshot by snapshot name (can be a snapshot_name or a different
        ZfsDataset) Returns None if it can't find it.

        Args:
            :rtype: ZfsDataset|None
            :type snapshot: str|ZfsDataset|None
        """

        if snapshot is None:
            return None

        if not isinstance(snapshot, ZfsDataset):
            snapshot_name = snapshot
        else:
            snapshot_name = snapshot.snapshot_name

        for snapshot in self.snapshots:
            if snapshot.snapshot_name == snapshot_name:
                return snapshot

        return None

    def find_snapshot_index(self, snapshot):
        """find snapshot index by snapshot (can be a snapshot_name or
        ZfsDataset)

        Args:
            :type snapshot: str or ZfsDataset
        """

        if not isinstance(snapshot, ZfsDataset):
            snapshot_name = snapshot
        else:
            snapshot_name = snapshot.snapshot_name

        index = 0
        for snapshot in self.snapshots:
            if snapshot.snapshot_name == snapshot_name:
                return index
            index = index + 1

        return None
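
    # Example (illustrative): if this dataset has snapshots @a, @b and @c, then
    # find_snapshot("b") returns the @b ZfsDataset, find_snapshot_index("b")
    # returns 1, and find_next_snapshot() of @b returns the @c snapshot.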

    @property
    def written_since_ours(self):
        """get number of bytes written since our last snapshot"""

        if self.__written_since_ours is None:
            latest_snapshot = self.our_snapshots[-1]

            self.debug("Getting bytes written since our last snapshot")
            cmd = ["zfs", "get", "-H", "-ovalue", "-p", "written@" + str(latest_snapshot), self.name]

            output = self.zfs_node.run(readonly=True, tab_split=False, cmd=cmd, valid_exitcodes=[0])

            self.__written_since_ours = int(output[0])

        return self.__written_since_ours

    def is_changed_ours(self, min_changed_bytes=1):
        """dataset is changed since OUR latest snapshot?

        Args:
            :type min_changed_bytes: int
        """

        if min_changed_bytes == 0:
            return True

        if not self.our_snapshots:
            return True

        # NOTE: filesystems can have a very small amount written without actual changes in some cases
        if self.written_since_ours < min_changed_bytes:
            return False

        return True

    @property
    def recursive_datasets(self, types="filesystem,volume"):
        """get all (non-snapshot) datasets recursively under us

        Args:
            :type types: str
            :rtype: list[ZfsDataset]
        """

        if self.__recursive_datasets is None:
            self.debug("Getting all recursive datasets under us")

            names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
                "zfs", "list", "-r", "-t", types, "-o", "name", "-H", self.name
            ])

            self.__recursive_datasets = self.zfs_node.get_datasets(names[1:], force_exists=True)

        return self.__recursive_datasets

    @property
    def datasets(self, types="filesystem,volume"):
        """get all (non-snapshot) datasets directly under us

        Args:
            :type types: str
            :rtype: list[ZfsDataset]
        """

        if self.__datasets is None:
            self.debug("Getting all datasets under us")

            names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
                "zfs", "list", "-r", "-t", types, "-o", "name", "-H", "-d", "1", self.name
            ])

            self.__datasets = self.zfs_node.get_datasets(names[1:], force_exists=True)

        return self.__datasets

    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded,
                  send_pipes, zfs_compressed):
        """returns a pipe with zfs send output for this snapshot

        resume_token: resume sending from this token. (in that case we don't
        need to know snapshot names)

        Args:
            :param send_pipes: output cmd array that will be added to actual zfs send command. (e.g. mbuffer or compression program)
            :type send_pipes: list[str]
            :type features: list[str]
            :type prev_snapshot: ZfsDataset
            :type resume_token: str
            :type show_progress: bool
            :type raw: bool
            :type send_properties: bool
            :type write_embedded: bool
            :type zfs_compressed: bool
        """
        # build source command
        cmd = []

        cmd.extend(["zfs", "send", ])

        # all kinds of performance options:
        if 'large_blocks' in features and "-L" in self.zfs_node.supported_send_options:
            # large block support (only if recordsize>128k, which is seldom used)
            cmd.append("-L")  # --large-block

        if write_embedded and 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
            cmd.append("-e")  # --embed; WRITE_EMBEDDED, more compact stream

        if zfs_compressed and "-c" in self.zfs_node.supported_send_options:
            cmd.append("-c")  # --compressed; use compressed WRITE records

        # raw? (send over encrypted data in its original encrypted form without decrypting)
        if raw:
            cmd.append("--raw")

        # progress output
        if show_progress:
            cmd.append("-v")  # --verbose
            cmd.append("-P")  # --parsable

        # resume a previous send? (we don't need more parameters in that case)
        if resume_token:
            cmd.extend(["-t", resume_token])

        else:
            # send properties
            if send_properties:
                cmd.append("-p")  # --props

            # incremental?
            if prev_snapshot:
                cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])

            cmd.append(self.name)

        cmd.extend(send_pipes)

        output_pipe = self.zfs_node.run(cmd, pipe=True, readonly=True)

        return output_pipe
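
    # Example (illustrative): an incremental, property-preserving send of
    # "rpool/data@b" on top of previous snapshot @a builds roughly:
    #   zfs send -p -i @a rpool/data@b
    # with -L/-e/-c included when the pool features and zfs version support them.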

    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False,
                  force=False):
        """starts a zfs recv for this snapshot and uses pipe as input

        note: you can call it both on a snapshot or filesystem object. The
        resulting zfs command is the same, only our object cache is invalidated
        differently.

        Args:
            :param recv_pipes: input cmd array that will be prepended to actual zfs recv command. (e.g. mbuffer or decompression program)
            :type pipe: subprocess.Popen
            :type features: list[str]
            :type recv_pipes: list[str]
            :type filter_properties: list[str]
            :type set_properties: list[str]
            :type ignore_exit_code: bool
            :type force: bool
        """

        if set_properties is None:
            set_properties = []

        if filter_properties is None:
            filter_properties = []

        # build target command
        cmd = []

        cmd.extend(recv_pipes)

        cmd.extend(["zfs", "recv"])

        # don't let zfs recv mount everything that's received (even with canmount=noauto!)
        cmd.append("-u")

        for property_ in filter_properties:
            cmd.extend(["-x", property_])

        for property_ in set_properties:
            cmd.extend(["-o", property_])

        # verbose output
        cmd.append("-v")

        if force:
            cmd.append("-F")

        if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
            # support resuming
            self.debug("Enabled resume support")
            cmd.append("-s")

        cmd.append(self.filesystem_name)

        if ignore_exit_code:
            valid_exitcodes = []
        else:
            valid_exitcodes = [0]

        # self.zfs_node.reset_progress()
        self.zfs_node.run(cmd, inp=pipe, valid_exitcodes=valid_exitcodes)

        # invalidate cache
        self.invalidate_cache()

        # in test mode we assume everything was ok and it exists
        if self.zfs_node.readonly:
            self.force_exists = True

        # check if the transfer was really ok (exit codes have been wrong before due to bugs in zfs-utils, and some
        # errors should be ignored; that's what ignore_exit_code is for.)
        if not self.exists:
            self.error("error during transfer")
            raise (Exception("Target doesn't exist after transfer, something went wrong."))

        # at this point we're sure the actual dataset exists
        self.parent.force_exists = True
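
    # Example (illustrative): receiving into "backup/data" with one filtered and
    # one forced property builds roughly:
    #   zfs recv -u -x autobackup:offsite1 -o compression=lz4 -v backup/data
    # plus -F when force=True and -s when resume support is available.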

    def automount(self):
        """Mount the dataset as if one did a zfs mount -a, but only for this dataset.
        Failure to mount doesn't result in an exception, but outputs errors to STDERR.
        """

        self.debug("Auto mounting")

        if self.properties['type'] != "filesystem":
            return

        if self.properties['canmount'] != 'on':
            return

        if self.properties['mountpoint'] == 'legacy':
            return

        if self.properties['mountpoint'] == 'none':
            return

        if self.properties['encryption'] != 'off' and self.properties['keystatus'] == 'unavailable':
            return

        self.zfs_node.run(["zfs", "mount", self.name], valid_exitcodes=[0, 1])

    def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
                          filter_properties, set_properties, ignore_recv_exit_code, resume_token,
                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
        """transfer this snapshot to target_snapshot. specify prev_snapshot for
        incremental transfer

        connects a send_pipe() to recv_pipe()

        Args:
            :type send_pipes: list[str]
            :type recv_pipes: list[str]
            :type target_snapshot: ZfsDataset
            :type features: list[str]
            :type prev_snapshot: ZfsDataset
            :type show_progress: bool
            :type filter_properties: list[str]
            :type set_properties: list[str]
            :type ignore_recv_exit_code: bool
            :type resume_token: str
            :type raw: bool
            :type send_properties: bool
            :type write_embedded: bool
            :type zfs_compressed: bool
            :type force: bool
        """

        if set_properties is None:
            set_properties = []
        if filter_properties is None:
            filter_properties = []

        self.debug("Transfer snapshot to {}".format(target_snapshot.filesystem_name))

        if resume_token:
            self.verbose("resuming")

        # initial or incremental transfer
        if not prev_snapshot:
            self.verbose("-> {} (new)".format(target_snapshot.filesystem_name))
        else:
            # incremental
            self.verbose("-> {}".format(target_snapshot.filesystem_name))

        # do it
        pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
                              resume_token=resume_token, raw=raw, send_properties=send_properties,
                              write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
        target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code,
                                  recv_pipes=recv_pipes, force=force)

        # try to automount it, if it's the initial transfer
        if not prev_snapshot:
            # in test mode it doesn't actually exist, so don't try to mount it/read properties
            if not target_snapshot.zfs_node.readonly:
                target_snapshot.parent.automount()

    def abort_resume(self):
        """abort current resume state"""
        self.debug("Aborting resume")
        self.zfs_node.run(["zfs", "recv", "-A", self.name])

    def rollback(self):
        """rollback to latest existing snapshot on this dataset"""

        for snapshot in reversed(self.snapshots):
            if snapshot.exists:
                self.debug("Rolling back")
                self.zfs_node.run(["zfs", "rollback", snapshot.name])
                return

    def get_resume_snapshot(self, resume_token):
        """returns the snapshot that will be resumed by this resume token (run
        this on the source with the target's token)

        Args:
            :type resume_token: str
        """
        # use the zfs send -n option to determine this
        # NOTE: on smartos the output is on stderr, on linux on stdout
        (stdout, stderr) = self.zfs_node.run(["zfs", "send", "-t", resume_token, "-n", "-v"], valid_exitcodes=[0, 255],
                                             readonly=True, return_stderr=True)
        if stdout:
            lines = stdout
        else:
            lines = stderr
        for line in lines:
            matches = re.findall("toname = .*@(.*)", line)
            if matches:
                snapshot_name = matches[0]
                snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot_name)
                snapshot.debug("resume token belongs to this snapshot")
                return snapshot

        return None

    def thin_list(self, keeps=None, ignores=None):
        """determines list[snapshots] that should be kept or deleted based on
        the thinning schedule. cull the herd!

        returns: ( keeps, obsoletes )

        Args:
            :param keeps: list[snapshots] to always keep (usually the last)
            :param ignores: snapshots to completely ignore (usually incompatible target snapshots that are going to be destroyed anyway)
            :type keeps: list[ZfsDataset]
            :type ignores: list[ZfsDataset]
        """

        if ignores is None:
            ignores = []
        if keeps is None:
            keeps = []

        snapshots = [snapshot for snapshot in self.our_snapshots if snapshot not in ignores]

        return self.zfs_node.thin_list(snapshots, keep_snapshots=keeps)

    def thin(self, skip_holds=False):
        """destroys snapshots according to thin_list, except the last snapshot

        Args:
            :type skip_holds: bool
        """

        (keeps, obsoletes) = self.thin_list(keeps=self.our_snapshots[-1:])
        for obsolete in obsoletes:
            if skip_holds and obsolete.is_hold():
                obsolete.verbose("Keeping (common snapshot)")
            else:
                obsolete.destroy()
                self.snapshots.remove(obsolete)
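
    # Example (illustrative): with a thinning schedule such as "10,1d1w,1w1m",
    # thin() lets the Thinner pick which of our_snapshots to keep, always
    # preserves the most recent one (our_snapshots[-1:]), and destroys the rest,
    # optionally skipping snapshots that are currently held.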

    def find_common_snapshot(self, target_dataset, guid_check):
        """find the latest common snapshot between us and the target. returns
        None if it's an initial transfer

        Args:
            :rtype: ZfsDataset|None
            :type guid_check: bool
            :type target_dataset: ZfsDataset
        """

        if not target_dataset.exists or not target_dataset.snapshots:
            # target has nothing yet
            return None
        else:
            for source_snapshot in reversed(self.snapshots):
                target_snapshot = target_dataset.find_snapshot(source_snapshot)
                if target_snapshot:
                    if guid_check and source_snapshot.properties['guid'] != target_snapshot.properties['guid']:
                        target_snapshot.warning("Common snapshots have mismatching GUID, ignoring.")
                    else:
                        target_snapshot.debug("common snapshot")
                        return source_snapshot
            # target_dataset.error("Can't find common snapshot with source.")
            raise (Exception("Can't find common snapshot with target."))

    def find_incompatible_snapshots(self, common_snapshot, raw):
        """returns a list[snapshots] that is incompatible for a zfs recv onto
        the common_snapshot. all direct followup snapshots with written=0 are
        compatible.

        in raw-mode nothing is compatible. issue #219

        Args:
            :type common_snapshot: ZfsDataset
            :type raw: bool
        """

        ret = []

        if common_snapshot and self.snapshots:
            followup = True
            for snapshot in self.snapshots[self.find_snapshot_index(common_snapshot) + 1:]:
                if raw or not followup or int(snapshot.properties['written']) != 0:
                    followup = False
                    ret.append(snapshot)

        return ret

    def get_allowed_properties(self, filter_properties, set_properties):
        """only returns the lists of properties that are allowed for this dataset type

        Args:
            :type filter_properties: list[str]
            :type set_properties: list[str]
        """

        allowed_filter_properties = []
        allowed_set_properties = []
        illegal_properties = self.ILLEGAL_PROPERTIES[self.properties['type']]
        for set_property in set_properties:
            (property_, value) = set_property.split("=")
            if property_ not in illegal_properties:
                allowed_set_properties.append(set_property)

        for filter_property in filter_properties:
            if filter_property not in illegal_properties:
                allowed_filter_properties.append(filter_property)

        return allowed_filter_properties, allowed_set_properties
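
    # Example (illustrative): for a volume, ILLEGAL_PROPERTIES contains "canmount",
    # so get_allowed_properties(["canmount"], ["canmount=off", "refreservation=none"])
    # returns ([], ["refreservation=none"]); the canmount entries are dropped
    # because volumes don't support that property.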

    def _pre_clean(self, source_common_snapshot, target_dataset, source_obsoletes, target_obsoletes, target_transfers):
        """cleanup old stuff before starting snapshot syncing

        Args:
            :type source_common_snapshot: ZfsDataset
            :type target_dataset: ZfsDataset
            :type source_obsoletes: list[ZfsDataset]
            :type target_obsoletes: list[ZfsDataset]
            :type target_transfers: list[ZfsDataset]
        """

        # on source: delete all obsoletes that are not in target_transfers (except the common snapshot)
        for source_snapshot in self.snapshots:
            if (source_snapshot in source_obsoletes
                    and source_common_snapshot != source_snapshot
                    and source_snapshot.find_snapshot_in_list(target_transfers) is None):
                source_snapshot.destroy()

        # on target: destroy everything that's obsolete, except the common_snapshot
        if target_dataset.exists:
            for target_snapshot in target_dataset.snapshots:
                if (target_snapshot in target_obsoletes) \
                        and (not source_common_snapshot or (
                        target_snapshot.snapshot_name != source_common_snapshot.snapshot_name)):
                    if target_snapshot.exists:
                        target_snapshot.destroy()

    def _validate_resume_token(self, target_dataset, start_snapshot):
        """validate and get (or destroy) the resume token

        Args:
            :type target_dataset: ZfsDataset
            :type start_snapshot: ZfsDataset
        """

        if target_dataset.exists and 'receive_resume_token' in target_dataset.properties:
            if start_snapshot is None:
                target_dataset.verbose("Aborting resume, it's obsolete.")
                target_dataset.abort_resume()
            else:
                resume_token = target_dataset.properties['receive_resume_token']
                # not valid anymore
                resume_snapshot = self.get_resume_snapshot(resume_token)
                if not resume_snapshot or start_snapshot.snapshot_name != resume_snapshot.snapshot_name:
                    target_dataset.verbose("Aborting resume, it's no longer valid.")
                    target_dataset.abort_resume()
                else:
                    return resume_token

    def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
        """Determine at what snapshot to start syncing to target_dataset and what to sync and what to keep.

        Args:
            :rtype: ( ZfsDataset,  list[ZfsDataset], list[ZfsDataset], list[ZfsDataset], list[ZfsDataset] )
            :type target_dataset: ZfsDataset
            :type also_other_snapshots: bool
            :type guid_check: bool
            :type raw: bool

        Returns:
            tuple: A tuple containing:
                - ZfsDataset: The common snapshot
                - list[ZfsDataset]: Our obsolete source snapshots, after the transfer is done. (will be thinned asap)
                - list[ZfsDataset]: Our obsolete target snapshots, after the transfer is done. (will be thinned asap)
                - list[ZfsDataset]: Transfer target snapshots. These need to be transferred.
                - list[ZfsDataset]: Incompatible target snapshots. Target snapshots that are in the way, after the common snapshot. (need to be destroyed to continue)

        """

        ### 1: determine the common and start snapshot
        target_dataset.debug("Determining start snapshot")
        source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check)
        incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(source_common_snapshot, raw)

        # let the thinner decide what's obsolete on source after the transfer is done, keeping the last snapshot as common.
        source_obsoletes = []
        if self.our_snapshots:
            source_obsoletes = self.thin_list(keeps=[self.our_snapshots[-1]])[1]

        ### 2: Determine possible target snapshots

        # start with snapshots that already exist, minus incompatibles
        if target_dataset.exists:
            possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if
                                         snapshot not in incompatible_target_snapshots]
        else:
            possible_target_snapshots = []

        # add all snapshots from the source, starting after the common snapshot if it exists
        if source_common_snapshot:
            source_snapshot = self.find_next_snapshot(source_common_snapshot)
        else:
            if self.snapshots:
                source_snapshot = self.snapshots[0]
            else:
                source_snapshot = None

        while source_snapshot:
            # do we want it?
            if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded:
                # create virtual target snapshot
                target_snapshot = target_dataset.zfs_node.get_dataset(
                    target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
                possible_target_snapshots.append(target_snapshot)
            source_snapshot = self.find_next_snapshot(source_snapshot)

        ### 3: Let the thinner decide what it wants by looking at all the possible target_snapshots at once
        if possible_target_snapshots:
            (target_keeps, target_obsoletes) = target_dataset.zfs_node.thin_list(possible_target_snapshots,
                                                                                 keep_snapshots=[
                                                                                     possible_target_snapshots[-1]])
        else:
            target_keeps = []
            target_obsoletes = []

        ### 4: Look at what the thinner wants and create a list of snapshots we still need to transfer
        target_transfers = []
        for target_keep in target_keeps:
            if not target_keep.exists:
                target_transfers.append(target_keep)

        return source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers, incompatible_target_snapshots
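
    # Example (illustrative): if the source has @a, @b and @c, the target only has
    # @a, and the thinner wants to keep @b and @c, then _plan_sync() returns @a as
    # the common snapshot and the virtual (not-yet-existing) target snapshots @b
    # and @c as target_transfers.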

    def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_incompatible):
        """destroy incompatible snapshots on target before sync, or inform the
        user what to do

        Args:
            :type incompatible_target_snapshots: list[ZfsDataset]
            :type destroy_incompatible: bool
        """

        if incompatible_target_snapshots:
            if not destroy_incompatible:
                for snapshot in incompatible_target_snapshots:
                    snapshot.error("Incompatible snapshot")
                raise (Exception("Please destroy incompatible snapshots on target, or use --destroy-incompatible."))
            else:
                for snapshot in incompatible_target_snapshots:
                    snapshot.verbose("Incompatible snapshot")
                    snapshot.destroy(fail_exception=True)
                    self.snapshots.remove(snapshot)

                if len(incompatible_target_snapshots) > 0:
                    self.rollback()

    def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                       ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check):
        """sync this dataset's snapshots to target_dataset, while also thinning
        out old snapshots along the way.

        Args:
            :type send_pipes: list[str]
            :type recv_pipes: list[str]
            :type target_dataset: ZfsDataset
            :type features: list[str]
            :type show_progress: bool
            :type filter_properties: list[str]
            :type set_properties: list[str]
            :type ignore_recv_exit_code: bool
            :type holds: bool
            :type rollback: bool
            :type decrypt: bool
            :type encrypt: bool
            :type also_other_snapshots: bool
            :type no_send: bool
            :type destroy_incompatible: bool
            :type zfs_compressed: bool
            :type force: bool
            :type guid_check: bool
        """

        # self.verbose("-> {}".format(target_dataset))

        # defaults for these settings if there is no encryption stuff going on:
        send_properties = True
        raw = False
        write_embedded = True

        # source dataset encrypted?
        if self.properties.get('encryption', 'off') != 'off':
            # user wants to send it over decrypted?
            if decrypt:
                # when decrypting, zfs can't send properties
                send_properties = False
            else:
                # keep data encrypted by sending it raw (including properties)
                raw = True

        (source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers,
         incompatible_target_snapshots) = \
            self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
                            guid_check=guid_check, raw=raw)

        # NOTE: we do a pre-clean because we don't want filesystems to fill up when backups keep failing.
        # Also useful with no_send to still clean up stuff.
        self._pre_clean(
            source_common_snapshot=source_common_snapshot, target_dataset=target_dataset,
            target_transfers=target_transfers, target_obsoletes=target_obsoletes, source_obsoletes=source_obsoletes)

        # handle incompatible stuff on target
        target_dataset.handle_incompatible_snapshots(incompatible_target_snapshots, destroy_incompatible)

        # now actually transfer the snapshots, if we want
        if no_send or len(target_transfers) == 0:
            return

        # check if we can resume
        resume_token = self._validate_resume_token(target_dataset, target_transfers[0])

        (active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties,
                                                                                        set_properties)

        # always filter properties that start with 'autobackup:' (https://github.com/psy0rz/zfs_autobackup/issues/221)
        for property in self.properties:
            if property.startswith('autobackup:'):
                active_filter_properties.append(property)

        # encrypt at target?
        if encrypt and not raw:
            # filter out encryption properties to let encryption on the target take place
            active_filter_properties.extend(["keylocation", "pbkdf2iters", "keyformat", "encryption"])
            write_embedded = False

        # now actually transfer the snapshots

        do_rollback = rollback
        prev_source_snapshot = source_common_snapshot
        prev_target_snapshot = target_dataset.find_snapshot(source_common_snapshot)
        for target_snapshot in target_transfers:

            source_snapshot = self.find_snapshot(target_snapshot)

            # do the rollback, one time at first transfer
            if do_rollback:
                target_dataset.rollback()
                do_rollback = False

            source_snapshot.transfer_snapshot(target_snapshot, features=features,
                                              prev_snapshot=prev_source_snapshot, show_progress=show_progress,
                                              filter_properties=active_filter_properties,
                                              set_properties=active_set_properties,
                                              ignore_recv_exit_code=ignore_recv_exit_code,
                                              resume_token=resume_token, write_embedded=write_embedded, raw=raw,
                                              send_properties=send_properties, send_pipes=send_pipes,
                                              recv_pipes=recv_pipes, zfs_compressed=zfs_compressed, force=force)

            resume_token = None

            # hold the new common snapshots and release the previous ones
            if holds:
                target_snapshot.hold()
                source_snapshot.hold()

                if prev_source_snapshot:
                    prev_source_snapshot.release()

                if prev_target_snapshot:
                    prev_target_snapshot.release()

            # we may now destroy the previous source snapshot if it's obsolete
            if prev_source_snapshot in source_obsoletes:
                prev_source_snapshot.destroy()

            # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
            # the rest was already destroyed or will not be sent)
            if prev_target_snapshot in target_obsoletes:
                prev_target_snapshot.destroy()

            prev_source_snapshot = source_snapshot
            prev_target_snapshot = target_snapshot

            # source_snapshot = self.find_next_snapshot(source_snapshot, also_other_snapshots)

    def mount(self, mount_point):
        """mount the dataset at mount_point, using the legacy mount command"""

        self.debug("Mounting")

        cmd = [
            "mount", "-tzfs", self.name, mount_point
        ]

        self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

    def unmount(self, mount_point):
        """unmount the dataset from mount_point"""

        self.debug("Unmounting")

        cmd = [
            "umount", mount_point
        ]

        self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

    def clone(self, name):
        """clones this snapshot and returns the ZfsDataset of the clone"""

        self.debug("Cloning to {}".format(name))

        cmd = [
            "zfs", "clone", self.name, name
        ]

        self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

        return self.zfs_node.get_dataset(name, force_exists=True)

    def set(self, prop, value):
        """set a zfs property"""

        self.debug("Setting {}={}".format(prop, value))

        cmd = [
            "zfs", "set", "{}={}".format(prop, value), self.name
        ]

        self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

        # invalidate cache
        self.__properties = None

    def inherit(self, prop):
        """inherit zfs property"""

        self.debug("Inheriting property {}".format(prop))

        cmd = [
            "zfs", "inherit", prop, self.name
        ]

        self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

        # invalidate cache
        self.__properties = None
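
# Usage sketch (illustrative, not part of this module): ZfsDataset objects are
# normally obtained through a ZfsNode, which runs the actual zfs commands:
#
#   node = ZfsNode(...)                      # configured local or ssh node
#   ds = node.get_dataset("rpool/data")
#   if ds.exists and ds.is_changed():
#       print(ds.our_snapshots)              # snapshots created by zfs-autobackup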