OpenCOMPES / sed / build 5366169291 (pending completion)
Pull Request #132: Extend black and flake8 line length to 100 characters
(web-flow: merge b952e8ed5 into 9f4469f8c)

51 of 51 new or added lines in 14 files covered (100.0%)
2898 of 3842 relevant lines covered (75.43%)
2.26 hits per line

Source File: /sed/core/processor.py (39.94% covered)
"""This module contains the core class for the sed package."""
import pathlib
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Sequence
from typing import Tuple
from typing import Union

import dask.dataframe as ddf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import psutil
import xarray as xr

from sed.binning import bin_dataframe
from sed.calibrator import DelayCalibrator
from sed.calibrator import EnergyCalibrator
from sed.calibrator import MomentumCorrector
from sed.config import parse_config
from sed.core.dfops import apply_jitter
from sed.core.metadata import MetaHandler
from sed.diagnostics import grid_histogram
from sed.io import to_h5
from sed.io import to_nexus
from sed.io import to_tiff
from sed.loader import CopyTool
from sed.loader import get_loader

N_CPU = psutil.cpu_count()

class SedProcessor:
    """Processor class of sed. Contains wrapper functions defining a workflow for data
    correction, calibration and binning.

    Args:
        metadata (dict, optional): Dict of external Metadata. Defaults to None.
        config (Union[dict, str], optional): Config dictionary or config file name.
            Defaults to None.
        dataframe (Union[pd.DataFrame, ddf.DataFrame], optional): dataframe to load
            into the class. Defaults to None.
        files (List[str], optional): List of files to pass to the loader defined in
            the config. Defaults to None.
        folder (str, optional): Folder containing files to pass to the loader
            defined in the config. Defaults to None.
        runs (Sequence[str], optional): List of run identifiers to pass to the loader
            defined in the config. Defaults to None.
        collect_metadata (bool): Option to collect metadata from files.
            Defaults to False.
        **kwds: Keyword arguments passed to the reader.
    """

    def __init__(
        self,
        metadata: dict = None,
        config: Union[dict, str] = None,
        dataframe: Union[pd.DataFrame, ddf.DataFrame] = None,
        files: List[str] = None,
        folder: str = None,
        runs: Sequence[str] = None,
        collect_metadata: bool = False,
        **kwds,
    ):
        """Processor class of sed. Contains wrapper functions defining a workflow
        for data correction, calibration, and binning.

        Args:
            metadata (dict, optional): Dict of external Metadata. Defaults to None.
            config (Union[dict, str], optional): Config dictionary or config file name.
                Defaults to None.
            dataframe (Union[pd.DataFrame, ddf.DataFrame], optional): dataframe to load
                into the class. Defaults to None.
            files (List[str], optional): List of files to pass to the loader defined in
                the config. Defaults to None.
            folder (str, optional): Folder containing files to pass to the loader
                defined in the config. Defaults to None.
            runs (Sequence[str], optional): List of run identifiers to pass to the loader
                defined in the config. Defaults to None.
            collect_metadata (bool): Option to collect metadata from files.
                Defaults to False.
            **kwds: Keyword arguments passed to the reader.
        """
        self._config = parse_config(config)
        num_cores = self._config.get("binning", {}).get("num_cores", N_CPU - 1)
        if num_cores >= N_CPU:
            num_cores = N_CPU - 1
        self._config["binning"]["num_cores"] = num_cores

        self._dataframe: Union[pd.DataFrame, ddf.DataFrame] = None
        self._files: List[str] = []

        self._binned: xr.DataArray = None
        self._pre_binned: xr.DataArray = None

        self._dimensions: List[str] = []
        self._coordinates: Dict[Any, Any] = {}
        self.axis: Dict[Any, Any] = {}
        self._attributes = MetaHandler(meta=metadata)

        loader_name = self._config["core"]["loader"]
        self.loader = get_loader(
            loader_name=loader_name,
            config=self._config,
        )

        self.ec = EnergyCalibrator(
            loader=self.loader,
            config=self._config,
        )

        self.mc = MomentumCorrector(
            config=self._config,
        )

        self.dc = DelayCalibrator(
            config=self._config,
        )

        self.use_copy_tool = self._config.get("core", {}).get(
            "use_copy_tool",
            False,
        )
        if self.use_copy_tool:
            try:
                self.ct = CopyTool(
                    source=self._config["core"]["copy_tool_source"],
                    dest=self._config["core"]["copy_tool_dest"],
                    **self._config["core"].get("copy_tool_kwds", {}),
                )
            except KeyError:
                self.use_copy_tool = False

        # Load data if provided:
        if dataframe is not None or files is not None or folder is not None or runs is not None:
            self.load(
                dataframe=dataframe,
                metadata=metadata,
                files=files,
                folder=folder,
                runs=runs,
                collect_metadata=collect_metadata,
                **kwds,
            )
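
    # A minimal construction sketch (hypothetical config file name and data folder;
    # kept as a comment so the module stays importable). The loader, calibrators,
    # and optional copy tool are configured from the parsed config, and any data
    # source passed to the constructor is loaded right away:
    #
    #     processor = SedProcessor(config="sed_config.yaml")
    #     processor = SedProcessor(config="sed_config.yaml", folder="/data/beamtime/raw")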

    def __repr__(self):
        if self._dataframe is None:
            df_str = "Data Frame: No Data loaded"
        else:
            df_str = self._dataframe.__repr__()
        coordinates_str = f"Coordinates: {self._coordinates}"
        dimensions_str = f"Dimensions: {self._dimensions}"
        pretty_str = df_str + "\n" + coordinates_str + "\n" + dimensions_str
        return pretty_str

    def __getitem__(self, val: str) -> pd.DataFrame:
        """Accessor to the underlying data structure.

        Args:
            val (str): Name of the dataframe column to retrieve.

        Returns:
            pd.DataFrame: Selected dataframe column.
        """
        return self._dataframe[val]

    @property
    def config(self) -> Dict[Any, Any]:
        """Getter attribute for the config dictionary.

        Returns:
            Dict: The config dictionary.
        """
        return self._config

    @config.setter
    def config(self, config: Union[dict, str]):
        """Setter function for the config dictionary.

        Args:
            config (Union[dict, str]): Config dictionary or path of config file
                to load.
        """
        self._config = parse_config(config)
        num_cores = self._config.get("binning", {}).get("num_cores", N_CPU - 1)
        if num_cores >= N_CPU:
            num_cores = N_CPU - 1
        self._config["binning"]["num_cores"] = num_cores

    @property
    def dimensions(self) -> list:
        """Getter attribute for the dimensions.

        Returns:
            list: List of dimensions.
        """
        return self._dimensions

    @dimensions.setter
    def dimensions(self, dims: list):
        """Setter function for the dimensions.

        Args:
            dims (list): List of dimensions to set.
        """
        assert isinstance(dims, list)
        self._dimensions = dims

    @property
    def coordinates(self) -> dict:
        """Getter attribute for the coordinates dict.

        Returns:
            dict: Dictionary of coordinates.
        """
        return self._coordinates

    @coordinates.setter
    def coordinates(self, coords: dict):
        """Setter function for the coordinates dict.

        Args:
            coords (dict): Dictionary of coordinates.
        """
        assert isinstance(coords, dict)
        self._coordinates = {}
        for k, v in coords.items():
            self._coordinates[k] = xr.DataArray(v)

    def cpy(self, path: Union[str, List[str]]) -> Union[str, List[str]]:
        """Function to mirror a list of files or a folder from a network drive to a
        local storage. Returns either the original or the copied path to the given
        path. The option to use this functionality is set by
        config["core"]["use_copy_tool"].

        Args:
            path (Union[str, List[str]]): Source path or path list.

        Returns:
            Union[str, List[str]]: Source or destination path or path list.
        """
        if self.use_copy_tool:
            if isinstance(path, list):
                path_out = []
                for file in path:
                    path_out.append(self.ct.copy(file))
                return path_out

            return self.ct.copy(path)

        return path

    def load(
        self,
        dataframe: Union[pd.DataFrame, ddf.DataFrame] = None,
        metadata: dict = None,
        files: List[str] = None,
        folder: str = None,
        runs: Sequence[str] = None,
        collect_metadata: bool = False,
        **kwds,
    ):
        """Load tabular data of single events into the dataframe object in the class.

        Args:
            dataframe (Union[pd.DataFrame, ddf.DataFrame], optional): data in tabular
                format. Accepts anything which can be interpreted by pd.DataFrame as
                an input. Defaults to None.
            metadata (dict, optional): Dict of external Metadata. Defaults to None.
            files (List[str], optional): List of file paths to pass to the loader.
                Defaults to None.
            folder (str, optional): Folder path to pass to the loader.
                Defaults to None.
            runs (Sequence[str], optional): List of run identifiers to pass to the
                loader. Defaults to None.
            collect_metadata (bool, optional): Option to collect metadata from files.
                Defaults to False.
            **kwds: Keyword arguments passed to the loader.

        Raises:
            ValueError: Raised if no valid input is provided.
        """
        if metadata is None:
            metadata = {}
        if dataframe is not None:
            self._dataframe = dataframe
        elif runs is not None:
            # If runs are provided, we only use the copy tool if a folder is also provided.
            # In that case, we copy the whole provided base folder tree, and pass the copied
            # version to the loader as the base folder to look for the runs.
            if folder is not None:
                dataframe, metadata = self.loader.read_dataframe(
                    folders=cast(str, self.cpy(folder)),
                    runs=runs,
                    metadata=metadata,
                    collect_metadata=collect_metadata,
                    **kwds,
                )
            else:
                dataframe, metadata = self.loader.read_dataframe(
                    runs=runs,
                    metadata=metadata,
                    collect_metadata=collect_metadata,
                    **kwds,
                )

        elif folder is not None:
            dataframe, metadata = self.loader.read_dataframe(
                folders=cast(str, self.cpy(folder)),
                metadata=metadata,
                collect_metadata=collect_metadata,
                **kwds,
            )

        elif files is not None:
            dataframe, metadata = self.loader.read_dataframe(
                files=cast(List[str], self.cpy(files)),
                metadata=metadata,
                collect_metadata=collect_metadata,
                **kwds,
            )

        else:
            raise ValueError(
                "Either 'dataframe', 'files', 'folder', or 'runs' needs to be provided!",
            )

        self._dataframe = dataframe
        self._files = self.loader.files

        for key in metadata:
            self._attributes.add(
                entry=metadata[key],
                name=key,
                duplicate_policy="merge",
            )
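
    # Usage sketch for the different loading modes (hypothetical paths and run
    # identifiers; exactly one of dataframe/files/folder/runs is required):
    #
    #     processor.load(files=["scan_001.h5", "scan_002.h5"])
    #     processor.load(folder="/data/beamtime/raw")
    #     processor.load(runs=["run_0012", "run_0013"], folder="/data/beamtime/raw")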

    # Momentum calibration workflow
    # 1. Bin raw detector data for distortion correction
    def bin_and_load_momentum_calibration(
        self,
        df_partitions: int = 100,
        axes: List[str] = None,
        bins: List[int] = None,
        ranges: Sequence[Tuple[float, float]] = None,
        plane: int = 0,
        width: int = 5,
        apply: bool = False,
        **kwds,
    ):
        """Step 1 of the momentum correction workflow: Do an initial binning of the
        dataframe loaded to the class, slice a plane from it using an interactive
        view, and load it into the momentum corrector class.

        Args:
            df_partitions (int, optional): Number of dataframe partitions to use for
                the initial binning. Defaults to 100.
            axes (List[str], optional): Axes to bin.
                Defaults to config["momentum"]["axes"].
            bins (List[int], optional): Bin numbers to use for binning.
                Defaults to config["momentum"]["bins"].
            ranges (List[Tuple], optional): Ranges to use for binning.
                Defaults to config["momentum"]["ranges"].
            plane (int, optional): Initial value for the plane slider. Defaults to 0.
            width (int, optional): Initial value for the width slider. Defaults to 5.
            apply (bool, optional): Option to directly apply the values and select the
                slice. Defaults to False.
            **kwds: Keyword arguments passed to the ``pre_binning`` function.
        """
        self._pre_binned = self.pre_binning(
            df_partitions=df_partitions,
            axes=axes,
            bins=bins,
            ranges=ranges,
            **kwds,
        )

        self.mc.load_data(data=self._pre_binned)
        self.mc.select_slicer(plane=plane, width=width, apply=apply)
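
    # Sketch of the interactive slicing step (hypothetical slider values; without
    # apply=True this only opens the plane/width selector):
    #
    #     processor.bin_and_load_momentum_calibration(plane=33, width=10, apply=True)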

    # 2. Generate the spline warp correction from momentum features.
    # Either auto-select features, or input features from the view above.
    def generate_splinewarp(
        self,
        features: np.ndarray = None,
        rotation_symmetry: int = 6,
        auto_detect: bool = False,
        include_center: bool = True,
        **kwds,
    ):
        """Step 2 of the distortion correction workflow: Detect feature points in
        momentum space, or assign the provided feature points, and generate a
        correction function restoring the symmetry in the image using a spline warp
        algorithm.

        Args:
            features (np.ndarray, optional): np.ndarray of features. Defaults to None.
            rotation_symmetry (int, optional): Number of rotational symmetry axes.
                Defaults to 6.
            auto_detect (bool, optional): Whether to auto-detect the features.
                Defaults to False.
            include_center (bool, optional): Option to fix the position of the center
                point for the correction. Defaults to True.
            **kwds: Keyword arguments passed to the feature detection and spline warp
                estimation functions of the momentum corrector class.
        """
        if auto_detect:  # automatic feature selection
            sigma = kwds.pop(
                "sigma",
                self._config.get("momentum", {}).get("sigma", 5),
            )
            fwhm = kwds.pop(
                "fwhm",
                self._config.get("momentum", {}).get("fwhm", 8),
            )
            sigma_radius = kwds.pop(
                "sigma_radius",
                self._config.get("momentum", {}).get("sigma_radius", 1),
            )
            self.mc.feature_extract(
                sigma=sigma,
                fwhm=fwhm,
                sigma_radius=sigma_radius,
                rotsym=rotation_symmetry,
                **kwds,
            )
        else:  # manual feature selection
            self.mc.add_features(
                features=features,
                rotsym=rotation_symmetry,
                **kwds,
            )

        self.mc.spline_warp_estimate(include_center=include_center, **kwds)

        if self.mc.slice is not None:
            print("Original slice with reference features")
            self.mc.view(annotated=True, backend="bokeh", crosshair=True)

            print("Corrected slice with target features")
            self.mc.view(
                image=self.mc.slice_corrected,
                annotated=True,
                points={"feats": self.mc.ptargs},
                backend="bokeh",
                crosshair=True,
            )

            print("Original slice with target features")
            self.mc.view(
                image=self.mc.slice,
                points={"feats": self.mc.ptargs},
                annotated=True,
                backend="bokeh",
            )
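
    # Sketch with manually provided feature points (hypothetical pixel coordinates
    # for a sixfold-symmetric pattern: six vertices plus the center point):
    #
    #     features = np.array(
    #         [[203, 341], [299, 383], [350, 307], [318, 227], [211, 228], [159, 302], [248, 310]],
    #     )
    #     processor.generate_splinewarp(features=features, rotation_symmetry=6)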

    # 3. Pose corrections. Provide an interactive interface for correcting
    # scaling, shift and rotation
    def pose_adjustment(
        self,
        scale: float = 1,
        xtrans: float = 0,
        ytrans: float = 0,
        angle: float = 0,
        apply: bool = False,
        use_correction: bool = True,
    ):
        """Step 3 of the distortion correction workflow: Generate an interactive panel
        to adjust affine transformations that are applied to the image. Applies first
        a scaling, next an x/y translation, and last a rotation around the center of
        the image.

        Args:
            scale (float, optional): Initial value of the scaling slider.
                Defaults to 1.
            xtrans (float, optional): Initial value of the xtrans slider.
                Defaults to 0.
            ytrans (float, optional): Initial value of the ytrans slider.
                Defaults to 0.
            angle (float, optional): Initial value of the angle slider.
                Defaults to 0.
            apply (bool, optional): Option to directly apply the provided
                transformations. Defaults to False.
            use_correction (bool, optional): Whether to use the spline warp correction
                or not. Defaults to True.
        """
        # Generate homography as default if no distortion correction has been applied
        if self.mc.slice_corrected is None:
            if self.mc.slice is None:
                raise ValueError(
                    "No slice for corrections and transformations loaded!",
                )
            self.mc.slice_corrected = self.mc.slice

        if self.mc.cdeform_field is None or self.mc.rdeform_field is None:
            # Generate default distortion correction
            self.mc.add_features()
            self.mc.spline_warp_estimate()

        if not use_correction:
            self.mc.reset_deformation()

        self.mc.pose_adjustment(
            scale=scale,
            xtrans=xtrans,
            ytrans=ytrans,
            angle=angle,
            apply=apply,
        )

    def apply_momentum_correction(
        self,
        preview: bool = False,
    ):
        """Applies the distortion correction and pose adjustment (optional)
        to the dataframe.

        Args:
            preview (bool): Option to preview the first elements of the data frame.
        """
        if self._dataframe is not None:
            print("Adding corrected X/Y columns to dataframe:")
            self._dataframe, metadata = self.mc.apply_corrections(
                df=self._dataframe,
            )
            # Add Metadata
            self._attributes.add(
                metadata,
                "momentum_correction",
                duplicate_policy="merge",
            )
            if preview:
                print(self._dataframe.head(10))
            else:
                print(self._dataframe)
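
    # Sketch of applying the stored correction to the single-event dataframe
    # (assumes the spline warp and pose adjustment above have been generated):
    #
    #     processor.apply_momentum_correction(preview=True)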

    # 4. Calculate the momentum calibration
    def calibrate_momentum_axes(
        self,
        point_a: Union[np.ndarray, List[int]] = None,
        point_b: Union[np.ndarray, List[int]] = None,
        k_distance: float = None,
        k_coord_a: Union[np.ndarray, List[float]] = None,
        k_coord_b: Union[np.ndarray, List[float]] = np.array([0.0, 0.0]),
        equiscale: bool = True,
        apply=False,
    ):
        """Step 4 of the momentum correction/calibration workflow: Calibrate the
        momentum axes using either provided pixel coordinates of a high-symmetry point
        and its distance to the BZ center, or the k-coordinates of two points in the BZ
        (depending on the equiscale option). Opens an interactive panel for selecting
        the points.

        Args:
            point_a (Union[np.ndarray, List[int]]): Pixel coordinates of the first
                point used for momentum calibration.
            point_b (Union[np.ndarray, List[int]], optional): Pixel coordinates of the
                second point used for momentum calibration.
                Defaults to config["momentum"]["center_pixel"].
            k_distance (float, optional): Momentum distance between point a and b.
                Needs to be provided if no specific k-coordinates for the two points
                are given. Defaults to None.
            k_coord_a (Union[np.ndarray, List[float]], optional): Momentum coordinate
                of the first point used for calibration. Used if equiscale is False.
                Defaults to None.
            k_coord_b (Union[np.ndarray, List[float]], optional): Momentum coordinate
                of the second point used for calibration. Defaults to [0.0, 0.0].
            equiscale (bool, optional): Option to use the same scale for kx and ky.
                If True, the distance between points a and b, and the absolute
                position of point a are used for defining the scale. If False, the
                scale is calculated from the k-positions of both points a and b.
                Defaults to True.
            apply (bool, optional): Option to directly store the momentum calibration
                in the class. Defaults to False.
        """
        if point_b is None:
            point_b = self._config.get("momentum", {}).get(
                "center_pixel",
                [256, 256],
            )

        self.mc.select_k_range(
            point_a=point_a,
            point_b=point_b,
            k_distance=k_distance,
            k_coord_a=k_coord_a,
            k_coord_b=k_coord_b,
            equiscale=equiscale,
            apply=apply,
        )
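
    # Sketch of the k-axis calibration (hypothetical pixel coordinates and momentum
    # distance of a high-symmetry point from the BZ center):
    #
    #     processor.calibrate_momentum_axes(point_a=[308, 345], k_distance=1.28, apply=True)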

    # 5. Apply correction and calibration to the dataframe
    def apply_momentum_calibration(
        self,
        calibration: dict = None,
        preview: bool = False,
    ):
        """Step 5 of the momentum calibration/distortion correction workflow: Apply
        any distortion correction and/or pose adjustment stored in the MomentumCorrector
        class and the momentum calibration to the dataframe.

        Args:
            calibration (dict, optional): Optional dictionary with calibration data to
                use. Defaults to None.
            preview (bool): Option to preview the first elements of the data frame.
        """
        if self._dataframe is not None:
            print("Adding kx/ky columns to dataframe:")
            self._dataframe, metadata = self.mc.append_k_axis(
                df=self._dataframe,
                calibration=calibration,
            )

            # Add Metadata
            self._attributes.add(
                metadata,
                "momentum_calibration",
                duplicate_policy="merge",
            )
            if preview:
                print(self._dataframe.head(10))
            else:
                print(self._dataframe)

    # Energy correction workflow
    # 1. Adjust the energy correction parameters
    def adjust_energy_correction(
        self,
        correction_type: str = None,
        amplitude: float = None,
        center: Tuple[float, float] = None,
        apply=False,
        **kwds,
    ):
        """Step 1 of the energy correction workflow: Opens an interactive plot to
        adjust the parameters for the TOF/energy correction. Also pre-bins the data if
        they are not present yet.

        Args:
            correction_type (str, optional): Type of correction to apply to the TOF
                axis. Valid values are:

                - 'spherical'
                - 'Lorentzian'
                - 'Gaussian'
                - 'Lorentzian_asymmetric'

                Defaults to config["energy"]["correction_type"].
            amplitude (float, optional): Amplitude of the correction.
                Defaults to config["energy"]["correction"]["amplitude"].
            center (Tuple[float, float], optional): Center X/Y coordinates for the
                correction. Defaults to config["energy"]["correction"]["center"].
            apply (bool, optional): Option to directly apply the provided or default
                correction parameters. Defaults to False.
            **kwds: Keyword arguments passed to
                ``EnergyCalibrator.adjust_energy_correction``.
        """
        if self._pre_binned is None:
            print(
                "Pre-binned data not present, binning using defaults from config...",
            )
            self._pre_binned = self.pre_binning()

        self.ec.adjust_energy_correction(
            self._pre_binned,
            correction_type=correction_type,
            amplitude=amplitude,
            center=center,
            apply=apply,
            **kwds,
        )

    # 2. Apply energy correction to dataframe
    def apply_energy_correction(
        self,
        correction: dict = None,
        preview: bool = False,
        **kwds,
    ):
        """Step 2 of the energy correction workflow: Apply the energy correction
        parameters stored in the class to the dataframe.

        Args:
            correction (dict, optional): Dictionary containing the correction
                parameters. Defaults to config["energy"]["calibration"].
            preview (bool): Option to preview the first elements of the data frame.
            **kwds:
                Keyword args passed to ``EnergyCalibrator.apply_energy_correction``.
        """
        if self._dataframe is not None:
            print("Applying energy correction to dataframe...")
            self._dataframe, metadata = self.ec.apply_energy_correction(
                df=self._dataframe,
                correction=correction,
                **kwds,
            )

            # Add Metadata
            self._attributes.add(
                metadata,
                "energy_correction",
            )
            if preview:
                print(self._dataframe.head(10))
            else:
                print(self._dataframe)
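
    # Sketch of the TOF/energy correction round trip (hypothetical amplitude and
    # detector center; adjust interactively, then write the corrected column):
    #
    #     processor.adjust_energy_correction(amplitude=2.5, center=(730, 730), apply=True)
    #     processor.apply_energy_correction(preview=True)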

    # Energy calibration workflow
    # 1. Load and normalize data
    def load_bias_series(
        self,
        data_files: List[str],
        axes: List[str] = None,
        bins: List = None,
        ranges: Sequence[Tuple[float, float]] = None,
        biases: np.ndarray = None,
        bias_key: str = None,
        normalize: bool = None,
        span: int = None,
        order: int = None,
    ):
        """Step 1 of the energy calibration workflow: Load and bin data from
        single-event files.

        Args:
            data_files (List[str]): list of file paths to bin
            axes (List[str], optional): bin axes.
                Defaults to config["dataframe"]["tof_column"].
            bins (List, optional): number of bins.
                Defaults to config["energy"]["bins"].
            ranges (Sequence[Tuple[float, float]], optional): bin ranges.
                Defaults to config["energy"]["ranges"].
            biases (np.ndarray, optional): Bias voltages used. If missing, bias
                voltages are extracted from the data files.
            bias_key (str, optional): hdf5 path where bias values are stored.
                Defaults to config["energy"]["bias_key"].
            normalize (bool, optional): Option to normalize traces.
                Defaults to config["energy"]["normalize"].
            span (int, optional): span smoothing parameter of the LOESS method
                (see ``scipy.signal.savgol_filter()``).
                Defaults to config["energy"]["normalize_span"].
            order (int, optional): order smoothing parameter of the LOESS method
                (see ``scipy.signal.savgol_filter()``).
                Defaults to config["energy"]["normalize_order"].
        """
        self.ec.bin_data(
            data_files=cast(List[str], self.cpy(data_files)),
            axes=axes,
            bins=bins,
            ranges=ranges,
            biases=biases,
            bias_key=bias_key,
        )
        if (normalize is not None and normalize is True) or (
            normalize is None and self._config.get("energy", {}).get("normalize", True)
        ):
            if span is None:
                span = self._config.get("energy", {}).get("normalize_span", 7)
            if order is None:
                order = self._config.get("energy", {}).get(
                    "normalize_order",
                    1,
                )
            self.ec.normalize(smooth=True, span=span, order=order)
        self.ec.view(
            traces=self.ec.traces_normed,
            xaxis=self.ec.tof,
            backend="bokeh",
        )
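
    # Sketch of loading a bias series (hypothetical calibration files; bias voltages
    # are read from the files unless passed explicitly via biases=...):
    #
    #     files = ["bias_22V.h5", "bias_23V.h5", "bias_24V.h5"]
    #     processor.load_bias_series(data_files=files, normalize=True)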

    # 2. Extract ranges and get peak positions
    def find_bias_peaks(
        self,
        ranges: Union[List[Tuple], Tuple],
        ref_id: int = 0,
        infer_others: bool = True,
        mode: str = "replace",
        radius: int = None,
        peak_window: int = None,
    ):
        """Step 2 of the energy calibration workflow: Find a peak within a given range
        for the indicated reference trace, and try to find the same peak for all
        other traces. Uses fast_dtw to align the curves, which may perform poorly if
        the shape of the curves changes qualitatively. Ideally, choose a reference
        trace in the middle of the set, and don't choose the range too narrow around
        the peak. Alternatively, a list of ranges for all traces can be provided.

        Args:
            ranges (Union[List[Tuple], Tuple]): Tuple of TOF values indicating a range.
                Alternatively, a list of ranges for all traces can be given.
            ref_id (int, optional): The id of the trace the range refers to.
                Defaults to 0.
            infer_others (bool, optional): Whether to determine the range for the other
                traces. Defaults to True.
            mode (str, optional): Whether to "add" or "replace" existing ranges.
                Defaults to "replace".
            radius (int, optional): Radius parameter for fast_dtw.
                Defaults to config["energy"]["fastdtw_radius"].
            peak_window (int, optional): Peak_window parameter for the peak detection
                algorithm: the number of points that have to behave monotonically
                around a peak. Defaults to config["energy"]["peak_window"].
        """
        if radius is None:
            radius = self._config.get("energy", {}).get("fastdtw_radius", 2)
        self.ec.add_features(
            ranges=ranges,
            ref_id=ref_id,
            infer_others=infer_others,
            mode=mode,
            radius=radius,
        )
        self.ec.view(
            traces=self.ec.traces_normed,
            segs=self.ec.featranges,
            xaxis=self.ec.tof,
            backend="bokeh",
        )
        print(self.ec.featranges)
        if peak_window is None:
            peak_window = self._config.get("energy", {}).get("peak_window", 7)
        try:
            self.ec.feature_extract(peak_window=peak_window)
            self.ec.view(
                traces=self.ec.traces_normed,
                peaks=self.ec.peaks,
                backend="bokeh",
            )
        except IndexError:
            print("Could not determine all peaks!")
            raise
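
    # Sketch of the range selection (hypothetical TOF window around the reference
    # peak; a single tuple is propagated to all traces when infer_others=True):
    #
    #     processor.find_bias_peaks(ranges=(64500, 66000), ref_id=5)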

    # 3. Fit the energy calibration relation
    def calibrate_energy_axis(
        self,
        ref_id: int,
        ref_energy: float,
        method: str = None,
        energy_scale: str = None,
        **kwds,
    ):
        """Step 3 of the energy calibration workflow: Calculate the calibration
        function for the energy axis, and apply it to the dataframe. Two
        approximations are implemented, a (normally 3rd order) polynomial
        approximation, and a d^2/(t-t0)^2 relation.

        Args:
            ref_id (int): id of the trace at the bias where the reference energy is
                given.
            ref_energy (float): Absolute energy of the detected feature at the bias
                of ref_id.
            method (str, optional): Method for determining the energy calibration.

                - **'lmfit'**: Energy calibration using lmfit and 1/t^2 form.
                - **'lstsq'**, **'lsqr'**: Energy calibration using polynomial form.

                Defaults to config["energy"]["calibration_method"].
            energy_scale (str, optional): Direction of increasing energy scale.

                - **'kinetic'**: increasing energy with decreasing TOF.
                - **'binding'**: increasing energy with increasing TOF.

                Defaults to config["energy"]["energy_scale"].
            **kwds: Keyword arguments passed to ``EnergyCalibrator.calibrate``.
        """
        if method is None:
            method = self._config.get("energy", {}).get(
                "calibration_method",
                "lmfit",
            )

        if energy_scale is None:
            energy_scale = self._config.get("energy", {}).get(
                "energy_scale",
                "kinetic",
            )

        self.ec.calibrate(
            ref_id=ref_id,
            ref_energy=ref_energy,
            method=method,
            energy_scale=energy_scale,
            **kwds,
        )
        print("Quality of Calibration:")
        self.ec.view(
            traces=self.ec.traces_normed,
            xaxis=self.ec.calibration["axis"],
            align=True,
            energy_scale=energy_scale,
            backend="bokeh",
        )
        print("E/TOF relationship:")
        self.ec.view(
            traces=self.ec.calibration["axis"][None, :],
            xaxis=self.ec.tof,
            backend="matplotlib",
            show_legend=False,
        )
        if energy_scale == "kinetic":
            plt.scatter(
                self.ec.peaks[:, 0],
                -(self.ec.biases - self.ec.biases[ref_id]) + ref_energy,
                s=50,
                c="k",
            )
        elif energy_scale == "binding":
            plt.scatter(
                self.ec.peaks[:, 0],
                self.ec.biases - self.ec.biases[ref_id] + ref_energy,
                s=50,
                c="k",
            )
        else:
            raise ValueError(
                f'energy_scale needs to be either "binding" or "kinetic", got {energy_scale}.',
            )
        plt.xlabel("Time-of-flight", fontsize=15)
        plt.ylabel("Energy (eV)", fontsize=15)
        plt.show()
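
    # Sketch of fitting and applying the calibration (hypothetical reference: the
    # feature in trace 5 pinned to E=0, using the lmfit 1/t^2 model):
    #
    #     processor.calibrate_energy_axis(ref_id=5, ref_energy=0.0, method="lmfit")
    #     processor.append_energy_axis(preview=True)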

    # 4. Apply energy calibration to the dataframe
    def append_energy_axis(
        self,
        calibration: dict = None,
        preview: bool = False,
        **kwds,
    ):
        """Step 4 of the energy calibration workflow: Apply the calibration function
        to the dataframe. Two approximations are implemented, a (normally 3rd order)
        polynomial approximation, and a d^2/(t-t0)^2 relation. A calibration
        dictionary can be provided.

        Args:
            calibration (dict, optional): Calibration dict containing calibration
                parameters. Overrides calibration from class or config.
                Defaults to None.
            preview (bool): Option to preview the first elements of the data frame.
            **kwds:
                Keyword args passed to ``EnergyCalibrator.append_energy_axis``.
        """
        if self._dataframe is not None:
            print("Adding energy column to dataframe:")
            self._dataframe, metadata = self.ec.append_energy_axis(
                df=self._dataframe,
                calibration=calibration,
                **kwds,
            )

            # Add Metadata
            self._attributes.add(
                metadata,
                "energy_calibration",
                duplicate_policy="merge",
            )
            if preview:
                print(self._dataframe.head(10))
            else:
                print(self._dataframe)

    # Delay calibration function
    def calibrate_delay_axis(
        self,
        delay_range: Tuple[float, float] = None,
        datafile: str = None,
        preview: bool = False,
        **kwds,
    ):
        """Append a delay column to the dataframe. Either provide a delay range, or
        read the delay ranges from a file.

        Args:
            delay_range (Tuple[float, float], optional): The scanned delay range in
                picoseconds. Defaults to None.
            datafile (str, optional): The file from which to read the delay ranges.
                Defaults to None.
            preview (bool): Option to preview the first elements of the data frame.
            **kwds: Keyword args passed to ``DelayCalibrator.append_delay_axis``.
        """
        if self._dataframe is not None:
            print("Adding delay column to dataframe:")

            if delay_range is not None:
                self._dataframe, metadata = self.dc.append_delay_axis(
                    self._dataframe,
                    delay_range=delay_range,
                    **kwds,
                )
            else:
                if datafile is None:
                    try:
                        datafile = self._files[0]
                    except IndexError:
                        print(
                            "No datafile available, specify either",
                            " 'datafile' or 'delay_range'",
                        )
                        raise

                self._dataframe, metadata = self.dc.append_delay_axis(
                    self._dataframe,
                    datafile=datafile,
                    **kwds,
                )

            # Add Metadata
            self._attributes.add(
                metadata,
                "delay_calibration",
                duplicate_policy="merge",
            )
            if preview:
                print(self._dataframe.head(10))
            else:
                print(self._dataframe)
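
    # Sketch of the delay calibration (hypothetical scan range in picoseconds;
    # alternatively pass datafile=... to read the range from a raw file):
    #
    #     processor.calibrate_delay_axis(delay_range=(-500.0, 1500.0), preview=True)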

    def add_jitter(self, cols: Sequence[str] = None):
        """Add jitter to the selected dataframe columns.

        Args:
            cols (Sequence[str], optional): The columns onto which to apply jitter.
                Defaults to config["dataframe"]["jitter_cols"].
        """
        if cols is None:
            cols = self._config.get("dataframe", {}).get(
                "jitter_cols",
                self._dataframe.columns,
            )  # jitter all columns

        self._dataframe = self._dataframe.map_partitions(
            apply_jitter,
            cols=cols,
            cols_jittered=cols,
        )
        metadata = []
        for col in cols:
            metadata.append(col)
        self._attributes.add(metadata, "jittering", duplicate_policy="append")

    def pre_binning(
        self,
        df_partitions: int = 100,
        axes: List[str] = None,
        bins: List[int] = None,
        ranges: Sequence[Tuple[float, float]] = None,
        **kwds,
    ) -> xr.DataArray:
        """Function to do an initial binning of the dataframe loaded to the class.

        Args:
            df_partitions (int, optional): Number of dataframe partitions to use for
                the initial binning. Defaults to 100.
            axes (List[str], optional): Axes to bin.
                Defaults to config["momentum"]["axes"].
            bins (List[int], optional): Bin numbers to use for binning.
                Defaults to config["momentum"]["bins"].
            ranges (List[Tuple], optional): Ranges to use for binning.
                Defaults to config["momentum"]["ranges"].
            **kwds: Keyword arguments passed to ``compute``.

        Returns:
            xr.DataArray: pre-binned data-array.
        """
        if axes is None:
            axes = self._config.get("momentum", {}).get(
                "axes",
                ["@x_column", "@y_column", "@tof_column"],
            )
        for loc, axis in enumerate(axes):
            if axis.startswith("@"):
                axes[loc] = self._config.get("dataframe").get(axis.strip("@"))

        if bins is None:
            bins = self._config.get("momentum", {}).get(
                "bins",
                [512, 512, 300],
            )
        if ranges is None:
            ranges_ = self._config.get("momentum", {}).get(
                "ranges",
                [[-256, 1792], [-256, 1792], [128000, 138000]],
            )
            ranges = [cast(Tuple[float, float], tuple(v)) for v in ranges_]

        assert self._dataframe is not None, "dataframe needs to be loaded first!"

        return self.compute(
            bins=bins,
            axes=axes,
            ranges=ranges,
            df_partitions=df_partitions,
            **kwds,
        )

    def compute(
        self,
        bins: Union[
            int,
            dict,
            tuple,
            List[int],
            List[np.ndarray],
            List[tuple],
        ] = 100,
        axes: Union[str, Sequence[str]] = None,
        ranges: Sequence[Tuple[float, float]] = None,
        **kwds,
    ) -> xr.DataArray:
        """Compute the histogram along the given dimensions.

        Args:
            bins (int, dict, tuple, List[int], List[np.ndarray], List[tuple], optional):
                Definition of the bins. Can be any of the following cases:

                - an integer describing the number of bins in all dimensions
                - a tuple of 3 numbers describing start, end and step of the binning
                  range
                - an np.ndarray defining the bin edges
                - a list (NOT a tuple) of any of the above (int, tuple or np.ndarray)
                - a dictionary made of the axes as keys and any of the above as values.

                This takes priority over the axes and range arguments. Defaults to 100.
            axes (Union[str, Sequence[str]], optional): The names of the axes (columns)
                on which to calculate the histogram. The order will be the order of the
                dimensions in the resulting array. Defaults to None.
            ranges (Sequence[Tuple[float, float]], optional): list of tuples containing
                the start and end point of the binning range. Defaults to None.
            **kwds: Keyword arguments:

                - **hist_mode**: Histogram calculation method. "numpy" or "numba". See
                  ``bin_dataframe`` for details. Defaults to
                  config["binning"]["hist_mode"].
                - **mode**: Defines how the results from each partition are combined.
                  "fast", "lean" or "legacy". See ``bin_dataframe`` for details.
                  Defaults to config["binning"]["mode"].
                - **pbar**: Option to show the tqdm progress bar. Defaults to
                  config["binning"]["pbar"].
                - **num_cores**: Number of CPU cores to use for parallelization.
                  Defaults to config["binning"]["num_cores"] or N_CPU-1.
                - **threads_per_worker**: Limit the number of threads that
                  multiprocessing can spawn per binning thread. Defaults to
                  config["binning"]["threads_per_worker"].
                - **threadpool_API**: The API to use for multiprocessing. "blas",
                  "openmp" or None. See ``threadpool_limit`` for details. Defaults to
                  config["binning"]["threadpool_API"].
                - **df_partitions**: Number of dataframe partitions to use. Defaults
                  to all partitions.

                Additional kwds are passed to ``bin_dataframe``.

        Raises:
            AssertionError: Raised when no dataframe has been loaded.

        Returns:
            xr.DataArray: The result of the n-dimensional binning represented in an
            xarray object, combining the data with the axes.
        """
        assert self._dataframe is not None, "dataframe needs to be loaded first!"

        hist_mode = kwds.pop("hist_mode", self._config["binning"]["hist_mode"])
        mode = kwds.pop("mode", self._config["binning"]["mode"])
        pbar = kwds.pop("pbar", self._config["binning"]["pbar"])
        num_cores = kwds.pop("num_cores", self._config["binning"]["num_cores"])
        threads_per_worker = kwds.pop(
            "threads_per_worker",
            self._config["binning"]["threads_per_worker"],
        )
        threadpool_api = kwds.pop(
            "threadpool_API",
            self._config["binning"]["threadpool_API"],
        )
        df_partitions = kwds.pop("df_partitions", None)
        if df_partitions is not None:
            dataframe = self._dataframe.partitions[
                0 : min(df_partitions, self._dataframe.npartitions)
            ]
        else:
            dataframe = self._dataframe

        self._binned = bin_dataframe(
            df=dataframe,
            bins=bins,
            axes=axes,
            ranges=ranges,
            hist_mode=hist_mode,
            mode=mode,
            pbar=pbar,
            n_cores=num_cores,
            threads_per_worker=threads_per_worker,
            threadpool_api=threadpool_api,
            **kwds,
        )

        for dim in self._binned.dims:
            try:
                self._binned[dim].attrs["unit"] = self._config["dataframe"]["units"][dim]
            except KeyError:
                pass

        self._binned.attrs["units"] = "counts"
        self._binned.attrs["long_name"] = "photoelectron counts"
        self._binned.attrs["metadata"] = self._attributes.metadata

        return self._binned
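
    # Sketch of an n-dimensional binning call (hypothetical axis names and ranges,
    # matching the calibrated columns produced by the steps above):
    #
    #     res = processor.compute(
    #         bins=[100, 100, 200],
    #         axes=["kx", "ky", "energy"],
    #         ranges=[(-2.0, 2.0), (-2.0, 2.0), (-4.0, 2.0)],
    #     )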

    def view_event_histogram(
        self,
        dfpid: int,
        ncol: int = 2,
        bins: Sequence[int] = None,
        axes: Sequence[str] = None,
        ranges: Sequence[Tuple[float, float]] = None,
        backend: str = "bokeh",
        legend: bool = True,
        histkwds: dict = None,
        legkwds: dict = None,
        **kwds,
    ):
        """Plot individual histograms of specified dimensions (axes) from a selected
        dataframe partition.

        Args:
            dfpid (int): Number of the data frame partition to look at.
            ncol (int, optional): Number of columns in the plot grid. Defaults to 2.
            bins (Sequence[int], optional): Number of bins to use for the specified
                axes. Defaults to config["histogram"]["bins"].
            axes (Sequence[str], optional): Names of the axes to display.
                Defaults to config["histogram"]["axes"].
            ranges (Sequence[Tuple[float, float]], optional): Value ranges of all
                specified axes. Defaults to config["histogram"]["ranges"].
            backend (str, optional): Backend of the plotting library
                ('matplotlib' or 'bokeh'). Defaults to "bokeh".
            legend (bool, optional): Option to include a legend in the histogram plots.
                Defaults to True.
            histkwds (dict, optional): Keyword arguments for histograms
                (see ``matplotlib.pyplot.hist()``). Defaults to {}.
            legkwds (dict, optional): Keyword arguments for legend
                (see ``matplotlib.pyplot.legend()``). Defaults to {}.
            **kwds: Extra keyword arguments passed to
                ``sed.diagnostics.grid_histogram()``.

        Raises:
            TypeError: Raised when the input values are not of the correct type.
        """
        if bins is None:
            bins = self._config["histogram"]["bins"]
        if axes is None:
            axes = self._config["histogram"]["axes"]
        if ranges is None:
            ranges = self._config["histogram"]["ranges"]

        input_types = map(type, [axes, bins, ranges])
        allowed_types = [list, tuple]

        df = self._dataframe

        if not set(input_types).issubset(allowed_types):
            raise TypeError(
                "Inputs of axes, bins, ranges need to be list or tuple!",
            )

        # Read out the values for the specified groups
        group_dict_dd = {}
        dfpart = df.get_partition(dfpid)
        cols = dfpart.columns
        for ax in axes:
            group_dict_dd[ax] = dfpart.values[:, cols.get_loc(ax)]
        group_dict = ddf.compute(group_dict_dd)[0]

        # Plot multiple histograms in a grid
        grid_histogram(
            group_dict,
            ncol=ncol,
            rvs=axes,
            rvbins=bins,
            rvranges=ranges,
            backend=backend,
            legend=legend,
            histkwds=histkwds,
            legkwds=legkwds,
            **kwds,
        )

    def save(
        self,
        faddr: str,
        **kwds,
    ):
        """Saves the binned data to the provided path and filename.

        Args:
            faddr (str): Path and name of the file to write. Its extension determines
                the file type to write. Valid file types are:

                - "*.tiff", "*.tif": Saves a TIFF stack.
                - "*.h5", "*.hdf5": Saves an HDF5 file.
                - "*.nxs", "*.nexus": Saves a NeXus file.

            **kwds: Keyword arguments, which are passed to the writer functions:
                For TIFF writing:

                - **alias_dict**: Dictionary of dimension aliases to use.

                For HDF5 writing:

                - **mode**: hdf5 read/write mode. Defaults to "w".

                For NeXus writing:

                - **reader**: Name of the nexustools reader to use.
                  Defaults to config["nexus"]["reader"].
                - **definition**: NeXus application definition to use for saving.
                  Must be supported by the used ``reader``. Defaults to
                  config["nexus"]["definition"].
                - **input_files**: A list of input files to pass to the reader.
                  Defaults to config["nexus"]["input_files"].
        """
        if self._binned is None:
            raise NameError("Need to bin data first!")

        extension = pathlib.Path(faddr).suffix

        if extension in (".tif", ".tiff"):
            to_tiff(
                data=self._binned,
                faddr=faddr,
                **kwds,
            )
        elif extension in (".h5", ".hdf5"):
            to_h5(
                data=self._binned,
                faddr=faddr,
                **kwds,
            )
        elif extension in (".nxs", ".nexus"):
            reader = kwds.pop("reader", self._config["nexus"]["reader"])
            definition = kwds.pop(
                "definition",
                self._config["nexus"]["definition"],
            )
            input_files = kwds.pop(
                "input_files",
                self._config["nexus"]["input_files"],
            )
            if isinstance(input_files, str):
                input_files = [input_files]

            to_nexus(
                data=self._binned,
                faddr=faddr,
                reader=reader,
                definition=definition,
                input_files=input_files,
                **kwds,
            )

        else:
            raise NotImplementedError(
                f"Unrecognized file format: {extension}.",
            )
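
    # Sketch of saving the binned result (hypothetical file names; the extension
    # selects the writer):
    #
    #     processor.save("binned.tiff")
    #     processor.save("binned.h5", mode="w")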

    def add_dimension(self, name: str, axis_range: Tuple):
        """Add a dimension axis.

        Args:
            name (str): name of the axis
            axis_range (Tuple): range for the axis.

        Raises:
            ValueError: Raised if an axis with that name already exists.
        """
        if name in self._coordinates:
            raise ValueError(f"Axis {name} already exists")

        self.axis[name] = self.make_axis(axis_range)

    def make_axis(self, axis_range: Tuple) -> np.ndarray:
        """Function to make an axis.

        Args:
            axis_range (Tuple): range for the new axis.

        Returns:
            np.ndarray: The generated axis.
        """
        # TODO: What shall this function do?
        return np.arange(*axis_range)