• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

maurergroup / dfttoolkit / 15230281031

24 May 2025 07:27PM UTC coverage: 29.031% (+7.3%) from 21.747%
15230281031

Pull #59

github

da1e13
web-flow
Merge dbeffb699 into e895278a4
Pull Request #59: Vibrations refactor

1208 of 4161 relevant lines covered (29.03%)

0.29 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

58.97
dfttoolkit/utils/math_utils.py
1
from copy import deepcopy
1✔
2
from typing import Any
1✔
3

4
import numpy as np
1✔
5
import numpy.typing as npt
1✔
6
import scipy
1✔
7

8

9
def get_rotation_matrix(
    vec_start: npt.NDArray[np.float64], vec_end: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
    """
    Calculate the rotation matrix to align two unit vectors.

    Given two (unit) vectors, vec_start and vec_end, this function calculates the
    rotation matrix U, so that U * vec_start = vec_end.

    U is the rotation matrix that rotates vec_start to point in the direction
    of vec_end.

    https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/897677

    Parameters
    ----------
    vec_start, vec_end : NDArray[float64]
        Two vectors that should be aligned. Both vectors must have a l2-norm of 1.

    Returns
    -------
    NDArray[float64]
        The rotation matrix U as npt.NDArray with shape (3,3)

    Raises
    ------
    ValueError
        If either input vector is not normalised.
    """
    # BUG FIX: this check previously used `and`, so it only fired when BOTH
    # vectors were non-unit; a single bad argument slipped through silently.
    if not np.isclose(np.linalg.norm(vec_start), 1) or not np.isclose(
        np.linalg.norm(vec_end), 1
    ):
        raise ValueError("`vec_start` and `vec_end` args must be unit vectors")

    v = np.cross(vec_start, vec_end)
    c = np.dot(vec_start, vec_end)

    # BUG FIX: for antiparallel vectors the generic formula below divides by
    # 1 + c = 0. Any 180 degree rotation about an axis perpendicular to
    # vec_start maps vec_start onto vec_end in that case.
    if np.isclose(c, -1.0):
        helper = np.array([1.0, 0.0, 0.0])
        if np.isclose(abs(vec_start[0]), 1.0):
            # vec_start is (anti)parallel to x; use y to build the axis instead
            helper = np.array([0.0, 1.0, 0.0])
        axis = np.cross(vec_start, helper)
        axis /= np.linalg.norm(axis)
        # Rotation by pi about `axis`: R = 2 a a^T - I
        return 2.0 * np.outer(axis, axis) - np.eye(3)

    v_x = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    return np.eye(3) + v_x + v_x.dot(v_x) / (1 + c)
42

43

44
def get_rotation_matrix_around_axis(axis: npt.NDArray, phi: float) -> npt.NDArray:
    """
    Generate a rotation matrix around a given vector.

    Parameters
    ----------
    axis : NDArray
        Axis around which the rotation is done.
    phi : float
        Angle of rotation around axis in radians.

    Returns
    -------
    NDArray
        Rotation matrix.
    """
    # Work on a normalised float copy so the caller's array is untouched.
    n = np.array(axis, dtype=np.float64)
    n /= np.linalg.norm(n)
    n_x, n_y, n_z = n

    # Projector onto the axis and the cross-product matrix used by the
    # axis-angle rotation formula.
    projector = np.outer(n, n)
    cross = np.array(
        [
            [0.0, n_z, -n_y],
            [-n_z, 0.0, n_x],
            [n_y, -n_x, 0.0],
        ],
        dtype=np.float64,
    )

    identity = np.eye(3, dtype=np.float64)
    return projector + np.cos(phi) * (identity - projector) + np.sin(phi) * cross
75

76

77
def get_rotation_matrix_around_z_axis(phi: float) -> npt.NDArray:
    """
    Generate a rotation matrix around the z axis.

    Parameters
    ----------
    phi : float
        Angle of rotation around the z axis in radians.

    Returns
    -------
    NDArray
        Rotation matrix.
    """
    z_axis = np.array([0.0, 0.0, 1.0])
    return get_rotation_matrix_around_axis(z_axis, phi)
92

93

94
def get_mirror_matrix(normal_vector: npt.NDArray) -> npt.NDArray:
    """
    Generate a transformation matrix for mirroring through plane given by the normal.

    Parameters
    ----------
    normal_vector : NDArray
        Normal vector of the mirror plane.

    Returns
    -------
    NDArray
        Mirror matrix of shape (3, 3).
    """
    # Householder reflection: M = I - 2 n n^T for the unit normal n.
    unit_normal = normal_vector / np.linalg.norm(normal_vector)
    mirror = np.eye(3) - 2.0 * np.outer(unit_normal, unit_normal)

    # Flush numerical noise (entries within 10 eps of zero) to exactly zero.
    tolerance = np.finfo(np.float64).eps * 10
    mirror[np.abs(mirror) < tolerance] = 0
    return mirror
122

123

124
def get_angle_between_vectors(
    vector_1: npt.NDArray, vector_2: npt.NDArray
) -> np.floating:
    """
    Determine the angle between two vectors.

    Parameters
    ----------
    vector_1 : npt.NDArray
        First vector.
    vector_2 : npt.NDArray
        Second vector.

    Returns
    -------
    np.floating
        Angle in radians, in the range [0, pi].
    """
    cos_angle = (
        np.dot(vector_1, vector_2) / np.linalg.norm(vector_1) / np.linalg.norm(vector_2)
    )
    # BUG FIX: the cosine was previously returned directly even though the
    # function name and docstring promise an angle in radians. The clip
    # guards arccos against round-off pushing the cosine outside [-1, 1].
    return np.arccos(np.clip(cos_angle, -1.0, 1.0))
144

145

146
def get_fractional_coords(
    cartesian_coords: npt.NDArray, lattice_vectors: npt.NDArray
) -> npt.NDArray:
    """
    Transform cartesian coordinates into fractional coordinates.

    Parameters
    ----------
    cartesian_coords : [N x N_dim] numpy array
        Cartesian coordinates of atoms (can be Nx2 or Nx3)
    lattice_vectors : [N_dim x N_dim] numpy array
        Matrix of lattice vectors: each ROW corresponds to one lattice vector!

    Returns
    -------
    [N x N_dim] numpy array
        Fractional coordinates of atoms
    """
    # Solve lattice_vectors.T @ frac.T = cart.T rather than inverting the
    # lattice matrix explicitly.
    return np.linalg.solve(lattice_vectors.T, cartesian_coords.T).T
167

168

169
def get_cartesian_coords(
    frac_coords: npt.NDArray, lattice_vectors: npt.NDArray
) -> npt.NDArray:
    """
    Transform fractional coordinates into cartesian coordinates.

    Parameters
    ----------
    frac_coords : [N x N_dim] numpy array
        Fractional coordinates of atoms (can be Nx2 or Nx3)
    lattice_vectors : [N_dim x N_dim] numpy array
        Matrix of lattice vectors: each ROW corresponds to one lattice vector!

    Returns
    -------
    [N x N_dim] numpy array
        Cartesian coordinates of atoms
    """
    # Row-vector convention: each row of frac_coords weights the lattice rows.
    return frac_coords @ lattice_vectors
189

190

191
def get_triple_product(a: npt.NDArray, b: npt.NDArray, c: npt.NDArray) -> npt.NDArray:
    """
    Get the triple product (DE: Spatprodukt): (a x b) . c.

    Parameters
    ----------
    a : NDArray
        First vector of length 3.
    b : NDArray
        Second vector of length 3.
    c : NDArray
        Third vector of length 3.

    Returns
    -------
    NDArray
        Scalar triple product of the three vectors.

    Raises
    ------
    ValueError
        If any of the vectors does not have exactly 3 components.
    """
    if any(len(vec) != 3 for vec in (a, b, c)):
        raise ValueError("Each vector must be of length 3")

    return np.dot(np.cross(a, b), c)
213

214

215
def smooth_function(y: npt.NDArray, box_pts: int) -> npt.NDArray[np.floating[Any]]:
    """
    Smooth a function using a moving-average (boxcar) convolution.

    Parameters
    ----------
    y : NDArray
        Signal values to be smoothed.
    box_pts : int
        Width of the boxcar window in points.

    Returns
    -------
    NDArray[floating[Any]]
        Smoothed signal of the same length as `y`.
    """
    boxcar = np.full(box_pts, 1.0 / box_pts)
    return np.convolve(y, boxcar, mode="same")
233

234

235
def get_cross_correlation_function(
    signal_0: npt.NDArray,
    signal_1: npt.NDArray,
    detrend: bool = False,
) -> npt.NDArray:
    """
    Calculate the cross-correlation function between two signals.

    Parameters
    ----------
    signal_0 : 1D NDArray
        First signal for which the correlation function should be calculated.
    signal_1 : 1D NDArray
        Second signal for which the correlation function should be calculated.
    detrend : bool
        Whether to remove a linear trend from both signals first.

    Returns
    -------
    NDArray
        Cross-correlation function for non-negative lags, truncated to the
        first 75% of lags (the largest lags have too few overlapping points).
    """
    if detrend:
        signal_0 = scipy.signal.detrend(signal_0)
        signal_1 = scipy.signal.detrend(signal_1)

    cross_correlation = np.correlate(signal_0, signal_1, mode="full")
    # Keep only non-negative lags (the second half of the 'full' output).
    cross_correlation = cross_correlation[cross_correlation.size // 2 :]

    # Normalize by the number of overlapping data points at each lag.
    # BUG FIX: use out-of-place division so integer input signals do not
    # raise a casting error from the former in-place `/=`.
    cross_correlation = cross_correlation / np.arange(cross_correlation.size, 0, -1)

    cutoff = int(cross_correlation.size * 0.75)
    return cross_correlation[:cutoff]
266

267

268
def get_autocorrelation_function_manual_lag(
    signal: npt.NDArray, max_lag: int
) -> npt.NDArray:
    """
    Alternative method to determine the autocorrelation function for a given
    signal that uses numpy.corrcoef.

    Allows the lag to be set manually.

    Parameters
    ----------
    signal : 1D npt.NDArray
        Signal for which the autocorrelation function should be calculated.
    max_lag : int
        Autocorrelation will be calculated for a range of 0 to max_lag,
        where max_lag is the largest lag for the calculation of the
        autocorrelation function.

    Returns
    -------
    npt.NDArray
        Autocorrelation function from 0 to max_lag.
    """
    autocorrelation = np.full(max_lag, np.nan)

    # BUG FIX: `npt.NDArray(range(max_lag))` raised a TypeError because
    # npt.NDArray is a type alias, not a constructor; iterate a plain range.
    for i in range(max_lag):
        if i == 0:
            # Zero lag: a signal is perfectly correlated with itself.
            autocorrelation[i] = 1.0
        else:
            # BUG FIX: the shifted series must be the slice `signal[i:]`,
            # not the scalar `signal[i]`, to correlate with `signal[:-i]`.
            autocorrelation[i] = np.corrcoef(signal[i:], signal[:-i])[0][1]

    return autocorrelation
301

302

303
def get_fourier_transform(signal: npt.NDArray, time_step: float) -> tuple:
    """
    Calculate the Fourier transform of a given signal.

    Parameters
    ----------
    signal : 1D npt.NDArray
        Signal to be transformed.
    time_step : float
        Time step of the signal in seconds.

    Returns
    -------
    tuple
        Frequencies and the (complex) Fourier transform values for all
        non-negative frequencies.
    """
    frequencies = scipy.fft.fftfreq(signal.size, d=time_step)
    spectrum = scipy.fft.fft(signal)

    # Drop the negative-frequency half of the spectrum.
    non_negative = frequencies >= 0
    return frequencies[non_negative], spectrum[non_negative]
326

327

328
def lorentzian(
1✔
329
    x: float | npt.NDArray[np.float64], a: float, b: float, c: float
330
) -> float | npt.NDArray[np.float64]:
331
    """
332
    Return a Lorentzian function.
333

334
    Parameters
335
    ----------
336
    x : float | NDArray[float64]
337
        Argument x of f(x) --> y.
338
    a : float
339
        Maximum of Lorentzian.
340
    b : float
341
        Width of Lorentzian.
342
    c : float
343
        Magnitude of Lorentzian.
344

345
    Returns
346
    -------
347
    f : float | NDArray[float64]
348
        Outupt of a Lorentzian function.
349
    """
350
    return c / (1.0 + ((x - a) / (b / 2.0)) ** 2)
1✔
351

352

353
def exponential(
1✔
354
    x: float | npt.NDArray[np.float64], a: float, b: float
355
) -> float | npt.NDArray[np.float64]:
356
    """
357
    Return an exponential function.
358

359
    Parameters
360
    ----------
361
    x : float | NDArray[float64]
362
        Argument x of f(x) --> y
363
    a : float
364
        TODO
365
    b : float
366
        TODO
367
    """
368
    return a * np.exp(x * b)
×
369

370

371
def double_exponential(
1✔
372
    x: float | npt.NDArray, a: float, b: float, c: float
373
) -> float | npt.NDArray:
374
    return a * (np.exp(x * b) + np.exp(x * c))
×
375

376

377
def gaussian_window(N: int, std: float = 0.4) -> npt.NDArray[np.float64]:
    """
    Generate a Gaussian window.

    Parameters
    ----------
    N : int
        Number of points in the window.
    std : float
        Standard deviation of the Gaussian window, normalized
        such that the maximum value occurs at the center of the window.

    Returns
    -------
    NDArray[float64]
        Gaussian window of length N.
    """
    # Evaluate the Gaussian on N equally spaced points in [-1, 1].
    positions = np.linspace(-1.0, 1.0, N)
    return np.exp(-0.5 * np.square(positions / std))
397

398

399
def apply_gaussian_window(
    data: npt.NDArray[np.float64], std: float = 0.4
) -> npt.NDArray[np.float64]:
    """
    Apply a Gaussian window to an array.

    Parameters
    ----------
    data : NDArray[float64]
        Input data array to be windowed.
    std : float
        Standard deviation of the Gaussian window.

    Returns
    -------
    NDArray[float64]
        Windowed data array.
    """
    # Element-wise product with a window of matching length.
    return data * gaussian_window(len(data), std)
420

421

422
def hann_window(N: int) -> npt.NDArray[np.float64]:
    """
    Generate a Hann window.

    Parameters
    ----------
    N : int
        Number of points in the window.

    Returns
    -------
    NDArray[float64]
        Hann window of length N.
    """
    # Symmetric Hann window: 0.5 * (1 - cos(2 pi k / (N - 1))), k = 0..N-1.
    samples = np.arange(N)
    return 0.5 * (1 - np.cos(2 * np.pi * samples / (N - 1)))
437

438

439
def apply_window(data: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    """
    Apply a Hann window to an array.

    Parameters
    ----------
    data : NDArray[float64]
        Input data array to be windowed.

    Returns
    -------
    NDArray[float64]
        Windowed data array.
    """
    # Element-wise product with a Hann window of matching length.
    return data * hann_window(len(data))
456

457

458
def norm_matrix_by_dagonal(matrix: npt.NDArray) -> npt.NDArray:
    """
    Norm a matrix such that its diagonal becomes 1.

    | a_11 a_12 a_13 |       |   1   a'_12 a'_13 |
    | a_21 a_22 a_23 |  -->  | a'_21   1   a'_23 |
    | a_31 a_32 a_33 |       | a'_31 a'_32   1   |

    Parameters
    ----------
    matrix : NDArray
        Matrix that should be normed.

    Returns
    -------
    NDArray
        Normed matrix.
    """
    diagonal = np.array(np.diagonal(matrix))
    # Zero diagonal entries would cause division by zero; treat them as 1.
    diagonal[diagonal == 0.0] = 1.0

    # Entry (i, j) is divided by sqrt(d_i * d_j); the tiled grids hold d_i
    # down the rows and d_j across the columns.
    scale = np.sqrt(
        np.tile(diagonal, (matrix.shape[1], 1)).T
        * np.tile(diagonal, (matrix.shape[0], 1))
    )

    normed = deepcopy(matrix)
    normed /= scale
    return normed
488

489

490
def mae(delta: npt.NDArray) -> np.floating:
    """
    Calculate the mean absolute error from a list of value differences.

    Parameters
    ----------
    delta : np.ndarray
        Array containing differences.

    Returns
    -------
    np.floating
        Mean absolute error.
    """
    absolute_errors = np.abs(delta)
    return np.mean(absolute_errors)
505

506

507
def rel_mae(
1✔
508
    delta: np.ndarray, target_val: np.ndarray
509
) -> np.floating | np.float64 | float:
510
    """
511
    Calculated the relative mean absolute error from a list of value differnces,
512
    given the target values.
513

514
    Parameters
515
    ----------
516
    delta : np.ndarray
517
        Array containing differences
518
    target_val : np.ndarray
519
        Array of target values against which the difference should be compared
520

521
    Returns
522
    -------
523
    float
524
        relative mean absolute error
525

526
    """
527
    target_norm = np.mean(np.abs(target_val))
1✔
528
    return np.mean(np.abs(delta)).item() / (target_norm + 1e-9)
1✔
529

530

531
def rmse(delta: np.ndarray) -> float:
    """
    Calculate the root mean square error from a list of value differences.

    Parameters
    ----------
    delta : np.ndarray
        Array containing differences.

    Returns
    -------
    float
        Root mean square error.
    """
    mean_squared = np.mean(np.square(delta))
    return np.sqrt(mean_squared)
547

548

549
def rel_rmse(delta: np.ndarray, target_val: np.ndarray) -> float:
    """
    Calculate the relative root mean square error from a list of value
    differences, given the target values.

    Parameters
    ----------
    delta : np.ndarray
        Array containing differences.
    target_val : np.ndarray
        Array of target values against which the difference should be compared.

    Returns
    -------
    float
        Relative root mean square error.
    """
    # The small offset in the denominator guards against division by zero.
    reference_scale = np.sqrt(np.mean(np.square(target_val)))
    return np.sqrt(np.mean(np.square(delta))) / (reference_scale + 1e-9)
569

570

571
def get_moving_average(signal: npt.NDArray[np.float64], window_size: int):
    """
    Calculate the moving average and the variance around the moving average.

    Parameters
    ----------
    signal : npt.NDArray[np.float64]
        Signal for which the moving average should be calculated.
    window_size : int
        Window size for the moving average.

    Returns
    -------
    moving_avg : npt.NDArray
        Moving average.
    variance : npt.NDArray
        Variance around the moving average.
    """
    # Boxcar convolution gives the mean of each full window.
    kernel = np.ones(window_size) / window_size
    moving_avg = np.convolve(signal, kernel, mode="valid")

    # Per-window variance over the same set of full windows.
    n_windows = len(signal) - window_size + 1
    variance = np.array(
        [np.var(signal[start : start + window_size]) for start in range(n_windows)]
    )

    return moving_avg, variance
599

600

601
def get_maxima_in_moving_interval(
    function_values, interval_size, step_size, filter_value
):
    """
    Move an interval along the function and filter out all points smaller
    than filter_value times the maximal value in the interval.

    Parameters
    ----------
    function_values : array-like
        1D array of function values.
    interval_size : int
        Size of the interval (number of points).
    step_size : int
        Step size for moving the interval.
    filter_value : float
        Fraction of the interval maximum below which points are discarded.

    Returns
    -------
    np.ndarray
        Sorted, unique indices of the points that reach the threshold in at
        least one interval.
    """
    # Convert input to a NumPy array
    function_values = np.asarray(function_values)

    # Indices that pass the threshold in at least one interval
    filtered_indices = []

    # Slide the interval along the function
    for start in range(0, len(function_values) - interval_size + 1, step_size):
        # Extract the interval
        interval = function_values[start : start + interval_size]

        # Keep every point reaching filter_value times the interval maximum
        threshold = filter_value * np.max(interval)
        (indices,) = np.where(interval >= threshold)
        filtered_indices += list(start + indices)

    # BUG FIX: `np.array(list(set(...)))` returned the indices in arbitrary
    # set-iteration order; np.unique gives a deterministic, sorted result.
    return np.unique(filtered_indices)
647

648

649
def get_pearson_correlation_coefficient(x, y):
    """
    Calculate the Pearson correlation coefficient between two samples.

    Parameters
    ----------
    x : array-like
        First sample.
    y : array-like
        Second sample.

    Returns
    -------
    float
        Pearson correlation coefficient.
    """
    # Covariance of the centred samples divided by both standard deviations.
    centred_x = x - np.mean(x)
    centred_y = y - np.mean(y)

    covariance = np.mean(centred_x * centred_y)
    return covariance / np.std(x) / np.std(y)
656

657

658
def get_t_test(x, y):
    """
    Calculate the t-statistic for the Pearson correlation of two samples.

    Parameters
    ----------
    x : array-like
        First sample.
    y : array-like
        Second sample.

    Returns
    -------
    float
        Absolute t-statistic, |r| * sqrt((n - 2) / (1 - r^2)).
    """
    pearson_r = get_pearson_correlation_coefficient(x, y)
    sample_size = len(x)

    return np.abs(pearson_r) * np.sqrt((sample_size - 2) / (1 - pearson_r**2))
664

665

666
def probability_density(t, n):
    """
    Evaluate the probability density of Student's t-distribution at t.

    Parameters
    ----------
    t : float
        Value of the t-statistic.
    n : int
        Sample size; the distribution uses n - 2 degrees of freedom.

    Returns
    -------
    float
        Probability density at t.
    """
    dof = n - 2

    # Normalisation constant of the Student's t-distribution.
    norm = scipy.special.gamma((dof + 1.0) / 2.0) / (
        np.sqrt(np.pi * dof) * scipy.special.gamma(dof / 2.0)
    )

    return norm * (1 + t**2 / dof) ** (-(dof + 1.0) / 2.0)
677

678

679
def get_significance(x, t):
    """
    Integrate the t-distribution density from -infinity up to t.

    Parameters
    ----------
    x : array-like
        Sample used to determine the number of data points.
    t : float
        Upper integration limit (t-statistic).

    Returns
    -------
    float
        Cumulative probability P(T <= t) for the t-distribution built from
        len(x) data points.
    """
    sample_size = len(x)

    cumulative, _ = scipy.integrate.quad(
        probability_density, -np.inf, t, args=(sample_size,)
    )
    return cumulative
683

684

685
def squared_exponential_kernel(x1_vec, x2_vec, tau):
    """
    A simple squared exponential (RBF) kernel as a similarity measure.

    Parameters
    ----------
    x1_vec : np.array
        Descriptor for first set of data of shape
        [data points, descriptor dimensions].
    x2_vec : np.array
        Descriptor for second set of data of shape
        [data points, descriptor dimensions].
    tau : float
        Correlation length for the descriptor.

    Returns
    -------
    np.array
        Matrix containing pairwise kernel values.
    """
    # Ensure inputs are at least 2D (n_samples, n_features).
    left = np.atleast_2d(x1_vec)
    right = np.atleast_2d(x2_vec)

    # Treat 1D row/column vectors uniformly as column vectors.
    if 1 in left.shape:
        left = left.reshape(-1, 1)
    if 1 in right.shape:
        right = right.reshape(-1, 1)

    # Pairwise squared Euclidean distances via broadcasting.
    pairwise_diff = left[:, None, :] - right[None, :, :]
    squared_distances = np.sum(pairwise_diff**2, axis=2)

    # Apply the RBF formula.
    return np.exp(-0.5 * squared_distances / tau**2)
722

723

724
class GPR:
    """A very simple GPR designed for interpolation and smoothing of data."""

    def __init__(self, x, y, tau, sigma):
        """
        Set up the regressor by inverting the regularised training kernel.

        Parameters
        ----------
        x : np.array
            Descriptor of shape [data points, descriptor dimensions].
        y : np.array
            Data to be learned.
        tau : float
            Correlation length for the descriptor.
        sigma : float
            Uncertainty of the input data.

        Returns
        -------
        None.
        """
        # Training kernel with noise regularisation on the diagonal.
        train_kernel = squared_exponential_kernel(x, x, tau)
        self.K1_inv = np.linalg.inv(train_kernel + np.eye(len(x)) * sigma)

        self.x = x
        self.y = y
        self.tau = tau
        self.sigma = sigma

    def __call__(self, x):
        """Shorthand for :meth:`predict`."""
        return self.predict(x)

    def predict(self, x):
        """
        Predict function values at the query points x.

        Parameters
        ----------
        x : np.array
            Descriptor of the query points.

        Returns
        -------
        np.array
            Predicted values at the query points.
        """
        cross_kernel = squared_exponential_kernel(x, self.x, self.tau)
        weights = cross_kernel.dot(self.K1_inv)
        return weights.dot(self.y)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc