
mantidproject / vesuvio / 17338207230

14 Aug 2025 03:07PM UTC coverage: 52.563% (-0.3%) from 52.82%

push · github · web-flow
Inputs script improvements (#197)

* Remove vesuvio thickness from user inputs

- This value used to be hidden and hardcoded into the routine
- Following feedback from scientists, this change reverts it to how it was before

* Add system test for estimating h ratio

* Add option to choose index for H ratio

- Allow users to select the mass on which to base the hydrogen ratio
- This change comes directly from scientists' feedback: more often than not
they can estimate the ratio relative to one of the masses from
stoichiometry
- Avoids having to run the preliminary procedure so often

* Update unit test

* Catch error for missing fitting workspaces

Previously the error looked very intimidating and was not very
informative. Catching the error also allows the routine to continue
normally.

* Replace np.trapz with np.trapezoid

Due to a rename in the NumPy API: np.trapz is deprecated in NumPy 2.0 in favour of np.trapezoid (see the short sketch after these notes)

* Add comments and rename fit function

- Renamed `ansiogauss` (which should have been `anisogauss` anyway) to
`gauss2d`
- Added useful comments explaining several parameters to the user
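
A minimal sketch of the NumPy rename mentioned above (assuming NumPy >= 2.0, where np.trapz is deprecated in favour of np.trapezoid; the call signature is unchanged):

    import numpy as np

    x = np.linspace(0, np.pi, 101)
    area = np.trapezoid(np.sin(x), x=x)  # previously: np.trapz(np.sin(x), x=x)
    # area is approximately 2.0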

7 of 42 new or added lines in 4 files covered. (16.67%)

2 existing lines in 1 file now uncovered.

1405 of 2673 relevant lines covered (52.56%)

0.53 hits per line

Source File

9.69
/src/mvesuvio/analysis_fitting.py
1
import matplotlib.pyplot as plt
1✔
2
import numpy as np
1✔
3
from mantid.simpleapi import *
1✔
4
from scipy import optimize
1✔
5
from scipy import signal
1✔
6
from pathlib import Path
1✔
7
from iminuit import Minuit, cost
1✔
8
from iminuit.util import make_func_code, describe
1✔
9
import jacobi
1✔
10
import time
1✔
11
from mantid.kernel import logger
1✔
12

13
from mvesuvio.util import handle_config
1✔
14
from mvesuvio.util.analysis_helpers import print_table_workspace, pass_data_into_ws
1✔
15

16
repoPath = Path(__file__).absolute().parent  # Path to the repository
1✔
17

18

19
class FitInYSpace:
1✔
20
    def __init__(self, fi, ws_to_fit, ws_to_fit_ncp, ws_res):
1✔
21
        self.fitting_inputs = fi
×
22
        self.ws_to_fit = ws_to_fit
×
23
        self.ws_to_fit_ncp = ws_to_fit_ncp
×
24
        self.ws_resolution = ws_res
×
25

26
        fi.figSavePath = fi.save_path / "figures"
×
27
        fi.figSavePath.mkdir(exist_ok=True, parents=True)
×
28

29
    def run(self):
1✔
30
        wsResSum = SumSpectra(InputWorkspace=self.ws_resolution, OutputWorkspace=self.ws_resolution.name() + "_Sum")
×
31
        normalise_workspace(wsResSum)
×
32

33
        wsJoY, wsJoYAvg = ySpaceReduction(self.ws_to_fit, self.ws_to_fit_ncp, self.fitting_inputs)
×
34

35
        if self.fitting_inputs.do_symmetrisation:
×
36
            wsJoYAvg = symmetrizeWs(wsJoYAvg)
×
37

38
        fitProfileMinuit(self.fitting_inputs, wsJoYAvg, wsResSum)
×
39
        fitProfileMantidFit(self.fitting_inputs, wsJoYAvg, wsResSum)
×
40

41
        printYSpaceFitResults()
×
42

43
        if self.fitting_inputs.do_global_fit:
×
44
            runGlobalFit(wsJoY, self.ws_resolution, self.fitting_inputs)
×
45

46
        save_workspaces(self.fitting_inputs)
×
47
        return
×
48

49

50
def ySpaceReduction(wsTOF, ws_ncp, ic):
1✔
51
    """Seperate procedures depending on masking specified."""
52
    mass0 = ic.masses[0]
×
53
    ncp = ws_ncp.extractY()
×
54

55
    rebinPars = ic.range_for_rebinning_in_y_space
×
56

57
    if np.any(np.all(wsTOF.extractY() == 0, axis=0)):  # Masked columns present
×
58
        if ic.mask_zeros_with == "nan":
×
59
            # Build special workspace to store accumulated points
60
            wsJoY = convertToYSpace(wsTOF, mass0)
×
61
            xp = buildXRangeFromRebinPars(ic)
×
62
            wsJoYB = dataXBining(wsJoY, xp)  # Unusual ws with several dataY points per each dataX point
×
63

64
            # Need normalisation values from NCP masked workspace
65
            wsTOFNCP = replaceZerosWithNCP(wsTOF, ncp)
×
66
            wsJoYNCP = convertToYSpace(wsTOFNCP, mass0)
×
67
            wsJoYNCPN, wsJoYInt = rebinAndNorm(wsJoYNCP, rebinPars)
×
68

69
            # Normalize spectra of special workspace
70
            wsJoYN = Divide(wsJoYB, wsJoYInt, OutputWorkspace=wsJoYB.name() + "_norm")
×
71
            wsJoYAvg = weightedAvgXBins(wsJoYN, xp)
×
72
            return wsJoYN, wsJoYAvg
×
73

74
        elif ic.mask_zeros_with == "ncp":
×
75
            wsTOF = replaceZerosWithNCP(wsTOF, ncp)
×
76

77
        else:
78
            raise ValueError(
×
79
                """
80
            Masked TOF bins were found but no valid procedure in y-space fit was selected.
81
            Options: 'nan', 'ncp'
82
            """
83
            )
84

85
    wsJoY = convertToYSpace(wsTOF, mass0)
×
86
    wsJoYN, wsJoYI = rebinAndNorm(wsJoY, rebinPars)
×
87
    wsJoYAvg = weightedAvgCols(wsJoYN)
×
88
    return wsJoYN, wsJoYAvg
×
89

90

91
def convertToYSpace(wsTOF, mass0):
1✔
92
    wsJoY = ConvertToYSpace(wsTOF, Mass=mass0, OutputWorkspace=wsTOF.name() + "_joy")
×
93
    return wsJoY
×
94

95

96
def rebinAndNorm(wsJoY, rebinPars):
1✔
97
    wsJoYR = Rebin(
×
98
        InputWorkspace=wsJoY,
99
        Params=rebinPars,
100
        FullBinsOnly=True,
101
        OutputWorkspace=wsJoY.name() + "_rebin",
102
    )
103
    wsJoYInt = Integration(wsJoYR, OutputWorkspace=wsJoYR.name() + "_integrated")
×
104
    wsJoYNorm = Divide(wsJoYR, wsJoYInt, OutputWorkspace=wsJoYR.name() + "_norm")
×
105
    return wsJoYNorm, wsJoYInt
×
106

107

108
def replaceZerosWithNCP(ws, ncp):
1✔
109
    """
110
    Replaces columns of bins with zeros on dataY with NCP provided.
111
    """
112
    dataX, dataY, dataE = extractWS(ws)
×
113
    mask = np.all(dataY == 0, axis=0)  # Masked Cols
×
114

115
    dataY[:, mask] = ncp[:, mask[: ncp.shape[1]]]  # Mask trimmed in case ncp has one column fewer than dataY
×
116

117
    wsMasked = CloneWorkspace(ws, OutputWorkspace=ws.name() + "_NCPMasked")
×
118
    pass_data_into_ws(dataX, dataY, dataE, wsMasked)
×
119
    SumSpectra(wsMasked, OutputWorkspace=wsMasked.name() + "_Sum")
×
120
    return wsMasked
×
121

122

123
def buildXRangeFromRebinPars(yFitIC):
1✔
124
    # Range used in case mask is set to NAN
125
    first, step, last = [float(s) for s in yFitIC.range_for_rebinning_in_y_space.split(",")]
×
126
    xp = np.arange(first, last, step) + step / 2  # Correction to match Mantid range
×
127
    return xp
×
128

129

130
def dataXBining(ws, xp):
1✔
131
    """
132
    Changes dataX of a workspace to values in range of bin centers xp.
133
    Same as shifting dataY values to closest bin center.
134
    Output ws has several dataY values per dataX point.
135
    """
136

137
    assert np.min(xp[:-1] - xp[1:]) == np.max(xp[:-1] - xp[1:]), "Bin widths need to be the same."
×
138
    step = xp[1] - xp[0]  # Calculate step from first two numbers
×
139
    # Form bins with xp being the centers
140
    bins = np.append(xp, [xp[-1] + step]) - step / 2
×
141

142
    dataX, dataY, dataE = extractWS(ws)
×
143
    # Loop below changes only the values of DataX
144
    for i, x in enumerate(dataX):
×
145
        # Select only valid range xr
146
        mask = (x < np.min(bins)) | (x > np.max(bins))
×
147
        xr = x[~mask]
×
148

149
        idxs = np.digitize(xr, bins)
×
150
        newXR = np.array([xp[idx] for idx in idxs - 1])  # Bin idx 1 refers to first bin ie idx 0 of centers
×
151

152
        # Pad invalid values with nans
153
        newX = x
×
154
        newX[mask] = np.nan  # Cannot use 0, so as not to confuse it with a valid dataX value
×
155
        newX[~mask] = newXR
×
156
        dataX[i] = newX  # Update DataX
×
157

158
    # Mask DataE values in same places as DataY values
159
    dataE[dataY == 0] = 0
×
160

161
    wsXBins = CloneWorkspace(ws, OutputWorkspace=ws.name() + "_XBinned")
×
162
    wsXBins = pass_data_into_ws(dataX, dataY, dataE, wsXBins)
×
163
    return wsXBins
×
164

165

166
def weightedAvgXBins(wsXBins, xp):
1✔
167
    """Weighted average on ws where dataY points are grouped per dataX bin centers."""
168
    dataX, dataY, dataE = extractWS(wsXBins)
×
169

170
    meansY, meansE = weightedAvgXBinsArr(dataX, dataY, dataE, xp)
×
171

172
    wsYSpaceAvg = CreateWorkspace(
×
173
        DataX=xp,
174
        DataY=meansY,
175
        DataE=meansE,
176
        NSpec=1,
177
        OutputWorkspace=wsXBins.name() + "_wavg",
178
    )
179
    return wsYSpaceAvg
×
180

181

182
def weightedAvgXBinsArr(dataX, dataY, dataE, xp):
1✔
183
    """
184
    Weighted Average on arrays where several dataY points correspond to a single dataX point.
185
    xp is the range over which to perform the average.
186
    dataX points can only take values in xp.
187
    Ignores any zero or NaN value.
188
    """
189
    meansY = np.zeros(len(xp))
×
190
    meansE = np.zeros(len(xp))
×
191

192
    for i in range(len(xp)):
×
193
        # Perform weighted average over all dataY and dataE values with the same xp[i]
194
        # Change shape to column to match weighted average function
195
        pointMask = dataX == xp[i]
×
196
        allY = dataY[pointMask][:, np.newaxis]
×
197
        allE = dataE[pointMask][:, np.newaxis]
×
198

199
        # If no points were found for a given abscissa
200
        if np.sum(pointMask) == 0:
×
201
            mY, mE = 0, 0  # Mask with zeros
×
202

203
        # If one point was found, set to that point
204
        elif np.sum(pointMask) == 1:
×
205
            mY, mE = allY.flatten(), allE.flatten()
×
206

207
        # Weighted avg over all spectra and several points per spectra
208
        else:
209
            # Case of bootstrap replica with no errors
210
            if np.all(dataE == 0):
×
211
                mY = avgArr(allY)
×
212
                mE = 0
×
213

214
            # Default for most cases
215
            else:
216
                mY, mE = weightedAvgArr(allY, allE)  # Outputs masked values as zeros
×
217

218
        # DataY and DataE should never reach NaN, but safeguard in case they do
219
        if np.any(np.isnan(mY)) or np.any(np.isnan(mE)):
×
220
            mY, mE = 0, 0
×
221

222
        meansY[i] = mY
×
223
        meansE[i] = mE
×
224

225
    return meansY, meansE
×
226

227

228
def weightedAvgCols(wsYSpace):
1✔
229
    """Returns ws with weighted avg of columns of input ws"""
230
    dataX, dataY, dataE = extractWS(wsYSpace)
×
231
    if np.all(dataE == 0):  # Bootstrap case where errors are not used
×
232
        meanY = avgArr(dataY)
×
233
        meanE = np.zeros(meanY.shape)
×
234
    else:
235
        meanY, meanE = weightedAvgArr(dataY, dataE)
×
236
    wsYSpaceAvg = CreateWorkspace(
×
237
        DataX=dataX[0, :],
238
        DataY=meanY,
239
        DataE=meanE,
240
        NSpec=1,
241
        OutputWorkspace=wsYSpace.name() + "_wavg",
242
    )
243
    return wsYSpaceAvg
×
244

245

246
def avgArr(dataYO):
1✔
247
    """
248
    Average over columns of 2D dataY.
249
    Ignores any zero values as being masked.
250
    """
251

252
    assert len(dataYO) > 1, "Averaging needs more than one element."
×
253

254
    dataY = dataYO.copy()
×
255
    dataY[dataY == 0] = np.nan
×
256
    meanY = np.nanmean(dataY, axis=0)
×
257
    meanY[np.isnan(meanY)] = 0
×
258

259
    assert np.all(np.all(dataYO == 0, axis=0) == (meanY == 0)), "Columns of zeros should give zero."
×
260
    return meanY
×
261

262

263
def weightedAvgArr(dataYO, dataEO):
1✔
264
    """
265
    Weighted average over columns of 2D arrays.
266
    Ignores any zero or NaN value.
267
    """
268

269
    # Run some tests
270
    assert dataYO.shape == dataEO.shape, "Y and E arrays should have same shape for weighted average."
×
271
    assert np.all((dataYO == 0) == (dataEO == 0)), (
×
272
        f"Masked zeros should match in DataY and DataE: {np.argwhere((dataYO == 0) != (dataEO == 0))}"
273
    )
274
    assert np.all(np.isnan(dataYO) == np.isnan(dataEO)), "Masked nans should match in DataY and DataE."
×
275
    assert len(dataYO) > 1, "Weighted average needs more than one element to be performed."
×
276

277
    dataY = dataYO.copy()  # Copy arrays not to change original data
×
278
    dataE = dataEO.copy()
×
279

280
    # Ignore invalid data by changing zeros to nans
281
    # If data is already masked with nans, it remains unaltered
282
    zerosMask = dataY == 0
×
283
    dataY[zerosMask] = np.nan
×
284
    dataE[zerosMask] = np.nan
×
285

286
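    # Inverse-variance weighted mean per column: mean = sum(y/sigma^2)/sum(1/sigma^2) and error = sqrt(1/sum(1/sigma^2)); nansum skips the entries masked with NaN above.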
    meanY = np.nansum(dataY / np.square(dataE), axis=0) / np.nansum(1 / np.square(dataE), axis=0)
×
287
    meanE = np.sqrt(1 / np.nansum(1 / np.square(dataE), axis=0))
×
288

289
    # Change invalid data back to original masking format with zeros
290
    nanInfMask = np.isinf(meanE) | np.isnan(meanE) | np.isnan(meanY)
×
291
    meanY[nanInfMask] = 0
×
292
    meanE[nanInfMask] = 0
×
293

294
    # Test that columns of zeros are left unchanged
295
    assert np.all((meanY == 0) == (meanE == 0)), "Weighted avg output should have masks in the same DataY and DataE."
×
296
    assert np.all((np.all(dataYO == 0, axis=0) | np.all(np.isnan(dataYO), axis=0)) == (meanY == 0)), "Masked cols should be ignored."
×
297

298
    return meanY, meanE
×
299

300

301
def normalise_workspace(ws_name):
1✔
302
    """Updates workspace with the normalised version."""
303
    tmp_norm = Integration(ws_name)
×
304
    Divide(LHSWorkspace=ws_name, RHSWorkspace=tmp_norm, OutputWorkspace=ws_name)
×
305
    DeleteWorkspace("tmp_norm")
×
306

307

308
def extractWS(ws):
1✔
309
    """Directly exctracts data from workspace into arrays"""
310
    return ws.extractX(), ws.extractY(), ws.extractE()
×
311

312

313
def symmetrizeWs(avgYSpace):
1✔
314
    """
315
    Symmetrizes workspace after weighted average.
316
    Needs to have symmetric binning.
317
    """
318

319
    dataX, dataY, dataE = extractWS(avgYSpace)
×
320

321
    if np.all(dataE == 0):
×
322
        dataYS = symArr(dataY)
×
323
        dataES = np.zeros(dataYS.shape)
×
324
    else:
325
        dataYS, dataES = weightedSymArr(dataY, dataE)
×
326

327
    wsSym = CloneWorkspace(avgYSpace, OutputWorkspace=avgYSpace.name() + "_sym")
×
328
    wsSym = pass_data_into_ws(dataX, dataYS, dataES, wsSym)
×
329
    return wsSym
×
330

331

332
def symArr(dataYO):
1✔
333
    """
334
    Performs averaging between two opposite points.
335
    Takes only one 2D array.
336
    Any zero gets assigned the opposite value.
337
    """
338

339
    assert len(dataYO.shape) == 2, "Symmetrization is written for 2D arrays."
×
340
    dataY = dataYO.copy()  # Copy arrays not to risk changing original data
×
341
    coMask = dataY == 0
×
342
    dataY[coMask] = np.nan
×
343

344
    yFlip = np.flip(dataY, axis=1)
×
345

346
    dataYS = np.nanmean(np.stack((dataY, yFlip)), axis=0)  # Normal avg between two numbers, cut-offs get ignored
×
347

348
    dataYS[np.isnan(dataYS)] = 0
×
349
    np.testing.assert_array_equal(dataYS, np.flip(dataYS, axis=1), err_msg=f"Symmetrisation failed in {np.argwhere(dataYS != np.flip(dataYS, axis=1))}")
×
350
    np.testing.assert_allclose(dataYS[coMask], np.flip(dataYO, axis=1)[coMask])
×
351
    return dataYS
×
352

353

354
def weightedSymArr(dataYO, dataEO):
1✔
355
    """
356
    Performs inverse-variance weighting between two opposite points.
357
    When one of the points is a cut-off and the other is a valid point,
358
    the final value will be the valid point.
359
    """
360
    assert len(dataYO.shape) == 2, "Symmetrization is written for 2D arrays."
×
361
    assert np.all((dataYO == 0) == (dataEO == 0)), "Masked values should have zeros on both dataY and dataE."
×
362

363
    dataY = dataYO.copy()  # Copy arrays not to risk changing original data
×
364
    dataE = dataEO.copy()
×
365

366
    cutOffMask = dataY == 0
×
367
    # Change values of yerr to leave cut-offs unchanged during symmetrisation
368
    dataE[cutOffMask] = np.full(np.sum(cutOffMask), np.inf)
×
369

370
    yFlip = np.flip(dataY, axis=1)
×
371
    eFlip = np.flip(dataE, axis=1)
×
372

373
    # Inverse variance weighting
374
    dataYS = (dataY / dataE**2 + yFlip / eFlip**2) / (1 / dataE**2 + 1 / eFlip**2)
×
375
    dataES = 1 / np.sqrt(1 / dataE**2 + 1 / eFlip**2)
×
376

377
    # Deal with effects from previously changing dataE=np.inf
378
    nanInfMask = np.isinf(dataES) | np.isnan(dataES) | np.isnan(dataYS)
×
379
    dataYS[nanInfMask] = 0
×
380
    dataES[nanInfMask] = 0
×
381

382
    # Test that arrays are symmetrised
383
    np.testing.assert_array_equal(dataYS, np.flip(dataYS, axis=1), err_msg=f"Symmetrisation failed in {np.argwhere(dataYS != np.flip(dataYS, axis=1))}")
×
384
    np.testing.assert_array_equal(dataES, np.flip(dataES, axis=1), err_msg=f"Symmetrisation failed in {np.argwhere(dataES != np.flip(dataES, axis=1))}")
×
385

386
    # Test that cut-offs were not included in the symmetrisation
387
    np.testing.assert_allclose(dataYS[cutOffMask], np.flip(dataYO, axis=1)[cutOffMask])
×
388
    np.testing.assert_allclose(dataES[cutOffMask], np.flip(dataEO, axis=1)[cutOffMask])
×
389

390
    return dataYS, dataES
×
391

392

393
def fitProfileMinuit(yFitIC, wsYSpaceSym, wsRes):
1✔
394
    dataX, dataY, dataE = extractFirstSpectra(wsYSpaceSym)
×
395
    resX, resY, resE = extractFirstSpectra(wsRes)
×
396
    assert np.all(dataX == resX), "Resolution should operate on the same range as DataX"
×
397

398
    model, defaultPars, sharedPars = selectModelAndPars(yFitIC.fitting_model)
×
399

400
    xDelta, resDense = oddPointsRes(resX, resY)
×
401

402
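    # Discrete convolution with the interpolated resolution; multiplying by the bin width xDelta approximates the continuous convolution integral.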
    def convolvedModel(x, y0, *pars):
×
403
        return y0 + signal.convolve(model(x, *pars), resDense, mode="same") * xDelta
×
404

405
    signature = describe(model)[:]  # Build signature of convolved function
×
406
    signature[1:1] = ["y0"]  # Add intercept as first fitting parameter after range 'x'
×
407

408
    # Initialize limits as None, constrained later
409
    convolvedModel._parameters = {key: None for key in signature}
×
410
    defaultPars["y0"] = 0  # Add initialization of parameter to dictionary
×
411

412
    # Fit only valid values, ignore cut-offs
413
    dataXNZ, dataYNZ, dataENZ = selectNonZeros(dataX, dataY, dataE)
×
414

415
    # Fit with Minuit
416
    if np.all(dataE == 0):  # Choose fitting without weights
×
417
        costFun = MyLeastSquares(dataXNZ, dataYNZ, convolvedModel)
×
418
    else:
419
        costFun = cost.LeastSquares(dataXNZ, dataYNZ, dataENZ, convolvedModel)
×
420

421
    m = Minuit(costFun, **defaultPars)
×
422

423
    m.limits["A"] = (0, None)
×
424
    if yFitIC.fitting_model == "doublewell":
×
425
        m.limits["d"] = (0, None)
×
426
        m.limits["R"] = (0, None)
×
427

428
    if yFitIC.fitting_model == "gauss":
×
429
        m.simplex()
×
430
        m.migrad()
×
431

432
        def constrFunc() -> None:  # No constraint function for gaussian profile
×
433
            return
×
434

435
    else:
436

437
        def constrFunc(*pars):  # Constrain physical model before convolution
×
438
            return model(dataXNZ, *pars[1:])  # First parameter is intercept, not part of model()
×
439

440
        m.simplex()
×
441
        m.scipy(constraints=optimize.NonlinearConstraint(constrFunc, 0, np.inf))
×
442

443
    # Explicit calculation of Hessian after the fit
444
    m.hesse()
×
445

446
    # Reduced chi-squared (cost at the minimum divided by the number of degrees of freedom)
447
    chi2 = m.fval / (len(dataXNZ) - m.nfit)
×
448

449
    # Best fit and confidence band
450
    # Calculated for the whole range of dataX, including where zero
451
    dataYFit, dataYCov = jacobi.propagate(lambda pars: convolvedModel(dataX, *pars), m.values, m.covariance)
×
452
    dataYSigma = np.sqrt(np.diag(dataYCov))
×
453
    dataYSigma *= chi2  # Weight the confidence band
×
454
    Residuals = dataY - dataYFit
×
455

456
    wsYSpaceSym = CloneWorkspace(wsYSpaceSym, OutputWorkspace=wsYSpaceSym.name() + "_minuit_" + yFitIC.fitting_model)
×
457
    # Create workspace to store best fit curve and errors on the fit
458
    wsMinFit = createFitResultsWorkspace(wsYSpaceSym, dataX, dataY, dataE, dataYFit, dataYSigma, Residuals)
×
459
    saveMinuitPlot(yFitIC, wsMinFit, m)
×
460

461
    # Calculate correlation matrix
462
    corrMatrix = m.covariance.correlation()
×
463
    corrMatrix *= 100
×
464

465
    # Create correlation tableWorkspace
466
    createCorrelationTableWorkspace(wsYSpaceSym, m.parameters, corrMatrix)
×
467

468
    # Run Minos
469
    fitCols = runMinos(m, yFitIC, constrFunc, wsYSpaceSym.name())
×
470

471
    # Create workspace with final fitting parameters and their errors
472
    createFitParametersTableWorkspace(wsYSpaceSym, *fitCols, chi2)
×
473
    return
×
474

475

476
def extractFirstSpectra(ws):
1✔
477
    dataY = ws.extractY()[0]
×
478
    dataX = ws.extractX()[0]
×
479
    dataE = ws.extractE()[0]
×
480
    return dataX, dataY, dataE
×
481

482

483
def selectModelAndPars(modelFlag):
1✔
484
    """
485
    Selects the function to fit.
486
    Specifies the starting parameters of that function as default parameters.
487
    The shared parameters are used in the global fit.
488
    The defaultPars should be in the same order as the signature of the function
489
    """
490

491
    if modelFlag == "gauss":
×
492

493
        def model(x, A, x0, sigma):
×
494
            return A / (2 * np.pi) ** 0.5 / sigma * np.exp(-((x - x0) ** 2) / 2 / sigma**2)
×
495

496
        defaultPars = {"A": 1, "x0": 0, "sigma": 5}
×
497
        sharedPars = ["sigma"]  # Used only in Global fit
×
498

499
    elif modelFlag == "gauss_cntr":
×
500

501
        def model(x, A, sigma):
×
502
            return A / (2 * np.pi) ** 0.5 / sigma * np.exp(-(x**2) / 2 / sigma**2)
×
503

504
        defaultPars = {"A": 1, "sigma": 5}
×
505
        sharedPars = ["sigma"]  # Used only in Global fit
×
506

507
    elif modelFlag == "gcc4c6":
×
508

509
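        # Gaussian with Gram-Charlier-type corrections: the bracketed polynomials are the
        # Hermite polynomials H4(z) = 16z^4 - 48z^2 + 12 and H6(z) = 64z^6 - 480z^4 + 720z^2 - 120,
        # evaluated at z = (x - x0) / (sqrt(2) * sigma1) and weighted by c4/32 and c6/384.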
        def model(x, A, x0, sigma1, c4, c6):
×
510
            return (
×
511
                A
512
                * np.exp(-((x - x0) ** 2) / 2 / sigma1**2)
513
                / (np.sqrt(2 * np.pi * sigma1**2))
514
                * (
515
                    1
516
                    + c4 / 32 * (16 * ((x - x0) / np.sqrt(2) / sigma1) ** 4 - 48 * ((x - x0) / np.sqrt(2) / sigma1) ** 2 + 12)
517
                    + c6
518
                    / 384
519
                    * (
520
                        64 * ((x - x0) / np.sqrt(2) / sigma1) ** 6
521
                        - 480 * ((x - x0) / np.sqrt(2) / sigma1) ** 4
522
                        + 720 * ((x - x0) / np.sqrt(2) / sigma1) ** 2
523
                        - 120
524
                    )
525
                )
526
            )
527

528
        defaultPars = {"A": 1, "x0": 0, "sigma1": 6, "c4": 0, "c6": 0}
×
529
        sharedPars = ["sigma1", "c4", "c6"]  # Used only in Global fit
×
530

531
    elif modelFlag == "gcc4c6_cntr":
×
532

533
        def model(x, A, sigma1, c4, c6):
×
534
            return (
×
535
                A
536
                * np.exp(-(x**2) / 2 / sigma1**2)
537
                / (np.sqrt(2 * np.pi * sigma1**2))
538
                * (
539
                    1
540
                    + c4 / 32 * (16 * (x / np.sqrt(2) / sigma1) ** 4 - 48 * (x / np.sqrt(2) / sigma1) ** 2 + 12)
541
                    + c6
542
                    / 384
543
                    * (
544
                        64 * (x / np.sqrt(2) / sigma1) ** 6
545
                        - 480 * (x / np.sqrt(2) / sigma1) ** 4
546
                        + 720 * (x / np.sqrt(2) / sigma1) ** 2
547
                        - 120
548
                    )
549
                )
550
            )
551

552
        defaultPars = {"A": 1, "sigma1": 6, "c4": 0, "c6": 0}
×
553
        sharedPars = ["sigma1", "c4", "c6"]  # Used only in Global fit
×
554

555
    elif modelFlag == "gcc4":
×
556

557
        def model(x, A, x0, sigma1, c4):
×
558
            return (
×
559
                A
560
                * np.exp(-((x - x0) ** 2) / 2 / sigma1**2)
561
                / (np.sqrt(2 * np.pi * sigma1**2))
562
                * (1 + c4 / 32 * (16 * ((x - x0) / np.sqrt(2) / sigma1) ** 4 - 48 * ((x - x0) / np.sqrt(2) / sigma1) ** 2 + 12))
563
            )
564

565
        defaultPars = {"A": 1, "x0": 0, "sigma1": 6, "c4": 0}
×
566
        sharedPars = ["sigma1", "c4"]  # Used only in Global fit
×
567

568
    elif modelFlag == "gcc4_cntr":
×
569

570
        def model(x, A, sigma1, c4):
×
571
            return (
×
572
                A
573
                * np.exp(-(x**2) / 2 / sigma1**2)
574
                / (np.sqrt(2 * np.pi * sigma1**2))
575
                * (1 + c4 / 32 * (16 * (x / np.sqrt(2) / sigma1) ** 4 - 48 * (x / np.sqrt(2) / sigma1) ** 2 + 12))
576
            )
577

578
        defaultPars = {"A": 1, "sigma1": 6, "c4": 0}
×
579
        sharedPars = ["sigma1", "c4"]  # Used only in Global fit
×
580

581
    elif modelFlag == "gcc6":
×
582

583
        def model(x, A, x0, sigma1, c6):
×
584
            return (
×
585
                A
586
                * np.exp(-((x - x0) ** 2) / 2 / sigma1**2)
587
                / (np.sqrt(2 * np.pi * sigma1**2))
588
                * (
589
                    1
590
                    + c6
591
                    / 384
592
                    * (
593
                        64 * ((x - x0) / np.sqrt(2) / sigma1) ** 6
594
                        - 480 * ((x - x0) / np.sqrt(2) / sigma1) ** 4
595
                        + 720 * ((x - x0) / np.sqrt(2) / sigma1) ** 2
596
                        - 120
597
                    )
598
                )
599
            )
600

601
        defaultPars = {"A": 1, "x0": 0, "sigma1": 6, "c6": 0}
×
602
        sharedPars = ["sigma1", "c6"]  # Used only in Global fit
×
603

604
    elif modelFlag == "gcc6_cntr":
×
605

606
        def model(x, A, sigma1, c6):
×
607
            return (
×
608
                A
609
                * np.exp(-(x**2) / 2 / sigma1**2)
610
                / (np.sqrt(2 * np.pi * sigma1**2))
611
                * (
612
                    1
613
                    + c6
614
                    / 384
615
                    * (
616
                        64 * (x / np.sqrt(2) / sigma1) ** 6
617
                        - 480 * (x / np.sqrt(2) / sigma1) ** 4
618
                        + 720 * (x / np.sqrt(2) / sigma1) ** 2
619
                        - 120
620
                    )
621
                )
622
            )
623

624
        defaultPars = {"A": 1, "sigma1": 6, "c6": 0}
×
625
        sharedPars = ["sigma1", "c6"]  # Used only in Global fit
×
626

627
    elif modelFlag == "doublewell":
×
628

629
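        # Double-well line shape: the theta-dependent profile (including the interference term
        # with cos(beta)) is averaged over orientations with a sin(theta) weight using the
        # trapezoidal rule, normalised to unit area and then scaled by the amplitude A.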
        def model(x, A, d, R, sig1, sig2):
×
630
            # h = 2.04
631
            theta = np.linspace(0, np.pi, 300)[:, np.newaxis]  # 300 points seem like a good estimate for ~10 examples
×
632
            y = x[np.newaxis, :]
×
633

634
            sigTH = np.sqrt(sig1**2 * np.cos(theta) ** 2 + sig2**2 * np.sin(theta) ** 2)
×
635
            alpha = 2 * (d * sig2 * sig1 * np.sin(theta) / sigTH) ** 2
×
636
            beta = (2 * sig1**2 * d * np.cos(theta) / sigTH**2) * y
×
637
            denom = 2.506628 * sigTH * (1 + R**2 + 2 * R * np.exp(-2 * d**2 * sig1**2))
×
638
            jp = np.exp(-(y**2) / (2 * sigTH**2)) * (1 + R**2 + 2 * R * np.exp(-alpha) * np.cos(beta)) / denom
×
639
            jp *= np.sin(theta)
×
640

NEW
641
            JBest = np.trapezoid(jp, x=theta, axis=0)
×
NEW
642
            JBest /= np.abs(np.trapezoid(JBest, x=y))
×
643
            JBest *= A
×
644
            return JBest
×
645

646
        defaultPars = {
×
647
            "A": 1,
648
            "d": 1,
649
            "R": 1,
650
            "sig1": 3,
651
            "sig2": 5,
652
        }  # TODO: Starting parameters and bounds?
653
        sharedPars = ["d", "R", "sig1", "sig2"]  # Only varying parameter is amplitude A
×
654

NEW
655
    elif modelFlag == "gauss2d":
×
656
        # Anisotropic case: cylindrically symmetric Gaussian with widths sig1 and sig2, averaged over theta
UNCOV
657
        def model(x, A, sig1, sig2):
×
658
            # h = 2.04
659
            theta = np.linspace(0, np.pi, 300)[:, np.newaxis]
×
660
            y = x[np.newaxis, :]
×
661

662
            sigTH = np.sqrt(sig1**2 * np.cos(theta) ** 2 + sig2**2 * np.sin(theta) ** 2)
×
663
            jp = np.exp(-(y**2) / (2 * sigTH**2)) / (2.506628 * sigTH)
×
664
            jp *= np.sin(theta)
×
665

NEW
666
            JBest = np.trapezoid(jp, x=theta, axis=0)
×
NEW
667
            JBest /= np.abs(np.trapezoid(JBest, x=y))
×
668
            JBest *= A
×
669
            return JBest
×
670

671
        defaultPars = {"A": 1, "sig1": 3, "sig2": 5}
×
672
        sharedPars = ["sig1", "sig2"]
×
673

674
    elif modelFlag == "gauss3d":
×
675

676
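        # Anisotropic 3D Gaussian with widths sig_x, sig_y, sig_z, averaged numerically over
        # one octant (theta, phi in [0, pi/2]) with the trapezoidal rule and then normalised.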
        def model(x, A, sig_x, sig_y, sig_z):
×
677
            y = x[:, np.newaxis, np.newaxis]
×
678
            n_steps = 50  # Low number of integration steps because otherwise too slow
×
679
            theta = np.linspace(0, np.pi / 2, n_steps)[np.newaxis, :, np.newaxis]
×
680
            phi = np.linspace(0, np.pi / 2, n_steps)[np.newaxis, np.newaxis, :]
×
681

682
            S2_inv = (
×
683
                np.sin(theta) ** 2 * np.cos(phi) ** 2 / sig_x**2
684
                + np.sin(theta) ** 2 * np.sin(phi) ** 2 / sig_y**2
685
                + np.cos(theta) ** 2 / sig_z**2
686
            )
687

688
            J = np.sin(theta) / S2_inv * np.exp(-(y**2) / 2 * S2_inv)
×
689

NEW
690
            J = np.trapezoid(J, x=phi, axis=2)[:, :, np.newaxis]  # Keep shape
×
NEW
691
            J = np.trapezoid(J, x=theta, axis=1)
×
692

693
            J *= A * 2 / np.pi * 1 / np.sqrt(2 * np.pi) * 1 / (sig_x * sig_y * sig_z)  # Normalisation
×
694
            J = J.squeeze()
×
695
            return J
×
696

697
        defaultPars = {"A": 1, "sig_x": 5, "sig_y": 5, "sig_z": 5}
×
698
        sharedPars = ["sig_x", "sig_y", "sig_z"]
×
699

700
    else:
701
        raise ValueError(
×
702
            """
703
        Fitting Model not recognized, available options:
704
        'gauss', 'gauss_cntr', 'gcc4c6', 'gcc4c6_cntr', 'gcc4', 'gcc4_cntr', 'gcc6', 'gcc6_cntr', 'doublewell', 'gauss2d', 'gauss3d'
705
        """
706
        )
707

708
    logger.notice(f"\nShared Parameters: {[key for key in sharedPars]}")
×
709
    logger.notice(f"\nUnshared Parameters: {[key for key in defaultPars if key not in sharedPars]}")
×
710

711
    assert all(isinstance(item, str) for item in sharedPars), "Parameters in list must be strings."
×
712
    assert describe(model)[-len(sharedPars) :] == sharedPars, (
×
713
        "Function signature needs to have shared parameters at the end: model(*unsharedPars, *sharedPars)"
714
    )
715

716
    return model, defaultPars, sharedPars
×
717

718

719
def selectNonZeros(dataX, dataY, dataE):
1✔
720
    """
721
    Selects non zero points.
722
    Uses zeros in dataY because dataE can be all zeros in one of the bootstrap types.
723
    """
724
    zeroMask = dataY == 0
×
725

726
    dataXNZ = dataX[~zeroMask]
×
727
    dataYNZ = dataY[~zeroMask]
×
728
    dataENZ = dataE[~zeroMask]
×
729
    return dataXNZ, dataYNZ, dataENZ
×
730

731

732
class MyLeastSquares:
1✔
733
    """
734
    Generic least-squares cost function without error.
735
    This structure is required for high compatibility with Minuit.
736
    """
737

738
    errordef = Minuit.LEAST_SQUARES  # for Minuit to compute errors correctly
1✔
739

740
    def __init__(self, x, y, model):
1✔
741
        self.model = model  # model predicts y for given x
×
742
        self.x = np.asarray(x)
×
743
        self.y = np.asarray(y)
×
744
        self.func_code = make_func_code(describe(model)[1:])
×
745

746
    def __call__(self, *par):  # we accept a variable number of model parameters
1✔
747
        ym = self.model(self.x, *par)
×
748
        return np.sum((self.y - ym) ** 2)
×
749

750
    @property
1✔
751
    def ndata(self):
1✔
752
        return len(self.x)
×
753

754

755
def createFitResultsWorkspace(wsYSpaceSym, dataX, dataY, dataE, dataYFit, dataYSigma, Residuals):
1✔
756
    """Creates workspace similar to the ones created by Mantid Fit."""
757

758
    ws_fit_complete = CreateWorkspace(
×
759
        DataX=np.concatenate((dataX, dataX, dataX)),
760
        DataY=np.concatenate((dataY, dataYFit, Residuals)),
761
        DataE=np.concatenate((dataE, dataYSigma, np.zeros(len(dataE)))),
762
        NSpec=3,
763
        OutputWorkspace=wsYSpaceSym.name() + "_Workspace",
764
    )
765
    return ws_fit_complete
×
766

767

768
def saveMinuitPlot(yFitIC, wsMinuitFit, mObj):
1✔
769
    """Saves figure with Minuit Fit."""
770

771
    leg = ""
×
772
    for p, v, e in zip(mObj.parameters, mObj.values, mObj.errors):
×
773
        leg += f"${p}={v:.2f} \\pm {e:.2f}$\n"
×
774

775
    fig, ax = plt.subplots(subplot_kw={"projection": "mantid"})
×
776
    ax.errorbar(wsMinuitFit, "k.", wkspIndex=0, label="Weighted Avg")
×
777
    ax.errorbar(wsMinuitFit, "r-", wkspIndex=1, label=leg)
×
778
    ax.plot(wsMinuitFit, "b.", wkspIndex=2, label="Residuals")
×
779
    ax.set_xlabel("YSpace")
×
780
    ax.set_ylabel("Counts")
×
781
    ax.set_title("Minuit Fit")
×
782
    ax.grid()
×
783
    ax.legend()
×
784

785
    fileName = wsMinuitFit.name() + ".pdf"
×
786
    savePath = yFitIC.figSavePath / fileName
×
787
    plt.savefig(savePath, bbox_inches="tight")
×
788
    plt.close(fig)
×
789
    return
×
790

791

792
def createCorrelationTableWorkspace(wsYSpaceSym, parameters, corrMatrix):
1✔
793
    tableWS = CreateEmptyTableWorkspace(OutputWorkspace=wsYSpaceSym.name() + "_NormalizedCovarianceMatrix")
×
794
    tableWS.setTitle("Minuit Fit")
×
795
    tableWS.addColumn(type="str", name="Name")
×
796
    for p in parameters:
×
797
        tableWS.addColumn(type="float", name=p)
×
798
    for p, arr in zip(parameters, corrMatrix):
×
799
        tableWS.addRow([p] + list(arr))
×
800
    print_table_workspace(tableWS)
×
801

802

803
def runMinos(mObj, yFitIC, constrFunc, wsName):
1✔
804
    """Outputs columns to be displayed in a table workspace"""
805

806
    # Extract info from fit before running any MINOS
807
    parameters = list(mObj.parameters)
×
808
    values = list(mObj.values)
×
809
    errors = list(mObj.errors)
×
810

811
    # If Minos is set not to run, output columns with zeros for the Minos errors
812
    if not (yFitIC.run_minos):
×
813
        minosAutoErr = list(np.zeros((len(parameters), 2)))
×
814
        minosManErr = list(np.zeros((len(parameters), 2)))
×
815
        return parameters, values, errors, minosAutoErr, minosManErr
×
816

817
    bestFitVals = {}
×
818
    bestFitErrs = {}
×
819
    for p, v, e in zip(mObj.parameters, mObj.values, mObj.errors):
×
820
        bestFitVals[p] = v
×
821
        bestFitErrs[p] = e
×
822

823
    if yFitIC.fitting_model == "gauss":  # Case with no positivity constraint, can use automatic minos()
×
824
        mObj.minos()
×
825
        me = mObj.merrors
×
826

827
        # Build minos errors lists in suitable format
828
        minosAutoErr = []
×
829
        for p in parameters:
×
830
            minosAutoErr.append([me[p].lower, me[p].upper])
×
831
        minosManErr = list(np.zeros(np.array(minosAutoErr).shape))
×
832

833
        if yFitIC.show_plots:
×
834
            plotAutoMinos(mObj, wsName, yFitIC)
×
835

836
    else:  # Case with positivity constraint on function, use manual implementation
837
        # Changes values of minuit obj m, do not use m below this point
838
        merrors, fig = runAndPlotManualMinos(mObj, constrFunc, bestFitVals, bestFitErrs, yFitIC.show_plots, yFitIC, wsName)
×
839

840
        # Same as above, but the other way around
841
        minosManErr = []
×
842
        for p in parameters:
×
843
            minosManErr.append(merrors[p])
×
844
        minosAutoErr = list(np.zeros(np.array(minosManErr).shape))
×
845

846
        if yFitIC.show_plots:
×
847
            fig.show()
×
848

849
    return parameters, values, errors, minosAutoErr, minosManErr
×
850

851

852
def runAndPlotManualMinos(minuitObj, constrFunc, bestFitVals, bestFitErrs, showPlots, yFitIC, wsName):
1✔
853
    """
854
    Runs brute implementation of minos algorithm and
855
    plots the profile for each parameter along the way.
856
    """
857
    # The reason for two distinct operations inside the same function is that it's easier
858
    # to build the minos plots for each parameter as they are being calculated.
859
    logger.notice("\nRunning Minos ... \n")
×
860

861
    # Set format of subplots
862
    height = 2
×
863
    width = int(np.ceil(len(minuitObj.parameters) / 2))
×
864
    figsize = (12, 7)
×
865
    # Output plot to Mantid
866
    fig, axs = plt.subplots(
×
867
        height,
868
        width,
869
        tight_layout=True,
870
        figsize=figsize,
871
        subplot_kw={"projection": "mantid"},
872
    )  # subplot_kw={'projection':'mantid'}
873
    fig.canvas.manager.set_window_title(wsName + "_minos")
×
874

875
    merrors = {}
×
876
    for p, ax in zip(minuitObj.parameters, axs.flat):
×
877
        lerr, uerr = runMinosForPar(minuitObj, constrFunc, p, 2, ax, bestFitVals, bestFitErrs, showPlots)
×
878
        merrors[p] = np.array([lerr, uerr])
×
879

880
    # Hide plots not in use:
881
    for ax in axs.flat:
×
882
        if not ax.lines:  # If empty list
×
883
            ax.set_visible(False)
×
884

885
    # All axes share the same legend, so set the figure legend from the first axis
886
    handle, label = axs[0, 0].get_legend_handles_labels()
×
887
    fig.legend(handle, label, loc="lower right")
×
888
    savePath = yFitIC.figSavePath / fig.canvas.manager.get_window_title()
×
889
    plt.savefig(savePath, bbox_inches="tight")
×
890
    return merrors, fig
×
891

892

893
def runMinosForPar(minuitObj, constrFunc, var: str, bound: int, ax, bestFitVals, bestFitErrs, showPlots):
1✔
894
    resetMinuit(minuitObj, bestFitVals, bestFitErrs)
×
895
    # Run Fitting procedures again to be on the safe side and reset to minimum
896
    minuitObj.scipy(constraints=optimize.NonlinearConstraint(constrFunc, 0, np.inf))
×
897
    minuitObj.hesse()
×
898

899
    # Extract parameters from minimum
900
    varVal = minuitObj.values[var]
×
901
    varErr = minuitObj.errors[var]
×
902
    # Store fval of best fit
903
    fValsMin = minuitObj.fval  # Used to calculate error bands at the end
×
904

905
    varSpace = buildVarRange(bound, varVal, varErr)
×
906

907
    # Split variable space into right and left side
908
    lhsVarSpace, rhsVarSpace = np.split(varSpace, 2)
×
909
    lhsVarSpace = np.flip(lhsVarSpace)  # Flip to start at minimum
×
910

911
    for minimizer in ("Scipy", "Migrad"):
×
912
        resetMinuit(minuitObj, bestFitVals, bestFitErrs)
×
913
        rhsMinos = runMinosOnRange(minuitObj, var, rhsVarSpace, minimizer, constrFunc)
×
914

915
        resetMinuit(minuitObj, bestFitVals, bestFitErrs)
×
916
        lhsMinos = runMinosOnRange(minuitObj, var, lhsVarSpace, minimizer, constrFunc)
×
917

918
        wholeMinos = np.concatenate((np.flip(lhsMinos), rhsMinos), axis=None)  # Flip left hand side again
×
919

920
        if minimizer == "Scipy":  # Calculate minos errors from constrained scipy
×
921
            lerr, uerr = errsFromMinosCurve(varSpace, varVal, wholeMinos, fValsMin, dChi2=1)
×
922
            ax.plot(varSpace, wholeMinos, label="fVals Constr Scipy")
×
923

924
        elif minimizer == "Migrad":  # Plot migrad as well to see the difference between constrained and unconstrained
×
925
            plotProfile(ax, var, varSpace, wholeMinos, lerr, uerr, fValsMin, varVal, varErr)
×
926
        else:
927
            raise ValueError("Minimizer not recognized.")
×
928

929
    resetMinuit(minuitObj, bestFitVals, bestFitErrs)
×
930
    return lerr, uerr
×
931

932

933
def resetMinuit(minuitObj, bestFitVals, bestFitErrs):
1✔
934
    """Sets Minuit parameters to best fit values and errors."""
935
    for p in bestFitVals:
×
936
        minuitObj.values[p] = bestFitVals[p]
×
937
        minuitObj.errors[p] = bestFitErrs[p]
×
938
    return
×
939

940

941
def buildVarRange(bound, varVal, varErr):
1✔
942
    """Range of points over which cost function is evaluated."""
943
    # Create variable space more dense near the minima using a quadratic density
944
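    # A linear grid over [-sqrt(bound*varErr), sqrt(bound*varErr)] is squared (sign preserved) below, giving points spanning +/- bound*varErr that cluster quadratically around the best-fit value varVal.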
    limit = (bound * varErr) ** (1 / 2)  # Square root is corrected below
×
945
    varSpace = np.linspace(-limit, limit, 30)
×
946
    varSpace = varSpace**2 * np.sign(varSpace) + varVal
×
947
    assert len(varSpace) % 2 == 0, "Number of points in Minos range needs to be even"
×
948
    return varSpace
×
949

950

951
def runMinosOnRange(minuitObj, var, varRange, minimizer, constrFunc):
1✔
952
    result = np.zeros(varRange.size)
×
953
    minuitObj.fixed[var] = True
×
954

955
    # Unconstrained fit over side range
956
    for i, value in enumerate(varRange):
×
957
        minuitObj.values[var] = value  # Fix variable
×
958

959
        if minimizer == "Migrad":
×
960
            minuitObj.migrad()  # Fit
×
961
        elif minimizer == "Scipy":
×
962
            minuitObj.scipy(constraints=optimize.NonlinearConstraint(constrFunc, 0, np.inf))
×
963

964
        result[i] = minuitObj.fval  # Store minimum
×
965

966
    minuitObj.fixed[var] = False
×
967
    return result
×
968

969

970
def errsFromMinosCurve(varSpace, varVal, fValsScipy, fValsMin, dChi2=1):
1✔
971
    # Use interpolation to create a dense array of fmin values
972
    varSpaceDense = np.linspace(np.min(varSpace), np.max(varSpace), 100000)
×
973
    fValsScipyDense = np.interp(varSpaceDense, varSpace, fValsScipy)
×
974
    # Calculate points of intersection with line delta fmin val = 1
975
    idxErr = np.argwhere(np.diff(np.sign(fValsScipyDense - fValsMin - 1)))
×
976

977
    if idxErr.size != 2:  # Intersections not found, do not plot error range
×
978
        lerr, uerr = 0.0, 0.0
×
979
    else:
980
        lerr, uerr = varSpaceDense[idxErr].flatten() - varVal
×
981

982
        if lerr * uerr >= 0:  # Case where we get either two positive or two negative errors, ill defined profile
×
983
            lerr, uerr = 0, 0
×
984

985
    return lerr, uerr
×
986

987

988
def plotAutoMinos(minuitObj, wsName, yFitIC):
1✔
989
    # Set format of subplots
990
    height = 2
×
991
    width = int(np.ceil(len(minuitObj.parameters) / 2))
×
992
    figsize = (12, 7)
×
993
    # Output plot to Mantid
994
    fig, axs = plt.subplots(
×
995
        height,
996
        width,
997
        tight_layout=True,
998
        figsize=figsize,
999
        subplot_kw={"projection": "mantid"},
1000
    )
1001
    fig.canvas.manager.set_window_title(wsName + "_autominos")
×
1002

1003
    for p, ax in zip(minuitObj.parameters, axs.flat):
×
1004
        loc, fvals, status = minuitObj.mnprofile(p, bound=2)
×
1005

1006
        minfval = minuitObj.fval
×
1007
        minp = minuitObj.values[p]
×
1008
        hessp = minuitObj.errors[p]
×
1009
        lerr = minuitObj.merrors[p].lower
×
1010
        uerr = minuitObj.merrors[p].upper
×
1011
        plotProfile(ax, p, loc, fvals, lerr, uerr, minfval, minp, hessp)
×
1012

1013
    # Hide plots not in use:
1014
    for ax in axs.flat:
×
1015
        if not ax.lines:  # If empty list
×
1016
            ax.set_visible(False)
×
1017

1018
    # All axes share the same legend, so set the figure legend from the first axis
1019
    handle, label = axs[0, 0].get_legend_handles_labels()
×
1020
    fig.legend(handle, label, loc="lower right")
×
1021
    savePath = yFitIC.figSavePath / fig.canvas.manager.get_window_title()
×
1022
    plt.savefig(savePath, bbox_inches="tight")
×
1023
    fig.show()
×
1024

1025

1026
def plotProfile(ax, var, varSpace, fValsMigrad, lerr, uerr, fValsMin, varVal, varErr):
1✔
1027
    """
1028
    Plots the likelihood profile for the Migrad fvals.
1029
    varSpace : x axis
1030
    fValsMigrad : y axis
1031
    """
1032

1033
    ax.set_title(var + f" = {varVal:.3f} {lerr:.3f} {uerr:+.3f}")
×
1034

1035
    ax.plot(varSpace, fValsMigrad, label="fVals Migrad")
×
1036

1037
    ax.axvspan(lerr + varVal, uerr + varVal, alpha=0.2, color="red", label="Minos error")
×
1038
    ax.axvspan(
×
1039
        varVal - varErr,
1040
        varVal + varErr,
1041
        alpha=0.2,
1042
        color="green",
1043
        label="Hessian Std error",
1044
    )
1045

1046
    ax.axvline(varVal, 0.03, 0.97, color="k", ls="--")
×
1047
    ax.axhline(fValsMin + 1, 0.03, 0.97, color="k")
×
1048
    ax.axhline(fValsMin, 0.03, 0.97, color="k")
×
1049

1050

1051
def createFitParametersTableWorkspace(wsYSpaceSym, parameters, values, errors, minosAutoErr, minosManualErr, chi2):
1✔
1052
    # Create Parameters workspace
1053
    tableWS = CreateEmptyTableWorkspace(OutputWorkspace=wsYSpaceSym.name() + "_Parameters")
×
1054
    tableWS.setTitle("Minuit Fit")
×
1055
    tableWS.addColumn(type="str", name="Name")
×
1056
    tableWS.addColumn(type="float", name="Value")
×
1057
    tableWS.addColumn(type="float", name="Error")
×
1058
    tableWS.addColumn(type="float", name="Auto Minos Error-")
×
1059
    tableWS.addColumn(type="float", name="Auto Minos Error+")
×
1060
    tableWS.addColumn(type="float", name="Manual Minos Error-")
×
1061
    tableWS.addColumn(type="float", name="Manual Minos Error+")
×
1062

1063
    for p, v, e, mae, mme in zip(parameters, values, errors, minosAutoErr, minosManualErr):
×
1064
        tableWS.addRow([p, v, e, mae[0], mae[1], mme[0], mme[1]])
×
1065

1066
    tableWS.addRow(["Cost function", chi2, 0, 0, 0, 0, 0])
×
1067
    return
×
1068

1069

1070
def oddPointsRes(x, res):
1✔
1071
    """
1072
    Make an odd grid that ensures the resolution has a single peak at the center.
1073
    """
1074

1075
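    # An odd number of points keeps the resolution maximum at the central sample, so the convolution with mode="same" in fitProfileMinuit does not shift the fitted profile by half a bin.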
    assert np.min(x) == -np.max(x), "Resolution needs to be in symmetric range!"
×
1076
    assert x.size == res.size, "x and res need to be the same size!"
×
1077

1078
    if res.size % 2 == 0:
×
1079
        dens = res.size + 1  # If even change to odd
×
1080
    else:
1081
        dens = res.size  # If odd, keep being odd
×
1082

1083
    xDense = np.linspace(np.min(x), np.max(x), dens)  # Make grid with an odd number of points - peak at center
×
1084
    xDelta = xDense[1] - xDense[0]
×
1085

1086
    resDense = np.interp(xDense, x, res)
×
1087

1088
    return xDelta, resDense
×
1089

1090

1091
def fitProfileMantidFit(yFitIC, wsYSpaceSym, wsRes):
1✔
1092
    logger.notice("\nFitting on the sum of spectra in the West domain ...\n")
×
1093
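    # Repeat the fit of the selected model with Mantid's Fit algorithm, once per minimizer; Fit creates its own output workspaces holding the results.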
    for minimizer in ["Levenberg-Marquardt", "Simplex"]:
×
1094
        if yFitIC.fitting_model == "gauss":
×
1095
            function = f"""composite=Convolution,FixResolution=true,NumDeriv=true;
×
1096
            name=Resolution,Workspace={wsRes.name()},WorkspaceIndex=0;
1097
            name=UserFunction,Formula=y0 + A*exp( -(x-x0)^2/2/sigma^2)/(2*3.1415*sigma^2)^0.5,
1098
            y0=0,A=1,x0=0,sigma=5,   ties=()"""
1099

1100
        elif yFitIC.fitting_model == "gcc4c6":
×
1101
            function = f"""
×
1102
            composite=Convolution,FixResolution=true,NumDeriv=true;
1103
            name=Resolution,Workspace={wsRes.name()},WorkspaceIndex=0,X=(),Y=();
1104
            name=UserFunction,Formula=y0 + A*exp( -(x-x0)^2/2./sigma1^2)/(sqrt(2.*3.1415*sigma1^2))
1105
            *(1.+c4/32.*(16.*((x-x0)/sqrt(2)/sigma1)^4-48.*((x-x0)/sqrt(2)/sigma1)^2+12)+c6/384*
1106
            (64*((x-x0)/sqrt(2)/sigma1)^6 - 480*((x-x0)/sqrt(2)/sigma1)^4 + 720*((x-x0)/sqrt(2)/sigma1)^2 - 120)),
1107
            y0=0, A=1,x0=0,sigma1=4.0,c4=0.0,c6=0.0,ties=(),constraints=(0<c4,0<c6)
1108
            """
1109
        elif yFitIC.fitting_model == "gcc4":
×
1110
            function = f"""
×
1111
            composite=Convolution,FixResolution=true,NumDeriv=true;
1112
            name=Resolution,Workspace={wsRes.name()},WorkspaceIndex=0,X=(),Y=();
1113
            name=UserFunction,Formula=y0 + A*exp( -(x-x0)^2/2./sigma1^2)/(sqrt(2.*3.1415*sigma1^2))
1114
            *(1.+c4/32.*(16.*((x-x0)/sqrt(2)/sigma1)^4-48.*((x-x0)/sqrt(2)/sigma1)^2+12)),
1115
            y0=0, A=1,x0=0,sigma1=4.0,c4=0.0,ties=()
1116
            """
1117
        elif yFitIC.fitting_model == "gcc6":
×
1118
            function = f"""
×
1119
            composite=Convolution,FixResolution=true,NumDeriv=true;
1120
            name=Resolution,Workspace={wsRes.name()},WorkspaceIndex=0,X=(),Y=();
1121
            name=UserFunction,Formula=y0 + A*exp( -(x-x0)^2/2./sigma1^2)/(sqrt(2.*3.1415*sigma1^2))
1122
            *(1.+c6/384*(64*((x-x0)/sqrt(2)/sigma1)^6 - 480*((x-x0)/sqrt(2)/sigma1)^4 + 720*((x-x0)/sqrt(2)/sigma1)^2 - 120)),
1123
            y0=0, A=1,x0=0,sigma1=4.0,c6=0.0,ties=()
1124
            """
1125
        elif (
×
1126
            (yFitIC.fitting_model == "doublewell")
1127
            | (yFitIC.fitting_model == "gauss2d")
1128
            | (yFitIC.fitting_model == "gauss3d")
1129
            | (yFitIC.fitting_model == "gauss_cntr")
1130
            | (yFitIC.fitting_model == "gcc4c6_cntr")
1131
            | (yFitIC.fitting_model == "gcc4_cntr")
1132
            | (yFitIC.fitting_model == "gcc6_cntr")
1133
        ):
NEW
1134
            logger.warning("Fitting model recognized but not currently implemented in Mantid Fit. Skipping Mantid Fit ...")
×
UNCOV
1135
            return
×
1136
        else:
1137
            raise ValueError(
×
1138
                """
1139
            Fitting Model not recognized, available options:
1140
            'gauss', 'gauss_cntr', 'gcc4c6', 'gcc4c6_cntr', 'gcc4', 'gcc4_cntr', 'gcc6', 'gcc6_cntr', 'doublewell', 'gauss2d', 'gauss3d'
1141
            """
1142
            )
1143

1144
        suffix = "lm" if minimizer == "Levenberg-Marquardt" else minimizer.lower()
×
1145
        outputName = wsYSpaceSym.name() + "_" + suffix + "_" + yFitIC.fitting_model
×
1146
        CloneWorkspace(InputWorkspace=wsYSpaceSym, OutputWorkspace=outputName)
×
1147

1148
        Fit(
×
1149
            Function=function,
1150
            InputWorkspace=outputName,
1151
            Output=outputName,
1152
            Minimizer=minimizer,
1153
        )
1154
        # Fit produces output workspaces with results
1155
    return
×
1156

1157

1158
def printYSpaceFitResults():
1✔
1159
    for ws_name in mtd.getObjectNames():
×
1160
        if ws_name.endswith("Parameters"):
×
1161
            print_table_workspace(mtd[ws_name])
×
1162

1163

1164
def runGlobalFit(wsYSpace, wsRes, IC):
1✔
1165
    logger.notice("\nRunning GLobal Fit ...\n")
×
1166

1167
    dataX, dataY, dataE, dataRes, instrPars = extractData(wsYSpace, wsRes, IC)
×
1168
    dataX, dataY, dataE, dataRes, instrPars = takeOutMaskedSpectra(dataX, dataY, dataE, dataRes, instrPars)
×
1169

1170
    idxList = groupDetectors(instrPars, IC)
×
1171
    dataX, dataY, dataE, dataRes = avgWeightDetGroups(dataX, dataY, dataE, dataRes, idxList, IC)
×
1172

1173
    if IC.do_symmetrisation:
×
1174
        dataY, dataE = weightedSymArr(dataY, dataE)
×
1175

1176
    model, defaultPars, sharedPars = selectModelAndPars(IC.fitting_model)
×
1177

1178
    totCost = 0
×
1179
    for i, (x, y, yerr, res) in enumerate(zip(dataX, dataY, dataE, dataRes)):
×
1180
        totCost += calcCostFun(model, i, x, y, yerr, res, sharedPars)
×
1181

1182
    defaultPars["y0"] = 0  # Introduce default parameter for convolved model
×
1183

1184
    assert len(describe(totCost)) == len(sharedPars) + len(dataY) * (len(defaultPars) - len(sharedPars)), (
×
1185
        f"Wrong parameters for Global Fit:\n{describe(totCost)}"
1186
    )
1187

1188
    # Minuit Fit with global cost function and local+global parameters
1189
    initPars = minuitInitialParameters(defaultPars, sharedPars, len(dataY))
×
1190

1191
    logger.notice("\nRunning Global Fit ...\n")
×
1192
    m = Minuit(totCost, **initPars)
×
1193

1194
    for i in range(len(dataY)):  # Set limits for unshared parameters
×
1195
        m.limits["A" + str(i)] = (0, np.inf)
×
1196

1197
    if IC.fitting_model == "doublewell":
×
1198
        m.limits["d"] = (0, np.inf)  # Shared parameters
×
1199
        m.limits["R"] = (0, np.inf)
×
1200

1201
    t0 = time.time()
×
1202
    if IC.fitting_model == "gauss":
×
1203
        m.simplex()
×
1204
        m.migrad()
×
1205

1206
    else:
1207
        totSig = describe(totCost)  # This signature has 'x' already removed
×
1208
        sharedIdxs = [totSig.index(shPar) for shPar in sharedPars]
×
1209
        nCostFunctions = len(totCost)  # Number of individual cost functions
×
1210
        x = dataX[0]
×
1211

1212
        def constr(*pars):
×
1213
            """
1214
            Constraint for positivity of non Gaussian function.
1215
            Input: All parameters defined in global cost function.
1216
            x is the range for each individual cost fun, defined outside function.
1217
            Builds array with all constraints from individual functions.
1218
            """
1219

1220
            sharedPars = [pars[i] for i in sharedIdxs]  # sigma1, c4, c6 in original GC
×
1221
            unsharedPars = np.delete(pars, sharedIdxs, None)
×
1222
            unsharedParsSplit = np.split(unsharedPars, nCostFunctions)  # Splits unshared parameters per individual cost fun
×
1223

1224
            joinedGC = np.zeros(nCostFunctions * x.size)
×
1225
            for i, unshParsModel in enumerate(
×
1226
                unsharedParsSplit
1227
            ):  # Attention to format of unshared and shared parameters when calling model
1228
                joinedGC[i * x.size : (i + 1) * x.size] = model(
×
1229
                    x, *unshParsModel[1:], *sharedPars
1230
                )  # Intercept is first of unshared parameters
1231

1232
            return joinedGC
×
1233

1234
        m.simplex()
×
1235
        m.scipy(constraints=optimize.NonlinearConstraint(constr, 0, np.inf))
×
1236

1237
    t1 = time.time()
×
1238
    logger.notice(f"\nTime of fitting: {t1 - t0:.2f} seconds")
×
1239

1240
    # Explicitly calculate errors
1241
    m.hesse()
×
1242

1243
    # Number of non zero points (considered in the fit) minus no of parameters
1244
    chi2 = m.fval / (np.sum(dataE != 0) - m.nfit)
×
1245

1246
    create_table_for_global_fit_parameters(wsYSpace.name(), IC.fitting_model, m, chi2)
×
1247

1248
    if IC.show_plots:
×
1249
        plotGlobalFit(dataX, dataY, dataE, m, totCost, wsYSpace.name(), IC)
×
1250

1251
    # Pass into array to store values in variable
1252
    return np.array(m.values), np.array(m.errors)
×
1253

1254

1255
def extractData(ws, wsRes, ic):
1✔
1256
    dataY = ws.extractY()
×
1257
    dataE = ws.extractE()
×
1258
    dataX = ws.extractX()
×
1259
    dataRes = wsRes.extractY()
×
1260
    instrPars = loadInstrParsFileIntoArray(ic)
×
1261
    assert len(instrPars) == len(dataY), "Load of IP file not working correctly, probable issue with indexing."
×
1262
    return dataX, dataY, dataE, dataRes, instrPars
×
1263

1264

1265
def loadInstrParsFileIntoArray(ic):
1✔
1266
    ipFilesPath = Path(handle_config.read_config_var("caching.ipfolder"))
×
1267
    data = np.loadtxt(str(ipFilesPath / ic.instrument_parameters_file), dtype=str)[1:].astype(float)
×
1268
    spectra = data[:, 0]
×
1269
    firstSpec, lastSpec = [int(d) for d in ic.detectors.split("-")]
×
1270
    select_rows = np.where((spectra >= firstSpec) & (spectra <= lastSpec))
×
1271
    instrPars = data[select_rows]
×
1272
    return instrPars
×
1273

1274

1275
def takeOutMaskedSpectra(dataX, dataY, dataE, dataRes, instrPars):
1✔
1276
    zerosRowMask = np.all(dataY == 0, axis=1)
×
1277
    dataY = dataY[~zerosRowMask]
×
1278
    dataE = dataE[~zerosRowMask]
×
1279
    dataX = dataX[~zerosRowMask]
×
1280
    dataRes = dataRes[~zerosRowMask]
×
1281
    instrPars = instrPars[~zerosRowMask]
×
1282
    return dataX, dataY, dataE, dataRes, instrPars
×
1283

1284

1285
# ------- Groupings
1286

1287

1288
def groupDetectors(ipData, yFitIC):
1✔
1289
    """
1290
    Uses the method of k-means to find clusters in theta-L1 space.
1291
    Input: instrument parameters to extract L1 and theta of detectors.
1292
    Output: list of group lists containing the idx of spectra.
1293
    """
1294

1295
    checkNGroupsValid(yFitIC, ipData)
×
1296

1297
    logger.notice(f"\nNumber of gropus: {yFitIC.number_of_global_fit_groups}")
×
1298

1299
    L1 = ipData[:, -1].copy()
×
1300
    theta = ipData[:, 2].copy()
×
1301

1302
    # Normalize  ranges to similar values, needed for clustering
1303
    L1 /= np.sum(L1)
×
1304
    theta /= np.sum(theta)
×
1305

1306
    L1 *= 2  # Bigger weight to L1
×
1307

1308
    points = np.vstack((L1, theta)).T
×
1309
    assert points.shape == (len(L1), 2), "Wrong shape."
×
1310
    # Initial centers of groups
1311
    startingIdxs = np.linspace(0, len(points) - 1, yFitIC.number_of_global_fit_groups).astype(int)
×
1312
    centers = points[startingIdxs, :]  # Centers of cluster groups, NOT fitting parameter
×
1313

1314
    if False:  # Set to True to investigate problems with groupings
×
1315
        plotDetsAndInitialCenters(L1, theta, centers)
1316

1317
    clusters = kMeansClustering(points, centers)
×
1318
    idxList = formIdxList(clusters)
×
1319

1320
    if yFitIC.show_plots:
×
1321
        fig, ax = plt.subplots(tight_layout=True, subplot_kw={"projection": "mantid"})
×
1322
        fig.canvas.manager.set_window_title("Grouping of detectors")
×
1323
        plotFinalGroups(ax, ipData, idxList)
×
1324
        fig.show()
×
1325
    return idxList
×
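# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shows, on made-up L1/theta values, the scaling applied above before clustering:
# each coordinate is normalised by its sum and L1 is given double weight, so the
# clustering operates on comparable, L1-biased axes. All values are hypothetical.
def _sketch_cluster_space():
    import numpy as np

    L1 = np.array([11.0, 11.1, 11.2, 0.7, 0.72])         # metres, toy values
    theta = np.array([35.0, 40.0, 45.0, 130.0, 140.0])   # degrees, toy values
    L1_scaled = 2 * L1 / np.sum(L1)                      # bigger weight to L1
    theta_scaled = theta / np.sum(theta)
    points = np.vstack((L1_scaled, theta_scaled)).T      # shape (nDet, 2) fed to k-means
    return points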
1326

1327

1328
def checkNGroupsValid(yFitIC, ipData):
1✔
1329
    """Checks number of groups selected for global fit is valid."""
1330

1331
    nSpectra = len(ipData)  # Number of spectra in the workspace
×
1332

1333
    if yFitIC.number_of_global_fit_groups == "all":
×
1334
        yFitIC.number_of_global_fit_groups = nSpectra
×
1335
    else:
1336
        assert isinstance(yFitIC.number_of_global_fit_groups, int), "Number of global groups needs to be an integer."
×
1337
        assert yFitIC.number_of_global_fit_groups <= nSpectra, (
×
1338
            "Number of global groups needs to be less or equal to the no of unmasked spectra."
1339
        )
1340
        assert yFitIC.number_of_global_fit_groups > 0, "Number of global groups needs to be greater than zero."
×
1341
    return
×
1342

1343

1344
def kMeansClustering(points, centers):
1✔
1345
    """
1346
    Algorithm used to form groups of detectors.
1347
    Works best for spherical groups with similar scaling on the x and y axes.
1348
    Fails in some rare cases; the solution is to try a different number of groups.
1349
    Returns clusters in the form of an integer i assigned to each detector.
1350
    Detectors with the same i assigned belong to the same group.
1351
    """
1352

1353
    prevCenters = centers  # Starting centers
×
1354
    while True:
×
1355
        clusters = closestCenter(points, prevCenters)  # Form groups by assigning points to their closest center
×
1356
        centers = calculateCenters(points, clusters)  # Recalculate centers of new groups
×
1357

1358
        if np.all(centers == prevCenters):
×
1359
            break
×
1360

1361
        assert np.isfinite(centers).all(), f"Invalid centers found:\n{centers}\nTry a different number for the groupings."
×
1362

1363
        prevCenters = centers
×
1364

1365
    clusters = closestCenter(points, centers)
×
1366
    return clusters
×
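# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The loop above is a plain Lloyd-style k-means. As a sanity check on toy data,
# a similar grouping can be reproduced with scipy.cluster.vq.kmeans2, seeded with
# explicit starting centres much like the caller above does (assuming scipy's
# kmeans2 interface). This is only a comparison aid, not what the module uses.
def _sketch_kmeans_crosscheck():
    import numpy as np
    from scipy.cluster.vq import kmeans2

    rng = np.random.default_rng(1)
    points = np.vstack(
        [rng.normal(loc, 0.05, size=(20, 2)) for loc in ((0.0, 0.0), (1.0, 1.0), (0.0, 1.0))]
    )
    start = points[np.linspace(0, len(points) - 1, 3).astype(int)]
    centres, labels = kmeans2(points, start, minit="matrix")
    return centres, labels  # `labels` plays the role of `clusters` above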
1367

1368

1369
def closestCenter(points, centers):
1✔
1370
    """
1371
    Checks each point and assigns it to the closest center.
1372
    Each center is represented by an int i.
1373
    Returns clusters with corresponding centers.
1374
    """
1375

1376
    clusters = np.zeros(len(points))
×
1377
    for p in range(len(points)):  # Iterate over each point
×
1378
        distMin = np.inf  # To be replaced in first iteration
×
1379

1380
        for i in range(len(centers)):  # Assign closest center to point
×
1381
            dist = pairDistance(points[p], centers[i])
×
1382

1383
            if dist < distMin:  # Store minimum found
×
1384
                distMin = dist
×
1385
                closeCenter = i
×
1386

1387
        clusters[p] = closeCenter
×
1388
    return clusters
×
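# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Vectorised equivalent of the assignment loop above: compute all point-to-centre
# Euclidean distances at once and take the argmin along the centre axis. Included
# only to clarify what "closest centre" means; the module keeps the explicit loop.
def _sketch_closest_center_vectorised(points, centers):
    import numpy as np

    # distances has shape (nPoints, nCenters)
    distances = np.linalg.norm(points[:, None, :] - centers[None, :, :], axis=-1)
    return np.argmin(distances, axis=1).astype(float)  # same dtype as `clusters` above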
1389

1390

1391
def pairDistance(p1, p2):
1✔
1392
    "Calculates the distance between two points."
1393
    return np.sqrt(np.sum(np.square(p1 - p2)))
×
1394

1395

1396
def calculateCenters(points, clusters):
1✔
1397
    """Calculates centers for the given clusters"""
1398

1399
    nGroups = len(np.unique(clusters))
×
1400

1401
    centers = np.zeros((nGroups, 2))
×
1402
    for i in range(nGroups):
×
1403
        centers[i, :] = np.mean(points[clusters == i, :], axis=0)  # If cluster i is not present, returns nan
×
1404
    return centers
×
1405

1406

1407
def formIdxList(clusters):
1✔
1408
    """Converts assignment of clusters into a list of indexes."""
1409

1410
    idxList = []
×
1411
    for i in np.unique(clusters):
×
1412
        idxs = np.argwhere(clusters == i).flatten()
×
1413
        idxList.append(list(idxs))
×
1414

1415
    # Print groupings information
1416
    logger.notice("\nGroups formed successfully:\n")
×
1417
    groupLen = np.array([len(group) for group in idxList])
×
1418
    unique, counts = np.unique(groupLen, return_counts=True)
×
1419
    for length, no in zip(unique, counts):
×
1420
        logger.notice(f"{no} groups with {length} detectors.")
×
1421

1422
    return idxList
×
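# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Tiny worked example of the conversion above: an assignment array is turned into
# one list of spectrum indices per group (np.unique keeps the group labels sorted).
def _sketch_form_idx_list():
    import numpy as np

    clusters = np.array([0.0, 0.0, 2.0, 1.0, 2.0])
    idx_list = [list(np.argwhere(clusters == i).flatten()) for i in np.unique(clusters)]
    return idx_list  # [[0, 1], [3], [2, 4]]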
1423

1424

1425
def plotDetsAndInitialCenters(L1, theta, centers):
1✔
1426
    """Used in debugging."""
1427
    fig, ax = plt.subplots(tight_layout=True, subplot_kw={"projection": "mantid"})
×
1428
    fig.canvas.manager.set_window_title("Starting centroids for groupings")
×
1429
    ax.scatter(L1, theta, alpha=0.3, color="r", label="Detectors")
×
1430
    ax.scatter(centers[:, 0], centers[:, 1], color="k", label="Starting centroids")
×
1431
    ax.axes.xaxis.set_ticks([])  # Numbers plotted do not correspond to real numbers, so hide them
×
1432
    ax.axes.yaxis.set_ticks([])
×
1433
    ax.set_xlabel("L1")
×
1434
    ax.set_ylabel("Theta")
×
1435
    ax.legend()
×
1436
    fig.show()
×
1437

1438

1439
def plotFinalGroups(ax, ipData, idxList):
1✔
1440
    """Plot of groupings of detectors."""
1441

1442
    for i, idxs in enumerate(idxList):
×
1443
        L1 = ipData[idxs, -1]
×
1444
        theta = ipData[idxs, 2]
×
1445
        ax.scatter(L1, theta, label=f"Group {i}")
×
1446

1447
        dets = ipData[idxs, 0]
×
1448
        for det, x, y in zip(dets, L1, theta):
×
1449
            ax.text(x, y, str(int(det)), fontsize=8)
×
1450

1451
    ax.set_xlabel("L1")
×
1452
    ax.set_ylabel("Theta")
×
1453
    ax.legend()
×
1454
    return
×
1455

1456

1457
# --------- Weighted Avg of detectors
1458

1459

1460
def avgWeightDetGroups(dataX, dataY, dataE, dataRes, idxList, yFitIC):
1✔
1461
    """
1462
    Performs weighted average on each detector group given by the index list.
1463
    The input arrays do not include masked spectra.
1464
    """
1465
    assert ~np.any(np.all(dataY == 0, axis=1)), (
×
1466
        f"Input data should not include masked spectra at: {np.argwhere(np.all(dataY == 0, axis=1))}"
1467
    )
1468

1469
    if yFitIC.mask_zeros_with == "nan":
×
1470
        return avgGroupsWithBins(dataX, dataY, dataE, dataRes, idxList, yFitIC)
×
1471

1472
    # Use Default for unmasked or NCP masked
1473
    return avgGroupsOverCols(dataX, dataY, dataE, dataRes, idxList)
×
1474

1475

1476
def avgGroupsOverCols(dataX, dataY, dataE, dataRes, idxList):
1✔
1477
    """
1478
    Averaging used when JoY workspace is already Rebinned and Normalised.
1479
    Selects groups of detectors and performs the weighted average for each group.
1480
    Returns arrays with the group averages.
1481
    """
1482

1483
    wDataX, wDataY, wDataE, wDataRes = initiateZeroArr((len(idxList), len(dataY[0])))
×
1484

1485
    for i, idxs in enumerate(idxList):
×
1486
        groupX, groupY, groupE, groupRes = extractArrByIdx(dataX, dataY, dataE, dataRes, idxs)
×
1487
        assert len(groupY) > 0, "Group with zero detectors found, invalid."
×
1488

1489
        if len(groupY) == 1:  # Cannot use weighted avg on a single spectrum, gives wrong results
×
1490
            meanY, meanE = groupY, groupE
×
1491
            meanRes = groupRes
×
1492

1493
        else:
1494
            meanY, meanE = weightedAvgArr(groupY, groupE)
×
1495
            meanRes = np.nanmean(groupRes, axis=0)  # NaNs should not be present, but kept as a safeguard
×
1496

1497
        assert np.all(groupX[0] == np.mean(groupX, axis=0)), "X values should not change with groups"
×
1498

1499
        for wsData, mean in zip([wDataX, wDataY, wDataE, wDataRes], [groupX[0], meanY, meanE, meanRes]):
×
1500
            wsData[i] = mean
×
1501

1502
    assert ~np.any(np.all(wDataY == 0, axis=1)), (
×
1503
        f"Some avg weights in groups are not being performed:\n{np.argwhere(np.all(wDataY == 0, axis=1))}"
1504
    )
1505
    return wDataX, wDataY, wDataE, wDataRes
×
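# --- Illustrative sketch (editor's addition, not part of the original module) ---
# weightedAvgArr is defined elsewhere in this module and is not shown here; as a
# rough guide, the usual inverse-variance weighted average over the spectra of one
# group looks like the sketch below (an assumption about its behaviour, not a copy).
def _sketch_inverse_variance_average(groupY, groupE):
    import numpy as np

    weights = 1.0 / np.square(groupE)                  # w_i = 1 / sigma_i^2, per bin
    meanY = np.sum(groupY * weights, axis=0) / np.sum(weights, axis=0)
    meanE = 1.0 / np.sqrt(np.sum(weights, axis=0))     # propagated error of the mean
    return meanY, meanE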
1506

1507

1508
def avgGroupsWithBins(dataX, dataY, dataE, dataRes, idxList, yFitIC):
1✔
1509
    """
1510
    Performed only when the option to mask with NaNs and bins is turned on.
1511
    Selection of groups is done as usual.
1512
    The weighted average uses an altered function to account for the unique format.
1513
    Several dataY points correspond to each dataX point.
1514
    """
1515

1516
    # Build range to average over
1517
    meanX = buildXRangeFromRebinPars(yFitIC)
×
1518

1519
    wDataX, wDataY, wDataE, wDataRes = initiateZeroArr((len(idxList), len(meanX)))
×
1520
    for i, idxs in enumerate(idxList):
×
1521
        groupX, groupY, groupE, groupRes = extractArrByIdx(dataX, dataY, dataE, dataRes, idxs)
×
1522

1523
        meanY, meanE = weightedAvgXBinsArr(groupX, groupY, groupE, meanX)
×
1524

1525
        meanRes = np.nanmean(groupRes, axis=0)  # NaNs should not be present, but kept as a safeguard
×
1526

1527
        for wsData, mean in zip([wDataX, wDataY, wDataE, wDataRes], [meanX, meanY, meanE, meanRes]):
×
1528
            wsData[i] = mean
×
1529

1530
    return wDataX, wDataY, wDataE, wDataRes
×
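# --- Illustrative sketch (editor's addition, not part of the original module) ---
# buildXRangeFromRebinPars and weightedAvgXBinsArr are defined elsewhere in this
# module. The sketch below only illustrates the idea stated in the docstring:
# scattered (x, y, err) points are collected into common bins and averaged with
# NaN-aware inverse-variance weights. It is an assumption, not the actual helper,
# and it assumes uniformly spaced bin centres.
def _sketch_bin_and_average(x, y, err, bin_centres):
    import numpy as np

    half_width = 0.5 * (bin_centres[1] - bin_centres[0])
    meanY = np.full(bin_centres.shape, np.nan)
    meanE = np.full(bin_centres.shape, np.nan)
    for i, c in enumerate(bin_centres):
        in_bin = (np.abs(x - c) <= half_width) & np.isfinite(y) & np.isfinite(err) & (err > 0)
        if not np.any(in_bin):
            continue  # bin stays NaN when no valid points fall inside it
        w = 1.0 / np.square(err[in_bin])
        meanY[i] = np.sum(y[in_bin] * w) / np.sum(w)
        meanE[i] = 1.0 / np.sqrt(np.sum(w))
    return meanY, meanE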
1531

1532

1533
def initiateZeroArr(shape):
1✔
1534
    wDataX = np.zeros(shape)
×
1535
    wDataY = np.zeros(shape)
×
1536
    wDataE = np.zeros(shape)
×
1537
    wDataRes = np.zeros(shape)
×
1538
    return wDataX, wDataY, wDataE, wDataRes
×
1539

1540

1541
def extractArrByIdx(dataX, dataY, dataE, dataRes, idxs):
1✔
1542
    groupE = dataE[idxs, :]
×
1543
    groupY = dataY[idxs, :]
×
1544
    groupX = dataX[idxs, :]
×
1545
    groupRes = dataRes[idxs, :]
×
1546
    return groupX, groupY, groupE, groupRes
×
1547

1548

1549
def calcCostFun(model, i, x, y, yerr, res, sharedPars):
1✔
1550
    "Returns cost function for one spectrum i to be summed to total cost function"
1551

1552
    xDelta, resDense = oddPointsRes(x, res)
×
1553

1554
    def convolvedModel(xrange, y0, *pars):
×
1555
        """Performs convolution first on high density grid and interpolates to desired x range"""
1556
        return y0 + signal.convolve(model(xrange, *pars), resDense, mode="same") * xDelta
×
1557

1558
    signature = describe(model)[:]
×
1559
    signature[1:1] = ["y0"]
×
1560

1561
    costSig = [key if key in sharedPars else key + str(i) for key in signature]
×
1562
    convolvedModel._parameters = {key: None for key in costSig}
×
1563

1564
    # Select only valid data, i.e. when error is not 0 or nan or inf
1565
    nonZeros = (yerr != 0) & np.isfinite(yerr) & np.isfinite(y)  # comparisons with np.nan never match, so use np.isfinite
×
1566
    xNZ = x[nonZeros]
×
1567
    yNZ = y[nonZeros]
×
1568
    yerrNZ = yerr[nonZeros]
×
1569

1570
    costFun = cost.LeastSquares(xNZ, yNZ, yerrNZ, convolvedModel)
×
1571
    return costFun
×
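# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shows how per-spectrum least-squares costs combine into one global cost: iminuit
# cost objects add with "+", and parameters that share a name are fitted as one
# shared parameter, while the suffixed ones stay local to each spectrum. The
# Gaussian model and all values below are made up; the real models and resolution
# convolution are handled above.
def _sketch_global_cost_sum():
    import numpy as np
    from iminuit import Minuit, cost

    rng = np.random.default_rng(0)
    x = np.linspace(-10.0, 10.0, 51)

    def model0(x, sigma, A0):   # "sigma" shared, "A0" local to spectrum 0
        return A0 * np.exp(-0.5 * (x / sigma) ** 2)

    def model1(x, sigma, A1):   # same "sigma" name -> shared by the summed cost
        return A1 * np.exp(-0.5 * (x / sigma) ** 2)

    y0 = model0(x, 3.0, 1.0) + rng.normal(0.0, 0.05, x.size)
    y1 = model1(x, 3.0, 0.5) + rng.normal(0.0, 0.05, x.size)

    total_cost = cost.LeastSquares(x, y0, 0.05, model0) + cost.LeastSquares(x, y1, 0.05, model1)
    m = Minuit(total_cost, sigma=2.0, A0=0.8, A1=0.8)
    m.migrad()
    return m.values["sigma"], m.values["A0"], m.values["A1"]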
1572

1573

1574
def minuitInitialParameters(defaultPars, sharedPars, nSpec):
1✔
1575
    """Buids dictionary to initialize Minuit with starting global+local parameters"""
1576

1577
    initPars = {}
×
1578
    # Populate with initial shared parameters
1579
    for sp in sharedPars:
×
1580
        initPars[sp] = defaultPars[sp]
×
1581
    # Add initial unshared parameters
1582
    unsharedPars = [key for key in defaultPars if key not in sharedPars]
×
1583
    for up in unsharedPars:
×
1584
        for i in range(nSpec):
×
1585
            initPars[up + str(i)] = defaultPars[up]
×
1586
    return initPars
×
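# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Worked example of the dictionary built above: shared parameters keep their name,
# unshared ones get one entry per spectrum with the spectrum index appended.
# Parameter names and values here are hypothetical.
def _sketch_minuit_initial_parameters():
    default_pars = {"y0": 0.0, "A": 1.0, "sigma": 5.0}
    shared_pars = ["sigma"]
    n_spec = 2
    init = minuitInitialParameters(default_pars, shared_pars, n_spec)
    # init == {"sigma": 5.0, "y00": 0.0, "y01": 0.0, "A0": 1.0, "A1": 1.0}
    return init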
1587

1588

1589
def create_table_for_global_fit_parameters(wsName, model, m, chi2):
1✔
1590
    t = CreateEmptyTableWorkspace(OutputWorkspace=wsName + f"_globalfit_{model}_Parameters")
×
1591
    t.setTitle("Global Fit Parameters")
×
1592
    t.addColumn(type="str", name="Name")
×
1593
    t.addColumn(type="float", name="Value")
×
1594
    t.addColumn(type="float", name="Error")
×
1595

1596
    logger.notice(f"Value of Chi2/ndof: {chi2:.2f}")
×
1597
    logger.notice(f"Migrad Minimum valid: {m.valid}")
×
1598
    for p, v, e in zip(m.parameters, m.values, m.errors):
×
1599
        t.addRow([p, v, e])
×
1600

1601
    t.addRow(["Cost function", chi2, 0])
×
1602
    print_table_workspace(t)
×
1603

1604

1605
def plotGlobalFit(dataX, dataY, dataE, mObj, totCost, wsName, yFitIC):
1✔
1606
    if len(dataY) > 10:
×
1607
        logger.notice("\nToo many axes to show in figure, skipping the plot ...\n")
×
1608
        return
×
1609

1610
    rows = 2
×
1611
    fig, axs = plt.subplots(
×
1612
        rows,
1613
        int(np.ceil(len(dataY) / rows)),
1614
        figsize=(15, 8),
1615
        tight_layout=True,
1616
        subplot_kw={"projection": "mantid"},
1617
    )
1618
    fig.canvas.manager.set_window_title(wsName + "_fitglobal")
×
1619

1620
    # Data used in Global Fit
1621
    for i, (x, y, yerr, ax) in enumerate(zip(dataX, dataY, dataE, axs.flat)):
×
1622
        ax.errorbar(x, y, yerr, fmt="k.", label=f"Data Group {i}")
×
1623

1624
    # Global Fit
1625
    for x, costFun, ax in zip(dataX, totCost, axs.flat):
×
1626
        signature = describe(costFun)
×
1627

1628
        values = mObj.values[signature]
×
1629
        errors = mObj.errors[signature]
×
1630

1631
        yfit = costFun.model(x, *values)
×
1632

1633
        # Build a decent legend
1634
        leg = []
×
1635
        for p, v, e in zip(signature, values, errors):
×
1636
            leg.append(f"${p} = {v:.3f} \\pm {e:.3f}$")
×
1637

1638
        ax.plot(x, yfit, "r-", label="\n".join(leg))
×
1639
        ax.plot(x, y - yfit, "b.", label="Residuals")
×
1640
        ax.grid()
×
1641
        ax.legend()
×
1642
    savePath = yFitIC.figSavePath / fig.canvas.manager.get_window_title()
×
1643
    plt.savefig(savePath, bbox_inches="tight")
×
1644
    fig.show()
×
1645
    return
×
1646

1647

1648
def save_workspaces(yFitIC):
1✔
1649
    for ws_name in mtd.getObjectNames():
×
1650
        if ws_name.endswith("Parameters") or ws_name.endswith("Workspace") or ws_name.endswith("CovarianceMatrix"):
×
1651
            save_path = yFitIC.save_path / f"{yFitIC.fitting_model}_fit" / ws_name
×
1652
            save_path.parent.mkdir(exist_ok=True, parents=True)
×
1653
            SaveAscii(ws_name, str(save_path))
×