JohannesBuchner / UltraNest
Commit 9f2dd4f6-0775-47e9-b700-af647027ebfa (push; CI: circleci; committer: web-flow)
22 Apr 2024 12:51PM UTC coverage: 74.53% (+0.3%) from 74.242%
Merge pull request #118 from njzifjoiez/fixed-size-vectorised-slice-sampler: "vectorised slice sampler of fixed batch size"

1329 of 2026 branches covered (65.6%); branch coverage is included in the aggregate %.
79 of 80 new or added lines in 1 file covered (98.75%).
1 existing line in 1 file is now uncovered.
4026 of 5159 relevant lines covered (78.04%); 0.78 hits per line.

Source file: /ultranest/hotstart.py (77.51% covered)

"""
Warm start
----------

Helper functions for deforming the parameter space to enable
more efficient sampling.

Based on ideas from Petrosyan & Handley (2022, https://arxiv.org/abs/2212.01760).

"""

import numpy as np
import scipy.stats
from .utils import vectorize, resample_equal


def get_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, df=1):
    """Return a new loglike and transform based on an auxiliary distribution.

    Given a likelihood and prior transform, and information about
    the (expected) posterior peak, generates an auxiliary
    likelihood and prior transform that is identical but
    requires fewer nested sampling iterations.

    This is achieved by deforming the prior space, and undoing that
    transformation by correction weights in the likelihood.

    The auxiliary distribution used for transformation/weighting is
    a d-dimensional Student-t distribution.

    Usage::

        aux_loglikelihood, aux_aftertransform = get_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, df=1)
        aux_sampler = ReactiveNestedSampler(parameters, aux_loglikelihood)
        aux_results = aux_sampler.run()
        posterior_samples = [aux_aftertransform(sample) for sample in aux_results['samples']]

    Parameters
    ------------
    loglike: function
        original likelihood function
    transform: function
        original prior transform function
    ctr: array
        Posterior center (in u-space).
    invcov: array
        Inverse covariance of the posterior (in u-space).
    enlargement_factor: float
        Factor by which the scale of the auxiliary distribution is enlarged
        in all dimensions.
        For Gaussian-like posteriors, sqrt(ndim) seems to work;
        heavier-tailed or non-elliptical distributions may need larger factors.
    df: float
        Number of degrees of freedom of the auxiliary Student-t distribution.
        The default is recommended. For truly Gaussian posteriors,
        the Student-t can be made more Gaussian (by df>=30) for acceleration.

    Returns
    ---------
    aux_loglike: function
        auxiliary loglikelihood function.
    aux_aftertransform: function
        auxiliary transform function.
        Takes d u-space coordinates, and returns the corresponding p-space
        parameters, identical to what ``transform`` would return.
    """
    ndim, = ctr.shape
    assert invcov.shape == (ndim, ndim)
    assert df >= 1, ('Degrees of freedom must be above 1', df)

    l, v = np.linalg.eigh(invcov)
    rotation_matrix = np.dot(v, enlargement_factor * np.diag(1. / np.sqrt(l)))

    rv_auxiliary1d = scipy.stats.t(df)

    def aux_rotator(coords):
        return ctr + np.dot(coords, rotation_matrix)

    def aux_loglikelihood(u):
        # get uniform gauss/t distributed values:
        coords = rv_auxiliary1d.ppf(u)
        # rotate & stretch; transform into physical parameters
        x = aux_rotator(coords)
        # avoid outside regions
        if not (x > 0).all() or not (x < 1).all():
            return -1e300
        # undo the effect of the auxiliary distribution
        loglike_total = rv_auxiliary1d.logpdf(coords).sum()
        return loglike(transform(x)) - loglike_total

    def aux_aftertransform(u):
        return transform(aux_rotator(rv_auxiliary1d.ppf(u)))

    return aux_loglikelihood, aux_aftertransform


def get_extended_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, df=1):
    """Return a new loglike and transform based on an auxiliary distribution.

    Given a likelihood and prior transform, and information about
    the (expected) posterior peak, generates an auxiliary
    likelihood and prior transform that is identical but
    requires fewer nested sampling iterations.

    This is achieved by deforming the prior space, and undoing that
    transformation by correction weights in the likelihood.

    The auxiliary distribution used for transformation/weighting is
    a d-dimensional Student-t distribution.

    Parameters
    ------------
    loglike: function
        original likelihood function
    transform: function
        original prior transform function
    ctr: array
        Posterior center (in u-space).
    invcov: array
        Inverse covariance of the posterior (in u-space).
    enlargement_factor: float
        Factor by which the scale of the auxiliary distribution is enlarged
        in all dimensions.

        For Gaussian-like posteriors, sqrt(ndim) seems to work;
        heavier-tailed or non-elliptical distributions may need larger factors.
    df: float
        Number of degrees of freedom of the auxiliary Student-t distribution.
        The default is recommended. For truly Gaussian posteriors,
        the Student-t can be made more Gaussian (by df>=30) for acceleration.

    Returns
    ---------
    aux_loglike: function
        auxiliary loglikelihood function. Takes d + 1 parameters (see below).
        The likelihood is the same as loglike, but adds weights.
    aux_transform: function
        auxiliary transform function.
        Takes d u-space coordinates, and returns d + 1 p-space parameters.
        The first d return coordinates are identical to what ``transform`` would return.
        The final coordinate is the correction weight.
    """
    ndim, = ctr.shape
    assert invcov.shape == (ndim, ndim)
    assert df >= 1, ('Degrees of freedom must be above 1', df)

    l, v = np.linalg.eigh(invcov)
    rotation_matrix = np.dot(v, enlargement_factor * np.diag(1. / np.sqrt(l)))

    rv_auxiliary1d = scipy.stats.t(df)
    weight_ref = rv_auxiliary1d.logpdf(0) * ndim

    def aux_transform(u):
        # get uniform gauss/t distributed values:
        coords = rv_auxiliary1d.ppf(u)
        # rotate & stretch; transform into physical parameters
        x = ctr + np.dot(rotation_matrix, coords)
        # avoid outside regions
        if (x > 0).all() and (x < 1).all():
            weight = -rv_auxiliary1d.logpdf(coords).sum() + weight_ref
        else:
            weight = -1e101
            x = u * 0 + 0.5
        # add weight as an additional parameter
        return np.append(transform(x), weight)

    def aux_loglikelihood(x):
        x_actual = x[:-1]
        weight = x[-1]
        if -1e100 < weight < 1e100:
            return loglike(x_actual) + weight - weight_ref
        else:
            return -1e300

    return aux_loglikelihood, aux_transform


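# A minimal sketch (not part of the upstream module) of how the extended auxiliary
# problem could be wired into ReactiveNestedSampler, mirroring the usage shown for
# the independent variant below. `my_loglike`, `my_transform`, `param_names`, `ctr`
# and `invcov` are hypothetical placeholders supplied by the caller.
def _example_extended_auxiliary_run(my_loglike, my_transform, param_names, ctr, invcov):
    from ultranest import ReactiveNestedSampler
    aux_loglike, aux_transform = get_extended_auxiliary_problem(
        my_loglike, my_transform, ctr, invcov,
        enlargement_factor=len(ctr)**0.5, df=1)
    # the auxiliary transform appends one derived parameter, the log correction weight
    sampler = ReactiveNestedSampler(
        param_names, aux_loglike, transform=aux_transform,
        derived_param_names=['aux_logweight'])
    results = sampler.run()
    # drop the weight column to obtain posterior samples of the original parameters
    return results['samples'][:, :-1]

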
def get_extended_auxiliary_independent_problem(loglike, transform, ctr, err, df=1):
    """Return a new loglike and transform based on an auxiliary distribution.

    Given a likelihood and prior transform, and information about
    the (expected) posterior peak, generates an auxiliary
    likelihood and prior transform that is identical but
    requires fewer nested sampling iterations.

    This is achieved by deforming the prior space, and undoing that
    transformation by correction weights in the likelihood.

    The auxiliary distribution used for transformation/weighting is
    an independent Student-t distribution for each parameter.

    Usage::

        aux_loglikelihood, aux_transform = get_extended_auxiliary_independent_problem(loglike, transform, ctr, err, df=1)
        aux_sampler = ReactiveNestedSampler(parameters, aux_loglikelihood, transform=aux_transform, derived_param_names=['logweight'])
        aux_results = aux_sampler.run()
        posterior_samples = aux_results['samples'][:,:-1]

    Parameters
    ------------
    loglike: function
        original likelihood function
    transform: function
        original prior transform function
    ctr: array
        Posterior center (in u-space).
    err: array
        Standard deviation around the posterior center (in u-space).
    df: float
        Number of degrees of freedom of the auxiliary Student-t distribution.
        The default is recommended. For truly Gaussian posteriors,
        the Student-t can be made more Gaussian (by df>=30) for acceleration.

    Returns
    ---------
    aux_loglike: function
        auxiliary loglikelihood function.
    aux_transform: function
        auxiliary transform function.
        Takes d u-space coordinates, and returns d + 1 p-space parameters.
        The first d return coordinates are identical to what ``transform`` would return.
        The final coordinate is the log of the correction weight.
    """
    ndim, = np.shape(ctr)
    assert np.shape(err) == (ndim,)
    assert df >= 1, ('Degrees of freedom must be above 1', df)

    rv_aux = scipy.stats.t(df, ctr, err)
    # handle the case where the aux distribution extends beyond the unit cube
    aux_lo = rv_aux.cdf(0)
    aux_hi = rv_aux.cdf(1)
    aux_w = aux_hi - aux_lo
    weight_ref = rv_aux.logpdf(ctr).sum()

    def aux_transform(u):
        # get uniform gauss/t distributed values:
        x = rv_aux.ppf(u * aux_w + aux_lo)
        weight = -rv_aux.logpdf(x).sum() + weight_ref
        return np.append(transform(x), weight)

    def aux_loglikelihood(x):
        x_actual = x[:-1]
        weight = x[-1]
        if -1e100 < weight < 1e100:
            return loglike(x_actual) + weight - weight_ref
        else:
            return -1e300

    return aux_loglikelihood, aux_transform


def compute_quantile_intervals(steps, upoints, uweights):
    """Compute lower and upper axis quantiles.

    Parameters
    ------------
    steps: array
        list of quantiles q to compute.
    upoints: array
        samples, with dimensions (N, d)
    uweights: array
        sample weights

    Returns
    ---------
    ulo: array
        lower quantiles (at q), one row per quantile, one entry for each dimension d;
        the final row is 0.
    uhi: array
        upper quantiles (at 1-q), one row per quantile, one entry for each dimension d;
        the final row is 1.
    """
    ndim = upoints.shape[1]
    nboxes = len(steps)
    ulos = np.empty((nboxes + 1, ndim))
    uhis = np.empty((nboxes + 1, ndim))
    for j, pthresh in enumerate(steps):
        for i, ui in enumerate(upoints.transpose()):
            order = np.argsort(ui)
            c = np.cumsum(uweights[order])
            usel = ui[order][np.logical_and(c >= pthresh, c <= 1 - pthresh)]
            ulos[j,i] = usel.min()
            uhis[j,i] = usel.max()
    ulos[-1] = 0
    uhis[-1] = 1
    return ulos, uhis


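# A small sketch (with hypothetical toy data) of what compute_quantile_intervals
# returns: one row of lower/upper bounds per requested quantile, plus a final row
# covering the full unit cube.
def _example_quantile_intervals():
    rng = np.random.default_rng(1)
    upoints = rng.beta(10, 10, size=(400, 2))              # toy posterior samples in the unit cube
    uweights = np.full(len(upoints), 1.0 / len(upoints))   # equal, normalised weights
    ulos, uhis = compute_quantile_intervals([0.1, 0.01], upoints, uweights)
    assert ulos.shape == uhis.shape == (3, 2)
    assert (ulos[-1] == 0).all() and (uhis[-1] == 1).all()
    return ulos, uhis

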
def compute_quantile_intervals_refined(steps, upoints, uweights, logsteps_max=20):
    """Compute lower and upper axis quantiles.

    Parameters
    ------------
    steps: array
        list of quantiles q to compute.
    upoints: array
        samples, with dimensions (N, d)
    uweights: array
        sample weights. N entries.
    logsteps_max: int
        number of intermediate steps to inject between the largest quantile
        interval and the full unit cube

    Returns
    ---------
    ulo: array
        list of lower quantiles (at `q`), of shape (M, d), one entry per quantile and dimension d.
    uhi: array
        list of upper quantiles (at 1-`q`), of shape (M, d), one entry per quantile and dimension d.
    uinterpspace: array
        list of interpolation coordinates, one per quantile row (M entries).
    """
    nboxes = len(steps)
    ulos_orig, uhis_orig = compute_quantile_intervals(steps, upoints, uweights)
    assert len(ulos_orig) == nboxes + 1
    assert len(uhis_orig) == nboxes + 1

    smallest_axis_width = np.min(uhis_orig[-2,:] - ulos_orig[-2,:])
    logsteps = min(logsteps_max, int(np.ceil(-np.log10(max(1e-100, smallest_axis_width)))))

    weights = np.logspace(-logsteps, 0, logsteps + 1).reshape((-1, 1))
    # print("logspace:", weights, logsteps)
    assert len(weights) == logsteps + 1, (weights.shape, logsteps)
    # print("quantiles:", ulos_orig, uhis_orig)
    ulos_new = ulos_orig[nboxes - 1, :].reshape((1, -1)) * (1 - weights) + 0 * weights
    uhis_new = uhis_orig[nboxes - 1, :].reshape((1, -1)) * (1 - weights) + 1 * weights

    # print("additional quantiles:", ulos_new, uhis_new)

    ulos = np.vstack((ulos_orig[:-1,:], ulos_new))
    uhis = np.vstack((uhis_orig[:-1,:], uhis_new))
    # print("combined quantiles:", ulos, uhis)
    assert (ulos[-1,:] == 0).all()
    assert (uhis[-1,:] == 1).all()

    uinterpspace = np.ones(nboxes + logsteps + 1)
    uinterpspace[:nboxes + 1] = np.linspace(0, 1, nboxes + 1)
    assert 0 < uinterpspace[nboxes - 1] < 1, uinterpspace[nboxes]
    uinterpspace[nboxes:] = np.linspace(uinterpspace[nboxes - 1], 1, logsteps + 2)[1:]

    return ulos, uhis, uinterpspace


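# A small sketch (hypothetical toy data) showing how the refined intervals extend the
# plain quantile boxes with logarithmically spaced steps out to the full unit cube;
# the `steps` grid matches the one used by get_auxiliary_contbox_parameterization.
def _example_quantile_intervals_refined():
    rng = np.random.default_rng(2)
    upoints = rng.beta(20, 20, size=(1000, 3))             # toy posterior samples in the unit cube
    uweights = np.full(len(upoints), 1.0 / len(upoints))
    steps = 10**-(1.0 * np.arange(1, 8, 2))                # 0.1, 0.001, 1e-5, 1e-7
    ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)
    # one interpolation coordinate per row of box edges; the last row spans (0, 1)
    assert len(uinterpspace) == len(ulos) == len(uhis)
    assert (ulos[-1] == 0).all() and (uhis[-1] == 1).all()
    return ulos, uhis, uinterpspace

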
def get_auxiliary_contbox_parameterization(
    param_names, loglike, transform, upoints, uweights, vectorized=False,
):
    """Return a new loglike and transform based on an auxiliary distribution.

    Given a likelihood and prior transform, and information about
    the (expected) posterior peak, generates an auxiliary
    likelihood and prior transform that is identical but
    requires fewer nested sampling iterations.

    This is achieved by deforming the prior space, and undoing that
    transformation by correction weights in the likelihood.
    An additional parameter, "aux_logweight", is added at the end,
    which contains the correction weight. You can ignore it.

    The auxiliary distribution used for transformation/weighting is
    factorized. Each axis considers the ECDF of the auxiliary samples,
    and segments it into quantile segments. Within each segment,
    the parameter edges in u-space are linearly interpolated.
    To see the interpolation quantiles for each axis, use::

        steps = 10**-(1.0 * np.arange(1, 8, 2))
        ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)

    Parameters
    ------------
    param_names: list
        parameter names
    loglike: function
        original likelihood function
    transform: function
        original prior transform function
    upoints: array
        Posterior samples (in u-space).
    uweights: array
        Weights of samples (need to sum to 1)
    vectorized: bool
        whether the loglike & transform functions are vectorized

    Returns
    ---------
    aux_param_names: list
        new parameter names (`param_names`) plus additional 'aux_logweight'
    aux_loglike: function
        auxiliary loglikelihood function.
    aux_transform: function
        auxiliary transform function.
        Takes d + 1 u-space coordinates, and returns d + 1 p-space parameters.
        The first d return coordinates are identical to what ``transform`` would return.
        The final coordinate is the log of the correction weight.
    vectorized: bool
        whether the returned functions are vectorized

    Usage
    ------
    ::

        aux_param_names, aux_loglikelihood, aux_transform, aux_vectorized = get_auxiliary_contbox_parameterization(
            param_names, loglike, transform, upoints, uweights)
        aux_sampler = ReactiveNestedSampler(aux_param_names, aux_loglikelihood, transform=aux_transform)
        aux_results = aux_sampler.run()
        posterior_samples = aux_results['samples'][:,:-1]

    """
    upoints = np.asarray(upoints)
    assert upoints.ndim == 2, ('expected 2d array for upoints, got shape: %s' % upoints.shape)
    mask = np.logical_and(upoints > 0, upoints < 1).all(axis=1)
    assert np.all(mask), (
        'upoints must be between 0 and 1, have:', upoints[~mask,:])
    steps = 10**-(1.0 * np.arange(1, 8, 2))
    nsamples, ndim = upoints.shape
    assert nsamples > 10
    ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)

    aux_param_names = param_names + ['aux_logweight']

    def aux_transform(u):
        ndim2, = u.shape
        assert ndim2 == ndim + 1
        umod = np.empty(ndim)
        log_aux_volume_factors = 0
        for i in range(ndim):
            ulo_here = np.interp(u[-1], uinterpspace, ulos[:,i])
            uhi_here = np.interp(u[-1], uinterpspace, uhis[:,i])
            umod[i] = ulo_here + (uhi_here - ulo_here) * u[i]
            log_aux_volume_factors += np.log(uhi_here - ulo_here)
        return np.append(transform(umod), log_aux_volume_factors)

    def aux_transform_vectorized(u):
        nsamples, ndim2 = u.shape
        assert ndim2 == ndim + 1
        umod = np.empty((nsamples, ndim2 - 1))
        log_aux_volume_factors = np.zeros((nsamples, 1))
        for i in range(ndim):
            ulo_here = np.interp(u[:,-1], uinterpspace, ulos[:,i])
            uhi_here = np.interp(u[:,-1], uinterpspace, uhis[:,i])
            umod[:,i] = ulo_here + (uhi_here - ulo_here) * u[:,i]
            log_aux_volume_factors[:,0] += np.log(uhi_here - ulo_here)
        return np.hstack((transform(umod), log_aux_volume_factors))

    def aux_loglikelihood(x):
        x_actual = x[:-1]
        logl = loglike(x_actual)
        aux_logweight = x[-1]
        # downweight if we are in the auxiliary distribution
        return logl + aux_logweight

    def aux_loglikelihood_vectorized(x):
        x_actual = x[:,:-1]
        logl = loglike(x_actual)
        aux_logweight = x[:,-1]
        # downweight if we are in the auxiliary distribution
        return logl + aux_logweight

    if vectorized:
        return aux_param_names, aux_loglikelihood_vectorized, aux_transform_vectorized, vectorized
    else:
        return aux_param_names, aux_loglikelihood, aux_transform, vectorized


def reuse_samples(
    param_names, loglike, points, logl, logw=None,
    logz=0.0, logzerr=0.0, upoints=None,
    batchsize=128, vectorized=False, log_weight_threshold=-10,
    **kwargs
):
    """
    Reweight existing nested sampling run onto a new loglikelihood.

    Parameters
    ------------
    param_names: list of strings
        Names of the parameters
    loglike: function
        New likelihood function
    points: np.array of shape (npoints, ndim)
        Equally weighted (unless logw is passed) posterior points
    logl: np.array(npoints)
        Likelihood values of the points from the previous run
    logw: np.array(npoints)
        Log-weights of existing points.
    logz: float
        Previous evidence / marginal likelihood value.
    logzerr: float
        Previous evidence / marginal likelihood uncertainty.
    upoints: np.array of shape (npoints, ndim)
        Posterior points before transformation.
    vectorized: bool
        Whether loglike function is vectorized
    batchsize: int
        Number of points simultaneously passed to vectorized loglike function
    log_weight_threshold: float
        Lowest log-weight to consider

    Returns
    ---------
    results: dict
        All information of the run. Important keys:
        Number of nested sampling iterations (niter),
        Evidence estimate (logz),
        Effective Sample Size (ess),
        weighted samples (weighted_samples),
        equally weighted samples (samples),
        best-fit point information (maximum_likelihood),
        posterior summaries (posterior).
    """
    if not vectorized:
        loglike = vectorize(loglike)

    Npoints, ndim = points.shape
    if logw is None:
        # assume equally weighted if no weights given
        logw = np.zeros(Npoints) - np.log(Npoints)
    logl_new = np.zeros(Npoints) - np.inf
    logw_new = np.zeros(Npoints) - np.inf
    assert logl.shape == (Npoints,)
    assert logw.shape == (Npoints,)

    # process points, highest weight first:
    indices = np.argsort(logl + logw)[::-1]
    ncall = 0
    for i in range(int(np.ceil(Npoints / batchsize))):
        batch = indices[i * batchsize:(i + 1) * batchsize]
        logl_new[batch] = loglike(points[batch,:])
        logw_new[batch] = logw[batch] + logl_new[batch]
        ncall += len(batch)
        if (logw_new[batch] < np.nanmax(logw_new) - np.log(Npoints) + log_weight_threshold).all():
            print("skipping", i)
            break

    logw_new0 = logw_new.max()
    w = np.exp(logw_new - logw_new0)
    print("weights:", w)
    logz_new = np.log(w.sum()) + logw_new0
    w /= w.sum()
    ess = len(w) / (1.0 + ((len(w) * w - 1)**2).sum() / len(w))

    integral_uncertainty_estimator = (((w - 1 / Npoints)**2).sum() / (Npoints - 1))**0.5
    logzerr_new = np.log(1 + integral_uncertainty_estimator)
    logzerr_new_total = (logzerr_new**2 + logzerr**2)**0.5

    samples = resample_equal(points, w)
    information_gain_bits = []
    for i in range(ndim):
        H, _ = np.histogram(points[:,i], weights=w, density=True, bins=np.linspace(0, 1, 40))
        information_gain_bits.append(float((np.log2(1 / ((H + 0.001) * 40)) / 40).sum()))

    j = logl_new.argmax()
    return dict(
        ncall=ncall,
        niter=Npoints,
        logz=logz_new, logzerr=logzerr_new_total,
        ess=ess,
        posterior=dict(
            mean=samples.mean(axis=0).tolist(),
            stdev=samples.std(axis=0).tolist(),
            median=np.percentile(samples, 50, axis=0).tolist(),
            errlo=np.percentile(samples, 15.8655, axis=0).tolist(),
            errup=np.percentile(samples, 84.1345, axis=0).tolist(),
            information_gain_bits=information_gain_bits,
        ),
        weighted_samples=dict(
            upoints=upoints, points=points, weights=w, logw=logw,
            logl=logl_new),
        samples=samples,
        maximum_likelihood=dict(
            logl=logl_new[j],
            point=points[j,:].tolist(),
            point_untransformed=upoints[j,:].tolist() if upoints is not None else None,
        ),
        param_names=param_names,
    )


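# A minimal sketch (with a hypothetical toy likelihood) of reweighting a previous
# run's equally weighted posterior points onto a slightly shifted likelihood.
def _example_reuse_samples():
    rng = np.random.default_rng(3)
    points = rng.normal(0.5, 0.04, size=(2000, 2))                  # toy posterior points from a previous run
    logl_old = -0.5 * (((points - 0.50) / 0.04)**2).sum(axis=1)     # their old log-likelihood values

    def new_loglike(theta):
        # hypothetical updated likelihood, shifted relative to the old one
        return -0.5 * (((theta - 0.52) / 0.04)**2).sum()

    results = reuse_samples(['a', 'b'], new_loglike, points, logl_old)
    return results['logz'], results['ess'], results['samples']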