pymc-devs / pymc3, build 8562 (pending completion)
Pull Request #3327: WIP: Merge nuts_kwargs and step_kwargs into kwargs
Commit (travis-ci, web-flow): Added step_kwargs and nuts_kwargs deprecations to release notes

13 of 13 new or added lines in 2 files covered (100.0%)
9951 of 20106 relevant lines covered (49.49%), 1.48 hits per line

Source File: /pymc3/tuning/scaling.py (39.39% of lines covered)

import numpy as np
from numpy import exp, log, sqrt
from ..model import modelcontext, Point
from ..theanof import hessian_diag, inputvars
from ..blocking import DictToArrayBijection, ArrayOrdering

__all__ = ['approx_hessian', 'find_hessian', 'trace_cov', 'guess_scaling']


def approx_hessian(point, vars=None, model=None):
    """
    Returns an approximation of the Hessian at the current chain location.

    Parameters
    ----------
    model : Model (optional if in `with` context)
    point : dict
    vars : list
        Variables for which Hessian is to be calculated.
    """
    from numdifftools import Jacobian

    model = modelcontext(model)
    if vars is None:
        vars = model.cont_vars
    vars = inputvars(vars)

    point = Point(point, model=model)

    bij = DictToArrayBijection(ArrayOrdering(vars), point)
    dlogp = bij.mapf(model.fastdlogp(vars))

    def grad_logp(point):
        return np.nan_to_num(dlogp(point))

    # Find the Jacobian of the gradient function at the current position;
    # this should be the Hessian. Invert it to find the approximate
    # covariance matrix.
    return -Jacobian(grad_logp)(bij.map(point))

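# Usage sketch (editor's illustration, not part of the original module). It
# assumes the optional `numdifftools` package is installed; the toy model and
# names below are made up for the example.
#
#     >>> import pymc3 as pm
#     >>> from pymc3.tuning.scaling import approx_hessian
#     >>> with pm.Model() as model:
#     ...     mu = pm.Normal('mu', mu=0., sd=1.)
#     ...     start = pm.find_MAP()
#     ...     H = approx_hessian(start)    # numerical Hessian at the MAP point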

def fixed_hessian(point, vars=None, model=None):
    """
    Returns a fixed Hessian for any chain location.

    Parameters
    ----------
    model : Model (optional if in `with` context)
    point : dict
    vars : list
        Variables for which Hessian is to be calculated.
    """

    model = modelcontext(model)
    if vars is None:
        vars = model.cont_vars
    vars = inputvars(vars)

    point = Point(point, model=model)

    bij = DictToArrayBijection(ArrayOrdering(vars), point)
    rval = np.ones(bij.map(point).size) / 10
    return rval


def find_hessian(point, vars=None, model=None):
    """
    Returns Hessian of logp at the point passed.

    Parameters
    ----------
    model : Model (optional if in `with` context)
    point : dict
    vars : list
        Variables for which Hessian is to be calculated.
    """
    model = modelcontext(model)
    H = model.fastd2logp(vars)
    return H(Point(point, model=model))

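# Usage sketch (editor's illustration, not part of the original module). In
# pymc3 this function is also reachable at the package level as
# `pm.find_hessian`; the toy model below continues the earlier sketch.
#
#     >>> import pymc3 as pm
#     >>> with pm.Model() as model:
#     ...     mu = pm.Normal('mu', mu=0., sd=1.)
#     ...     start = pm.find_MAP()
#     ...     H = pm.find_hessian(start)    # Hessian of logp at `start`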

def find_hessian_diag(point, vars=None, model=None):
    """
    Returns the diagonal of the Hessian of logp at the point passed.

    Parameters
    ----------
    model : Model (optional if in `with` context)
    point : dict
    vars : list
        Variables for which Hessian is to be calculated.
    """
    model = modelcontext(model)
    H = model.fastfn(hessian_diag(model.logpt, vars))
    return H(Point(point, model=model))

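# Usage sketch (editor's illustration, not part of the original module): same
# calling convention as `find_hessian`, but only the diagonal of the Hessian
# is evaluated, which is what `guess_scaling` below relies on.
#
#     >>> from pymc3.tuning.scaling import find_hessian_diag
#     >>> with model:                       # toy model from the earlier sketch
#     ...     h = find_hessian_diag(start)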

def guess_scaling(point, vars=None, model=None, scaling_bound=1e-8):
    # Diagonal Hessian at `point` when the model supports it, otherwise a
    # fixed fallback, passed through `adjust_scaling` so the values stay
    # within numerically safe bounds.
    model = modelcontext(model)
    try:
        h = find_hessian_diag(point, vars, model=model)
    except NotImplementedError:
        h = fixed_hessian(point, vars, model=model)
    return adjust_scaling(h, scaling_bound)

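# Usage sketch (editor's illustration, not part of the original module). In
# pymc3 this is used to derive a scaling guess from a model point (for
# example, when a step method is handed a dict as its `scaling` argument);
# calling it directly looks like:
#
#     >>> from pymc3.tuning.scaling import guess_scaling
#     >>> with model:                       # toy model from the earlier sketch
#     ...     scaling = guess_scaling(start)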

def adjust_scaling(s, scaling_bound):
    # Clip a vector of scaling values element-wise, or clip the eigenvalues
    # of a scaling matrix and recompose it.
    if s.ndim < 2:
        return adjust_precision(s, scaling_bound)
    else:
        val, vec = np.linalg.eigh(s)
        val = adjust_precision(val, scaling_bound)
        return eig_recompose(val, vec)


def adjust_precision(tau, scaling_bound=1e-8):
    # Clip the magnitude of each precision value into
    # [scaling_bound**2, (1/scaling_bound)**2], working on the log scale.
    mag = sqrt(abs(tau))

    bounded = bound(log(mag), log(scaling_bound), log(1./scaling_bound))
    return exp(bounded)**2


def bound(a, l, u):
    # Element-wise clip of `a` to the interval [l, u].
    return np.maximum(np.minimum(a, u), l)


def eig_recompose(val, vec):
    # Rebuild a symmetric matrix from its eigenvalues and eigenvectors.
    return vec.dot(np.diag(val)).dot(vec.T)

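# Worked example (editor's illustration, not part of the original module):
# `adjust_precision` clips each entry's magnitude into
# [scaling_bound**2, (1/scaling_bound)**2], and `adjust_scaling` applies the
# same clipping to the eigenvalues of a matrix before recomposing it.
#
#     >>> import numpy as np
#     >>> from pymc3.tuning.scaling import adjust_precision, adjust_scaling
#     >>> adjust_precision(np.array([1e-40, 4.]))      # -> approx. [1e-16, 4.]
#     >>> adjust_scaling(np.diag([1e-40, 4.]), 1e-8)   # -> approx. diag(1e-16, 4.)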

def trace_cov(trace, vars=None, model=None):
    """
    Calculate the flattened covariance matrix using a sample trace.

    Useful if you want to base your covariance matrix for further sampling
    on some initial samples.

    Parameters
    ----------
    trace : Trace
    vars : list
        variables for which to calculate covariance matrix

    Returns
    -------
    r : array (n,n)
        covariance matrix
    """
    model = modelcontext(model)

    if model is not None:
        vars = model.free_RVs
    elif vars is None:
        vars = trace.varnames

    def flat_t(var):
        x = trace[str(var)]
        return x.reshape((x.shape[0], np.prod(x.shape[1:], dtype=int)))

    return np.cov(np.concatenate(list(map(flat_t, vars)), 1).T)

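# Usage sketch (editor's illustration, not part of the original module): the
# covariance of an initial trace can seed the proposal for a later run, as the
# docstring above suggests. The `S=` keyword on Metropolis is an assumption
# about the pymc3 step-method API of this era, not something stated in this
# file.
#
#     >>> import pymc3 as pm
#     >>> from pymc3.tuning.scaling import trace_cov
#     >>> with pm.Model() as model:
#     ...     mu = pm.Normal('mu', mu=0., sd=1.)
#     ...     sigma = pm.HalfNormal('sigma', sd=1.)
#     ...     trace = pm.sample(500, tune=500)
#     ...     cov = trace_cov(trace)              # flattened (n, n) covariance
#     ...     trace2 = pm.sample(500, step=pm.Metropolis(S=cov))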