pymc-devs / pymc3 · build 9388 (pending completion)
Pull #3597 · travis-ci · web-flow

Add contextvars requirement.

The contextvars library is used to make the vectorized version of sample_posterior_predictive compatible with the legacy version. contextvars was added to the standard library in Python 3.7, but a backport package exists for 3.6; this commit is an attempt to ensure the module is also available on Python 3.6.
Pull Request #3597: WIP: Second try to vectorize sample_posterior_predictive.
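
The requirement line itself is not shown on this page, but the conventional way to express such a version-conditional dependency, and presumably what this commit adds, is a PEP 508 environment marker so the PyPI backport is only installed on interpreters that lack the standard-library module. A minimal sketch under that assumption, not the verified diff:

    # requirements.txt: install the contextvars backport only on Python 3.6,
    # where the standard-library module (added in 3.7) is unavailable.
    contextvars; python_version < "3.7"

On Python 3.7+ the marker evaluates to false and the package is skipped, so import contextvars resolves to the standard library; on 3.6 the backport installs a module of the same name, and importing code needs no version check.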

513 of 513 new or added lines in 16 files covered. (100.0%)

12635 of 20551 relevant lines covered (61.48%)

0.61 hits per line

Source File: /pymc3/tests/test_dist_math.py (0.0 hits per line; every executable line below was reported uncovered, marked ×, in this build)
import numpy as np
import numpy.testing as npt
import theano.tensor as tt
import theano
import theano.tests.unittest_tools as utt
import pymc3 as pm
from scipy import stats, interpolate
import pytest

from ..theanof import floatX
from ..distributions import Discrete
from ..distributions.dist_math import (
    bound, factln, alltrue_scalar, MvNormalLogp, SplineWrapper, i0e)


def test_bound():
    # bound() should pass logp through unchanged where the conditions hold
    # and return -inf where they fail.
    logp = tt.ones((10, 10))
    cond = tt.ones((10, 10))
    assert np.all(bound(logp, cond).eval() == logp.eval())

    logp = tt.ones((10, 10))
    cond = tt.zeros((10, 10))
    assert np.all(bound(logp, cond).eval() == (-np.inf * logp).eval())

    logp = tt.ones((10, 10))
    cond = True
    assert np.all(bound(logp, cond).eval() == logp.eval())

    # A single failing entry maps only that entry to -inf.
    logp = tt.ones(3)
    cond = np.array([1, 0, 1])
    assert not np.all(bound(logp, cond).eval() == 1)
    assert np.prod(bound(logp, cond).eval()) == -np.inf

    logp = tt.ones((2, 3))
    cond = np.array([[1, 1, 1], [1, 0, 1]])
    assert not np.all(bound(logp, cond).eval() == 1)
    assert np.prod(bound(logp, cond).eval()) == -np.inf


def test_alltrue_scalar():
    # alltrue_scalar() reduces a mixed list of Python bools, numpy arrays,
    # and theano tensors to a single scalar truth value.
    assert alltrue_scalar([]).eval()
    assert alltrue_scalar([True]).eval()
    assert alltrue_scalar([tt.ones(10)]).eval()
    assert alltrue_scalar([tt.ones(10),
                           5 * tt.ones(101)]).eval()
    assert alltrue_scalar([np.ones(10),
                           5 * tt.ones(101)]).eval()
    assert alltrue_scalar([np.ones(10),
                           True,
                           5 * tt.ones(101)]).eval()
    assert alltrue_scalar([np.array([1, 2, 3]),
                           True,
                           5 * tt.ones(101)]).eval()

    assert not alltrue_scalar([False]).eval()
    assert not alltrue_scalar([tt.zeros(10)]).eval()
    assert not alltrue_scalar([True,
                               False]).eval()
    assert not alltrue_scalar([np.array([0, -1]),
                               tt.ones(60)]).eval()
    assert not alltrue_scalar([np.ones(10),
                               False,
                               5 * tt.ones(101)]).eval()


def test_alltrue_shape():
    vals = [True, tt.ones(10), tt.zeros(5)]

    assert alltrue_scalar(vals).eval().shape == ()


class MultinomialA(Discrete):
    # Multinomial logp with elementwise bound conditions.
    def __init__(self, n, p, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.n = n
        self.p = p

    def logp(self, value):
        n = self.n
        p = self.p

        return bound(factln(n) - factln(value).sum() + (value * tt.log(p)).sum(),
                     value >= 0,
                     0 <= p, p <= 1,
                     tt.isclose(p.sum(), 1),
                     broadcast_conditions=False)


class MultinomialB(Discrete):
    # The same logp, but with conditions pre-aggregated via tt.all().
    def __init__(self, n, p, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.n = n
        self.p = p

    def logp(self, value):
        n = self.n
        p = self.p

        return bound(factln(n) - factln(value).sum() + (value * tt.log(p)).sum(),
                     tt.all(value >= 0),
                     tt.all(0 <= p), tt.all(p <= 1),
                     tt.isclose(p.sum(), 1),
                     broadcast_conditions=False)


def test_multinomial_bound():
    # Elementwise and tt.all()-aggregated bound conditions must yield the
    # same model logp.
    x = np.array([1, 5])
    n = x.sum()

    with pm.Model() as modelA:
        p_a = pm.Dirichlet('p', floatX(np.ones(2)))
        MultinomialA('x', n, p_a, observed=x)

    with pm.Model() as modelB:
        p_b = pm.Dirichlet('p', floatX(np.ones(2)))
        MultinomialB('x', n, p_b, observed=x)

    assert np.isclose(modelA.logp({'p_stickbreaking__': [0]}),
                      modelB.logp({'p_stickbreaking__': [0]}))


class TestMvNormalLogp:
    def test_logp(self):
        # Compare MvNormalLogp against scipy's multivariate_normal.logpdf.
        np.random.seed(42)

        chol_val = floatX(np.array([[1, 0.9], [0, 2]]))
        cov_val = floatX(np.dot(chol_val, chol_val.T))
        cov = tt.matrix('cov')
        cov.tag.test_value = cov_val
        delta_val = floatX(np.random.randn(5, 2))
        delta = tt.matrix('delta')
        delta.tag.test_value = delta_val
        expect = stats.multivariate_normal(mean=np.zeros(2), cov=cov_val)
        expect = expect.logpdf(delta_val).sum()
        logp = MvNormalLogp()(cov, delta)
        logp_f = theano.function([cov, delta], logp)
        logp = logp_f(cov_val, delta_val)
        npt.assert_allclose(logp, expect)

    @theano.configparser.change_flags(compute_test_value="ignore")
    def test_grad(self):
        # Numerically verify the gradient of the logp with respect to a
        # parametrized Cholesky factor and the residuals.
        np.random.seed(42)

        def func(chol_vec, delta):
            chol = tt.stack([
                tt.stack([tt.exp(0.1 * chol_vec[0]), 0]),
                tt.stack([chol_vec[1], 2 * tt.exp(chol_vec[2])]),
            ])
            cov = tt.dot(chol, chol.T)
            return MvNormalLogp()(cov, delta)

        chol_vec_val = floatX(np.array([0.5, 1., -0.1]))

        delta_val = floatX(np.random.randn(1, 2))
        utt.verify_grad(func, [chol_vec_val, delta_val])

        delta_val = floatX(np.random.randn(5, 2))
        utt.verify_grad(func, [chol_vec_val, delta_val])

    @pytest.mark.skip(reason="Fix in theano not released yet: Theano#5908")
    @theano.configparser.change_flags(compute_test_value="ignore")
    def test_hessian(self):
        chol_vec = tt.vector('chol_vec')
        chol_vec.tag.test_value = np.array([0.1, 2, 3])
        chol = tt.stack([
            tt.stack([tt.exp(0.1 * chol_vec[0]), 0]),
            tt.stack([chol_vec[1], 2 * tt.exp(chol_vec[2])]),
        ])
        cov = tt.dot(chol, chol.T)
        delta = tt.matrix('delta')
        delta.tag.test_value = np.ones((5, 2))
        logp = MvNormalLogp()(cov, delta)
        g_cov, g_delta = tt.grad(logp, [cov, delta])
        tt.grad(g_delta.sum() + g_cov.sum(), [delta, cov])


class TestSplineWrapper:
    @theano.configparser.change_flags(compute_test_value="ignore")
    def test_grad(self):
        x = np.linspace(0, 1, 100)
        y = x * x
        spline = SplineWrapper(interpolate.InterpolatedUnivariateSpline(x, y, k=1))
        utt.verify_grad(spline, [0.5])

    @theano.configparser.change_flags(compute_test_value="ignore")
    def test_hessian(self):
        # Taking the gradient of the spline's gradient is expected to raise
        # NotImplementedError.
        x = np.linspace(0, 1, 100)
        y = x * x
        spline = SplineWrapper(interpolate.InterpolatedUnivariateSpline(x, y, k=1))
        x_var = tt.dscalar('x')
        g_x, = tt.grad(spline(x_var), [x_var])
        with pytest.raises(NotImplementedError):
            tt.grad(g_x, [x_var])


class TestI0e:
    @theano.configparser.change_flags(compute_test_value="ignore")
    def test_grad(self):
        # Numerically verify the gradient of the exponentially scaled
        # modified Bessel function i0e for scalar and array inputs.
        utt.verify_grad(i0e, [0.5])
        utt.verify_grad(i0e, [-2.])
        utt.verify_grad(i0e, [[0.5, -2.]])
        utt.verify_grad(i0e, [[[0.5, -2.]]])