
quaquel / EMAworkbench / build 18194452655

02 Oct 2025 01:27PM UTC coverage: 88.664% (+0.5%) from 88.199%

push · github · web-flow
Simplification of classes and functions related to sampling (#420)

525 of 549 new or added lines in 37 files covered. (95.63%)

10 existing lines in 4 files now uncovered.

7853 of 8857 relevant lines covered (88.66%)

0.89 hits per line
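
These headline figures are mutually consistent; below is a minimal arithmetic check (a sketch only: per-line hit counts are not shown in this report, so the hits-per-line value is assumed to reflect roughly one hit per covered line).

covered, relevant = 7853, 8857        # relevant lines covered in this build
new_covered, new_total = 525, 549     # new or added lines covered

print(f"{covered / relevant:.3%}")       # 88.664%, the overall coverage
print(f"{new_covered / new_total:.2%}")  # 95.63%, coverage of new or added lines
print(f"{covered / relevant:.2f}")       # 0.89, reported as hits per line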

Source File: /test/test_em_framework/test_evaluators.py (99.19% covered)

"""Tests for ema_workbench.em_framework.evaluators."""

import pytest

import numpy as np

import ema_workbench

from ema_workbench import RealParameter, ScalarOutcome, Model, Sample
from ema_workbench.em_framework import evaluators, LHSSampler
from ema_workbench.em_framework.points import Experiment, SampleCollection
from ema_workbench.em_framework.experiment_runner import ExperimentRunner
from ema_workbench import EMAError

# Created on 14 Mar 2017
#
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>


def test_sequential_evaluator(mocker):
    model = mocker.Mock(spec=ema_workbench.Model)
    model.name = "test"

    mocked_generator = mocker.patch(
        "ema_workbench.em_framework.evaluators.experiment_generator", autospec=True
    )
    mocked_generator.return_value = iter(
        Experiment(str(i), "test", Sample(), Sample(), i) for i in range(10)
    )

    mocked_runner = mocker.Mock(ExperimentRunner)
    mocker.patch(
        "ema_workbench.em_framework.evaluators.ExperimentRunner",
        mocker.MagicMock(return_value=mocked_runner),
    )
    mocked_runner.run_experiment.return_value = {}, {}

    mocked_callback = mocker.patch(
        "ema_workbench.em_framework.evaluators.DefaultCallback"
    )

    with evaluators.SequentialEvaluator(model) as evaluator:
        evaluator.evaluate_experiments(mocked_generator([], [], []), mocked_callback)

    for i, entry in enumerate(mocked_runner.run_experiment.call_args_list):
        assert entry.args[0].name == str(i)

    with pytest.raises(TypeError):
        evaluators.SequentialEvaluator([object()])


def test_perform_experiments(mocker):
    mocked_function = mocker.Mock(return_value={"c": 1})
    model = ema_workbench.Model("test", function=mocked_function)
    model.uncertainties = [RealParameter("a", 0, 1)]
    model.levers = [RealParameter("b", 0, 1)]
    model.outcomes = [ScalarOutcome("c")]

    n_scenarios = 10
    n_policies = 2
    n_experiments = n_scenarios * n_policies

    mocked_callback_class = mocker.patch(
        "ema_workbench.em_framework.evaluators.DefaultCallback"
    )
    mocked_callback_instance = mocker.Mock()
    mocked_callback_class.return_value = mocked_callback_instance
    mocked_callback_instance.i = n_experiments  # this is an implicit assert

    with evaluators.SequentialEvaluator(model) as evaluator:
        evaluator.perform_experiments(
            n_scenarios,
            n_policies,
        )

    mocked_callback_class = mocker.patch(
        "ema_workbench.em_framework.evaluators.DefaultCallback"
    )
    mocked_callback_instance = mocker.Mock()
    mocked_callback_class.return_value = mocked_callback_instance
    mocked_callback_instance.i = n_scenarios  # this is an implicit assert

    with evaluators.SequentialEvaluator(model) as evaluator:
        evaluator.perform_experiments(n_scenarios, n_policies, combine="cycle")

    # what to check?
    # fixme, all kinds of permutations of all the possible keyword arguments

    mocked_callback_class = mocker.patch(
        "ema_workbench.em_framework.evaluators.DefaultCallback"
    )
    mocked_callback_instance = mocker.Mock()
    mocked_callback_class.return_value = mocked_callback_instance
    mocked_callback_instance.i = n_experiments
    ret = evaluators.perform_experiments(
        model,
        n_scenarios,
        n_policies,
        return_callback=True,
        callback=mocked_callback_class,
    )
    assert ret == mocked_callback_instance

    mocked_callback_class = mocker.patch(
        "ema_workbench.em_framework.evaluators.DefaultCallback"
    )
    mocked_callback_instance = mocker.Mock()
    mocked_callback_class.return_value = mocked_callback_instance
    mocked_callback_instance.i = 1
    with pytest.raises(EMAError):
        evaluators.perform_experiments(
            model,
            n_scenarios,
            n_policies,
        )

    with pytest.raises(EMAError):
        evaluators.perform_experiments(model)

    with pytest.raises(ValueError):
        evaluators.perform_experiments(
            model, n_scenarios, n_policies, combine="wrong value"
        )


def test_optimize(mocker):
    mocked_function = mocker.Mock(return_value={"d": 1, "e": 1})
    model = ema_workbench.Model("test", function=mocked_function)
    model.uncertainties = [RealParameter("a", 0, 1)]
    model.levers = [RealParameter("b", 0, 1), RealParameter("c", 0, 1)]
    model.outcomes = [
        ScalarOutcome("d", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("e", kind=ScalarOutcome.MAXIMIZE),
    ]

    nfe = 100

    mocked_optimize = mocker.patch(
        "ema_workbench.em_framework.evaluators._optimize", autospec=True
    )
    with evaluators.SequentialEvaluator(model) as evaluator:
        evaluator.optimize(nfe=nfe, searchover="levers", epsilons=[0.1, 0.1])
    assert mocked_optimize.call_count == 1

    mocked_optimize.reset_mock()
    with evaluators.SequentialEvaluator(model) as evaluator:
        evaluator.optimize(nfe=nfe, searchover="uncertainties", epsilons=[0.1, 0.1])
    assert mocked_optimize.call_count == 1

    mocked_optimize.reset_mock()
    evaluators.optimize(model, nfe=nfe, searchover="uncertainties", epsilons=[0.1, 0.1])
    assert mocked_optimize.call_count == 1

    with pytest.raises(NotImplementedError):
        with evaluators.SequentialEvaluator([model, model]) as evaluator:
            evaluator.optimize(nfe=nfe, searchover="uncertainties", epsilons=[0.1, 0.1])
    with pytest.raises(EMAError):
        with evaluators.SequentialEvaluator(model) as evaluator:
            evaluator.optimize(nfe=nfe, searchover="unknown value", epsilons=[0.1, 0.1])


def test_robust_optimize(mocker):
    mocked_function = mocker.Mock(return_value={"d": 1, "e": 1})
    model = ema_workbench.Model("test", function=mocked_function)
    model.uncertainties = [RealParameter("a", 0, 1)]
    model.levers = [RealParameter("b", 0, 1), RealParameter("c", 0, 1)]
    model.outcomes = [
        ScalarOutcome("d", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("e", kind=ScalarOutcome.MAXIMIZE),
    ]

    robustness_functions = [
        ScalarOutcome(
            "robustness_d",
            kind=ScalarOutcome.MAXIMIZE,
            variable_name="d",
            function=mocker.Mock(return_value={"r_d": 0.5}),
        ),
        ScalarOutcome(
            "robustness_e",
            kind=ScalarOutcome.MAXIMIZE,
            variable_name="e",
            function=mocker.Mock(return_value={"r_e": 0.5}),
        ),
    ]
    scenarios = 10

    nfe = 100

    mocked_optimize = mocker.patch(
        "ema_workbench.em_framework.evaluators._optimize", autospec=True
    )
    with evaluators.SequentialEvaluator(model) as evaluator:
        evaluator.robust_optimize(
            robustness_functions, scenarios=scenarios, nfe=nfe, epsilons=[0.1, 0.1]
        )
    assert mocked_optimize.call_count == 1


def test_setup(mocker):

    my_model = Model("some_model", function=mocker.Mock(return_value={"d": 1, "e": 1}))
    my_model.uncertainties = [RealParameter("a", 0, 1)]

    samples, parameters, n_samples = evaluators._setup(
        None,
        None,
        {},
        [
            my_model,
        ],
        union=True,
        parameter_type="uncertainties",
    )
    assert n_samples == 1
    for p in parameters:
        assert p in my_model.uncertainties

    n = 10
    samples, parameters, n_samples = evaluators._setup(
        n,
        LHSSampler(),
        None,
        [
            my_model,
        ],
        union=True,
        parameter_type="uncertainties",
    )
    assert n_samples == n
    for p in parameters:
        assert p.name in my_model.uncertainties

    samples = [
        Sample(1, a=0.5),
    ]
    samples, parameters, n_samples = evaluators._setup(
        samples,
        LHSSampler(),
        None,
        [
            my_model,
        ],
        union=True,
        parameter_type="uncertainties",
    )
    assert n_samples == len(samples)
    for p in parameters:
        assert p.name in my_model.uncertainties

    samples = Sample(1, a=0.5)
    samples, parameters, n_samples = evaluators._setup(
        samples,
        LHSSampler(),
        None,
        [
            my_model,
        ],
        union=True,
        parameter_type="uncertainties",
    )
    assert n_samples == 1
    for p in parameters:
        assert p.name in my_model.uncertainties

    rng = np.random.default_rng(42)
    n = 100
    samples = SampleCollection(rng.random((n, 1)), my_model.uncertainties)
    samples, parameters, n_samples = evaluators._setup(
        samples,
        None,
        None,
        [
            my_model,
        ],
        union=True,
        parameter_type="uncertainties",
    )
    assert n_samples == n