quaquel / EMAworkbench, build 6695764125 (push, via github, committed by web-flow)

30 Oct 2023 04:25PM UTC. Coverage: 80.893% (remained the same)
examples: Speed up the lake_problem function by ~30x (#301)

Speed up the lake_problem function by around 30x.

Benchmark: the average duration of 100 lake_model experiments drops from 0.940 seconds to 0.030 seconds (a 31.4x speedup).

I validated the output; it is consistent with that of the previous implementation.

The improved performance should allow more and broader analysis in the exercises.

Also prints a warning on the decisions KeyError.
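
A benchmark of this kind can be sketched with timeit; the import path and the keyword values passed to lake_problem below are assumptions for illustration, not taken from the PR:

    # hypothetical timing sketch for the lake_problem speedup
    import timeit

    from lake_model import lake_problem  # assumed module exposing lake_problem

    n = 100
    duration = timeit.timeit(lambda: lake_problem(b=0.42, q=2.0), number=n)
    print(f"average duration of {n} lake_model experiments: {duration / n:.3f} s")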

4619 of 5710 relevant lines covered (80.89%)

0.81 hits per line

Source file: /ema_workbench/em_framework/optimization.py (38.15% covered)
"""

"""
import copy
import functools
import os
import random
import shutil
import tarfile
import warnings

import numpy as np
import pandas as pd


from . import callbacks, evaluators
from .points import Scenario, Policy
from .outcomes import AbstractOutcome
from .parameters import IntegerParameter, RealParameter, CategoricalParameter, BooleanParameter
from .samplers import determine_parameters
from .util import determine_objects, ProgressTrackingMixIn
from ..util import get_module_logger, EMAError, temporary_filter, INFO

try:
    from platypus import (
        EpsNSGAII,
        Hypervolume,
        EpsilonIndicator,
        GenerationalDistance,
        Variator,
        Real,
        Integer,
        Subset,
        EpsilonProgressContinuation,
        RandomGenerator,
        TournamentSelector,
        NSGAII,
        EpsilonBoxArchive,
        Multimethod,
        GAOperator,
        SBX,
        PM,
        PCX,
        DifferentialEvolution,
        UNDX,
        SPX,
        UM,
        Solution,
        InvertedGenerationalDistance,
        Spacing,
    )  # @UnresolvedImport
    from platypus import Problem as PlatypusProblem

    import platypus

except ImportError:
    warnings.warn("platypus based optimization not available", ImportWarning)

    # minimal stand-ins so this module can still be imported without platypus
    class PlatypusProblem:
        constraints = []

        def __init__(self, *args, **kwargs):
            pass

    class Variator:
        def __init__(self, *args, **kwargs):
            pass

    class RandomGenerator:
        def __call__(self, *args, **kwargs):
            pass

    class TournamentSelector:
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, *args, **kwargs):
            pass

    class EpsilonProgressContinuation:
        pass

    EpsNSGAII = None
    platypus = None
    Real = Integer = Subset = None

# Created on 5 Jun 2017
#
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>

__all__ = [
    "Problem",
    "RobustProblem",
    "EpsilonProgress",
    "Convergence",
    "ArchiveLogger",
    "OperatorProbabilities",
    "rebuild_platypus_population",
    "HypervolumeMetric",
    "GenerationalDistanceMetric",
    "SpacingMetric",
    "InvertedGenerationalDistanceMetric",
    "EpsilonIndicatorMetric",
    "epsilon_nondominated",
    "to_problem",
    "to_robust_problem",
]
_logger = get_module_logger(__name__)

class Problem(PlatypusProblem):
    """Small extension to the platypus Problem object; includes information
    on the names of the decision variables, the names of the outcomes,
    and the type of search."""

    @property
    def parameter_names(self):
        return [e.name for e in self.parameters]

    def __init__(self, searchover, parameters, outcome_names, constraints, reference=None):
        if constraints is None:
            constraints = []

        super().__init__(len(parameters), len(outcome_names), nconstrs=len(constraints))
        assert searchover in ("levers", "uncertainties", "robust")

        if searchover == "levers":
            assert not reference or isinstance(reference, Scenario)
        elif searchover == "uncertainties":
            assert not reference or isinstance(reference, Policy)
        else:
            assert not reference

        self.searchover = searchover
        self.parameters = parameters
        self.outcome_names = outcome_names
        self.ema_constraints = constraints
        self.constraint_names = [c.name for c in constraints]
        self.reference = reference if reference else 0


class RobustProblem(Problem):
    """Small extension to the Problem object for robust optimization;
    adds the scenarios and the robustness functions."""

    def __init__(self, parameters, outcome_names, scenarios, robustness_functions, constraints):
        super().__init__("robust", parameters, outcome_names, constraints)
        assert len(robustness_functions) == len(outcome_names)
        self.scenarios = scenarios
        self.robustness_functions = robustness_functions


def to_problem(model, searchover, reference=None, constraints=None):
    """Helper function to create a Problem object

    Parameters
    ----------
    model : AbstractModel instance
    searchover : str
    reference : Policy or Scenario instance, optional
                overwrite the default scenario in case of searching over
                levers, or the default policy in case of searching over
                uncertainties
    constraints : list, optional

    Returns
    -------
    Problem instance

    """
    # extract the levers and the outcomes
    decision_variables = determine_parameters(model, searchover, union=True)

    outcomes = determine_objects(model, "outcomes")
    outcomes = [outcome for outcome in outcomes if outcome.kind != AbstractOutcome.INFO]
    outcome_names = [outcome.name for outcome in outcomes]

    if not outcomes:
        raise EMAError("no outcomes specified to optimize over, all outcomes are of kind=INFO")

    problem = Problem(
        searchover, decision_variables, outcome_names, constraints, reference=reference
    )
    problem.types = to_platypus_types(decision_variables)
    problem.directions = [outcome.kind for outcome in outcomes]
    problem.constraints[:] = "==0"

    return problem

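# Usage sketch (added for illustration; not part of optimization.py). Shows
# how to_problem might be used for a search over levers. The model, its
# function, and the parameter ranges are assumptions, not workbench defaults.
def _example_to_problem():  # pragma: no cover
    from ema_workbench import Model, RealParameter, ScalarOutcome

    def dummy(a=1.0, b=1.0):
        return {"y": a + b}

    model = Model("dummy", function=dummy)
    model.levers = [RealParameter("a", 0, 2), RealParameter("b", 0, 2)]
    model.outcomes = [ScalarOutcome("y", kind=ScalarOutcome.MINIMIZE)]

    problem = to_problem(model, searchover="levers")
    return problem.parameter_names, problem.outcome_names
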
def to_robust_problem(model, scenarios, robustness_functions, constraints=None):
    """Helper function to create a RobustProblem object

    Parameters
    ----------
    model : AbstractModel instance
    scenarios : collection
    robustness_functions : iterable of ScalarOutcomes
    constraints : list, optional

    Returns
    -------
    RobustProblem instance

    """
    # extract the levers and the outcomes
    decision_variables = determine_parameters(model, "levers", union=True)

    outcomes = robustness_functions
    outcomes = [outcome for outcome in outcomes if outcome.kind != AbstractOutcome.INFO]
    outcome_names = [outcome.name for outcome in outcomes]

    if not outcomes:
        raise EMAError("no outcomes specified to optimize over, all outcomes are of kind=INFO")

    problem = RobustProblem(
        decision_variables, outcome_names, scenarios, robustness_functions, constraints
    )

    problem.types = to_platypus_types(decision_variables)
    problem.directions = [outcome.kind for outcome in outcomes]
    problem.constraints[:] = "==0"

    return problem


def to_platypus_types(decision_variables):
    """Helper function for mapping workbench parameter types to
    platypus parameter types."""
    # TODO:: should categorical not be platypus.Subset, with size == 1?
    _type_mapping = {
        RealParameter: platypus.Real,
        IntegerParameter: platypus.Integer,
        CategoricalParameter: platypus.Subset,
        BooleanParameter: platypus.Subset,
    }

    types = []
    for dv in decision_variables:
        klass = _type_mapping[type(dv)]

        if not isinstance(dv, (CategoricalParameter, BooleanParameter)):
            decision_variable = klass(dv.lower_bound, dv.upper_bound)
        else:
            decision_variable = klass(dv.categories, 1)

        types.append(decision_variable)
    return types


def to_dataframe(solutions, dvnames, outcome_names):
    """Helper function to turn a collection of platypus Solution instances
    into a pandas DataFrame

    Parameters
    ----------
    solutions : collection of Solution instances
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame
    """
    results = []
    for solution in platypus.unique(solutions):
        vars = transform_variables(solution.problem, solution.variables)  # @ReservedAssignment

        decision_vars = dict(zip(dvnames, vars))
        decision_out = dict(zip(outcome_names, solution.objectives))

        result = decision_vars.copy()
        result.update(decision_out)

        results.append(result)

    results = pd.DataFrame(results, columns=dvnames + outcome_names)
    return results


def process_uncertainties(jobs):
    """Helper function to map jobs generated by platypus to Scenario objects

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    scenarios = []

    jobs = _process(jobs, problem)
    for i, job in enumerate(jobs):
        name = str(i)
        scenario = Scenario(name=name, **job)
        scenarios.append(scenario)

    policies = problem.reference

    return scenarios, policies


def process_levers(jobs):
    """Helper function to map jobs generated by platypus to Policy objects

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    policies = []
    jobs = _process(jobs, problem)
    for i, job in enumerate(jobs):
        name = str(i)
        job = Policy(name=name, **job)
        policies.append(job)

    scenarios = problem.reference

    return scenarios, policies


def _process(jobs, problem):
    """Helper function to transform a platypus job into a dict with the
    correct values for the workbench."""
    processed_jobs = []
    for job in jobs:
        variables = transform_variables(problem, job.solution.variables)
        processed_job = {}
        for param, var in zip(problem.parameters, variables):
            try:
                var = var.value
            except AttributeError:
                pass
            processed_job[param.name] = var
        processed_jobs.append(processed_job)
    return processed_jobs


def process_robust(jobs):
    """Helper function to process robust optimization jobs

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    _, policies = process_levers(jobs)
    scenarios = jobs[0].solution.problem.scenarios

    return scenarios, policies


def transform_variables(problem, variables):
    """Helper function for transforming platypus variables."""
    converted_vars = []
    for type, var in zip(problem.types, variables):  # @ReservedAssignment
        var = type.decode(var)
        try:
            var = var[0]
        except TypeError:
            pass

        converted_vars.append(var)
    return converted_vars


def evaluate(jobs_collection, experiments, outcomes, problem):
    """Helper function for mapping the results from perform_experiments back
    to what platypus needs."""
    searchover = problem.searchover
    outcome_names = problem.outcome_names
    constraints = problem.ema_constraints

    if searchover == "levers":
        column = "policy"
    else:
        column = "scenario"

    for entry, job in jobs_collection:
        logical = experiments[column] == entry.name

        job_outputs = {}
        for k, v in outcomes.items():
            job_outputs[k] = v[logical][0]

        # TODO:: only retain uncertainties
        job_experiment = experiments[logical]
        job_constraints = _evaluate_constraints(job_experiment, job_outputs, constraints)
        job_outcomes = [job_outputs[key] for key in outcome_names]

        if job_constraints:
            job.solution.problem.function = lambda _: (job_outcomes, job_constraints)
        else:
            job.solution.problem.function = lambda _: job_outcomes
        job.solution.evaluate()


def evaluate_robust(jobs_collection, experiments, outcomes, problem):
    """Helper function for mapping the results from perform_experiments back
    to what platypus needs."""
    robustness_functions = problem.robustness_functions
    constraints = problem.ema_constraints

    for entry, job in jobs_collection:
        logical = experiments["policy"] == entry.name

        job_outcomes_dict = {}
        job_outcomes = []
        for rf in robustness_functions:
            data = [outcomes[var_name][logical] for var_name in rf.variable_name]
            score = rf.function(*data)
            job_outcomes_dict[rf.name] = score
            job_outcomes.append(score)

        # TODO:: only retain levers
        job_experiment = experiments[logical].iloc[0]
        job_constraints = _evaluate_constraints(job_experiment, job_outcomes_dict, constraints)

        if job_constraints:
            job.solution.problem.function = lambda _: (job_outcomes, job_constraints)
        else:
            job.solution.problem.function = lambda _: job_outcomes

        job.solution.evaluate()


def _evaluate_constraints(job_experiment, job_outcomes, constraints):
    """Helper function for evaluating the constraints for a given job."""
    job_constraints = []
    for constraint in constraints:
        data = [job_experiment[var] for var in constraint.parameter_names]
        data += [job_outcomes[var] for var in constraint.outcome_names]
        constraint_value = constraint.process(data)
        job_constraints.append(constraint_value)
    return job_constraints


class AbstractConvergenceMetric:
    """Base convergence metric class."""

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.results = []

    def __call__(self, optimizer):
        raise NotImplementedError

    def reset(self):
        self.results = []

    def get_results(self):
        return self.results


class EpsilonProgress(AbstractConvergenceMetric):
    """Epsilon progress convergence metric class."""

    def __init__(self):
        super().__init__("epsilon_progress")

    def __call__(self, optimizer):
        self.results.append(optimizer.algorithm.archive.improvements)


class MetricWrapper:
    """Wrapper class for wrapping platypus indicators

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    kwargs : dict
             any additional keyword arguments to be passed
             on to the wrapped platypus indicator class

    Notes
    -----
    this class relies on multi-inheritance and careful consideration
    of the MRO to conveniently wrap the convergence metrics provided
    by platypus.

    """

    def __init__(self, reference_set, problem, **kwargs):
        self.problem = problem
        reference_set = rebuild_platypus_population(reference_set, self.problem)
        super().__init__(reference_set=reference_set, **kwargs)

    def calculate(self, archive):
        solutions = rebuild_platypus_population(archive, self.problem)
        return super().calculate(solutions)

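# Illustration (added; not part of optimization.py) of the MRO-based wrapping
# MetricWrapper relies on: for HypervolumeMetric below, the MRO runs
# HypervolumeMetric -> MetricWrapper -> Hypervolume -> ..., so
# MetricWrapper.__init__ first converts the reference_set DataFrame into
# platypus Solutions and only then hands it to Hypervolume via super().
def _example_metric_mro():  # pragma: no cover
    return [klass.__name__ for klass in HypervolumeMetric.__mro__]
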
class HypervolumeMetric(MetricWrapper, Hypervolume):
    """Hypervolume metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance


    this is a thin wrapper around Hypervolume as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    pass


class GenerationalDistanceMetric(MetricWrapper, GenerationalDistance):
    """GenerationalDistance metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    d : int, default=1
        the power in the generational distance function


    This is a thin wrapper around GenerationalDistance as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    see https://link.springer.com/content/pdf/10.1007/978-3-319-15892-1_8.pdf
    for more information

    """

    pass


class InvertedGenerationalDistanceMetric(MetricWrapper, InvertedGenerationalDistance):
    """InvertedGenerationalDistance metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    d : int, default=1
        the power in the inverted generational distance function


    This is a thin wrapper around InvertedGenerationalDistance as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    see https://link.springer.com/content/pdf/10.1007/978-3-319-15892-1_8.pdf
    for more information

    """

    pass


class EpsilonIndicatorMetric(MetricWrapper, EpsilonIndicator):
    """EpsilonIndicator metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance


    this is a thin wrapper around EpsilonIndicator as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    pass


class SpacingMetric(MetricWrapper, Spacing):
    """Spacing metric

    Parameters
    ----------
    problem : PlatypusProblem instance


    this is a thin wrapper around Spacing as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    def __init__(self, problem):
        self.problem = problem


class HyperVolume(AbstractConvergenceMetric):
    """Hypervolume convergence metric class

    This metric is derived from a hypervolume measure, which describes the
    multi-dimensional volume of space contained within the pareto front. When
    computed with minimums and maximums, it describes the ratio of dominated
    outcomes to all possible outcomes in the extent of the space. Getting this
    number to be high or low is not necessarily important, as not all outcomes
    within the min-max range will be feasible. But having the hypervolume remain
    fairly stable over multiple generations of the evolutionary algorithm provides
    an indicator of convergence.

    Parameters
    ----------
    minimum : numpy array
    maximum : numpy array


    This class is deprecated. Use ArchiveLogger instead and calculate the
    hypervolume afterwards with HypervolumeMetric, as shown in the directed
    search tutorial.

    """

    def __init__(self, minimum, maximum):
        super().__init__("hypervolume")
        warnings.warn(
            "HyperVolume is deprecated, use ArchiveLogger and HypervolumeMetric instead",
            DeprecationWarning,
        )
        self.hypervolume_func = Hypervolume(minimum=minimum, maximum=maximum)

    def __call__(self, optimizer):
        self.results.append(self.hypervolume_func.calculate(optimizer.algorithm.archive))

    @classmethod
    def from_outcomes(cls, outcomes):
        ranges = [o.expected_range for o in outcomes if o.kind != o.INFO]
        minimum, maximum = np.asarray(list(zip(*ranges)))
        return cls(minimum, maximum)


class ArchiveLogger(AbstractConvergenceMetric):
    """Helper class to write the archive to disk at each iteration

    Parameters
    ----------
    directory : str
    decision_varnames : list of str
    outcome_varnames : list of str
    base_filename : str, optional
    """

    def __init__(
        self, directory, decision_varnames, outcome_varnames, base_filename="archives.tar.gz"
    ):
        super().__init__("archive_logger")

        # FIXME how to handle case where directory already exists
        self.directory = os.path.abspath(directory)
        self.temp = os.path.join(self.directory, "tmp")
        os.mkdir(self.temp)

        self.base = base_filename
        self.decision_varnames = decision_varnames
        self.outcome_varnames = outcome_varnames
        self.tarfilename = os.path.join(self.directory, base_filename)

    def __call__(self, optimizer):
        archive = to_dataframe(optimizer.result, self.decision_varnames, self.outcome_varnames)
        archive.to_csv(os.path.join(self.temp, f"{optimizer.nfe}.csv"))

    def reset(self):
        # FIXME what needs to go here?
        pass

    def get_results(self):
        # bundle all per-generation csv files into a single tar.gz archive
        with tarfile.open(self.tarfilename, "w:gz") as z:
            z.add(self.temp, arcname=os.path.basename(self.temp))

        shutil.rmtree(self.temp)
        return None

    @classmethod
    def load_archives(cls, filename):
        """Load the archives stored with the ArchiveLogger

        Parameters
        ----------
        filename : str
                   relative path to file

        Returns
        -------
        dict with nfe as key and dataframe as value
        """
        archives = {}
        with tarfile.open(os.path.abspath(filename)) as fh:
            for entry in fh.getmembers():
                if entry.name.endswith("csv"):
                    key = entry.name.split("/")[1][:-4]
                    archives[int(key)] = pd.read_csv(fh.extractfile(entry))
        return archives

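# Usage sketch (added; not part of optimization.py): the recommended
# replacement for the deprecated HyperVolume metric above. Archives are
# logged during the run and hypervolume is computed afterwards. The model,
# paths, nfe, and epsilon values are placeholders.
def _example_posthoc_hypervolume(model):  # pragma: no cover
    from ema_workbench import MultiprocessingEvaluator

    convergence_metrics = [
        ArchiveLogger(
            "./archives",
            [lever.name for lever in model.levers],
            [outcome.name for outcome in model.outcomes],
        )
    ]

    with MultiprocessingEvaluator(model) as evaluator:
        results, convergence = evaluator.optimize(
            nfe=10000,
            searchover="levers",
            epsilons=[0.1],
            convergence=convergence_metrics,
        )

    archives = ArchiveLogger.load_archives("./archives/archives.tar.gz")
    problem = to_problem(model, searchover="levers")
    hv = HypervolumeMetric(results, problem)  # final results as reference set
    return {
        nfe: hv.calculate(archive.iloc[:, 1:])  # drop the csv index column
        for nfe, archive in sorted(archives.items())
    }
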
class OperatorProbabilities(AbstractConvergenceMetric):
    """OperatorProbabilities convergence tracker for use with
    auto-adaptive operator selection.

    Parameters
    ----------
    name : str
    index : int


    State of the art MOEAs like Borg (and the GenerationalBorg provided by the
    workbench) use auto-adaptive operator selection. The algorithm has multiple
    different evolutionary operators. Over the run, it tracks how well each
    operator is doing in producing fitter offspring. The probability of the
    algorithm using a given evolutionary operator is proportional to how well
    this operator has been doing in producing fitter offspring in recent
    generations. This class can be used to track these probabilities over the
    run of the algorithm.

    """

    def __init__(self, name, index):
        super().__init__(name)
        self.index = index

    def __call__(self, optimizer):
        try:
            props = optimizer.algorithm.variator.probabilities
            self.results.append(props[self.index])
        except AttributeError:
            pass


def epsilon_nondominated(results, epsilons, problem):
    """Merge the list of results into a single set of
    non-dominated results using the provided epsilon values

    Parameters
    ----------
    results : list of DataFrames
    epsilons : epsilon values for each objective
    problem : PlatypusProblem instance

    Returns
    -------
    DataFrame

    Notes
    -----
    this is a platypus based alternative to pareto.py (https://github.com/matthewjwoodruff/pareto.py)
    """
    if problem.nobjs != len(epsilons):
        raise ValueError(
            f"the number of epsilon values ({len(epsilons)}) must match "
            f"the number of objectives ({problem.nobjs})"
        )

    results = pd.concat(results, ignore_index=True)
    solutions = rebuild_platypus_population(results, problem)
    archive = EpsilonBoxArchive(epsilons)
    archive += solutions

    return to_dataframe(archive, problem.parameter_names, problem.outcome_names)

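# Usage sketch (added; not part of optimization.py): merging the results of
# several random seeds into a single epsilon-nondominated set. The model and
# the epsilon value are placeholders.
def _example_merge_seeds(model, results_per_seed):  # pragma: no cover
    # results_per_seed: list of DataFrames as returned by evaluator.optimize
    problem = to_problem(model, searchover="levers")
    epsilons = [0.1] * problem.nobjs
    return epsilon_nondominated(results_per_seed, epsilons, problem)
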
class Convergence(ProgressTrackingMixIn):
    """Helper class for tracking the convergence of the optimization."""

    valid_metrics = {"hypervolume", "epsilon_progress", "archive_logger"}

    def __init__(self, metrics, max_nfe, convergence_freq=1000, logging_freq=5, log_progress=False):
        super().__init__(
            max_nfe,
            logging_freq,
            _logger,
            log_progress=log_progress,
            log_func=lambda self: f"generation {self.generation}, {self.i}/{self.max_nfe}",
        )

        self.max_nfe = max_nfe
        self.generation = -1
        self.index = []
        self.last_check = 0

        if metrics is None:
            metrics = []

        self.metrics = metrics
        self.convergence_freq = convergence_freq
        self.logging_freq = logging_freq

        # TODO what is the point of this code?
        for metric in metrics:
            assert isinstance(metric, AbstractConvergenceMetric)
            metric.reset()

    def __call__(self, optimizer, force=False):
        """Store convergence information at the specified convergence
        frequency.

        Parameters
        ----------
        optimizer : platypus optimizer instance
        force : boolean, optional
                if True, convergence information will always be stored
                if False, convergence information will be stored if the
                number of nfe since the last time of storing is equal to
                or higher than convergence_freq


        The primary use case for force is to ensure that convergence
        information is stored once the stopping condition of the optimizer
        has been reached, so that the final convergence information is kept.

        """
        nfe = optimizer.nfe
        super().__call__(nfe - self.i)

        self.generation += 1

        if (nfe >= self.last_check + self.convergence_freq) or (self.last_check == 0) or force:
            self.index.append(nfe)
            self.last_check = nfe

            for metric in self.metrics:
                metric(optimizer)

    def to_dataframe(self):
        progress = {
            metric.name: result for metric in self.metrics if (result := metric.get_results())
        }

        progress = pd.DataFrame.from_dict(progress)

        if not progress.empty:
            progress["nfe"] = self.index

        return progress


def rebuild_platypus_population(archive, problem):
    """Rebuild a population of platypus Solution instances

    Parameters
    ----------
    archive : DataFrame
    problem : PlatypusProblem instance

    Returns
    -------
    list of platypus Solutions

    """
    expected_columns = problem.nvars + problem.nobjs
    actual_columns = len(archive.columns)

    if actual_columns != expected_columns:
        raise EMAError(
            f"The number of columns in the archive ({actual_columns}) does not match the "
            f"expected number of decision variables and objectives ({expected_columns})."
        )

    solutions = []
    for row in archive.itertuples():
        try:
            decision_variables = [getattr(row, attr) for attr in problem.parameter_names]
        except AttributeError:
            missing_parameters = [
                attr for attr in problem.parameter_names if not hasattr(row, attr)
            ]
            raise EMAError(f"parameter names {missing_parameters} not found in archive")

        try:
            objectives = [getattr(row, attr) for attr in problem.outcome_names]
        except AttributeError:
            missing_outcomes = [attr for attr in problem.outcome_names if not hasattr(row, attr)]
            raise EMAError(f"outcome names {missing_outcomes} not found in archive")

        solution = Solution(problem)
        solution.variables = [
            platypus_type.encode(value)
            for platypus_type, value in zip(problem.types, decision_variables)
        ]
        solution.objectives = objectives
        solutions.append(solution)
    return solutions


class CombinedVariator(Variator):
    """Variator for problems with mixed decision-variable types, combining
    type-specific crossover and mutation operators."""

    def __init__(self, crossover_prob=0.5, mutation_prob=1):
        super().__init__(2)
        self.SBX = platypus.SBX()
        self.crossover_prob = crossover_prob
        self.mutation_prob = mutation_prob

    def evolve(self, parents):
        child1 = copy.deepcopy(parents[0])
        child2 = copy.deepcopy(parents[1])
        problem = child1.problem

        # crossover: evolve each decision variable with the type-specific operator
        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.crossover_prob:
                klass = kind.__class__
                child1, child2 = self._crossover[klass](self, child1, child2, i, kind)
                child1.evaluated = False
                child2.evaluated = False

        # mutate
        for child in [child1, child2]:
            self.mutate(child)

        return [child1, child2]

    def mutate(self, child):
        problem = child.problem

        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.mutation_prob:
                klass = kind.__class__
                child = self._mutate[klass](self, child, i, kind)
                child.evaluated = False

    def crossover_real(self, child1, child2, i, type):  # @ReservedAssignment
        # simulated binary crossover (SBX)
        x1 = float(child1.variables[i])
        x2 = float(child2.variables[i])
        lb = type.min_value
        ub = type.max_value

        x1, x2 = self.SBX.sbx_crossover(x1, x2, lb, ub)

        child1.variables[i] = x1
        child2.variables[i] = x2

        return child1, child2

    def crossover_integer(self, child1, child2, i, type):  # @ReservedAssignment
        # half uniform crossover (HUX)
        for j in range(type.nbits):
            if child1.variables[i][j] != child2.variables[i][j]:
                if bool(random.getrandbits(1)):
                    child1.variables[i][j] = not child1.variables[i][j]
                    child2.variables[i][j] = not child2.variables[i][j]
        return child1, child2

    def crossover_categorical(self, child1, child2, i, type):  # @ReservedAssignment
        # subset crossover (SSX)
        # can probably be implemented in a simpler manner, since the size
        # of the subset is fixed to 1
        s1 = set(child1.variables[i])
        s2 = set(child2.variables[i])

        for j in range(type.size):
            if (
                (child2.variables[i][j] not in s1)
                and (child1.variables[i][j] not in s2)
                and (random.random() < 0.5)
            ):
                temp = child1.variables[i][j]
                child1.variables[i][j] = child2.variables[i][j]
                child2.variables[i][j] = temp

        return child1, child2

    def mutate_real(self, child, i, type, distribution_index=20):  # @ReservedAssignment
        # polynomial mutation (PM)
        x = child.variables[i]
        lower = type.min_value
        upper = type.max_value

        u = random.random()
        dx = upper - lower

        if u < 0.5:
            bl = (x - lower) / dx
            b = 2.0 * u + (1.0 - 2.0 * u) * pow(1.0 - bl, distribution_index + 1.0)
            delta = pow(b, 1.0 / (distribution_index + 1.0)) - 1.0
        else:
            bu = (upper - x) / dx
            b = 2.0 * (1.0 - u) + 2.0 * (u - 0.5) * pow(1.0 - bu, distribution_index + 1.0)
            delta = 1.0 - pow(b, 1.0 / (distribution_index + 1.0))

        x = x + delta * dx
        x = max(lower, min(x, upper))

        child.variables[i] = x
        return child

    def mutate_integer(self, child, i, type, probability=1):  # @ReservedAssignment
        # bit flip mutation
        for j in range(type.nbits):
            if random.random() <= probability:
                child.variables[i][j] = not child.variables[i][j]
        return child

    def mutate_categorical(self, child, i, type):  # @ReservedAssignment
        # replace a random member of the subset with a random non-member
        probability = 1 / type.size

        if random.random() <= probability:
            subset = child.variables[i]

            if len(subset) < len(type.elements):
                j = random.randrange(len(subset))

                nonmembers = list(set(type.elements) - set(subset))
                k = random.randrange(len(nonmembers))
                subset[j] = nonmembers[k]

            child.variables[i] = subset

        return child

    _crossover = {
        Real: crossover_real,
        Integer: crossover_integer,
        Subset: crossover_categorical,
    }

    _mutate = {
        Real: mutate_real,
        Integer: mutate_integer,
        Subset: mutate_categorical,
    }


def _optimize(
    problem,
    evaluator,
    algorithm,
    convergence,
    nfe,
    convergence_freq,
    logging_freq,
    variator=None,
    **kwargs,
):
    klass = problem.types[0].__class__

    try:
        eps_values = kwargs["epsilons"]
    except KeyError:
        pass
    else:
        if len(eps_values) != len(problem.outcome_names):
            raise EMAError("number of epsilon values does not match number of outcomes")

    if variator is None:
        # use platypus' default variator when all decision variables share one
        # type, otherwise fall back to the CombinedVariator defined above
        if all(isinstance(t, klass) for t in problem.types):
            variator = None
        else:
            variator = CombinedVariator()

    optimizer = algorithm(
        problem, evaluator=evaluator, variator=variator, log_frequency=500, **kwargs
    )

    convergence = Convergence(
        convergence, nfe, convergence_freq=convergence_freq, logging_freq=logging_freq
    )
    callback = functools.partial(convergence, optimizer)
    evaluator.callback = callback

    with temporary_filter(name=[callbacks.__name__, evaluators.__name__], level=INFO):
        optimizer.run(nfe)

    convergence(optimizer, force=True)

    results = to_dataframe(optimizer.result, problem.parameter_names, problem.outcome_names)
    convergence = convergence.to_dataframe()

    message = "optimization completed, found {} solutions"
    _logger.info(message.format(len(optimizer.archive)))

    if convergence.empty:
        return results
    else:
        return results, convergence


class BORGDefaultDescriptor:
    # note: this treats defaults as class level attributes!

    def __init__(self, default_function):
        self.default_function = default_function

    def __get__(self, instance, owner):
        return self.default_function(instance.problem.nvars)

    def __set_name__(self, owner, name):
        self.name = name

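# Illustration (added; not part of optimization.py): BORGDefaultDescriptor
# recomputes its value from the owning instance's problem size on every
# access, so pm_p = BORGDefaultDescriptor(lambda x: 1 / x) yields a mutation
# probability of 1 / nvars. The classes below are stand-ins.
def _example_borg_descriptor():  # pragma: no cover
    class _FakeProblem:
        nvars = 10

    class _FakeAlgorithm:
        pm_p = BORGDefaultDescriptor(lambda nvars: 1 / nvars)

        def __init__(self, problem):
            self.problem = problem

    return _FakeAlgorithm(_FakeProblem()).pm_p  # 0.1
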
class GenerationalBorg(EpsilonProgressContinuation):
    """A generational implementation of the BORG Framework

    This algorithm adopts Epsilon Progress Continuation and Auto Adaptive
    Operator Selection, but embeds them within the NSGAII generational
    algorithm, rather than the steady state implementation used by the BORG
    algorithm.

    The parametrization of all operators is based on the default values as used
    in Borg 1.9.

    Note:: limited to RealParameters only.

    """

    pm_p = BORGDefaultDescriptor(lambda x: 1 / x)
    pm_dist = 20

    sbx_prop = 1
    sbx_dist = 15

    de_rate = 0.1
    de_stepsize = 0.5

    um_p = BORGDefaultDescriptor(lambda x: 1 / x)

    spx_nparents = 10
    spx_noffspring = 2
    spx_expansion = 0.3

    pcx_nparents = 10
    pcx_noffspring = 2
    pcx_eta = 0.1
    pcx_zeta = 0.1

    undx_nparents = 10
    undx_noffspring = 2
    undx_zeta = 0.5
    undx_eta = 0.35

    def __init__(
        self,
        problem,
        epsilons,
        population_size=100,
        generator=RandomGenerator(),
        selector=TournamentSelector(2),
        variator=None,
        **kwargs,
    ):
        self.problem = problem

        # Parameterization taken from
        # Borg: An Auto-Adaptive MOEA Framework - Hadka, Reed
        variators = [
            GAOperator(
                SBX(probability=self.sbx_prop, distribution_index=self.sbx_dist),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                PCX(
                    nparents=self.pcx_nparents,
                    noffspring=self.pcx_noffspring,
                    eta=self.pcx_eta,
                    zeta=self.pcx_zeta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                DifferentialEvolution(crossover_rate=self.de_rate, step_size=self.de_stepsize),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                UNDX(
                    nparents=self.undx_nparents,
                    noffspring=self.undx_noffspring,
                    zeta=self.undx_zeta,
                    eta=self.undx_eta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                SPX(
                    nparents=self.spx_nparents,
                    noffspring=self.spx_noffspring,
                    expansion=self.spx_expansion,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            UM(probability=self.um_p),
        ]

        variator = Multimethod(self, variators)

        super().__init__(
            NSGAII(
                problem,
                population_size,
                generator,
                selector,
                variator,
                EpsilonBoxArchive(epsilons),
                **kwargs,
            )
        )