quaquel / EMAworkbench / build 7165526598 (push, via github web-flow)

11 Dec 2023 09:37AM UTC. Coverage: 80.332% (remained the same).

Bump actions/setup-python from 4 to 5 (#332)

Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

4693 of 5842 relevant lines covered (80.33%), 0.8 hits per line.

Source file: /ema_workbench/em_framework/optimization.py
"""Support for multi-objective (robust) optimization using the platypus-opt
library."""
import copy
import functools
import os
import random
import shutil
import tarfile
import warnings

import numpy as np
import pandas as pd

from . import callbacks, evaluators
from .points import Scenario, Policy
from .outcomes import AbstractOutcome
from .parameters import IntegerParameter, RealParameter, CategoricalParameter, BooleanParameter
from .samplers import determine_parameters
from .util import determine_objects, ProgressTrackingMixIn
from ..util import get_module_logger, EMAError, temporary_filter, INFO

try:
    from platypus import (
        EpsNSGAII,
        Hypervolume,
        EpsilonIndicator,
        GenerationalDistance,
        Variator,
        Real,
        Integer,
        Subset,
        EpsilonProgressContinuation,
        RandomGenerator,
        TournamentSelector,
        NSGAII,
        EpsilonBoxArchive,
        Multimethod,
        GAOperator,
        SBX,
        PM,
        PCX,
        DifferentialEvolution,
        UNDX,
        SPX,
        UM,
        Solution,
        InvertedGenerationalDistance,
        Spacing,
    )  # @UnresolvedImport
    from platypus import Problem as PlatypusProblem

    import platypus

except ImportError:
    warnings.warn(
        "Platypus based optimization not available. Install with `pip install platypus-opt`",
        ImportWarning,
    )

    # minimal stand-ins so this module can still be imported without platypus-opt
    class PlatypusProblem:
        constraints = []

        def __init__(self, *args, **kwargs):
            pass

    class Variator:
        def __init__(self, *args, **kwargs):
            pass

    class RandomGenerator:
        def __call__(self, *args, **kwargs):
            pass

    class TournamentSelector:
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, *args, **kwargs):
            pass

    class EpsilonProgressContinuation:
        pass

    EpsNSGAII = None
    platypus = None
    Real = Integer = Subset = None

# Created on 5 Jun 2017
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>

__all__ = [
    "Problem",
    "RobustProblem",
    "EpsilonProgress",
    "Convergence",
    "ArchiveLogger",
    "OperatorProbabilities",
    "rebuild_platypus_population",
    "HypervolumeMetric",
    "GenerationalDistanceMetric",
    "SpacingMetric",
    "InvertedGenerationalDistanceMetric",
    "EpsilonIndicatorMetric",
    "epsilon_nondominated",
    "to_problem",
    "to_robust_problem",
]
_logger = get_module_logger(__name__)


class Problem(PlatypusProblem):
    """Small extension to the platypus Problem object; adds information on
    the names of the decision variables, the names of the outcomes,
    and the type of search."""

    @property
    def parameter_names(self):
        return [e.name for e in self.parameters]

    def __init__(self, searchover, parameters, outcome_names, constraints, reference=None):
        if constraints is None:
            constraints = []

        super().__init__(len(parameters), len(outcome_names), nconstrs=len(constraints))
        assert searchover in ("levers", "uncertainties", "robust")

        if searchover == "levers":
            assert not reference or isinstance(reference, Scenario)
        elif searchover == "uncertainties":
            assert not reference or isinstance(reference, Policy)
        else:
            assert not reference

        self.searchover = searchover
        self.parameters = parameters
        self.outcome_names = outcome_names
        self.ema_constraints = constraints
        self.constraint_names = [c.name for c in constraints]
        self.reference = reference if reference else 0


class RobustProblem(Problem):
    """Small extension to Problem for robust optimization; adds the
    scenarios and the robustness functions."""

    def __init__(self, parameters, outcome_names, scenarios, robustness_functions, constraints):
        super().__init__("robust", parameters, outcome_names, constraints)
        assert len(robustness_functions) == len(outcome_names)
        self.scenarios = scenarios
        self.robustness_functions = robustness_functions


def to_problem(model, searchover, reference=None, constraints=None):
    """Helper function to create a Problem object

    Parameters
    ----------
    model : AbstractModel instance
    searchover : str
    reference : Policy or Scenario instance, optional
                overwrite the default scenario in case of searching over
                levers, or the default policy in case of searching over
                uncertainties
    constraints : list, optional

    Returns
    -------
    Problem instance

    """
    # extract the levers and the outcomes
    decision_variables = determine_parameters(model, searchover, union=True)

    outcomes = determine_objects(model, "outcomes")
    outcomes = [outcome for outcome in outcomes if outcome.kind != AbstractOutcome.INFO]
    outcome_names = [outcome.name for outcome in outcomes]

    if not outcomes:
        raise EMAError("No outcomes specified to optimize over, all outcomes are of kind=INFO")

    problem = Problem(
        searchover, decision_variables, outcome_names, constraints, reference=reference
    )
    problem.types = to_platypus_types(decision_variables)
    problem.directions = [outcome.kind for outcome in outcomes]
    problem.constraints[:] = "==0"

    return problem
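

# Illustrative sketch (not part of the module): given a workbench model with
# levers and outcomes defined, to_problem can be used directly when working
# with platypus at a lower level. The model and its function are assumptions.
#
#     from ema_workbench import Model, RealParameter, ScalarOutcome
#
#     model = Model("example", function=some_function)  # some_function is hypothetical
#     model.levers = [RealParameter("x", 0.0, 1.0)]
#     model.outcomes = [ScalarOutcome("y", kind=ScalarOutcome.MINIMIZE)]
#
#     problem = to_problem(model, searchover="levers")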


def to_robust_problem(model, scenarios, robustness_functions, constraints=None):
    """Helper function to create a RobustProblem object

    Parameters
    ----------
    model : AbstractModel instance
    scenarios : collection
    robustness_functions : iterable of ScalarOutcomes
    constraints : list, optional

    Returns
    -------
    RobustProblem instance

    """
    # extract the levers and the outcomes
    decision_variables = determine_parameters(model, "levers", union=True)

    outcomes = robustness_functions
    outcomes = [outcome for outcome in outcomes if outcome.kind != AbstractOutcome.INFO]
    outcome_names = [outcome.name for outcome in outcomes]

    if not outcomes:
        raise EMAError("No outcomes specified to optimize over, all outcomes are of kind=INFO")

    problem = RobustProblem(
        decision_variables, outcome_names, scenarios, robustness_functions, constraints
    )

    problem.types = to_platypus_types(decision_variables)
    problem.directions = [outcome.kind for outcome in outcomes]
    problem.constraints[:] = "==0"

    return problem
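

# Illustrative sketch (continuing the hypothetical model above): robustness
# functions are ScalarOutcomes that aggregate an outcome over a collection of
# scenarios; the aggregation function used here is an assumption.
#
#     from ema_workbench import ScalarOutcome
#     from ema_workbench.em_framework.samplers import sample_uncertainties
#
#     robustness_functions = [
#         ScalarOutcome(
#             "mean_y", kind=ScalarOutcome.MINIMIZE, variable_name="y", function=np.mean
#         )
#     ]
#     scenarios = sample_uncertainties(model, 10)
#     problem = to_robust_problem(model, scenarios, robustness_functions)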


def to_platypus_types(decision_variables):
    """Helper function for mapping from workbench parameter types to
    platypus parameter types"""
    # TODO:: should categorical not be platypus.Subset, with size == 1?
    _type_mapping = {
        RealParameter: platypus.Real,
        IntegerParameter: platypus.Integer,
        CategoricalParameter: platypus.Subset,
        BooleanParameter: platypus.Subset,
    }

    types = []
    for dv in decision_variables:
        klass = _type_mapping[type(dv)]

        if not isinstance(dv, (CategoricalParameter, BooleanParameter)):
            decision_variable = klass(dv.lower_bound, dv.upper_bound)
        else:
            decision_variable = klass(dv.categories, 1)

        types.append(decision_variable)
    return types


def to_dataframe(solutions, dvnames, outcome_names):
    """Helper function to turn a collection of platypus Solution instances
    into a pandas DataFrame

    Parameters
    ----------
    solutions : collection of Solution instances
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame
    """
    results = []
    for solution in platypus.unique(solutions):
        vars = transform_variables(solution.problem, solution.variables)  # @ReservedAssignment

        decision_vars = dict(zip(dvnames, vars))
        decision_out = dict(zip(outcome_names, solution.objectives))

        result = decision_vars.copy()
        result.update(decision_out)

        results.append(result)

    results = pd.DataFrame(results, columns=dvnames + outcome_names)
    return results


def process_uncertainties(jobs):
    """Helper function to map jobs generated by platypus to Scenario objects

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    scenarios = []

    jobs = _process(jobs, problem)
    for i, job in enumerate(jobs):
        name = str(i)
        scenario = Scenario(name=name, **job)
        scenarios.append(scenario)

    policies = problem.reference

    return scenarios, policies


def process_levers(jobs):
    """Helper function to map jobs generated by platypus to Policy objects

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    policies = []
    jobs = _process(jobs, problem)
    for i, job in enumerate(jobs):
        name = str(i)
        job = Policy(name=name, **job)
        policies.append(job)

    scenarios = problem.reference

    return scenarios, policies


def _process(jobs, problem):
    """Helper function to transform a platypus job into a dict with the
    correct values for the workbench"""
    processed_jobs = []
    for job in jobs:
        variables = transform_variables(problem, job.solution.variables)
        processed_job = {}
        for param, var in zip(problem.parameters, variables):
            try:
                var = var.value
            except AttributeError:
                pass
            processed_job[param.name] = var
        processed_jobs.append(processed_job)
    return processed_jobs


def process_robust(jobs):
    """Helper function to process robust optimization jobs

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    _, policies = process_levers(jobs)
    scenarios = jobs[0].solution.problem.scenarios

    return scenarios, policies


def transform_variables(problem, variables):
    """Helper function for transforming platypus variables"""
    converted_vars = []
    for type, var in zip(problem.types, variables):  # @ReservedAssignment
        var = type.decode(var)
        try:
            var = var[0]
        except TypeError:
            pass

        converted_vars.append(var)
    return converted_vars


def evaluate(jobs_collection, experiments, outcomes, problem):
    """Helper function for mapping the results from perform_experiments back
    to what platypus needs"""
    searchover = problem.searchover
    outcome_names = problem.outcome_names
    constraints = problem.ema_constraints

    if searchover == "levers":
        column = "policy"
    else:
        column = "scenario"

    for entry, job in jobs_collection:
        logical = experiments[column] == entry.name

        job_outputs = {}
        for k, v in outcomes.items():
            job_outputs[k] = v[logical][0]

        # TODO:: only retain uncertainties
        job_experiment = experiments[logical]
        job_constraints = _evaluate_constraints(job_experiment, job_outputs, constraints)
        job_outcomes = [job_outputs[key] for key in outcome_names]

        if job_constraints:
            job.solution.problem.function = lambda _: (job_outcomes, job_constraints)
        else:
            job.solution.problem.function = lambda _: job_outcomes
        job.solution.evaluate()


def evaluate_robust(jobs_collection, experiments, outcomes, problem):
    """Helper function for mapping the results from perform_experiments back
    to what platypus needs"""
    robustness_functions = problem.robustness_functions
    constraints = problem.ema_constraints

    for entry, job in jobs_collection:
        logical = experiments["policy"] == entry.name

        job_outcomes_dict = {}
        job_outcomes = []
        for rf in robustness_functions:
            data = [outcomes[var_name][logical] for var_name in rf.variable_name]
            score = rf.function(*data)
            job_outcomes_dict[rf.name] = score
            job_outcomes.append(score)

        # TODO:: only retain levers
        job_experiment = experiments[logical].iloc[0]
        job_constraints = _evaluate_constraints(job_experiment, job_outcomes_dict, constraints)

        if job_constraints:
            job.solution.problem.function = lambda _: (job_outcomes, job_constraints)
        else:
            job.solution.problem.function = lambda _: job_outcomes

        job.solution.evaluate()


def _evaluate_constraints(job_experiment, job_outcomes, constraints):
    """Helper function for evaluating the constraints for a given job"""
    job_constraints = []
    for constraint in constraints:
        data = [job_experiment[var] for var in constraint.parameter_names]
        data += [job_outcomes[var] for var in constraint.outcome_names]
        constraint_value = constraint.process(data)
        job_constraints.append(constraint_value)
    return job_constraints


class AbstractConvergenceMetric:
    """Base convergence metric class"""

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.results = []

    def __call__(self, optimizer):
        raise NotImplementedError

    def reset(self):
        self.results = []

    def get_results(self):
        return self.results


class EpsilonProgress(AbstractConvergenceMetric):
    """Epsilon progress convergence metric class"""

    def __init__(self):
        super().__init__("epsilon_progress")

    def __call__(self, optimizer):
        self.results.append(optimizer.algorithm.archive.improvements)
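

# Illustrative sketch (workbench usage, model assumed): convergence metrics
# such as EpsilonProgress are passed to an evaluator's optimize method.
#
#     from ema_workbench import SequentialEvaluator
#
#     with SequentialEvaluator(model) as evaluator:
#         results, convergence = evaluator.optimize(
#             nfe=10000,
#             searchover="levers",
#             epsilons=[0.1],
#             convergence=[EpsilonProgress()],
#         )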


class MetricWrapper:
    """Wrapper class for wrapping platypus indicators

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    kwargs : dict
             any additional keyword arguments to be passed
             on to the wrapped platypus indicator class

    Notes
    -----
    this class relies on multiple inheritance and careful consideration
    of the MRO to conveniently wrap the convergence metrics provided
    by platypus.

    """

    def __init__(self, reference_set, problem, **kwargs):
        self.problem = problem
        reference_set = rebuild_platypus_population(reference_set, self.problem)
        super().__init__(reference_set=reference_set, **kwargs)

    def calculate(self, archive):
        solutions = rebuild_platypus_population(archive, self.problem)
        return super().calculate(solutions)


class HypervolumeMetric(MetricWrapper, Hypervolume):
    """Hypervolume metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance

    This is a thin wrapper around Hypervolume as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    pass
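

# Illustrative sketch: hypervolume is typically computed in post-processing
# over the archives stored by an ArchiveLogger (see below), against a
# reference set merged with epsilon_nondominated; the epsilon value is an
# assumption.
#
#     problem = to_problem(model, searchover="levers")
#     reference_set = epsilon_nondominated(results, [0.05], problem)
#     hv = HypervolumeMetric(reference_set, problem)
#     scores = [(nfe, hv.calculate(archive)) for nfe, archive in archives.items()]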


class GenerationalDistanceMetric(MetricWrapper, GenerationalDistance):
    """GenerationalDistance metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    d : int, default=1
        the power in the generational distance function

    This is a thin wrapper around GenerationalDistance as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    see https://link.springer.com/content/pdf/10.1007/978-3-319-15892-1_8.pdf
    for more information

    """

    pass


class InvertedGenerationalDistanceMetric(MetricWrapper, InvertedGenerationalDistance):
    """InvertedGenerationalDistance metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    d : int, default=1
        the power in the inverted generational distance function

    This is a thin wrapper around InvertedGenerationalDistance as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    see https://link.springer.com/content/pdf/10.1007/978-3-319-15892-1_8.pdf
    for more information

    """

    pass


class EpsilonIndicatorMetric(MetricWrapper, EpsilonIndicator):
    """EpsilonIndicator metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance

    This is a thin wrapper around EpsilonIndicator as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    pass


class SpacingMetric(MetricWrapper, Spacing):
    """Spacing metric

    Parameters
    ----------
    problem : PlatypusProblem instance

    This is a thin wrapper around Spacing as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    def __init__(self, problem):
        self.problem = problem


class HyperVolume(AbstractConvergenceMetric):
    """Hypervolume convergence metric class

    This metric is derived from a hypervolume measure, which describes the
    multi-dimensional volume of space contained within the pareto front. When
    computed with minimums and maximums, it describes the ratio of dominated
    outcomes to all possible outcomes in the extent of the space. Getting this
    number to be high or low is not necessarily important, as not all outcomes
    within the min-max range will be feasible. But having the hypervolume remain
    fairly stable over multiple generations of the evolutionary algorithm provides
    an indicator of convergence.

    Parameters
    ----------
    minimum : numpy array
    maximum : numpy array

    This class is deprecated and will be removed in version 3.0 of the EMAworkbench.
    Use ArchiveLogger instead and calculate hypervolume in post-processing using
    HypervolumeMetric, as also shown in the directed search tutorial.

    """

    def __init__(self, minimum, maximum):
        super().__init__("hypervolume")
        warnings.warn(
            "HyperVolume is deprecated and will be removed in version 3.0 of the EMAworkbench. "
            "Use ArchiveLogger and HypervolumeMetric instead",
            DeprecationWarning,
        )
        self.hypervolume_func = Hypervolume(minimum=minimum, maximum=maximum)

    def __call__(self, optimizer):
        self.results.append(self.hypervolume_func.calculate(optimizer.algorithm.archive))

    @classmethod
    def from_outcomes(cls, outcomes):
        ranges = [o.expected_range for o in outcomes if o.kind != o.INFO]
        minimum, maximum = np.asarray(list(zip(*ranges)))
        return cls(minimum, maximum)


class ArchiveLogger(AbstractConvergenceMetric):
    """Helper class to write the archive to disk at each iteration

    Parameters
    ----------
    directory : str
    decision_varnames : list of str
    outcome_varnames : list of str
    base_filename : str, optional
    """

    def __init__(
        self, directory, decision_varnames, outcome_varnames, base_filename="archives.tar.gz"
    ):
        super().__init__("archive_logger")

        # FIXME how to handle case where directory already exists
        self.directory = os.path.abspath(directory)
        self.temp = os.path.join(self.directory, "tmp")
        os.mkdir(self.temp)

        self.base = base_filename
        self.decision_varnames = decision_varnames
        self.outcome_varnames = outcome_varnames
        self.tarfilename = os.path.join(self.directory, base_filename)

    def __call__(self, optimizer):
        archive = to_dataframe(optimizer.result, self.decision_varnames, self.outcome_varnames)
        archive.to_csv(os.path.join(self.temp, f"{optimizer.nfe}.csv"))

    def reset(self):
        # FIXME what needs to go here?
        pass

    def get_results(self):
        with tarfile.open(self.tarfilename, "w:gz") as z:
            z.add(self.temp, arcname=os.path.basename(self.temp))

        shutil.rmtree(self.temp)
        return None

    @classmethod
    def load_archives(cls, filename):
        """Load the archives stored with the ArchiveLogger

        Parameters
        ----------
        filename : str
                   relative path to file

        Returns
        -------
        dict with nfe as key and dataframe as value
        """
        archives = {}
        with tarfile.open(os.path.abspath(filename)) as fh:
            for entry in fh.getmembers():
                if entry.name.endswith("csv"):
                    key = entry.name.split("/")[1][:-4]
                    archives[int(key)] = pd.read_csv(fh.extractfile(entry))
        return archives
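

# Illustrative sketch (directory and filenames are hypothetical): pass an
# ArchiveLogger as a convergence metric during optimization and load the
# stored archives afterwards.
#
#     convergence_metrics = [
#         ArchiveLogger(
#             "./archives",
#             [l.name for l in model.levers],
#             [o.name for o in model.outcomes],
#             base_filename="example.tar.gz",
#         )
#     ]
#     # ... run evaluator.optimize(..., convergence=convergence_metrics) ...
#     archives = ArchiveLogger.load_archives("./archives/example.tar.gz")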


class OperatorProbabilities(AbstractConvergenceMetric):
    """OperatorProbability convergence tracker for use with
    auto-adaptive operator selection.

    Parameters
    ----------
    name : str
    index : int

    State of the art MOEAs like Borg (and the GenerationalBorg provided by the workbench)
    use auto-adaptive operator selection. The algorithm has multiple different evolutionary
    operators. Over the run, it tracks how well each operator is doing in producing fitter
    offspring. The probability of the algorithm using a given evolutionary operator is
    proportional to how well this operator has been doing in producing fitter offspring in
    recent generations. This class can be used to track these probabilities over the
    run of the algorithm.

    """

    def __init__(self, name, index):
        super().__init__(name)
        self.index = index

    def __call__(self, optimizer):
        try:
            props = optimizer.algorithm.variator.probabilities
            self.results.append(props[self.index])
        except AttributeError:
            pass
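

# Illustrative sketch: one tracker per operator, where the index corresponds
# to the position of the operator in the algorithm's Multimethod variator
# (the labels below are assumptions).
#
#     convergence_metrics = [
#         OperatorProbabilities("SBX", 0),
#         OperatorProbabilities("PCX", 1),
#         OperatorProbabilities("DE", 2),
#     ]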


def epsilon_nondominated(results, epsilons, problem):
    """Merge the list of results into a single set of
    non-dominated results using the provided epsilon values

    Parameters
    ----------
    results : list of DataFrames
    epsilons : epsilon values for each objective
    problem : PlatypusProblem instance

    Returns
    -------
    DataFrame

    Notes
    -----
    this is a platypus-based alternative to pareto.py (https://github.com/matthewjwoodruff/pareto.py)
    """
    if problem.nobjs != len(epsilons):
        raise ValueError(
            f"The number of epsilon values ({len(epsilons)}) must match the number of objectives ({problem.nobjs})"
        )

    results = pd.concat(results, ignore_index=True)
    solutions = rebuild_platypus_population(results, problem)
    archive = EpsilonBoxArchive(epsilons)
    archive += solutions

    return to_dataframe(archive, problem.parameter_names, problem.outcome_names)
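

# Illustrative sketch: merge results from several random seeds into a single
# nondominated reference set (result names and epsilon value are assumptions).
#
#     problem = to_problem(model, searchover="levers")
#     reference_set = epsilon_nondominated([results_seed1, results_seed2], [0.05], problem)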


class Convergence(ProgressTrackingMixIn):
    """Helper class for tracking convergence of optimization"""

    valid_metrics = {"hypervolume", "epsilon_progress", "archive_logger"}

    def __init__(self, metrics, max_nfe, convergence_freq=1000, logging_freq=5, log_progress=False):
        super().__init__(
            max_nfe,
            logging_freq,
            _logger,
            log_progress=log_progress,
            log_func=lambda self: f"generation {self.generation}, {self.i}/{self.max_nfe}",
        )

        self.max_nfe = max_nfe
        self.generation = -1
        self.index = []
        self.last_check = 0

        if metrics is None:
            metrics = []

        self.metrics = metrics
        self.convergence_freq = convergence_freq
        self.logging_freq = logging_freq

        # TODO what is the point of this code?
        for metric in metrics:
            assert isinstance(metric, AbstractConvergenceMetric)
            metric.reset()

    def __call__(self, optimizer, force=False):
        """Stores convergence information at the specified convergence
        frequency.

        Parameters
        ----------
        optimizer : platypus optimizer instance
        force : boolean, optional
                if True, convergence information will always be stored
                if False, convergence information will be stored if the
                number of nfe since the last time of storing is equal to
                or higher than convergence_freq

        The primary use case for force is to ensure convergence information
        is stored once the stopping condition of the optimizer has been
        reached, so that the final convergence information is kept.

        """
        nfe = optimizer.nfe
        super().__call__(nfe - self.i)

        self.generation += 1

        if (nfe >= self.last_check + self.convergence_freq) or (self.last_check == 0) or force:
            self.index.append(nfe)
            self.last_check = nfe

            for metric in self.metrics:
                metric(optimizer)

    def to_dataframe(self):
        progress = {
            metric.name: result for metric in self.metrics if (result := metric.get_results())
        }

        progress = pd.DataFrame.from_dict(progress)

        if not progress.empty:
            progress["nfe"] = self.index

        return progress


def rebuild_platypus_population(archive, problem):
    """Rebuild a population of platypus Solution instances

    Parameters
    ----------
    archive : DataFrame
    problem : PlatypusProblem instance

    Returns
    -------
    list of platypus Solutions

    """
    expected_columns = problem.nvars + problem.nobjs
    actual_columns = len(archive.columns)

    if actual_columns != expected_columns:
        raise EMAError(
            f"The number of columns in the archive ({actual_columns}) does not match the "
            f"expected number of decision variables and objectives ({expected_columns})."
        )

    solutions = []
    for row in archive.itertuples():
        try:
            decision_variables = [getattr(row, attr) for attr in problem.parameter_names]
        except AttributeError:
            missing_parameters = [
                attr for attr in problem.parameter_names if not hasattr(row, attr)
            ]
            raise EMAError(f"Parameter names {missing_parameters} not found in archive")

        try:
            objectives = [getattr(row, attr) for attr in problem.outcome_names]
        except AttributeError:
            missing_outcomes = [attr for attr in problem.outcome_names if not hasattr(row, attr)]
            raise EMAError(f"Outcome names {missing_outcomes} not found in archive")

        solution = Solution(problem)
        solution.variables = [
            platypus_type.encode(value)
            for platypus_type, value in zip(problem.types, decision_variables)
        ]
        solution.objectives = objectives
        solutions.append(solution)
    return solutions
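

# Illustrative sketch: this rebuilding is what the metric wrappers above do
# internally; it can also be used directly to feed a stored archive to a
# platypus indicator (archive_df is hypothetical).
#
#     solutions = rebuild_platypus_population(archive_df, problem)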


class CombinedVariator(Variator):
    def __init__(self, crossover_prob=0.5, mutation_prob=1):
        super().__init__(2)
        self.SBX = platypus.SBX()
        self.crossover_prob = crossover_prob
        self.mutation_prob = mutation_prob

    def evolve(self, parents):
        child1 = copy.deepcopy(parents[0])
        child2 = copy.deepcopy(parents[1])
        problem = child1.problem

        # crossover: evolve each decision variable of the individuals
        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.crossover_prob:
                klass = kind.__class__
                child1, child2 = self._crossover[klass](self, child1, child2, i, kind)
                child1.evaluated = False
                child2.evaluated = False

        # mutate
        for child in [child1, child2]:
            self.mutate(child)

        return [child1, child2]

    def mutate(self, child):
        problem = child.problem

        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.mutation_prob:
                klass = kind.__class__
                child = self._mutate[klass](self, child, i, kind)
                child.evaluated = False

    def crossover_real(self, child1, child2, i, type):  # @ReservedAssignment
        # simulated binary crossover (SBX)
        x1 = float(child1.variables[i])
        x2 = float(child2.variables[i])
        lb = type.min_value
        ub = type.max_value

        x1, x2 = self.SBX.sbx_crossover(x1, x2, lb, ub)

        child1.variables[i] = x1
        child2.variables[i] = x2

        return child1, child2

    def crossover_integer(self, child1, child2, i, type):  # @ReservedAssignment
        # half uniform crossover (HUX)
        for j in range(type.nbits):
            if child1.variables[i][j] != child2.variables[i][j]:
                if bool(random.getrandbits(1)):
                    child1.variables[i][j] = not child1.variables[i][j]
                    child2.variables[i][j] = not child2.variables[i][j]
        return child1, child2

    def crossover_categorical(self, child1, child2, i, type):  # @ReservedAssignment
        # subset crossover (SSX)
        # can probably be implemented in a simpler manner, since the size
        # of the subset is fixed to 1

        s1 = set(child1.variables[i])
        s2 = set(child2.variables[i])

        for j in range(type.size):
            if (
                (child2.variables[i][j] not in s1)
                and (child1.variables[i][j] not in s2)
                and (random.random() < 0.5)
            ):
                temp = child1.variables[i][j]
                child1.variables[i][j] = child2.variables[i][j]
                child2.variables[i][j] = temp

        return child1, child2

    def mutate_real(self, child, i, type, distribution_index=20):  # @ReservedAssignment
        # polynomial mutation (PM)
        x = child.variables[i]
        lower = type.min_value
        upper = type.max_value

        u = random.random()
        dx = upper - lower

        if u < 0.5:
            bl = (x - lower) / dx
            b = 2.0 * u + (1.0 - 2.0 * u) * pow(1.0 - bl, distribution_index + 1.0)
            delta = pow(b, 1.0 / (distribution_index + 1.0)) - 1.0
        else:
            bu = (upper - x) / dx
            b = 2.0 * (1.0 - u) + 2.0 * (u - 0.5) * pow(1.0 - bu, distribution_index + 1.0)
            delta = 1.0 - pow(b, 1.0 / (distribution_index + 1.0))

        x = x + delta * dx
        x = max(lower, min(x, upper))

        child.variables[i] = x
        return child

    def mutate_integer(self, child, i, type, probability=1):  # @ReservedAssignment
        # bit flip mutation
        for j in range(type.nbits):
            if random.random() <= probability:
                child.variables[i][j] = not child.variables[i][j]
        return child

    def mutate_categorical(self, child, i, type):  # @ReservedAssignment
        # replace a random member of the subset with a random non-member
        probability = 1 / type.size

        if random.random() <= probability:
            subset = child.variables[i]

            if len(subset) < len(type.elements):
                j = random.randrange(len(subset))

                nonmembers = list(set(type.elements) - set(subset))
                k = random.randrange(len(nonmembers))
                subset[j] = nonmembers[k]

            child.variables[i] = subset

        return child

    _crossover = {
        Real: crossover_real,
        Integer: crossover_integer,
        Subset: crossover_categorical,
    }

    _mutate = {
        Real: mutate_real,
        Integer: mutate_integer,
        Subset: mutate_categorical,
    }


def _optimize(
    problem,
    evaluator,
    algorithm,
    convergence,
    nfe,
    convergence_freq,
    logging_freq,
    variator=None,
    **kwargs,
):
    klass = problem.types[0].__class__

    try:
        eps_values = kwargs["epsilons"]
    except KeyError:
        pass
    else:
        if len(eps_values) != len(problem.outcome_names):
            raise EMAError("Number of epsilon values does not match number of outcomes")

    # if the decision variables are of mixed types, a combined variator is needed
    if variator is None and not all(isinstance(t, klass) for t in problem.types):
        variator = CombinedVariator()

    optimizer = algorithm(
        problem, evaluator=evaluator, variator=variator, log_frequency=500, **kwargs
    )

    convergence = Convergence(
        convergence, nfe, convergence_freq=convergence_freq, logging_freq=logging_freq
    )
    callback = functools.partial(convergence, optimizer)
    evaluator.callback = callback

    with temporary_filter(name=[callbacks.__name__, evaluators.__name__], level=INFO):
        optimizer.run(nfe)

    convergence(optimizer, force=True)

    results = to_dataframe(optimizer.result, problem.parameter_names, problem.outcome_names)
    convergence = convergence.to_dataframe()

    message = "optimization completed, found {} solutions"
    _logger.info(message.format(len(optimizer.archive)))

    if convergence.empty:
        return results
    else:
        return results, convergence


class BORGDefaultDescriptor:
    # this treats defaults as class level attributes!

    def __init__(self, default_function):
        self.default_function = default_function

    def __get__(self, instance, owner):
        return self.default_function(instance.problem.nvars)

    def __set_name__(self, owner, name):
        self.name = name


class GenerationalBorg(EpsilonProgressContinuation):
    """A generational implementation of the BORG Framework

    This algorithm adopts Epsilon Progress Continuation and Auto Adaptive
    Operator Selection, but embeds them within the NSGAII generational
    algorithm, rather than the steady state implementation used by the BORG
    algorithm.

    The parametrization of all operators is based on the default values as used
    in Borg 1.9.

    Note:: limited to RealParameters only.

    """

    pm_p = BORGDefaultDescriptor(lambda x: 1 / x)
    pm_dist = 20

    sbx_prop = 1
    sbx_dist = 15

    de_rate = 0.1
    de_stepsize = 0.5

    um_p = BORGDefaultDescriptor(lambda x: 1 / x)

    spx_nparents = 10
    spx_noffspring = 2
    spx_expansion = 0.3

    pcx_nparents = 10
    pcx_noffspring = 2
    pcx_eta = 0.1
    pcx_zeta = 0.1

    undx_nparents = 10
    undx_noffspring = 2
    undx_zeta = 0.5
    undx_eta = 0.35

    def __init__(
        self,
        problem,
        epsilons,
        population_size=100,
        generator=RandomGenerator(),
        selector=TournamentSelector(2),
        variator=None,
        **kwargs,
    ):
        self.problem = problem

        # Parameterization taken from
        # Borg: An Auto-Adaptive MOEA Framework - Hadka, Reed
        variators = [
            GAOperator(
                SBX(probability=self.sbx_prop, distribution_index=self.sbx_dist),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                PCX(
                    nparents=self.pcx_nparents,
                    noffspring=self.pcx_noffspring,
                    eta=self.pcx_eta,
                    zeta=self.pcx_zeta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                DifferentialEvolution(crossover_rate=self.de_rate, step_size=self.de_stepsize),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                UNDX(
                    nparents=self.undx_nparents,
                    noffspring=self.undx_noffspring,
                    zeta=self.undx_zeta,
                    eta=self.undx_eta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                SPX(
                    nparents=self.spx_nparents,
                    noffspring=self.spx_noffspring,
                    expansion=self.spx_expansion,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            UM(probability=self.um_p),
        ]

        variator = Multimethod(self, variators)

        super().__init__(
            NSGAII(
                problem,
                population_size,
                generator,
                selector,
                variator,
                EpsilonBoxArchive(epsilons),
                **kwargs,
            )
        )
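

# Illustrative sketch (model assumed; epsilon value is an assumption):
# GenerationalBorg can be passed as the algorithm to an evaluator's optimize
# method.
#
#     with SequentialEvaluator(model) as evaluator:
#         results = evaluator.optimize(
#             algorithm=GenerationalBorg, nfe=10000, searchover="levers", epsilons=[0.1]
#         )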