# ema_workbench/em_framework/optimization.py
"""


"""
import copy
import functools
import os
import random
import shutil
import tarfile
import warnings

import numpy as np
import pandas as pd


from . import callbacks, evaluators
from .points import Scenario, Policy
from .outcomes import AbstractOutcome
from .parameters import IntegerParameter, RealParameter, CategoricalParameter, BooleanParameter
from .samplers import determine_parameters
from .util import determine_objects, ProgressTrackingMixIn
from ..util import get_module_logger, EMAError, temporary_filter, INFO

try:
    from platypus import (
        EpsNSGAII,
        Hypervolume,
        EpsilonIndicator,
        GenerationalDistance,
        Variator,
        Real,
        Integer,
        Subset,
        EpsilonProgressContinuation,
        RandomGenerator,
        TournamentSelector,
        NSGAII,
        EpsilonBoxArchive,
        Multimethod,
        GAOperator,
        SBX,
        PM,
        PCX,
        DifferentialEvolution,
        UNDX,
        SPX,
        UM,
        Solution,
        InvertedGenerationalDistance,
        Spacing,
    )  # @UnresolvedImport
    from platypus import Problem as PlatypusProblem

    import platypus


except ImportError:
    warnings.warn("platypus based optimization not available", ImportWarning)

    class PlatypusProblem:
        constraints = []

        def __init__(self, *args, **kwargs):
            pass

    class Variator:
        def __init__(self, *args, **kwargs):
            pass

    class RandomGenerator:
        def __call__(self, *args, **kwargs):
            pass

    class TournamentSelector:
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, *args, **kwargs):
            pass

    class EpsilonProgressContinuation:
        pass

    EpsNSGAII = None
    platypus = None
    Real = Integer = Subset = None

# Created on 5 Jun 2017
#
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>

__all__ = [
    "Problem",
    "RobustProblem",
    "EpsilonProgress",
    "Convergence",
    "ArchiveLogger",
    "OperatorProbabilities",
    "rebuild_platypus_population",
    "HypervolumeMetric",
    "GenerationalDistanceMetric",
    "SpacingMetric",
    "InvertedGenerationalDistanceMetric",
    "EpsilonIndicatorMetric",
    "epsilon_nondominated",
    "to_problem",
    "to_robust_problem",
]
_logger = get_module_logger(__name__)


class Problem(PlatypusProblem):
    """small extension to the platypus Problem object; adds the names of
    the decision variables, the names of the outcomes, and the type of
    search"""

    @property
    def parameter_names(self):
        return [e.name for e in self.parameters]

    def __init__(self, searchover, parameters, outcome_names, constraints, reference=None):
        if constraints is None:
            constraints = []

        super().__init__(len(parameters), len(outcome_names), nconstrs=len(constraints))
        #         assert len(parameters) == len(parameter_names)
        assert searchover in ("levers", "uncertainties", "robust")

        if searchover == "levers":
            assert not reference or isinstance(reference, Scenario)
        elif searchover == "uncertainties":
            assert not reference or isinstance(reference, Policy)
        else:
            assert not reference

        self.searchover = searchover
        self.parameters = parameters
        self.outcome_names = outcome_names
        self.ema_constraints = constraints
        self.constraint_names = [c.name for c in constraints]
        self.reference = reference if reference else 0


class RobustProblem(Problem):
    """small extension to Problem object for robust optimization, adds the
    scenarios and the robustness functions"""

    def __init__(self, parameters, outcome_names, scenarios, robustness_functions, constraints):
        super().__init__("robust", parameters, outcome_names, constraints)
        assert len(robustness_functions) == len(outcome_names)
        self.scenarios = scenarios
        self.robustness_functions = robustness_functions


def to_problem(model, searchover, reference=None, constraints=None):
    """helper function to create Problem object

    Parameters
    ----------
    model : AbstractModel instance
    searchover : str
    reference : Policy or Scenario instance, optional
                overwrites the default scenario when searching over
                levers, or the default policy when searching over
                uncertainties
    constraints : list, optional

    Returns
    -------
    Problem instance

    """

    # extract the levers and the outcomes
    decision_variables = determine_parameters(model, searchover, union=True)

    outcomes = determine_objects(model, "outcomes")
    outcomes = [outcome for outcome in outcomes if outcome.kind != AbstractOutcome.INFO]
    outcome_names = [outcome.name for outcome in outcomes]

    if not outcomes:
        raise EMAError("no outcomes specified to optimize over, all outcomes are of kind=INFO")

    problem = Problem(
        searchover, decision_variables, outcome_names, constraints, reference=reference
    )
    problem.types = to_platypus_types(decision_variables)
    problem.directions = [outcome.kind for outcome in outcomes]
    problem.constraints[:] = "==0"

    return problem


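# Example (illustrative sketch, not part of the original module): given some
# AbstractModel instance ``model`` with levers and non-INFO outcomes defined,
# a Problem for searching over the levers can be created with:
#
#     problem = to_problem(model, searchover="levers")
#     print(problem.parameter_names, problem.outcome_names)

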
def to_robust_problem(model, scenarios, robustness_functions, constraints=None):
    """helper function to create RobustProblem object

    Parameters
    ----------
    model : AbstractModel instance
    scenarios : collection
    robustness_functions : iterable of ScalarOutcomes
    constraints : list, optional

    Returns
    -------
    RobustProblem instance

    """

    # extract the levers and the outcomes
    decision_variables = determine_parameters(model, "levers", union=True)

    outcomes = robustness_functions
    outcomes = [outcome for outcome in outcomes if outcome.kind != AbstractOutcome.INFO]
    outcome_names = [outcome.name for outcome in outcomes]

    if not outcomes:
        raise EMAError("no outcomes specified to optimize over, all outcomes are of kind=INFO")

    problem = RobustProblem(
        decision_variables, outcome_names, scenarios, robustness_functions, constraints
    )

    problem.types = to_platypus_types(decision_variables)
    problem.directions = [outcome.kind for outcome in outcomes]
    problem.constraints[:] = "==0"

    return problem


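# Example (illustrative sketch; ``model``, ``scenarios``, and the outcome name
# "utility" are placeholders): robustness functions are ScalarOutcome instances
# whose function maps an outcome across scenarios to a robustness score:
#
#     robustness_functions = [
#         ScalarOutcome(
#             "mean_utility",
#             kind=ScalarOutcome.MAXIMIZE,
#             variable_name="utility",
#             function=np.mean,
#         )
#     ]
#     problem = to_robust_problem(model, scenarios, robustness_functions)

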
def to_platypus_types(decision_variables):
    """helper function for mapping from workbench parameter types to
    platypus parameter types"""
    # TODO:: should categorical not be platypus.Subset, with size == 1?
    _type_mapping = {
        RealParameter: platypus.Real,
        IntegerParameter: platypus.Integer,
        CategoricalParameter: platypus.Subset,
        BooleanParameter: platypus.Subset,
    }

    types = []
    for dv in decision_variables:
        klass = _type_mapping[type(dv)]

        if not isinstance(dv, (CategoricalParameter, BooleanParameter)):
            decision_variable = klass(dv.lower_bound, dv.upper_bound)
        else:
            decision_variable = klass(dv.categories, 1)

        types.append(decision_variable)
    return types


def to_dataframe(solutions, dvnames, outcome_names):
    """helper function to turn a collection of platypus Solution instances
    into a pandas DataFrame

    Parameters
    ----------
    solutions : collection of Solution instances
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame
    """

    results = []
    for solution in platypus.unique(solutions):
        vars = transform_variables(solution.problem, solution.variables)  # @ReservedAssignment

        decision_vars = dict(zip(dvnames, vars))
        decision_out = dict(zip(outcome_names, solution.objectives))

        result = decision_vars.copy()
        result.update(decision_out)

        results.append(result)

    results = pd.DataFrame(results, columns=dvnames + outcome_names)
    return results


def process_uncertainties(jobs):
    """helper function to map jobs generated by platypus to Scenario objects

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    scenarios = []

    jobs = _process(jobs, problem)
    for i, job in enumerate(jobs):
        name = str(i)
        scenario = Scenario(name=name, **job)
        scenarios.append(scenario)

    policies = problem.reference

    return scenarios, policies


def process_levers(jobs):
    """helper function to map jobs generated by platypus to Policy objects

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    policies = []
    jobs = _process(jobs, problem)
    for i, job in enumerate(jobs):
        name = str(i)
        job = Policy(name=name, **job)
        policies.append(job)

    scenarios = problem.reference

    return scenarios, policies


def _process(jobs, problem):
    """helper function to transform platypus job to dict with correct
    values for workbench"""

    processed_jobs = []
    for job in jobs:
        variables = transform_variables(problem, job.solution.variables)
        processed_job = {}
        for param, var in zip(problem.parameters, variables):
            try:
                var = var.value
            except AttributeError:
                pass
            processed_job[param.name] = var
        processed_jobs.append(processed_job)
    return processed_jobs


def process_robust(jobs):
    """Helper function to process robust optimization jobs

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    _, policies = process_levers(jobs)
    scenarios = jobs[0].solution.problem.scenarios

    return scenarios, policies


def transform_variables(problem, variables):
    """helper function for transforming platypus variables"""

    converted_vars = []
    for type, var in zip(problem.types, variables):  # @ReservedAssignment
        var = type.decode(var)
        try:
            var = var[0]
        except TypeError:
            pass

        converted_vars.append(var)
    return converted_vars


def evaluate(jobs_collection, experiments, outcomes, problem):
    """Helper function for mapping the results from perform_experiments back
    to what platypus needs"""

    searchover = problem.searchover
    outcome_names = problem.outcome_names
    constraints = problem.ema_constraints

    if searchover == "levers":
        column = "policy"
    else:
        column = "scenario"

    for entry, job in jobs_collection:
        logical = experiments[column] == entry.name

        job_outputs = {}
        for k, v in outcomes.items():
            job_outputs[k] = v[logical][0]

        # TODO:: only retain uncertainties
        job_experiment = experiments[logical]
        job_constraints = _evaluate_constraints(job_experiment, job_outputs, constraints)
        job_outcomes = [job_outputs[key] for key in outcome_names]

        if job_constraints:
            job.solution.problem.function = lambda _: (job_outcomes, job_constraints)
        else:
            job.solution.problem.function = lambda _: job_outcomes
        job.solution.evaluate()


def evaluate_robust(jobs_collection, experiments, outcomes, problem):
    """Helper function for mapping the results from perform_experiments back
    to what platypus needs"""

    robustness_functions = problem.robustness_functions
    constraints = problem.ema_constraints

    for entry, job in jobs_collection:
        logical = experiments["policy"] == entry.name

        job_outcomes_dict = {}
        job_outcomes = []
        for rf in robustness_functions:
            data = [outcomes[var_name][logical] for var_name in rf.variable_name]
            score = rf.function(*data)
            job_outcomes_dict[rf.name] = score
            job_outcomes.append(score)

        # TODO:: only retain levers
        job_experiment = experiments[logical].iloc[0]
        job_constraints = _evaluate_constraints(job_experiment, job_outcomes_dict, constraints)

        if job_constraints:
            job.solution.problem.function = lambda _: (job_outcomes, job_constraints)
        else:
            job.solution.problem.function = lambda _: job_outcomes

        job.solution.evaluate()


def _evaluate_constraints(job_experiment, job_outcomes, constraints):
    """Helper function for evaluating the constraints for a given job"""
    job_constraints = []
    for constraint in constraints:
        data = [job_experiment[var] for var in constraint.parameter_names]
        data += [job_outcomes[var] for var in constraint.outcome_names]
        constraint_value = constraint.process(data)
        job_constraints.append(constraint_value)
    return job_constraints


class AbstractConvergenceMetric:
    """base convergence metric class"""

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.results = []

    def __call__(self, optimizer):
        raise NotImplementedError

    def reset(self):
        self.results = []

    def get_results(self):
        return self.results


class EpsilonProgress(AbstractConvergenceMetric):
    """epsilon progress convergence metric class"""

    def __init__(self):
        super().__init__("epsilon_progress")

    def __call__(self, optimizer):
        self.results.append(optimizer.algorithm.archive.improvements)


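# Example (illustrative sketch, assuming the usual workbench workflow in which
# an evaluator's optimize call accepts a list of convergence metrics):
#
#     convergence_metrics = [EpsilonProgress()]
#     results, convergence = evaluator.optimize(
#         nfe=10000, searchover="levers", epsilons=[0.1], convergence=convergence_metrics
#     )

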
class MetricWrapper:
    """wrapper class for wrapping platypus indicators

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    kwargs : dict
             any additional keyword arguments to be passed
             on to the wrapped platypus indicator class

    Notes
    -----
    this class relies on multiple inheritance and careful consideration
    of the MRO to conveniently wrap the convergence metrics provided
    by platypus.

    """

    def __init__(self, reference_set, problem, **kwargs):
        self.problem = problem
        reference_set = rebuild_platypus_population(reference_set, self.problem)
        super().__init__(reference_set=reference_set, **kwargs)

    def calculate(self, archive):
        solutions = rebuild_platypus_population(archive, self.problem)
        return super().calculate(solutions)


class HypervolumeMetric(MetricWrapper, Hypervolume):
    """Hypervolume metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance


    this is a thin wrapper around Hypervolume as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    pass


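# Example (illustrative sketch): hypervolume can be computed after the run over
# logged archives; ``reference_set`` and ``archives`` are placeholders for a
# nondominated reference set and the dict returned by ArchiveLogger.load_archives:
#
#     hv = HypervolumeMetric(reference_set, problem)
#     scores = [(nfe, hv.calculate(archive)) for nfe, archive in archives.items()]
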

class GenerationalDistanceMetric(MetricWrapper, GenerationalDistance):
    """GenerationalDistance metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    d : int, default=1
        the power in the generational distance function


    This is a thin wrapper around GenerationalDistance as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    see https://link.springer.com/content/pdf/10.1007/978-3-319-15892-1_8.pdf
    for more information

    """

    pass


class InvertedGenerationalDistanceMetric(MetricWrapper, InvertedGenerationalDistance):
    """InvertedGenerationalDistance metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance
    d : int, default=1
        the power in the inverted generational distance function


    This is a thin wrapper around InvertedGenerationalDistance as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    see https://link.springer.com/content/pdf/10.1007/978-3-319-15892-1_8.pdf
    for more information

    """

    pass


class EpsilonIndicatorMetric(MetricWrapper, EpsilonIndicator):
    """EpsilonIndicator metric

    Parameters
    ----------
    reference_set : DataFrame
    problem : PlatypusProblem instance


    this is a thin wrapper around EpsilonIndicator as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    pass


class SpacingMetric(MetricWrapper, Spacing):
    """Spacing metric

    Parameters
    ----------
    problem : PlatypusProblem instance


    this is a thin wrapper around Spacing as provided
    by platypus to make it easier to use in conjunction with the
    workbench.

    """

    def __init__(self, problem):
        self.problem = problem


class HyperVolume(AbstractConvergenceMetric):
    """Hypervolume convergence metric class

    This metric is derived from a hypervolume measure, which describes the
    multi-dimensional volume of space contained within the Pareto front. When
    computed with minima and maxima, it describes the ratio of dominated
    outcomes to all possible outcomes in the extent of the space. Getting this
    number to be high or low is not necessarily important, as not all outcomes
    within the min-max range will be feasible. But, having the hypervolume remain
    fairly stable over multiple generations of the evolutionary algorithm provides
    an indicator of convergence.

    Parameters
    ----------
    minimum : numpy array
    maximum : numpy array


    This class is deprecated. Use ArchiveLogger instead and calculate
    hypervolume afterwards using HypervolumeMetric, as shown in the directed
    search tutorial.

    """

    def __init__(self, minimum, maximum):
        super().__init__("hypervolume")
        warnings.warn(
            "HyperVolume is deprecated, use ArchiveLogger and HypervolumeMetric instead",
            DeprecationWarning,
        )
        self.hypervolume_func = Hypervolume(minimum=minimum, maximum=maximum)

    def __call__(self, optimizer):
        self.results.append(self.hypervolume_func.calculate(optimizer.algorithm.archive))

    @classmethod
    def from_outcomes(cls, outcomes):
        ranges = [o.expected_range for o in outcomes if o.kind != o.INFO]
        minimum, maximum = np.asarray(list(zip(*ranges)))
        return cls(minimum, maximum)


class ArchiveLogger(AbstractConvergenceMetric):
    """Helper class to write the archive to disk at each iteration

    Parameters
    ----------
    directory : str
    decision_varnames : list of str
    outcome_varnames : list of str
    base_filename : str, optional
    """

    def __init__(
        self, directory, decision_varnames, outcome_varnames, base_filename="archives.tar.gz"
    ):
        super().__init__("archive_logger")

        # FIXME how to handle case where directory already exists
        self.directory = os.path.abspath(directory)
        self.temp = os.path.join(self.directory, "tmp")
        os.mkdir(self.temp)

        self.base = base_filename
        self.decision_varnames = decision_varnames
        self.outcome_varnames = outcome_varnames
        self.tarfilename = os.path.join(self.directory, base_filename)

        # self.index = 0

    def __call__(self, optimizer):
        archive = to_dataframe(optimizer.result, self.decision_varnames, self.outcome_varnames)
        archive.to_csv(os.path.join(self.temp, f"{optimizer.nfe}.csv"))

    def reset(self):
        # FIXME what needs to go here?
        pass

    def get_results(self):
        with tarfile.open(self.tarfilename, "w:gz") as z:
            z.add(self.temp, arcname=os.path.basename(self.temp))

        shutil.rmtree(self.temp)
        return None

    @classmethod
    def load_archives(cls, filename):
        """load the archives stored with the ArchiveLogger

        Parameters
        ----------
        filename : str
                   relative path to file

        Returns
        -------
        dict with nfe as key and dataframe as value
        """

        archives = {}
        with tarfile.open(os.path.abspath(filename)) as fh:
            for entry in fh.getmembers():
                if entry.name.endswith("csv"):
                    key = entry.name.split("/")[1][:-4]
                    archives[int(key)] = pd.read_csv(fh.extractfile(entry))
        return archives


class OperatorProbabilities(AbstractConvergenceMetric):
    """OperatorProbability convergence tracker for use with
    auto-adaptive operator selection.

    Parameters
    ----------
    name : str
    index : int


    State of the art MOEAs like Borg (and the GenerationalBorg provided by the
    workbench) use auto-adaptive operator selection. The algorithm has multiple
    different evolutionary operators. Over the run, it tracks how well each
    operator is doing in producing fitter offspring. The probability of the
    algorithm using a given evolutionary operator is proportional to how well
    this operator has been doing in producing fitter offspring in recent
    generations. This class can be used to track these probabilities over the
    run of the algorithm.

    """

    def __init__(self, name, index):
        super().__init__(name)
        self.index = index

    def __call__(self, optimizer):
        try:
            props = optimizer.algorithm.variator.probabilities
            self.results.append(props[self.index])
        except AttributeError:
            pass


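# Example (illustrative sketch): GenerationalBorg (defined below) rotates over
# six operators (SBX, PCX, DE, UNDX, SPX, UM), so the selection probabilities
# of, say, the first two can be tracked by index:
#
#     convergence_metrics = [
#         OperatorProbabilities("SBX", 0),
#         OperatorProbabilities("PCX", 1),
#     ]

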
def epsilon_nondominated(results, epsilons, problem):
    """Merge the list of results into a single set of
    non-dominated results using the provided epsilon values

    Parameters
    ----------
    results : list of DataFrames
    epsilons : epsilon values for each objective
    problem : PlatypusProblem instance

    Returns
    -------
    DataFrame

    Notes
    -----
    this is a platypus based alternative to pareto.py (https://github.com/matthewjwoodruff/pareto.py)
    """
    if problem.nobjs != len(epsilons):
        raise ValueError(
            f"the number of epsilon values ({len(epsilons)}) must match the number of objectives ({problem.nobjs})"
        )

    results = pd.concat(results, ignore_index=True)
    solutions = rebuild_platypus_population(results, problem)
    archive = EpsilonBoxArchive(epsilons)
    archive += solutions

    return to_dataframe(archive, problem.parameter_names, problem.outcome_names)


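# Example (illustrative sketch; ``results_per_seed`` is a placeholder list of
# DataFrames from repeated optimization runs): merge several runs into one
# epsilon-nondominated set:
#
#     problem = to_problem(model, searchover="levers")
#     merged = epsilon_nondominated(results_per_seed, [0.1, 0.5], problem)

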
class Convergence(ProgressTrackingMixIn):
    """helper class for tracking convergence of optimization"""

    valid_metrics = {"hypervolume", "epsilon_progress", "archive_logger"}

    def __init__(self, metrics, max_nfe, convergence_freq=1000, logging_freq=5, log_progress=False):
        super().__init__(
            max_nfe,
            logging_freq,
            _logger,
            log_progress=log_progress,
            log_func=lambda self: f"generation {self.generation}, {self.i}/{self.max_nfe}",
        )

        self.max_nfe = max_nfe
        self.generation = -1
        self.index = []
        self.last_check = 0

        if metrics is None:
            metrics = []

        self.metrics = metrics
        self.convergence_freq = convergence_freq
        self.logging_freq = logging_freq

        # TODO what is the point of this code?
        for metric in metrics:
            assert isinstance(metric, AbstractConvergenceMetric)
            metric.reset()

    def __call__(self, optimizer, force=False):
        """Stores convergence information at the specified convergence
        frequency.

        Parameters
        ----------
        optimizer : platypus optimizer instance
        force : boolean, optional
                if True, convergence information will always be stored
                if False, convergence information will be stored only if
                the number of nfe since the last store is equal to or
                higher than convergence_freq


        the primary use case for force is to force convergence information
        to be stored once the stopping condition of the optimizer has been
        reached, so that the final convergence information is kept.

        """
        nfe = optimizer.nfe
        super().__call__(nfe - self.i)

        self.generation += 1

        if (nfe >= self.last_check + self.convergence_freq) or (self.last_check == 0) or force:
            self.index.append(nfe)
            self.last_check = nfe

            for metric in self.metrics:
                metric(optimizer)

    def to_dataframe(self):
        progress = {
            metric.name: result for metric in self.metrics if (result := metric.get_results())
        }

        progress = pd.DataFrame.from_dict(progress)

        if not progress.empty:
            progress["nfe"] = self.index

        return progress


def rebuild_platypus_population(archive, problem):
    """rebuild a population of platypus Solution instances

    Parameters
    ----------
    archive : DataFrame
    problem : PlatypusProblem instance

    Returns
    -------
    list of platypus Solutions

    """
    solutions = []
    for row in archive.itertuples():
        decision_variables = [getattr(row, attr) for attr in problem.parameter_names]
        objectives = [getattr(row, attr) for attr in problem.outcome_names]

        solution = Solution(problem)
        solution.variables = decision_variables
        solution.objectives = objectives
        solutions.append(solution)
    return solutions


class CombinedVariator(Variator):
    def __init__(self, crossover_prob=0.5, mutation_prob=1):
        super().__init__(2)
        self.SBX = platypus.SBX()
        self.crossover_prob = crossover_prob
        self.mutation_prob = mutation_prob

    def evolve(self, parents):
        child1 = copy.deepcopy(parents[0])
        child2 = copy.deepcopy(parents[1])
        problem = child1.problem

        # crossover
        # we will evolve the individual
        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.crossover_prob:
                klass = kind.__class__
                child1, child2 = self._crossover[klass](self, child1, child2, i, kind)
                child1.evaluated = False
                child2.evaluated = False

        # mutate
        for child in [child1, child2]:
            self.mutate(child)

        return [child1, child2]

    def mutate(self, child):
        problem = child.problem

        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.mutation_prob:
                klass = kind.__class__
                child = self._mutate[klass](self, child, i, kind)
                child.evaluated = False

    def crossover_real(self, child1, child2, i, type):  # @ReservedAssignment
        # sbx
        x1 = float(child1.variables[i])
        x2 = float(child2.variables[i])
        lb = type.min_value
        ub = type.max_value

        x1, x2 = self.SBX.sbx_crossover(x1, x2, lb, ub)

        child1.variables[i] = x1
        child2.variables[i] = x2

        return child1, child2

    def crossover_integer(self, child1, child2, i, type):  # @ReservedAssignment
        # HUX()
        for j in range(type.nbits):
            if child1.variables[i][j] != child2.variables[i][j]:
                if bool(random.getrandbits(1)):
                    child1.variables[i][j] = not child1.variables[i][j]
                    child2.variables[i][j] = not child2.variables[i][j]
        return child1, child2

    def crossover_categorical(self, child1, child2, i, type):  # @ReservedAssignment
        # SSX()
        # can probably be implemented in a simpler manner, since the size
        # of the subset is fixed to 1

        s1 = set(child1.variables[i])
        s2 = set(child2.variables[i])

        for j in range(type.size):
            if (
                (child2.variables[i][j] not in s1)
                and (child1.variables[i][j] not in s2)
                and (random.random() < 0.5)
            ):
                temp = child1.variables[i][j]
                child1.variables[i][j] = child2.variables[i][j]
                child2.variables[i][j] = temp

        return child1, child2

    def mutate_real(self, child, i, type, distribution_index=20):  # @ReservedAssignment
        # PM
        x = child.variables[i]
        lower = type.min_value
        upper = type.max_value

        u = random.random()
        dx = upper - lower

        if u < 0.5:
            bl = (x - lower) / dx
            b = 2.0 * u + (1.0 - 2.0 * u) * pow(1.0 - bl, distribution_index + 1.0)
            delta = pow(b, 1.0 / (distribution_index + 1.0)) - 1.0
        else:
            bu = (upper - x) / dx
            b = 2.0 * (1.0 - u) + 2.0 * (u - 0.5) * pow(1.0 - bu, distribution_index + 1.0)
            delta = 1.0 - pow(b, 1.0 / (distribution_index + 1.0))

        x = x + delta * dx
        x = max(lower, min(x, upper))

        child.variables[i] = x
        return child

    def mutate_integer(self, child, i, type, probability=1):  # @ReservedAssignment
        # bitflip
        for j in range(type.nbits):
            if random.random() <= probability:
                child.variables[i][j] = not child.variables[i][j]
        return child

    def mutate_categorical(self, child, i, type):  # @ReservedAssignment
        # replace
        probability = 1 / type.size

        if random.random() <= probability:
            subset = child.variables[i]

            if len(subset) < len(type.elements):
                j = random.randrange(len(subset))

                nonmembers = list(set(type.elements) - set(subset))
                k = random.randrange(len(nonmembers))
                subset[j] = nonmembers[k]

            child.variables[i] = subset

        return child

    _crossover = {
        Real: crossover_real,
        Integer: crossover_integer,
        Subset: crossover_categorical,
    }

    _mutate = {
        Real: mutate_real,
        Integer: mutate_integer,
        Subset: mutate_categorical,
    }


def _optimize(
    problem,
    evaluator,
    algorithm,
    convergence,
    nfe,
    convergence_freq,
    logging_freq,
    variator=None,
    **kwargs,
):
    klass = problem.types[0].__class__

    try:
        eps_values = kwargs["epsilons"]
    except KeyError:
        pass
    else:
        if len(eps_values) != len(problem.outcome_names):
            raise EMAError("number of epsilon values does not match number of outcomes")

    if variator is None:
        if all(isinstance(t, klass) for t in problem.types):
            variator = None
        else:
            variator = CombinedVariator()
    # mutator = CombinedMutator()

    optimizer = algorithm(
        problem, evaluator=evaluator, variator=variator, log_frequency=500, **kwargs
    )
    # optimizer.mutator = mutator

    convergence = Convergence(
        convergence, nfe, convergence_freq=convergence_freq, logging_freq=logging_freq
    )
    callback = functools.partial(convergence, optimizer)
    evaluator.callback = callback

    with temporary_filter(name=[callbacks.__name__, evaluators.__name__], level=INFO):
        optimizer.run(nfe)

    convergence(optimizer, force=True)

    # convergence.pbar.__exit__(None, None, None)

    results = to_dataframe(optimizer.result, problem.parameter_names, problem.outcome_names)
    convergence = convergence.to_dataframe()

    message = "optimization completed, found {} solutions"
    _logger.info(message.format(len(optimizer.archive)))

    if convergence.empty:
        return results
    else:
        return results, convergence


class BORGDefaultDescriptor:
    # this treats defaults as class level attributes!

    def __init__(self, default_function):
        self.default_function = default_function

    def __get__(self, instance, owner):
        return self.default_function(instance.problem.nvars)

    def __set_name__(self, owner, name):
        self.name = name


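# Example (illustrative sketch): the descriptor derives its value from the
# owning instance's problem, so for a GenerationalBorg (defined below) whose
# problem has four decision variables:
#
#     borg = GenerationalBorg(problem, epsilons=[0.1])  # problem.nvars == 4
#     borg.pm_p  # -> 0.25, recomputed from problem.nvars on each access

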
class GenerationalBorg(EpsilonProgressContinuation):
    """A generational implementation of the BORG Framework

    This algorithm adopts epsilon progress continuation and auto-adaptive
    operator selection, but embeds them within the NSGAII generational
    algorithm, rather than the steady state implementation used by the BORG
    algorithm.

    The parametrization of all operators is based on the default values as used
    in Borg 1.9.

    Note:: limited to RealParameters only.

    """

    pm_p = BORGDefaultDescriptor(lambda x: 1 / x)
    pm_dist = 20

    sbx_prop = 1
    sbx_dist = 15

    de_rate = 0.1
    de_stepsize = 0.5

    um_p = BORGDefaultDescriptor(lambda x: 1 / x)

    spx_nparents = 10
    spx_noffspring = 2
    spx_expansion = 0.3

    pcx_nparents = 10
    pcx_noffspring = 2
    pcx_eta = 0.1
    pcx_zeta = 0.1

    undx_nparents = 10
    undx_noffspring = 2
    undx_zeta = 0.5
    undx_eta = 0.35

    def __init__(
        self,
        problem,
        epsilons,
        population_size=100,
        generator=RandomGenerator(),
        selector=TournamentSelector(2),
        variator=None,
        **kwargs,
    ):
        self.problem = problem

        # Parameterization taken from
        # Borg: An Auto-Adaptive MOEA Framework - Hadka, Reed
        variators = [
            GAOperator(
                SBX(probability=self.sbx_prop, distribution_index=self.sbx_dist),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                PCX(
                    nparents=self.pcx_nparents,
                    noffspring=self.pcx_noffspring,
                    eta=self.pcx_eta,
                    zeta=self.pcx_zeta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                DifferentialEvolution(crossover_rate=self.de_rate, step_size=self.de_stepsize),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                UNDX(
                    nparents=self.undx_nparents,
                    noffspring=self.undx_noffspring,
                    zeta=self.undx_zeta,
                    eta=self.undx_eta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                SPX(
                    nparents=self.spx_nparents,
                    noffspring=self.spx_noffspring,
                    expansion=self.spx_expansion,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            UM(probability=self.um_p),
        ]

        variator = Multimethod(self, variators)

        super().__init__(
            NSGAII(
                problem,
                population_size,
                generator,
                selector,
                variator,
                EpsilonBoxArchive(epsilons),
                **kwargs,
            )
        )