• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

quaquel / EMAworkbench / 18880279604

28 Oct 2025 03:33PM UTC coverage: 92.34% (+3.6%) from 88.699%
18880279604

Pull #424

github

quaquel
Update optimization_convergence_analysis.ipynb

adds citations
Pull Request #424: Optimization improvements

601 of 615 new or added lines in 15 files covered. (97.72%)

2 existing lines in 1 file now uncovered.

8294 of 8982 relevant lines covered (92.34%)

0.92 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.09
/ema_workbench/em_framework/optimization.py
1
"""Wrapper around platypus-opt."""
2

3
import contextlib
1✔
4
import copy
1✔
5
import io
1✔
6
import os
1✔
7
import random
1✔
8
import tarfile
1✔
9
import time
1✔
10
from collections.abc import Iterable
1✔
11
from typing import Literal
1✔
12

13
import numpy as np
1✔
14
import pandas as pd
1✔
15
import platypus
1✔
16
from platypus import (
1✔
17
    NSGAII,
18
    PCX,
19
    PM,
20
    SBX,
21
    SPX,
22
    UM,
23
    UNDX,
24
    DifferentialEvolution,
25
    EpsilonBoxArchive,
26
    GAOperator,
27
    InjectedPopulation,
28
    Integer,
29
    Multimethod,
30
    RandomGenerator,
31
    Real,
32
    Solution,
33
    Subset,
34
    TournamentSelector,
35
    Variator,
36
)
37
from platypus import Problem as PlatypusProblem
1✔
38

39
from ..util import INFO, EMAError, get_module_logger, temporary_filter
1✔
40
from . import callbacks, evaluators
1✔
41
from .outcomes import Constraint, ScalarOutcome
1✔
42
from .parameters import (
1✔
43
    BooleanParameter,
44
    CategoricalParameter,
45
    IntegerParameter,
46
    Parameter,
47
    RealParameter,
48
)
49
from .points import Sample
1✔
50
from .util import ProgressTrackingMixIn
1✔
51

52
# Created on 5 Jun 2017
53
#
54
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
55

56
__all__ = [
1✔
57
    "GenerationalBorg",
58
    "Problem",
59
    "epsilon_nondominated",
60
    "load_archives",
61
    "rebuild_platypus_population",
62
]
63
_logger = get_module_logger(__name__)
1✔
64

65

66
class Problem(PlatypusProblem):
    """Small extension to Platypus problem object.

    Includes the decision variables, outcomes, and constraints,
    any reference Sample(s), and the type of search.

    """

    @property
    def parameter_names(self) -> list[str]:
        """Getter for parameter names."""
        return [e.name for e in self.decision_variables]

    @property
    def outcome_names(self) -> list[str]:
        """Getter for outcome names."""
        return [e.name for e in self.objectives]

    @property
    def constraint_names(self) -> list[str]:
        """Getter for constraint names."""
        return [c.name for c in self.ema_constraints]

    def __init__(
        self,
        searchover: Literal["levers", "uncertainties", "robust"],
        decision_variables: list[Parameter],
        objectives: list[ScalarOutcome],
        constraints: list[Constraint] | None = None,
        reference: Sample | Iterable[Sample] | int | None = None,
    ):
        """Init.

        Parameters
        ----------
        searchover : whether to search over levers, uncertainties, or do robust search
        decision_variables : the parameters being searched over
        objectives : the outcomes to optimize
        constraints : optional constraints on experiments and outcomes
        reference : the reference Sample(s) against which candidate solutions
                    are evaluated, or the number of references; defaults to 1

        Raises
        ------
        ValueError
            if robust search is combined with no reference or a single
            reference Sample, or if any objective has kind INFO.

        """
        if constraints is None:
            constraints = []
        if reference is None:
            reference = 1

        super().__init__(
            len(decision_variables), len(objectives), nconstrs=len(constraints)
        )

        # fixme we can probably get rid of 'robust'
        #    just flip to robust if reference is an iterable
        #    handle most value error checks inside optimize and robust_optimize instead of here
        if (searchover == "robust") and (
            (reference == 1) or isinstance(reference, Sample)
        ):
            # fixed garbled message ("a no or a  single")
            raise ValueError(
                "you cannot use no reference or a single reference scenario for robust optimization"
            )
        for obj in objectives:
            if obj.kind == obj.INFO:
                raise ValueError(
                    f"you need to specify the direction for objective {obj.name}, cannot be INFO"
                )

        self.searchover = searchover
        self.decision_variables = decision_variables
        self.objectives = objectives

        self.ema_constraints = constraints
        self.reference = reference

        # mirror the EMA-level definitions into platypus' own attributes
        self.types[:] = to_platypus_types(decision_variables)
        self.directions[:] = [outcome.kind for outcome in objectives]
        self.constraints[:] = "==0"
132

133

134
def to_platypus_types(decision_variables: Iterable[Parameter]) -> list[platypus.Type]:
    """Helper function for mapping from workbench parameter types to platypus parameter types."""
    _type_mapping = {
        RealParameter: platypus.Real,
        IntegerParameter: platypus.Integer,
        CategoricalParameter: platypus.Subset,
        BooleanParameter: platypus.Subset,
    }

    def _convert(parameter):
        # category-based parameters wrap their categories in a size-1 subset;
        # the numeric ones are constructed from their bounds
        platypus_class = _type_mapping[type(parameter)]
        if isinstance(parameter, CategoricalParameter | BooleanParameter):
            return platypus_class(parameter.categories, 1)
        return platypus_class(parameter.lower_bound, parameter.upper_bound)

    return [_convert(dv) for dv in decision_variables]
154

155

156
def to_dataframe(
    solutions: Iterable[platypus.Solution], dvnames: list[str], outcome_names: list[str]
):
    """Helper function to turn a collection of platypus Solution instances into a pandas DataFrame.

    Parameters
    ----------
    solutions : collection of Solution instances
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame
    """
    rows = []
    # platypus.unique drops duplicate solutions before conversion
    for solution in platypus.unique(solutions):
        row = Sample._from_platypus_solution(solution).copy()
        row.update(dict(zip(outcome_names, solution.objectives)))
        rows.append(row)

    return pd.DataFrame(rows, columns=dvnames + outcome_names)
184

185

186
def process_jobs(jobs: list[platypus.core.EvaluateSolution]):
    """Helper function to map jobs generated by platypus to Sample instances.

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    searchover = problem.searchover
    references = problem.reference

    samples = [Sample._from_platypus_solution(job.solution) for job in jobs]

    # the return order is always (scenarios, policies); which side the
    # candidate samples land on depends on what is being searched over
    if searchover in ("levers", "robust"):
        return references, samples
    if searchover == "uncertainties":
        return samples, references
    raise ValueError(
        f"unknown value for searchover, got {searchover} should be one of 'levers', 'uncertainties', or 'robust'"
    )
214

215

216
def evaluate(
    jobs_collection: Iterable[tuple[Sample, platypus.core.EvaluateSolution]],
    experiments: pd.DataFrame,
    outcomes: dict[str, np.ndarray],
    problem: Problem,
):
    """Helper function for mapping the results from perform_experiments back to what platypus needs.

    For each (sample, job) pair, the rows of ``experiments`` and ``outcomes``
    belonging to that sample are selected, constraints are evaluated, and the
    resulting objective (and constraint) values are installed as the
    problem's evaluation function before ``job.solution.evaluate()`` is
    called.
    """
    searchover = problem.searchover
    outcome_names = problem.outcome_names
    constraints = problem.ema_constraints

    # the experiments column identifying which rows belong to a given sample
    column = "scenario" if searchover == "uncertainties" else "policy"

    for sample, job in jobs_collection:
        # boolean mask selecting the rows generated for this sample
        logical = experiments[column] == sample.name

        job_outputs = {}
        for k, v in outcomes.items():
            job_outputs[k] = v[logical]

        # TODO:: only retain decision variables
        job_experiment = experiments[logical]

        if searchover == "levers" or searchover == "uncertainties":
            # single reference, so each masked outcome array holds one value
            job_outputs = {k: v[0] for k, v in job_outputs.items()}
        else:
            # robust search: collapse per-scenario outcomes into one
            # robustness score per objective
            robustness_scores = {}
            for obj in problem.objectives:
                # NOTE(review): this reads the full `outcomes` arrays rather
                # than the rows selected by `logical` above — confirm whether
                # the robustness function is meant to see all experiments.
                data = [outcomes[var_name] for var_name in obj.variable_name]
                score = obj.function(*data)
                robustness_scores[obj.name] = score
            job_outputs = robustness_scores
            job_experiment = job_experiment.iloc[
                0
            ]  # we only need a single row with the levers here

        job_constraints = _evaluate_constraints(
            job_experiment, job_outputs, constraints
        )
        job_outcomes = [job_outputs[key] for key in outcome_names]

        # bind this job's results via default arguments so each lambda
        # captures its own values rather than the loop variables
        if job_constraints:
            job.solution.problem.function = (
                lambda _, job_outcomes=job_outcomes, job_constraints=job_constraints: (
                    job_outcomes,
                    job_constraints,
                )
            )
        else:
            job.solution.problem.function = (
                lambda _, job_outcomes=job_outcomes: job_outcomes
            )
        job.solution.evaluate()
269

270

271
def _evaluate_constraints(
    job_experiment: pd.Series,
    job_outcomes: dict[str, float | int],
    constraints: list[Constraint],
):
    """Helper function for evaluating the constraints for a given job."""
    values = []
    for constraint in constraints:
        # inputs: first the relevant experiment parameters, then the outcomes
        inputs = [job_experiment[name] for name in constraint.parameter_names]
        inputs.extend(job_outcomes[name] for name in constraint.outcome_names)
        values.append(constraint.process(inputs))
    return values
284

285

286
class ProgressBarExtension(platypus.extensions.FixedFrequencyExtension):
    """Small platypus extension showing a progress bar."""

    def __init__(self, total_nfe: int, frequency: int = 100):
        """Init."""
        super().__init__(frequency=frequency)

        def _log_message(tracker):
            # tracker is the ProgressTrackingMixIn instance itself
            return f"generation {tracker.generation}, {tracker.i}/{tracker.max_nfe}"

        self.progress_tracker = ProgressTrackingMixIn(
            total_nfe,
            frequency,
            _logger,
            log_func=_log_message,
        )

    def do_action(self, algorithm):
        """Update the progress bar."""
        # report only the increment since the previous update
        delta = algorithm.nfe - self.progress_tracker.i
        self.progress_tracker(delta)
304

305

306
class ArchiveStorageExtension(platypus.extensions.FixedFrequencyExtension):
    """Extension that stores the archive to a tarball at a fixed frequency.

    Parameters
    ----------
    directory : str
    decision_variable_names : list of the names of the decision variables
    outcome_names : list of names of the outcomes of interest
    filename : the name of the tarball
    frequency : int
        The frequency the action occurs.
    by_nfe : bool
        If :code:`True`, the frequency is given in number of function
        evaluations.  If :code:`False`, the frequency is given in the number
        of iterations.

    Raises
    ------
    FileExistsError if tarfile already exists.

    """

    def __init__(
        self,
        decision_variable_names: list[str],
        outcome_names: list[str],
        directory: str | None = None,
        filename: str | None = None,
        frequency: int = 1000,
        by_nfe: bool = True,
    ):
        """Init."""
        super().__init__(frequency=frequency, by_nfe=by_nfe)
        self.decision_variable_names = decision_variable_names
        self.outcome_names = outcome_names
        # NOTE(review): directory and filename default to None but are passed
        # straight into os.path.join, which raises TypeError on None —
        # confirm callers always supply both.
        # NOTE(review): self.temp is assigned but not used within this class.
        self.temp = os.path.join(directory, "tmp")
        self.tar_filename = os.path.join(os.path.abspath(directory), filename)

        # refuse to append to an existing tarball so archives from
        # different runs are never silently mixed
        if os.path.exists(self.tar_filename):
            raise FileExistsError(
                f"File {self.tar_filename} for storing the archives already exists."
            )

    def do_action(self, algorithm: platypus.algorithms.AbstractGeneticAlgorithm):
        """Add the current archive to the tarball."""
        # broadens the algorithms in platypus we can support automagically
        try:
            data = algorithm.archive
        except AttributeError:
            data = algorithm.result

        # fixme, this opens and closes the tarball everytime
        #   can't we open in in the init and have a clean way to close it
        #   on any exit?
        with tarfile.open(self.tar_filename, "a") as f:
            archive = to_dataframe(
                data, self.decision_variable_names, self.outcome_names
            )
            # serialize the archive to an in-memory CSV and add it as a tar
            # member named after the current nfe count
            stream = io.BytesIO()
            archive.to_csv(stream, encoding="UTF-8", index=False)
            stream.seek(0)
            tarinfo = tarfile.TarInfo(f"{algorithm.nfe}.csv")
            tarinfo.size = len(stream.getbuffer())
            tarinfo.mtime = time.time()
            f.addfile(tarinfo, stream)
370

371

372
class RuntimeConvergenceTracking(platypus.extensions.FixedFrequencyExtension):
    """Platypus Extension for tracking runtime convergence information.

    This extension tracks runtime information that cannot be retrieved from the archives that are stored. Specifically,
    it automatically tries to track epsilon progress and the operator probabilities in case of a MultiMethod
    variator.

    """

    def __init__(
        self,
        frequency: int = 1000,
        by_nfe: bool = True,
    ):
        """Init."""
        super().__init__(frequency=frequency, by_nfe=by_nfe)
        # one dict per do_action call; turned into a DataFrame afterwards
        self.data = []
        # NOTE(review): attributes_to_try is not referenced anywhere in this
        # class — confirm whether it is used elsewhere or is dead code.
        self.attributes_to_try = ["nfe"]

    def do_action(self, algorithm: platypus.algorithms.AbstractGeneticAlgorithm):
        """Retrieve the runtime convergence information."""
        runtime_info = {}
        runtime_info["nfe"] = algorithm.nfe

        # epsilon progress only exists on epsilon-based archives
        with contextlib.suppress(AttributeError):
            runtime_info["epsilon_progress"] = algorithm.archive.improvements

        variator = algorithm.variator
        if isinstance(variator, Multimethod):
            # record the current selection probability of each operator,
            # unwrapping GAOperator to report the name of its variation step
            for method, prob in zip(variator.variators, variator.probabilities):
                if isinstance(method, GAOperator):
                    method = method.variation  # noqa: PLW2901

                runtime_info[method.__class__.__name__] = prob

        self.data.append(runtime_info)

    def to_dataframe(self):
        """Return the tracked runtime information as a DataFrame."""
        return pd.DataFrame(self.data)
410

411

412
def load_archives(path_to_file: str) -> list[tuple[int, pd.DataFrame]]:
    """Returns a list of stored archives.

    Each entry in the list is a tuple. The first element is the number of
    nfe, the second is the archive at that number of nfe.

    Parameters
    ----------
    path_to_file : the path to the archive

    """
    archives = []
    with tarfile.open(path_to_file, "r") as tarball:
        for member_name in tarball.getnames():
            # member names are "<nfe>.csv", so the stem is the nfe count
            nfe = int(member_name.split(".")[0])
            member = tarball.extractfile(member_name)
            archives.append((nfe, pd.read_csv(member)))
    return archives
433

434

435
def epsilon_nondominated(
    results: list[pd.DataFrame], epsilons: list[float], problem: Problem
) -> pd.DataFrame:
    """Merge the list of results into a single set of non dominated results using the provided epsilon values.

    Parameters
    ----------
    results : list of DataFrames
    epsilons : epsilon values for each objective
    problem : PlatypusProblem instance

    Returns
    -------
    DataFrame

    Notes
    -----
    this is a platypus based alternative to pareto.py (https://github.com/matthewjwoodruff/pareto.py)
    """
    if len(epsilons) != problem.nobjs:
        raise ValueError(
            f"The number of epsilon values ({len(epsilons)}) must match the number of objectives {problem.nobjs}"
        )

    # pool everything, then let the epsilon box archive keep only the
    # epsilon-nondominated solutions
    combined = pd.concat(results, ignore_index=True)
    archive = EpsilonBoxArchive(epsilons)
    archive += rebuild_platypus_population(combined, problem)

    return to_dataframe(archive, problem.parameter_names, problem.outcome_names)
465

466

467
def rebuild_platypus_population(archive: pd.DataFrame, problem: Problem):
    """Rebuild a population of platypus Solution instances.

    Parameters
    ----------
    archive : DataFrame
    problem : PlatypusProblem instance

    Returns
    -------
    list of platypus Solutions

    Raises
    ------
    EMAError
        if the archive's columns do not match the problem's decision
        variables and objectives.

    """
    # fixme, might this be easier via Sample._to_platypus_solution?
    #   we can just turn each row into a Sample instance directly and then go to a Solution instance
    expected_columns = problem.nvars + problem.nobjs
    actual_columns = len(archive.columns)

    if actual_columns != expected_columns:
        raise EMAError(
            f"The number of columns in the archive ({actual_columns}) does not match the "
            f"expected number of decision variables and objectives ({expected_columns})."
        )

    def _extract(row, names, label):
        # pull the named attributes from the row, raising a clear EMAError
        # listing everything that is missing
        # (also fixes the stray trailing quote the outcome-name message had)
        try:
            return [getattr(row, attr) for attr in names]
        except AttributeError as e:
            missing = [attr for attr in names if not hasattr(row, attr)]
            raise EMAError(f"{label} {missing} not found in archive") from e

    solutions = []
    for row in archive.itertuples():
        decision_variables = _extract(row, problem.parameter_names, "Parameter names")
        objectives = _extract(row, problem.outcome_names, "Outcome names")

        solution = Solution(problem)
        # encode raw values into platypus' internal variable representation
        solution.variables[:] = [
            platypus_type.encode(value)
            for platypus_type, value in zip(problem.types, decision_variables)
        ]
        solution.objectives[:] = objectives
        solutions.append(solution)
    return solutions
523

524

525
class CombinedVariator(Variator):
    """Combined variator.

    Applies a type-appropriate crossover and mutation operator per decision
    variable, so problems mixing Real, Integer, and Subset variables can be
    evolved. Dispatch happens through the ``_crossover`` and ``_mutate``
    tables defined at the bottom of the class.
    """

    def __init__(self, crossover_prob=0.5, mutation_prob=1):
        """Init.

        Parameters
        ----------
        crossover_prob : per-variable probability of applying crossover
        mutation_prob : per-variable probability of applying mutation
        """
        super().__init__(2)  # arity 2: evolve operates on two parents
        self.SBX = platypus.SBX()
        self.crossover_prob = crossover_prob
        self.mutation_prob = mutation_prob

    def evolve(self, parents: list[Solution]) -> tuple[Solution, Solution]:
        """Evolve the provided parents."""
        child1 = copy.deepcopy(parents[0])
        child2 = copy.deepcopy(parents[1])
        problem = child1.problem

        # crossover
        # we will evolve the individual
        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.crossover_prob:
                klass = kind.__class__
                # dispatch on the platypus type of this decision variable
                child1, child2 = self._crossover[klass](self, child1, child2, i, kind)
                child1.evaluated = False
                child2.evaluated = False

        # mutate
        for child in [child1, child2]:
            self.mutate(child)

        return child1, child2

    def mutate(self, child: Solution):
        """Mutate each variable of child in place with probability mutation_prob."""
        problem = child.problem

        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.mutation_prob:
                klass = kind.__class__
                child = self._mutate[klass](self, child, i, kind)
                child.evaluated = False

    def crossover_real(
        self, child1: Solution, child2: Solution, i: int, type: platypus.Real
    ) -> tuple[Solution, Solution]:  # @ReservedAssignment
        """Apply SBX crossover to real-valued variable i of both children."""
        # sbx
        x1 = float(child1.variables[i])
        x2 = float(child2.variables[i])
        lb = type.min_value
        ub = type.max_value

        x1, x2 = self.SBX.sbx_crossover(x1, x2, lb, ub)

        child1.variables[i] = x1
        child2.variables[i] = x2

        return child1, child2

    def crossover_integer(
        self, child1: Solution, child2: Solution, i: int, type: platypus.Integer
    ) -> tuple[Solution, Solution]:  # @ReservedAssignment
        """Apply HUX-style crossover to the bit encoding of integer variable i."""
        # HUX()
        # swap each differing bit with probability 0.5
        for j in range(type.nbits):
            if child1.variables[i][j] != child2.variables[i][j]:  # noqa: SIM102
                if bool(random.getrandbits(1)):
                    child1.variables[i][j] = not child1.variables[i][j]
                    child2.variables[i][j] = not child2.variables[i][j]
        return child1, child2

    def crossover_categorical(
        self, child1: Solution, child2: Solution, i: int, type: platypus.Subset
    ) -> tuple[Solution, Solution]:  # @ReservedAssignment
        """Apply SSX-style crossover to subset variable i (swap with p=0.5)."""
        # SSX()
        # Implemented in a simplified manner, since size of subset is 1

        if (child2.variables[i] != child1.variables[i]) and (random.random() < 0.5):
            temp = child1.variables[i]
            child1.variables[i] = child2.variables[i]
            child2.variables[i] = temp

        return child1, child2

    def mutate_real(
        self, child: Solution, i: int, type: platypus.Real, distribution_index: int = 20
    ) -> Solution:  # @ReservedAssignment
        """Apply polynomial mutation (PM) to real-valued variable i."""
        # PM
        x = child.variables[i]
        lower = type.min_value
        upper = type.max_value

        u = random.random()
        dx = upper - lower

        # polynomial mutation: perturbation magnitude shrinks near the bounds
        if u < 0.5:
            bl = (x - lower) / dx
            b = 2.0 * u + (1.0 - 2.0 * u) * pow(1.0 - bl, distribution_index + 1.0)
            delta = pow(b, 1.0 / (distribution_index + 1.0)) - 1.0
        else:
            bu = (upper - x) / dx
            b = 2.0 * (1.0 - u) + 2.0 * (u - 0.5) * pow(
                1.0 - bu, distribution_index + 1.0
            )
            delta = 1.0 - pow(b, 1.0 / (distribution_index + 1.0))

        x = x + delta * dx
        # clamp the mutated value to the variable's bounds
        x = max(lower, min(x, upper))

        child.variables[i] = x
        return child

    def mutate_integer(
        self, child: Solution, i: int, type: platypus.Integer, probability: float = 1
    ) -> Solution:  # @ReservedAssignment
        """Apply bit-flip mutation to the bit encoding of integer variable i."""
        # bitflip
        for j in range(type.nbits):
            if random.random() <= probability:
                child.variables[i][j] = not child.variables[i][j]
        return child

    def mutate_categorical(
        self, child: Solution, i: int, type: platypus.Subset
    ) -> Solution:  # @ReservedAssignment
        """Replace subset variable i with a randomly chosen other element."""
        # replace, again simplified because len(subset) is 1
        non_members = [
            entry for entry in type.elements if entry.value != child.variables[i]
        ]
        new_value = random.choice(non_members)
        child.variables[i] = new_value.value

        return child

    # dispatch tables mapping each platypus type to the matching operator;
    # entries are plain functions, hence the explicit `self` at call sites
    _crossover = {
        Real: crossover_real,
        Integer: crossover_integer,
        Subset: crossover_categorical,
    }

    _mutate = {
        Real: mutate_real,
        Integer: mutate_integer,
        Subset: mutate_categorical,
    }
664

665

666
def _optimize(
    problem: Problem,
    evaluator: "BaseEvaluator",  # noqa: F821
    algorithm: type[platypus.algorithms.AbstractGeneticAlgorithm],
    nfe: int,
    convergence_freq: int,
    logging_freq: int,
    variator: Variator = None,
    initial_population: Iterable[Sample] | None = None,
    filename: str | None = None,
    directory: str | None = None,
    **kwargs,
) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Helper function for optimization.

    Runs `algorithm` on `problem` for `nfe` function evaluations, storing
    intermediate archives to a tarball and tracking runtime convergence
    information every `convergence_freq` evaluations, and updating the
    progress bar every `logging_freq` evaluations.

    Returns
    -------
    tuple of the final archive as a DataFrame and the runtime convergence
    information as a DataFrame

    Raises
    ------
    ValueError
        if the number of epsilon values does not match the number of outcomes.

    """
    klass = problem.types[0].__class__

    # validate epsilons, if provided, before starting the run
    try:
        eps_values = kwargs["epsilons"]
    except KeyError:
        pass
    else:
        if len(eps_values) != len(problem.outcome_names):
            raise ValueError(
                "Number of epsilon values does not match number of outcomes"
            )

    # with mixed decision-variable types, fall back to the CombinedVariator;
    # for homogeneous types, None lets platypus pick its default operators
    if variator is None:
        if all(isinstance(t, klass) for t in problem.types):
            variator = None
        else:
            variator = CombinedVariator()

    generator = (
        RandomGenerator()
        if initial_population is None
        else InjectedPopulation(
            [sample._to_platypus_solution(problem) for sample in initial_population]
        )
    )

    # NOTE(review): log_frequency is hard-coded to 500 rather than using the
    # logging_freq parameter — confirm whether this is intentional.
    optimizer = algorithm(
        problem,
        evaluator=evaluator,
        variator=variator,
        log_frequency=500,
        generator=generator,
        **kwargs,
    )
    storage = ArchiveStorageExtension(
        problem.parameter_names,
        problem.outcome_names,
        directory=directory,
        filename=filename,
        frequency=convergence_freq,
        by_nfe=True,
    )
    progress_bar = ProgressBarExtension(nfe, frequency=logging_freq)
    runtime_convergence_info = RuntimeConvergenceTracking(frequency=convergence_freq)
    optimizer.add_extension(storage)
    optimizer.add_extension(progress_bar)
    optimizer.add_extension(runtime_convergence_info)

    # silence INFO-level logging from callbacks/evaluators during the run
    with temporary_filter(name=[callbacks.__name__, evaluators.__name__], level=INFO):
        optimizer.run(nfe)

    storage.do_action(
        optimizer
    )  # ensure last archive is included in the convergence information
    runtime_convergence_info.do_action(
        optimizer
    )  # ensure the last convergence information is added as well
    progress_bar.progress_tracker.pbar.__exit__(
        None, None, None
    )  # ensure progress bar is closed correctly

    # not all algorithms maintain an archive; fall back to the result set
    try:
        data = optimizer.archive
    except AttributeError:
        data = optimizer.result

    runtime_convergence = runtime_convergence_info.to_dataframe()

    results = to_dataframe(data, problem.parameter_names, problem.outcome_names)

    _logger.info(f"optimization completed, found {len(data)} solutions")

    return results, runtime_convergence
753

754

755
class GenerationalBorg(NSGAII):
    """A generational implementation of the BORG Framework.

    This algorithm adopts Epsilon Progress Continuation, and Auto Adaptive
    Operator Selection, but embeds them within the NSGAII generational
    algorithm, rather than the steady state implementation used by the BORG
    algorithm.

    The parametrization of all operators is based on the default values as used
    in Borg 1.9.

    Note:: limited to RealParameters only.

    """

    # PM operator parameters; pm_p is set to 1/nvars per problem in __init__
    pm_p = None
    pm_dist = 20

    # SBX operator parameters
    sbx_prop = 1
    sbx_dist = 15

    # DifferentialEvolution operator parameters
    de_rate = 0.1
    de_stepsize = 0.5

    # UM operator parameter; set to 1/nvars per problem in __init__
    um_p = None

    # SPX operator parameters
    spx_nparents = 10
    spx_noffspring = 2
    spx_expansion = 0.3

    # PCX operator parameters
    pcx_nparents = 10
    pcx_noffspring = 2
    pcx_eta = 0.1
    pcx_zeta = 0.1

    # UNDX operator parameters
    undx_nparents = 10
    undx_noffspring = 2
    undx_zeta = 0.5
    undx_eta = 0.35

    def __init__(
        self,
        problem: Problem,
        epsilons: list[float],
        population_size: int = 100,
        generator: platypus.Generator = RandomGenerator(),  # noqa: B008
        selector: platypus.Selector = TournamentSelector(2),  # noqa: B008
        **kwargs,
    ):
        """Init.

        Parameters
        ----------
        problem : the Problem instance to optimize
        epsilons : epsilon values, one per objective, for the epsilon box archive
        population_size : the population size
        generator : generator for the initial population
        selector : selection operator

        """
        # mutation probabilities scale with the number of decision variables
        self.pm_p = 1 / problem.nvars
        self.um_p = 1 / problem.nvars

        # Parameterization taken from
        # Borg: An Auto-Adaptive MOEA Framework - Hadka, Reed
        variators = [
            GAOperator(
                SBX(probability=self.sbx_prop, distribution_index=self.sbx_dist),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                PCX(
                    nparents=self.pcx_nparents,
                    noffspring=self.pcx_noffspring,
                    eta=self.pcx_eta,
                    zeta=self.pcx_zeta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                DifferentialEvolution(
                    crossover_rate=self.de_rate, step_size=self.de_stepsize
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                UNDX(
                    nparents=self.undx_nparents,
                    noffspring=self.undx_noffspring,
                    zeta=self.undx_zeta,
                    eta=self.undx_eta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                SPX(
                    nparents=self.spx_nparents,
                    noffspring=self.spx_noffspring,
                    expansion=self.spx_expansion,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            UM(probability=self.um_p),
        ]

        # auto-adaptive operator selection across the variators above
        kwargs["variator"] = Multimethod(self, variators)
        super().__init__(
            problem,
            population_size=population_size,
            generator=generator,
            selector=selector,
            archive=EpsilonBoxArchive(epsilons),
            **kwargs,
        )
        self.add_extension(platypus.extensions.EpsilonProgressContinuationExtension())
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc