• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

quaquel / EMAworkbench / 18655708277

20 Oct 2025 02:44PM UTC coverage: 92.339% (+3.6%) from 88.699%
18655708277

Pull #424

github

quaquel
Update optimization_convergence_analysis.ipynb
Pull Request #424: Optimization improvements

600 of 614 new or added lines in 15 files covered. (97.72%)

2 existing lines in 1 file now uncovered.

8293 of 8981 relevant lines covered (92.34%)

0.92 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.09
/ema_workbench/em_framework/optimization.py
1
"""Wrapper around platypus-opt."""
2

3
import contextlib
1✔
4
import copy
1✔
5
import io
1✔
6
import os
1✔
7
import random
1✔
8
import tarfile
1✔
9
import time
1✔
10
from collections.abc import Iterable
1✔
11
from typing import Literal
1✔
12

13
import numpy as np
1✔
14
import pandas as pd
1✔
15
import platypus
1✔
16
from platypus import (
1✔
17
    NSGAII,
18
    PCX,
19
    PM,
20
    SBX,
21
    SPX,
22
    UM,
23
    UNDX,
24
    DifferentialEvolution,
25
    EpsilonBoxArchive,
26
    GAOperator,
27
    InjectedPopulation,
28
    Integer,
29
    Multimethod,
30
    RandomGenerator,
31
    Real,
32
    Solution,
33
    Subset,
34
    TournamentSelector,
35
    Variator,
36
)
37
from platypus import Problem as PlatypusProblem
1✔
38

39
from ..util import INFO, EMAError, get_module_logger, temporary_filter
1✔
40
from . import callbacks, evaluators
1✔
41
from .outcomes import Constraint, ScalarOutcome
1✔
42
from .parameters import (
1✔
43
    BooleanParameter,
44
    CategoricalParameter,
45
    IntegerParameter,
46
    Parameter,
47
    RealParameter,
48
)
49
from .points import Sample
1✔
50
from .util import ProgressTrackingMixIn
1✔
51

52
# Created on 5 Jun 2017
53
#
54
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
55

56
__all__ = [
1✔
57
    "GenerationalBorg",
58
    "Problem",
59
    "epsilon_nondominated",
60
    "load_archives",
61
    "rebuild_platypus_population",
62
]
63
_logger = get_module_logger(__name__)
1✔
64

65

66
class Problem(PlatypusProblem):
    """Small extension to Platypus problem object.

    Includes the decision variables, outcomes, and constraints,
    any reference Sample(s), and the type of search.

    """

    @property
    def parameter_names(self) -> list[str]:
        """Getter for parameter names."""
        return [e.name for e in self.decision_variables]

    @property
    def outcome_names(self) -> list[str]:
        """Getter for outcome names."""
        return [e.name for e in self.objectives]

    @property
    def constraint_names(self) -> list[str]:
        """Getter for constraint names."""
        return [c.name for c in self.ema_constraints]

    def __init__(
        self,
        searchover: Literal["levers", "uncertainties", "robust"],
        decision_variables: list[Parameter],
        objectives: list[ScalarOutcome],
        constraints: list[Constraint] | None = None,
        reference: Sample | Iterable[Sample] | int | None = None,
    ):
        """Init.

        Parameters
        ----------
        searchover : whether to search over levers, uncertainties, or do robust optimization
        decision_variables : the parameters to search over
        objectives : the outcomes to optimize; their kind (direction) must not be INFO
        constraints : optional list of constraints, all formulated as equal-to-zero
        reference : the reference Sample(s) to evaluate candidate solutions against;
                    defaults to 1 (a single default reference)

        Raises
        ------
        ValueError
            if robust search is combined with no or a single reference scenario,
            or if any objective has kind INFO.

        """
        if constraints is None:
            constraints = []
        if reference is None:
            reference = 1

        super().__init__(
            len(decision_variables), len(objectives), nconstrs=len(constraints)
        )

        # fixme we can probably get rid of 'robust'
        #    just flip to robust if reference is an iterable
        #    handle most value error checks inside optimize and robust_optimize instead of here
        if (searchover == "robust") and (
            (reference == 1) or isinstance(reference, Sample)
        ):
            raise ValueError(
                "you cannot use no reference scenario or a single reference scenario for robust optimization"
            )
        for obj in objectives:
            if obj.kind == obj.INFO:
                raise ValueError(
                    f"you need to specify the direction for objective {obj.name}, cannot be INFO"
                )

        self.searchover = searchover
        self.decision_variables = decision_variables
        self.objectives = objectives

        self.ema_constraints = constraints
        self.reference = reference

        # wire workbench metadata into the platypus problem structure
        self.types[:] = to_platypus_types(decision_variables)
        self.directions[:] = [outcome.kind for outcome in objectives]
        # all workbench constraints are evaluated as distance-to-feasibility,
        # so platypus-side they are all "equals zero" constraints
        self.constraints[:] = "==0"
132

133

134
def to_platypus_types(decision_variables: Iterable[Parameter]) -> list[platypus.Type]:
    """Helper function for mapping from workbench parameter types to platypus parameter types.

    Real and integer parameters map onto their platypus counterparts using their
    bounds; categorical and boolean parameters map onto a platypus Subset of
    size 1 over their categories.

    Parameters
    ----------
    decision_variables : iterable of workbench Parameter instances

    Returns
    -------
    list of platypus.Type instances, one per decision variable

    """
    # TODO:: should categorical not be platypus.Subset, with size == 1?
    _type_mapping = {
        RealParameter: platypus.Real,
        IntegerParameter: platypus.Integer,
        CategoricalParameter: platypus.Subset,
        BooleanParameter: platypus.Subset,
    }

    types = []
    for dv in decision_variables:
        klass = _type_mapping[type(dv)]

        # fixed: was isinstance(dv, (CategoricalParameter | BooleanParameter)),
        # a redundant one-element tuple wrapped around a union type
        if not isinstance(dv, CategoricalParameter | BooleanParameter):
            decision_variable = klass(dv.lower_bound, dv.upper_bound)
        else:
            # subset of size 1: each solution picks exactly one category
            decision_variable = klass(dv.categories, 1)

        types.append(decision_variable)
    return types
155

156

157
def to_dataframe(
    solutions: Iterable[platypus.Solution], dvnames: list[str], outcome_names: list[str]
):
    """Helper function to turn a collection of platypus Solution instances into a pandas DataFrame.

    Duplicate solutions are dropped via platypus.unique before conversion.

    Parameters
    ----------
    solutions : collection of Solution instances
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame
    """
    records = []
    for solution in platypus.unique(solutions):
        # start from the decision variables, then overlay the objective values
        record = Sample._from_platypus_solution(solution).copy()
        record.update(dict(zip(outcome_names, solution.objectives)))
        records.append(record)

    return pd.DataFrame(records, columns=dvnames + outcome_names)
185

186

187
def process_jobs(jobs: list[platypus.core.EvaluateSolution]):
    """Helper function to map jobs generated by platypus to Sample instances.

    Parameters
    ----------
    jobs : collection

    Returns
    -------
    scenarios, policies

    """
    problem = jobs[0].solution.problem
    samples = [Sample._from_platypus_solution(job.solution) for job in jobs]

    # when searching over uncertainties, the generated samples are scenarios;
    # otherwise (levers / robust) they are policies
    if problem.searchover == "uncertainties":
        return samples, problem.reference
    if problem.searchover in ("levers", "robust"):
        return problem.reference, samples
    raise ValueError(
        f"unknown value for searchover, got {problem.searchover} should be one of 'levers', 'uncertainties', or 'robust'"
    )
215

216

217
def evaluate(
    jobs_collection: Iterable[tuple[Sample, platypus.core.EvaluateSolution]],
    experiments: pd.DataFrame,
    outcomes: dict[str, np.ndarray],
    problem: Problem,
):
    """Helper function for mapping the results from perform_experiments back to what platypus needs."""
    searchover = problem.searchover
    outcome_names = problem.outcome_names
    constraints = problem.ema_constraints

    # which experiments column identifies a job depends on the search direction
    column = "scenario" if searchover == "uncertainties" else "policy"

    for sample, job in jobs_collection:
        # boolean mask selecting the experiment rows belonging to this job
        logical = experiments[column] == sample.name

        job_outputs = {}
        for k, v in outcomes.items():
            job_outputs[k] = v[logical]

        # TODO:: only retain decision variables
        job_experiment = experiments[logical]

        if searchover == "levers" or searchover == "uncertainties":
            # a single reference: each outcome array holds exactly one value
            job_outputs = {k: v[0] for k, v in job_outputs.items()}
        else:
            # robust search: collapse the per-scenario outcomes into one
            # robustness score per objective via the objective's function
            robustness_scores = {}
            for obj in problem.objectives:
                data = [outcomes[var_name] for var_name in obj.variable_name]
                score = obj.function(*data)
                robustness_scores[obj.name] = score
            job_outputs = robustness_scores
            job_experiment = job_experiment.iloc[
                0
            ]  # we only need a single row with the levers here

        job_constraints = _evaluate_constraints(
            job_experiment, job_outputs, constraints
        )
        job_outcomes = [job_outputs[key] for key in outcome_names]

        # platypus pulls results by calling problem.function; we inject a
        # closure returning the already-computed values. Defaults bind the
        # current job's values, avoiding the late-binding closure pitfall.
        if job_constraints:
            job.solution.problem.function = (
                lambda _, job_outcomes=job_outcomes, job_constraints=job_constraints: (
                    job_outcomes,
                    job_constraints,
                )
            )
        else:
            job.solution.problem.function = (
                lambda _, job_outcomes=job_outcomes: job_outcomes
            )
        job.solution.evaluate()
270

271

272
def _evaluate_constraints(
1✔
273
    job_experiment: pd.Series,
274
    job_outcomes: dict[str, float | int],
275
    constraints: list[Constraint],
276
):
277
    """Helper function for evaluating the constraints for a given job."""
278
    job_constraints = []
1✔
279
    for constraint in constraints:
1✔
280
        data = [job_experiment[var] for var in constraint.parameter_names]
1✔
281
        data += [job_outcomes[var] for var in constraint.outcome_names]
1✔
282
        constraint_value = constraint.process(data)
1✔
283
        job_constraints.append(constraint_value)
1✔
284
    return job_constraints
1✔
285

286

287
class ProgressBarExtension(platypus.extensions.FixedFrequencyExtension):
    """Small platypus extension showing a progress bar."""

    def __init__(self, total_nfe: int, frequency: int = 100):
        """Init."""
        super().__init__(frequency=frequency)
        # the tracker keeps the running nfe count and renders the bar
        self.progress_tracker = ProgressTrackingMixIn(
            total_nfe,
            frequency,
            _logger,
            log_func=lambda self: f"generation {self.generation}, {self.i}/{self.max_nfe}",
        )

    def do_action(self, algorithm):
        """Update the progress bar."""
        # the tracker takes an increment, so feed it the delta since last update
        delta = algorithm.nfe - self.progress_tracker.i
        self.progress_tracker(delta)
305

306

307
class ArchiveStorageExtension(platypus.extensions.FixedFrequencyExtension):
    """Extension that stores the archive to a tarball at a fixed frequency.

    Parameters
    ----------
    directory : str
    decision_variable_names : list of the names of the decision variables
    outcome_names : list of names of the outcomes of interest
    filename : the name of the tarball
    frequency : int
        The frequency the action occurs.
    by_nfe : bool
        If :code:`True`, the frequency is given in number of function
        evaluations.  If :code:`False`, the frequency is given in the number
        of iterations.

    Raises
    ------
    FileExistsError if tarfile already exists.

    """

    def __init__(
        self,
        decision_variable_names: list[str],
        outcome_names: list[str],
        directory: str | None = None,
        filename: str | None = None,
        frequency: int = 1000,
        by_nfe: bool = True,
    ):
        """Init.

        NOTE(review): directory and filename default to None, but
        os.path.join raises TypeError when given None — presumably callers
        always pass both; confirm against call sites.
        """
        super().__init__(frequency=frequency, by_nfe=by_nfe)
        self.decision_variable_names = decision_variable_names
        self.outcome_names = outcome_names
        # NOTE(review): self.temp is not used anywhere in this class as
        # visible here — possibly a leftover; confirm before removing
        self.temp = os.path.join(directory, "tmp")
        self.tar_filename = os.path.join(os.path.abspath(directory), filename)

        # refuse to append to a pre-existing tarball so archives from
        # different runs are never silently mixed
        if os.path.exists(self.tar_filename):
            raise FileExistsError(
                f"File {self.tar_filename} for storing the archives already exists."
            )

    def do_action(self, algorithm: platypus.algorithms.AbstractGeneticAlgorithm):
        """Add the current archive to the tarball."""
        # broadens the algorithms in platypus we can support automagically
        try:
            data = algorithm.archive
        except AttributeError:
            data = algorithm.result

        # fixme, this opens and closes the tarball everytime
        #   can't we open in in the init and have a clean way to close it
        #   on any exit?
        with tarfile.open(self.tar_filename, "a") as f:
            archive = to_dataframe(
                data, self.decision_variable_names, self.outcome_names
            )
            # serialize the archive to an in-memory CSV, then store it as a
            # tar member named after the current nfe count (e.g. "5000.csv")
            stream = io.BytesIO()
            archive.to_csv(stream, encoding="UTF-8", index=False)
            stream.seek(0)
            tarinfo = tarfile.TarInfo(f"{algorithm.nfe}.csv")
            tarinfo.size = len(stream.getbuffer())
            tarinfo.mtime = time.time()
            f.addfile(tarinfo, stream)
371

372

373
class RuntimeConvergenceTracking(platypus.extensions.FixedFrequencyExtension):
    """Platypus Extension for tracking runtime convergence information.

    This extension tracks runtime information that cannot be retrieved from the archives that are stored. Specifically,
    it automatically tries to track epsilon progress and the operator probabilities in case of a MultiMethod
    variator.

    """

    def __init__(
        self,
        frequency: int = 1000,
        by_nfe: bool = True,
    ):
        """Init."""
        super().__init__(frequency=frequency, by_nfe=by_nfe)
        # one dict of runtime metrics per invocation of do_action
        self.data = []
        # NOTE(review): attributes_to_try is never read in this class as
        # visible here — possibly dead code; confirm before removing
        self.attributes_to_try = ["nfe"]

    def do_action(self, algorithm: platypus.algorithms.AbstractGeneticAlgorithm):
        """Retrieve the runtime convergence information."""
        runtime_info = {}
        runtime_info["nfe"] = algorithm.nfe

        # only epsilon-archive based algorithms expose improvements;
        # silently skip the metric for the others
        with contextlib.suppress(AttributeError):
            runtime_info["epsilon_progress"] = algorithm.archive.improvements

        variator = algorithm.variator
        if isinstance(variator, Multimethod):
            # record the current selection probability of each operator,
            # keyed by the operator's class name
            for method, prob in zip(variator.variators, variator.probabilities):
                if isinstance(method, GAOperator):
                    # unwrap the crossover operator from the GAOperator pair
                    method = method.variation  # noqa: PLW2901

                runtime_info[method.__class__.__name__] = prob

        self.data.append(runtime_info)

    def to_dataframe(self):
        """Return the tracked convergence information as a pandas DataFrame."""
        return pd.DataFrame(self.data)
411

412

413
def load_archives(path_to_file: str) -> list[tuple[int, pd.DataFrame]]:
    """Returns a list of stored archives.

    Each entry in the list is a tuple. The first element is the number of
    nfe, the second is the archive at that number of nfe. Entries are
    returned in ascending order of nfe.

    Parameters
    ----------
    path_to_file : the path to the archive

    Returns
    -------
    list of (nfe, DataFrame) tuples, sorted by nfe

    """
    archives = []
    with tarfile.open(path_to_file, "r") as archive:
        for fn in archive.getnames():
            f = archive.extractfile(fn)
            if f is None:
                # extractfile returns None for non-regular members
                # (e.g. directories); nothing to load for those
                continue
            data = pd.read_csv(f)
            # members are named "<nfe>.csv" by ArchiveStorageExtension
            nfe = int(fn.split(".")[0])
            archives.append((nfe, data))

    # tar member order is append order; sort explicitly so callers can
    # rely on ascending nfe regardless of how the tarball was written
    archives.sort(key=lambda entry: entry[0])
    return archives
434

435

436
def epsilon_nondominated(
    results: list[pd.DataFrame], epsilons: list[float], problem: Problem
) -> pd.DataFrame:
    """Merge the list of results into a single set of non dominated results using the provided epsilon values.

    Parameters
    ----------
    results : list of DataFrames
    epsilons : epsilon values for each objective
    problem : PlatypusProblem instance

    Returns
    -------
    DataFrame

    Notes
    -----
    this is a platypus based alternative to pareto.py (https://github.com/matthewjwoodruff/pareto.py)
    """
    if problem.nobjs != len(epsilons):
        raise ValueError(
            f"The number of epsilon values ({len(epsilons)}) must match the number of objectives {problem.nobjs}"
        )

    # pool all results, rebuild them as platypus solutions, and let the
    # epsilon-box archive keep only the non-dominated ones
    pooled = pd.concat(results, ignore_index=True)
    archive = EpsilonBoxArchive(epsilons)
    archive += rebuild_platypus_population(pooled, problem)

    return to_dataframe(archive, problem.parameter_names, problem.outcome_names)
464

465

466
def rebuild_platypus_population(archive: pd.DataFrame, problem: Problem):
    """Rebuild a population of platypus Solution instances.

    Parameters
    ----------
    archive : DataFrame
    problem : PlatypusProblem instance

    Returns
    -------
    list of platypus Solutions

    Raises
    ------
    EMAError
        if the archive's columns do not match the problem's decision
        variables and objectives.

    """
    # fixme, might this be easier via Sample._to_platypus_solution?
    #   we can just turn each row into a Sample instance directly and then go to a Solution instance
    expected_columns = problem.nvars + problem.nobjs
    actual_columns = len(archive.columns)

    if actual_columns != expected_columns:
        raise EMAError(
            f"The number of columns in the archive ({actual_columns}) does not match the "
            f"expected number of decision variables and objectives ({expected_columns})."
        )

    solutions = []
    for row in archive.itertuples():
        try:
            decision_variables = [
                getattr(row, attr) for attr in problem.parameter_names
            ]
        except AttributeError as e:
            missing_parameters = [
                attr for attr in problem.parameter_names if not hasattr(row, attr)
            ]
            raise EMAError(
                f"Parameter names {missing_parameters} not found in archive"
            ) from e

        try:
            objectives = [getattr(row, attr) for attr in problem.outcome_names]
        except AttributeError as e:
            missing_outcomes = [
                attr for attr in problem.outcome_names if not hasattr(row, attr)
            ]
            # fixed: message had a stray trailing apostrophe ("archive'"),
            # now consistent with the parameter-name error above
            raise EMAError(
                f"Outcome names {missing_outcomes} not found in archive"
            ) from e

        solution = Solution(problem)
        # raw values must be encoded into each platypus type's internal
        # representation (e.g. gray-coded bits for Integer)
        solution.variables[:] = [
            platypus_type.encode(value)
            for platypus_type, value in zip(problem.types, decision_variables)
        ]
        solution.objectives[:] = objectives
        solutions.append(solution)
    return solutions
522

523

524
class CombinedVariator(Variator):
    """Combined variator.

    Applies a per-type crossover and mutation operator to solutions with
    mixed decision-variable types (Real, Integer, Subset). Dispatch is done
    via the _crossover and _mutate class-level dicts, which map platypus
    types to the plain (unbound) functions defined below; they are therefore
    called with self passed explicitly.
    """

    def __init__(self, crossover_prob=0.5, mutation_prob=1):
        """Init.

        Parameters
        ----------
        crossover_prob : per-variable probability of applying crossover
        mutation_prob : per-variable probability of applying mutation
        """
        super().__init__(2)  # this variator consumes two parents
        self.SBX = platypus.SBX()
        self.crossover_prob = crossover_prob
        self.mutation_prob = mutation_prob

    def evolve(self, parents: list[Solution]) -> tuple[Solution, Solution]:
        """Evolve the provided parents."""
        child1 = copy.deepcopy(parents[0])
        child2 = copy.deepcopy(parents[1])
        problem = child1.problem

        # crossover
        # we will evolve the individual
        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.crossover_prob:
                klass = kind.__class__
                # dispatch on the platypus type of decision variable i
                child1, child2 = self._crossover[klass](self, child1, child2, i, kind)
                child1.evaluated = False
                child2.evaluated = False

        # mutate
        for child in [child1, child2]:
            self.mutate(child)

        return child1, child2

    def mutate(self, child: Solution):
        """Mutate each decision variable of child in place with probability mutation_prob."""
        problem = child.problem

        for i, kind in enumerate(problem.types):  # @ReservedAssignment
            if random.random() <= self.mutation_prob:
                klass = kind.__class__
                child = self._mutate[klass](self, child, i, kind)
                child.evaluated = False

    def crossover_real(
        self, child1: Solution, child2: Solution, i: int, type: platypus.Real
    ) -> tuple[Solution, Solution]:  # @ReservedAssignment
        """Simulated binary crossover for real-valued variable i."""
        # sbx
        x1 = float(child1.variables[i])
        x2 = float(child2.variables[i])
        lb = type.min_value
        ub = type.max_value

        x1, x2 = self.SBX.sbx_crossover(x1, x2, lb, ub)

        child1.variables[i] = x1
        child2.variables[i] = x2

        return child1, child2

    def crossover_integer(
        self, child1: Solution, child2: Solution, i: int, type: platypus.Integer
    ) -> tuple[Solution, Solution]:  # @ReservedAssignment
        """Half-uniform crossover on the bit representation of integer variable i."""
        # HUX()
        for j in range(type.nbits):
            # swap each differing bit with probability 0.5
            if child1.variables[i][j] != child2.variables[i][j]:  # noqa: SIM102
                if bool(random.getrandbits(1)):
                    child1.variables[i][j] = not child1.variables[i][j]
                    child2.variables[i][j] = not child2.variables[i][j]
        return child1, child2

    def crossover_categorical(
        self, child1: Solution, child2: Solution, i: int, type: platypus.Subset
    ) -> tuple[Solution, Solution]:  # @ReservedAssignment
        """Subset crossover for categorical variable i: swap values with probability 0.5."""
        # SSX()
        # Implemented in a simplified manner, since size of subset is 1

        if (child2.variables[i] != child1.variables[i]) and (random.random() < 0.5):
            temp = child1.variables[i]
            child1.variables[i] = child2.variables[i]
            child2.variables[i] = temp

        return child1, child2

    def mutate_real(
        self, child: Solution, i: int, type: platypus.Real, distribution_index: int = 20
    ) -> Solution:  # @ReservedAssignment
        """Polynomial mutation for real-valued variable i."""
        # PM
        x = child.variables[i]
        lower = type.min_value
        upper = type.max_value

        u = random.random()
        dx = upper - lower

        # polynomial mutation: perturb towards lower or upper bound,
        # depending on which side of 0.5 the random draw falls
        if u < 0.5:
            bl = (x - lower) / dx
            b = 2.0 * u + (1.0 - 2.0 * u) * pow(1.0 - bl, distribution_index + 1.0)
            delta = pow(b, 1.0 / (distribution_index + 1.0)) - 1.0
        else:
            bu = (upper - x) / dx
            b = 2.0 * (1.0 - u) + 2.0 * (u - 0.5) * pow(
                1.0 - bu, distribution_index + 1.0
            )
            delta = 1.0 - pow(b, 1.0 / (distribution_index + 1.0))

        x = x + delta * dx
        # clamp back into the variable's bounds
        x = max(lower, min(x, upper))

        child.variables[i] = x
        return child

    def mutate_integer(
        self, child: Solution, i: int, type: platypus.Integer, probability: float = 1
    ) -> Solution:  # @ReservedAssignment
        """Bit-flip mutation on the bit representation of integer variable i."""
        # bitflip
        for j in range(type.nbits):
            if random.random() <= probability:
                child.variables[i][j] = not child.variables[i][j]
        return child

    def mutate_categorical(
        self, child: Solution, i: int, type: platypus.Subset
    ) -> Solution:  # @ReservedAssignment
        """Replace the current category of variable i with a different one chosen at random."""
        # replace, again simplified because len(subset) is 1
        non_members = [
            entry for entry in type.elements if entry.value != child.variables[i]
        ]
        new_value = random.choice(non_members)
        child.variables[i] = new_value.value

        return child

    # dispatch tables mapping platypus decision-variable types to the
    # operators above; populated at class-creation time so the values are
    # plain functions, hence the explicit self at the call sites
    _crossover = {
        Real: crossover_real,
        Integer: crossover_integer,
        Subset: crossover_categorical,
    }

    _mutate = {
        Real: mutate_real,
        Integer: mutate_integer,
        Subset: mutate_categorical,
    }
663

664

665
def _optimize(
    problem: Problem,
    evaluator: "BaseEvaluator",  # noqa: F821
    algorithm: type[platypus.algorithms.AbstractGeneticAlgorithm],
    nfe: int,
    convergence_freq: int,
    logging_freq: int,
    variator: Variator = None,
    initial_population: Iterable[Sample] | None = None,
    filename: str | None = None,
    directory: str | None = None,
    **kwargs,
) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Helper function for optimization.

    Runs the given platypus algorithm on the problem for nfe function
    evaluations, storing intermediate archives to a tarball and tracking
    runtime convergence information at convergence_freq intervals.

    Returns
    -------
    tuple of (final archive as DataFrame, runtime convergence DataFrame)
    """
    klass = problem.types[0].__class__

    # validate epsilons early if the caller passed them through kwargs
    try:
        eps_values = kwargs["epsilons"]
    except KeyError:
        pass
    else:
        if len(eps_values) != len(problem.outcome_names):
            raise ValueError(
                "Number of epsilon values does not match number of outcomes"
            )

    # homogeneous decision variables: let platypus pick its default variator
    # (None); mixed types need the CombinedVariator defined above
    if variator is None:
        if all(isinstance(t, klass) for t in problem.types):
            variator = None
        else:
            variator = CombinedVariator()

    generator = (
        RandomGenerator()
        if initial_population is None
        else InjectedPopulation(
            [sample._to_platypus_solution(problem) for sample in initial_population]
        )
    )

    # NOTE(review): log_frequency is hard-coded to 500 rather than derived
    # from logging_freq — confirm whether this is intentional
    optimizer = algorithm(
        problem,
        evaluator=evaluator,
        variator=variator,
        log_frequency=500,
        generator=generator,
        **kwargs,
    )
    storage = ArchiveStorageExtension(
        problem.parameter_names,
        problem.outcome_names,
        directory=directory,
        filename=filename,
        frequency=convergence_freq,
        by_nfe=True,
    )
    progress_bar = ProgressBarExtension(nfe, frequency=logging_freq)
    runtime_convergence_info = RuntimeConvergenceTracking(frequency=convergence_freq)
    optimizer.add_extension(storage)
    optimizer.add_extension(progress_bar)
    optimizer.add_extension(runtime_convergence_info)

    # silence the workbench's per-experiment INFO logging during the run
    with temporary_filter(name=[callbacks.__name__, evaluators.__name__], level=INFO):
        optimizer.run(nfe)

    storage.do_action(
        optimizer
    )  # ensure last archive is included in the convergence information
    runtime_convergence_info.do_action(
        optimizer
    )  # ensure the last convergence information is added as well
    progress_bar.progress_tracker.pbar.__exit__(
        None, None, None
    )  # ensure progress bar is closed correctly

    # broadens the algorithms in platypus we can support: prefer the
    # epsilon archive when present, fall back to the raw result otherwise
    try:
        data = optimizer.archive
    except AttributeError:
        data = optimizer.result

    runtime_convergence = runtime_convergence_info.to_dataframe()

    results = to_dataframe(data, problem.parameter_names, problem.outcome_names)

    _logger.info(f"optimization completed, found {len(data)} solutions")

    return results, runtime_convergence
752

753

754
class GenerationalBorg(NSGAII):
    """A generational implementation of the BORG Framework.

    This algorithm adopts Epsilon Progress Continuation, and Auto Adaptive
    Operator Selection, but embeds them within the NSGAII generational
    algorithm, rather than the steady state implementation used by the BORG
    algorithm.

    The parametrization of all operators is based on the default values as used
    in Borg 1.9.

    Note:: limited to RealParameters only.

    """

    # Operator parameters, grouped per operator. pm_p and um_p are None here
    # because they depend on the number of decision variables; they are set
    # to 1/nvars in __init__.

    # polynomial mutation (PM)
    pm_p = None
    pm_dist = 20

    # simulated binary crossover (SBX)
    sbx_prop = 1
    sbx_dist = 15

    # differential evolution (DE)
    de_rate = 0.1
    de_stepsize = 0.5

    # uniform mutation (UM)
    um_p = None

    # simplex crossover (SPX)
    spx_nparents = 10
    spx_noffspring = 2
    spx_expansion = 0.3

    # parent-centric crossover (PCX)
    pcx_nparents = 10
    pcx_noffspring = 2
    pcx_eta = 0.1
    pcx_zeta = 0.1

    # unimodal normal distribution crossover (UNDX)
    undx_nparents = 10
    undx_noffspring = 2
    undx_zeta = 0.5
    undx_eta = 0.35

    def __init__(
        self,
        problem: Problem,
        epsilons: list[float],
        population_size: int = 100,
        generator: platypus.Generator = RandomGenerator(),  # noqa: B008
        selector: platypus.Selector = TournamentSelector(2),  # noqa: B008
        **kwargs,
    ):
        """Init.

        Parameters
        ----------
        problem : the Problem instance to optimize
        epsilons : epsilon values for the epsilon-box archive, one per objective
        population_size : size of the generational population
        generator : platypus generator used to create the initial population
        selector : platypus selector used for mating selection
        **kwargs : passed through to NSGAII
        """
        # mutation probabilities scale with the number of decision variables
        self.pm_p = 1 / problem.nvars
        self.um_p = 1 / problem.nvars

        # Parameterization taken from
        # Borg: An Auto-Adaptive MOEA Framework - Hadka, Reed
        variators = [
            GAOperator(
                SBX(probability=self.sbx_prop, distribution_index=self.sbx_dist),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                PCX(
                    nparents=self.pcx_nparents,
                    noffspring=self.pcx_noffspring,
                    eta=self.pcx_eta,
                    zeta=self.pcx_zeta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                DifferentialEvolution(
                    crossover_rate=self.de_rate, step_size=self.de_stepsize
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                UNDX(
                    nparents=self.undx_nparents,
                    noffspring=self.undx_noffspring,
                    zeta=self.undx_zeta,
                    eta=self.undx_eta,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            GAOperator(
                SPX(
                    nparents=self.spx_nparents,
                    noffspring=self.spx_noffspring,
                    expansion=self.spx_expansion,
                ),
                PM(probability=self.pm_p, distribution_index=self.pm_dist),
            ),
            UM(probability=self.um_p),
        ]

        # Multimethod adaptively selects among the variators during the run
        kwargs["variator"] = Multimethod(self, variators)
        super().__init__(
            problem,
            population_size=population_size,
            generator=generator,
            selector=selector,
            archive=EpsilonBoxArchive(epsilons),
            **kwargs,
        )
        self.add_extension(platypus.extensions.EpsilonProgressContinuationExtension())
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc