
LSDOlab / modopt, build 13119822143 (push, via GitHub)

03 Feb 2025 06:02PM UTC coverage: 82.255% (+0.003%) from 82.252%

Author: anugrahjo
Commit: Update live visualization: 1) add keep_viz_open option, 2) always plot both callback and optimizer variables

1 of 4 new or added lines in 1 file covered (25.0%).
2 existing lines in 1 file now uncovered.
5363 of 6520 relevant lines covered (82.25%).
0.82 hits per line.

Source file: /modopt/core/optimizer.py (91.72% of lines covered)
import numpy as np
import scipy as sp
from typing import Union
import os, shutil, copy
from datetime import datetime
import contextlib

from modopt.utils.options_dictionary import OptionsDictionary
from modopt.utils.general_utils import pad_name
# from io import StringIO
from modopt.core.problem import Problem
from modopt.core.problem_lite import ProblemLite
from modopt.core.visualization import Visualizer
import warnings

try:
    import h5py
except ImportError:
    warnings.warn("h5py not found, recording disabled")

from abc import ABC, abstractmethod

class Optimizer(ABC):
    '''
    Base class for all optimization algorithms in modOpt.
    This class provides the common functionalities for all the optimization algorithms,
    such as checking the correctness of first derivatives, recording the outputs,
    hot-starting the optimization, and visualizing the scalar variables.
    User-defined optimization algorithms should inherit from this class.

    Attributes
    ----------
    problem : Problem or ProblemLite
        The problem to be solved.
        Needs to be a Problem() or ProblemLite() object.
    problem_name : str
        The name of the problem.
    solver_name : str
        The name of the solver.
    options : OptionsDictionary
        The options dictionary for the optimizer.
    record : h5py.File
        The record file to store the outputs from all iterations of the optimization.
    hot_start_record : h5py.File
        The record file from which to hot-start the optimization.
        modOpt loads and reuses the outputs stored in this file from a previous optimization.
    out_dir : str
        The directory to store all the output files generated from the optimization.
    modopt_output_files : list
        The list of all files generated by modOpt during the optimization.
    visualizer : Visualizer
        The visualizer object to plot the scalar variables during the optimization.
    timestamp : str
        The timestamp for the optimization.
    scalar_outputs : list
        The list of scalar outputs provided by the optimizer after each iteration.
    available_outputs : dict
        The dictionary of all available outputs from the optimizer after each iteration.
    results : dict
        The dictionary containing the results of the optimization.
    update_outputs_count : int
        Number of times the ``update_outputs()`` method is called.
        Only relevant for development and debugging purposes.
    '''
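    # Example (illustrative sketch): a minimal subclass provides initialize(), setup(),
    # and solve(). The class name 'SteepestDescent', the 'maxiter' option, and the
    # callbacks assigned to self.obj / self.grad below are assumptions chosen only for
    # illustration (check_first_derivatives() later in this file expects self.obj and
    # self.grad to be set by the concrete optimizer).
    #
    # class SteepestDescent(Optimizer):
    #     def initialize(self):
    #         self.solver_name = 'steepest_descent'
    #         self.options.declare('maxiter', default=100, types=int)
    #         self.options.declare('readable_outputs', default=[], types=list)
    #         self.available_outputs = {'itr': int, 'obj': float}
    #
    #     def setup(self):
    #         self.obj  = self.problem._compute_objective           # assumed callback name
    #         self.grad = self.problem._compute_objective_gradient  # assumed callback name
    #
    #     def solve(self):
    #         x = self.problem.x0.copy()
    #         for itr in range(self.options['maxiter']):
    #             x = x - 1e-2 * self.grad(x)
    #             self.update_outputs(itr=itr, obj=self.obj(x))
    #         self.results = {'x': x, 'objective': self.obj(x)}
    #         self.run_post_processing()
    #         return self.results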
    def __init__(self,
                 problem:Union[Problem, ProblemLite],
                 recording:bool = False,
                 hot_start_from:str = None,
                 hot_start_atol:float = 0.,
                 hot_start_rtol:float = 0.,
                 visualize:list = [],
                 keep_viz_open:bool = False,
                 turn_off_outputs:bool = False,
                 **kwargs):
        '''
        Initialize the Optimizer object.
        Sets up recording, hot-starting, and visualization.

        Parameters
        ----------
        problem : Problem or ProblemLite
            The problem to be solved.
            Required argument for all optimizers.
        recording : bool, default=False
            If ``True``, record all outputs from the optimization.
            Recording needs to be enabled to hot-start the same problem later,
            for example, if the optimization is interrupted.
        hot_start_from : str, optional
            The record file from which to hot-start the optimization.
        hot_start_atol : float, default=0.
            The absolute tolerance used to check the inputs
            when reusing outputs from the hot-start record.
        hot_start_rtol : float, default=0.
            The relative tolerance used to check the inputs
            when reusing outputs from the hot-start record.
        visualize : list, default=[]
            The list of scalar variables to be visualized during the optimization.
        keep_viz_open : bool, default=False
            If ``True``, keeps the visualization window open after the optimization is complete.
        turn_off_outputs : bool, default=False
            If ``True``, prevents modOpt from generating any output files.
        **kwargs
            Additional optimizer-specific keyword arguments.
        '''
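        # Example (illustrative sketch): constructing a concrete optimizer with these
        # options. 'SteepestDescent' and 'prob' are hypothetical names used only for
        # illustration; the keyword arguments are the ones documented above.
        #
        # optimizer = SteepestDescent(prob,
        #                             recording=True,
        #                             visualize=['obj', 'x[0]'],
        #                             keep_viz_open=False)
        # optimizer.solve()
        # optimizer.print_results()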

        # if type(self) is Optimizer:
        #     raise TypeError("Optimizer cannot be instantiated directly.")

        now = datetime.now()
        self.timestamp = now.strftime("%Y-%m-%d_%H.%M.%S.%f")

        self.options = OptionsDictionary()
        self.problem = problem
        self.problem_name = problem.problem_name
        self.solver_name = 'unnamed_solver'
        self.options.declare('recording', default=False, types=bool)
        self.options.declare('hot_start_from', default=None, types=(type(None), str))
        self.options.declare('hot_start_atol', default=0., types=float)
        self.options.declare('hot_start_rtol', default=0., types=float)
        self.options.declare('visualize', default=[], types=list)
        self.options.declare('keep_viz_open', default=False, types=bool)
        self.options.declare('turn_off_outputs', default=False, types=bool)
        self.update_outputs_count = 0

        self.options.declare('formulation', default='rs', types=str)

        self.initialize()
        self.options.update({'recording': recording,
                             'hot_start_from': hot_start_from,
                             'hot_start_atol': hot_start_atol,
                             'hot_start_rtol': hot_start_rtol,
                             'visualize': visualize,
                             'keep_viz_open': keep_viz_open,
                             'turn_off_outputs': turn_off_outputs})
        self.options.update(kwargs)

        # compute the scalar outputs from the optimizer after initialization
        a_outs = self.available_outputs
        self.scalar_outputs = [out for out in a_outs.keys() if not isinstance(a_outs[out], tuple)]

        # Create the outputs directory
        if not self.options['turn_off_outputs']:
            self.out_dir = f"{problem.problem_name}_outputs/{self.timestamp}"
            self.modopt_output_files  = [f"directory: {self.out_dir}", 'modopt_results.out']
            os.makedirs(self.out_dir) # recursively create the directory
        else:
            if self.options['recording']:
                raise ValueError("Cannot record with 'turn_off_outputs=True'.")
            if self.options['readable_outputs'] != []:
                raise ValueError("Cannot write 'readable_outputs' with 'turn_off_outputs=True'.")
            if self.options['visualize'] != []:
                raise ValueError("Cannot visualize with 'turn_off_outputs=True'.")

        # Hot starting and recording should start even before setup() is called
        # since there might be callbacks in the setup() function
        self.record  = self.problem._record = None      # Reset if using the same problem object again
        self.problem._callback_count        = 0         # Reset if using the same problem object again
        self.problem._obj_count             = 0         # Reset if using the same problem object again
        self.problem._grad_count            = 0         # Reset if using the same problem object again
        self.problem._hess_count            = 0         # Reset if using the same problem object again
        self.problem._con_count             = 0         # Reset if using the same problem object again
        self.problem._jac_count             = 0         # Reset if using the same problem object again
        self.problem._reused_callback_count = 0         # Reset if using the same problem object again
        self.problem._hot_start_mode        = False     # Reset if using the same problem object again
        self.problem._hot_start_record      = None      # Reset if using the same problem object again
        self.problem._num_callbacks_found   = 0         # Reset if using the same problem object again
        self.problem._hot_start_tol         = None      # Reset if using the same problem object again
        self.problem._visualizer            = None      # Reset if using the same problem object again

        if self.options['recording']:
            self.record  = self.problem._record = h5py.File(f'{self.out_dir}/record.hdf5', 'a')
        if self.options['hot_start_from'] is not None:
            self.setup_hot_start()
        if self.options['visualize'] != []:
            # NOTE: This will neglect 'obj_hess', 'lag_hess' active_callbacks for IPOPT
            #       and 'obj_hess', 'lag_hess', 'obj_hvp' for TrustConstr
            #       since these get added in the setup() function.
            self.setup_visualization()

        self._setup()

    def _setup(self):
        # User defined optimizer-specific setup
        self.setup()
        # Setup outputs to be written to file
        if not self.options['turn_off_outputs']:
            self.setup_outputs()

    def setup_outputs(self):
        '''
        Set up the directory and open files to write the outputs of the optimization problem.
        Four different types of outputs are written:
            1. Summary table:    Single file with the scalar outputs of the optimization problem.
            2. Readable outputs: A file for each readable_output declared.
            3. Recorder:         Contains all the outputs of the optimization problem, if recording is enabled.
            4. Results:          Single file with the readable print_results() string (no setup needed).
        '''
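        # Example (illustrative sketch): resulting layout of the output directory for a
        # hypothetical problem named 'rosenbrock' with readable_outputs=['x'] and
        # recording enabled; the timestamp shown is made up.
        #
        #   rosenbrock_outputs/2025-02-03_18.02.05.000000/
        #       modopt_summary.out    # 1. summary table (scalar outputs, one row per iteration)
        #       x.out                 # 2. one readable output file per declared key
        #       record.hdf5           # 3. recorder file (only if recording is enabled)
        #       modopt_results.out    # 4. print_results() output, written in run_post_processing()
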
        dir     = self.out_dir
        a_outs  = self.available_outputs             # Available outputs dictionary
        d_outs  = self.options['readable_outputs']   # Declared outputs list
        s_outs  = self.scalar_outputs                # Scalar outputs list

        # 1. Write the header of the summary_table file
        if len(s_outs) > 0:
            header = "%10s " % '#'
            for key in s_outs:
                if a_outs[key] in (int, np.int_, np.int32, np.int64):
                    header += "%10s " % key
                elif a_outs[key] in (float, np.float_, np.float32, np.float64):
                    header += "%16s " % key

            with open(f"{dir}/modopt_summary.out", 'w') as f:
                f.write(header)
            self.modopt_output_files += ["modopt_summary.out"]

        # 2. Create the readable output files
        for key in d_outs:
            if key not in a_outs:
                raise ValueError(f'Invalid readable output "{key}" is declared. ' \
                                 f'Available outputs are {list(a_outs.keys())}.')
            with open(f"{dir}/{key}.out", 'w') as f:
                pass
            self.modopt_output_files += [f"{key}.out"]

        # 3. Create the recorder output file and write the attributes
        if self.options['recording']:
            constrained = self.problem.constrained
            rec = self.record
            self.modopt_output_files += ['record.hdf5']

            rec.attrs['problem_name']   = self.problem_name
            rec.attrs['solver_name']    = self.solver_name
            rec.attrs['modopt_output_files'] = self.modopt_output_files
            if hasattr(self, 'default_solver_options'):
                solver_opts = self.solver_options.get_pure_dict()
                for key, value in solver_opts.items():
                    value = 'None' if value is None else value
                    if isinstance(value, (int, float, bool, str)):
                        rec.attrs[f'solver_options-{key}'] = value
            elif self.solver_name == 'ipopt': # ipopt-specific
                for key, value in self.nlp_options['ipopt'].items():
                    if isinstance(value, (int, float, bool, str)):
                        rec.attrs[f'solver_options-{key}'] = value
            elif self.solver_name.startswith('convex_qpsolvers'): # convex_qpsolvers-specific
                for key, value in self.options['solver_options'].items():
                    if isinstance(value, (int, float, bool, str)):
                        rec.attrs[f'solver_options-{key}'] = value
            else: # for inbuilt solvers
                opts = self.options.get_pure_dict()
                for key, value in opts.items():
                    value = 'None' if value is None else value
                    if isinstance(value, (int, float, bool, str)):
                        rec.attrs[f'options-{key}'] = value
            rec.attrs['readable_outputs'] = d_outs
            rec.attrs['recording'] = str(self.options['recording'])
            rec.attrs['hot_start_from'] = str(self.options['hot_start_from'])
            rec.attrs['visualize'] = self.options['visualize']
            rec.attrs['timestamp'] = self.timestamp
            rec.attrs['constrained'] = constrained
            rec.attrs['nx'] = self.problem.nx
            rec.attrs['nc'] = self.problem.nc

            rec.attrs['x0']       = self.problem.x0 / self.problem.x_scaler
            rec.attrs['x_scaler'] = self.problem.x_scaler
            rec.attrs['o_scaler'] = self.problem.o_scaler # Only for single-objective problems
            rec.attrs['x_lower']  = self.problem.x_lower / self.problem.x_scaler
            rec.attrs['x_upper']  = self.problem.x_upper / self.problem.x_scaler
            rec.attrs['c_scaler'] = self.problem.c_scaler if constrained else 'None'
            rec.attrs['c_lower']  = self.problem.c_lower / self.problem.c_scaler if constrained else 'None'
            rec.attrs['c_upper']  = self.problem.c_upper / self.problem.c_scaler if constrained else 'None'

    def setup_hot_start(self):
        '''
        Open the hot-start record file, compute the number of callbacks found in it,
        and pass both to the problem object.
        '''
        self.hot_start_record                 = h5py.File(self.options['hot_start_from'], 'r')
        num_callbacks_found = len([key for key in list(self.hot_start_record.keys()) if key.startswith('callback_')])
        self.problem._hot_start_mode          = True
        self.problem._hot_start_record        = self.hot_start_record
        self.problem._num_callbacks_found     = num_callbacks_found
        self.problem._hot_start_tol           = (self.options['hot_start_rtol'], self.options['hot_start_atol'])

    def setup_visualization(self):
        '''
        Set up the visualization for scalar variables of the optimization problem.
        Variables can be either optimizer outputs or callback inputs/outputs.
        '''
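        # Example (illustrative sketch): entries accepted by the 'visualize' option,
        # based on the checks below. 'con' is a hypothetical callback name.
        #
        #   visualize=['obj', 'x[0]', 'con[2]']  # scalar variables unindexed, array entries indexed
        #   visualize=['x']                      # raises ValueError: non-scalar variable is not indexed
        #   visualize=['obj[0]']                 # raises ValueError: scalar variable is indexed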
        visualize_vars   = []
        available_vars  = sorted(list(set(list(self.available_outputs.keys()) + self.active_callbacks + ['x'])))
        for s_var in self.options['visualize']: # scalar variables
            var = s_var.split('[')[0]
            if var not in available_vars:
                raise ValueError(f"Unavailable variable '{var}' is declared for visualization. " \
                                 f"Available variables for visualization are {available_vars}.")
            if var in self.scalar_outputs + ['obj', 'lag']:
                if var != s_var:
                    raise ValueError(f"Scalar variable '{var}' is indexed for visualization.")
            else:
                if var == s_var:
                    raise ValueError(f"Non-scalar variable '{var}' is not indexed for visualization. " \
                                     f"Provide an index to a scalar entry in '{var}' for visualization.")

            if var in self.available_outputs.keys():
                visualize_vars.append(s_var)
            if var in self.active_callbacks + ['x']:
                visualize_vars.append('callback_' + s_var)

        visualize_callbacks = True
        # # No need to visualize callbacks if all variables are optimizer outputs
        # if all(s_var.split('[')[0] in self.available_outputs.keys() for s_var in self.options['visualize']):
        #     visualize_callbacks = False

        self.visualizer = Visualizer(self.problem_name, visualize_vars, self.out_dir)
        if visualize_callbacks:
            self.problem._visualizer = self.visualizer

    @abstractmethod
    def initialize(self):
        '''
        Set solver name and any solver-specific options.
        '''
        # raise NotImplementedError("Subclasses must implement this method.")
        pass

    @abstractmethod
    def setup(self):
        '''
        Set up any solver-specific attributes or modules (e.g., merit function or Hessian approximation)
        required by the optimization algorithm, during optimizer instantiation.
        This method is called after initialize().
        '''
        # raise NotImplementedError("Subclasses must implement this method.")
        pass

    @abstractmethod         # Ensure that the subclasses cannot be instantiated unless the method is implemented
    def solve(self):
        '''
        Run the optimization algorithm to solve the problem.
        '''
        # raise NotImplementedError("Subclasses must implement this method.")
        pass

    def run_post_processing(self):
        '''
        Run the post-processing functions of the optimizer.
        1. Write the print_results() output to the modopt_results.out file
        2. Write self.results to the record file
        3. Save and close the visualization plot
        '''

        self.results['total_callbacks']     = self.problem._callback_count
        self.results['obj_evals']           = self.problem._obj_count
        self.results['grad_evals']          = self.problem._grad_count
        self.results['hess_evals']          = self.problem._hess_count
        self.results['con_evals']           = self.problem._con_count
        self.results['jac_evals']           = self.problem._jac_count
        self.results['reused_callbacks']    = self.problem._reused_callback_count

        if self.options['turn_off_outputs']:
            return

        if self.options['visualize'] != []:
            if self.options['keep_viz_open']:
                self.visualizer.keep_plot()
                # vis_wait = self.visualizer.wait_time
            else:
                self.visualizer.close_plot()
            self.results['vis_time'] = self.visualizer.vis_time

        self.results['out_dir']     = self.out_dir
        with open(f"{self.out_dir}/modopt_results.out", 'w') as f:
            with contextlib.redirect_stdout(f):
                self.print_results(all=True)

        if self.options['recording']:
            group = self.record.create_group('results')
            for key, value in self.results.items():
                if self.solver_name.startswith('convex_qpsolvers') and key in ['problem', 'extras']:
                    continue
                if isinstance(value, dict):
                    for k, v in value.items():
                        group[f"{key}-{k}"] = v
                else:
                    group[key] = value

    def update_outputs(self, **kwargs):
        '''
        Update and write the outputs of the optimization problem to the corresponding files.
        Three different types of outputs are written:
            1. Summary table: Contains the scalar outputs of the optimization problem
            2. Readable outputs: Contains the declared readable outputs
            3. Recorder outputs: Contains all the outputs of the optimization problem, if recording is enabled
        '''
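        # Example (illustrative sketch): a subclass's solve() loop calls this once per
        # iteration, passing exactly the keys declared in available_outputs (enforced
        # below). The keys 'itr', 'obj', and 'x' are assumptions for illustration.
        #
        #   self.update_outputs(itr=itr, obj=f_k, x=x_k)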
        self.out_dict = out_dict = copy.deepcopy(kwargs)

        if self.options['turn_off_outputs']:
            return

        if self.options['visualize'] != []:
            self.visualizer.update_plot(out_dict)

        dir    = self.out_dir
        a_outs = self.available_outputs             # Available outputs dictionary
        d_outs = self.options['readable_outputs']   # Declared outputs list

        if set(kwargs.keys()) != set(a_outs):
            raise ValueError(f'Output(s) passed in to be updated {list(kwargs.keys())} ' \
                             f'do not match the available outputs {list(a_outs.keys())}.')

        # 1. Write the scalar outputs to the summary file
        if len(self.scalar_outputs) > 0:
            # Print summary_table row
            new_row = '\n' + "%10i " % self.update_outputs_count
            for key in self.scalar_outputs:
                if a_outs[key] in (int, np.int_, np.int32, np.int64):
                    new_row += "%10i " % kwargs[key]
                elif a_outs[key] in (float, np.float_, np.float32, np.float64):
                    new_row += "%16.6E " % kwargs[key]

            with open(f"{dir}/modopt_summary.out", 'a') as f:
                f.write(new_row)

        # 2. Write the declared readable outputs to the corresponding files
        for key in d_outs:
            value = kwargs[key]
            if key in self.scalar_outputs:
                if np.isscalar(value) and np.isreal(value):
                    with open(f"{dir}/{key}.out", 'a') as f:
                        np.savetxt(f, [value])
                else:
                    raise ValueError(f'Value of "{key}" is not a real-valued scalar.')
            else:
                # Multidim. arrays will be flattened (C-major/row-major) before writing to a file
                with open(f"{dir}/{key}.out", 'a') as f:
                    np.savetxt(f, value.reshape(1, value.size))

        # 3. Write the outputs to the recording files
        if self.options['recording']:
            group_name = 'iteration_' + str(self.update_outputs_count)
            group = self.record.create_group(group_name)
            for var, value in out_dict.items():
                group[var] = value

        self.update_outputs_count += 1

    def check_if_callbacks_are_declared(self, cb, cb_str, solver_str):
        if cb not in self.problem.user_defined_callbacks:
            if isinstance(self.problem, Problem):
                raise ValueError(f"{cb_str} function is not declared in the Problem() subclass but is needed for {solver_str}.")
            elif isinstance(self.problem, ProblemLite):
                warnings.warn(f"{cb_str} function is not provided in the ProblemLite() container but is needed for {solver_str}. "\
                              f"The optimizer will use finite differences to compute the {cb_str}.")

    def print_results(self, summary_table=False, all=False):
        '''
        Print the results of the optimization problem to the terminal.

        Parameters
        ----------
        summary_table : bool, default=False
            If ``True``, print the summary table for the optimization.
        all : bool, default=False
            If ``False``, print only the scalar outputs of the optimization problem.
            Otherwise, print all the outputs of the optimization problem.
        '''

        # TODO: Testing to verify the design variable data
        # print(
        #     np.loadtxt(self.problem_name + '_outputs/x.out'))

        output  = "\n\tSolution from modOpt:"
        output += "\n\t"+"-" * 100

        output += f"\n\t{'Problem':25}: {self.problem_name}"
        output += f"\n\t{'Solver':25}: {self.solver_name}"
        for key, value in self.results.items():
            if np.isscalar(value):
                output += f"\n\t{key:25}: {value}"
            elif all:
                output += f"\n\t{key:25}: {value}"

        output += '\n\t' + '-'*100
        print(output)

        if summary_table:
            with open(f"{self.out_dir}/modopt_summary.out", 'r') as f:
                # lines = f.readlines()
                lines = f.read().splitlines()

            title = "modOpt summary table:"
            line_length = max(len(lines[0]), len(title))

            # Print header
            output  = "\n" + "=" * line_length
            output += f"\n{title.center(line_length)}"
            output += "\n" + "=" * line_length

            # Print all iterations
            output += "\n" + "\n".join(lines)

            output += "\n" + "=" * line_length
            print(output)

    def print_callback_counts(self):
        print('\n')
        print(f"{'Problem':20}: {self.problem_name}")
        print('-'*100)
        print(f"{'total_callbacks':20}: {self.problem._callback_count}")
        print(f"{'reused_callbacks':20}: {self.problem._reused_callback_count}")

        for key in ['obj', 'grad', 'hess', 'con', 'jac']:
            count = getattr(self.problem, f"_{key}_count")
            name  = f"{key}_callbacks"
            print(f"{name:20}: {count}")
        print('-'*100)

    def get_callback_counts_string(self, length):
        output  = f"\n\t{'Total callbacks':{length}}: {self.problem._callback_count}"
        output += f"\n\t{'Reused callbacks':{length}}: {self.problem._reused_callback_count}"

        for key in ['obj', 'grad', 'hess', 'con', 'jac']:
            count   = getattr(self.problem, f"_{key}_count")
            name    = f"{key} callbacks"
            output += f"\n\t{name:{length}}: {count}"

        return output

    def check_first_derivatives(self, x=None, step=1e-6, formulation='rs'):
        '''
        Check the first derivatives of the optimization problem using finite differences.

        Parameters
        ----------
        x : np.ndarray, optional
            The design variables at which the derivatives are to be checked.
            If not provided, the initial design variables are used.
        step : float, default=1e-6
            The step size for the finite differences.
        formulation : str, default='rs'
            Problem formulation used for the check.
            The values 'cfs' and 'surf' apply only to the SURF algorithm.
        '''
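        # Note (illustrative): the check below uses forward differences,
        #   grad_fd[i] = (f(x + h*e_i) - f(x)) / h,   with h = step,
        # and reports error norms relative to the norm of the finite-difference result.
        # A typical call, assuming a concrete 'optimizer' instance, is:
        #
        #   optimizer.check_first_derivatives(step=1e-6)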
        obj = self.obj
        grad = self.grad

        if x is None:
            x = self.problem.x0

        if self.problem.ny == 0:
            nx = self.problem.nx
            nc = self.problem.nc

        ###############################
        # Only for the SURF algorithm #
        ###############################
        else:
            nx = self.problem.nx + self.problem.ny
            nc = self.problem.nc + self.problem.nr
        ###############################

        constrained = False
        if nc != 0:
            constrained = True
            con = self.problem._compute_constraints
            jac = self.problem._compute_constraint_jacobian

        ###############################
        # Only for the SURF algorithm #
        ###############################
        if formulation in ('cfs', 'surf'):
            print("INSIDE cfs or surf")
            y = self.problem.solve_residual_equations(
                x[:self.problem.nx])
            x[self.problem.nx:] = y

            self.problem.formulation = 'fs'
        ###############################

        grad_exact = grad(x)
        if constrained:
            jac_exact = jac(x)

        h = step

        grad_fd = np.full((nx, ), -obj(x), dtype=float)
        if constrained:
            jac_fd = np.outer(-con(x), np.ones((nx, ), dtype=float))

        for i in range(nx):
            e = h * np.identity(nx)[i]

            grad_fd[i] += obj(x + e)
            if constrained:
                jac_fd[:, i] += con(x + e)

        grad_fd /= h
        if constrained:
            jac_fd /= h

        EPSILON = 1e-10

        # print('grad_exact:', grad_exact)
        # print('grad_fd:', grad_fd)
        # print('jac_exact:', jac_exact)
        # print('jac_fd:', jac_fd)

        grad_abs_error = np.absolute(grad_fd - grad_exact)

        # FD is assumed to give the actual gradient
        grad_rel_error = grad_abs_error / (np.linalg.norm(grad_fd, 2) + EPSILON)
        # grad_rel_error = grad_abs_error / (np.absolute(grad_fd) + EPSILON)

        if constrained:
            jac_abs_error = np.absolute(jac_fd - jac_exact)
            jac_rel_error = jac_abs_error / (np.linalg.norm(jac_fd, 'fro') + EPSILON)

            # jac_rel_error = jac_abs_error / (np.absolute(jac_fd) + EPSILON)
            # jac_rel_error = jac_abs_error / (np.absolute(jac_exact.toarray()) + EPSILON)

        # out_buffer = StringIO()

        header = "{0} | {1} | {2} | {3} | {4} "\
                            .format(
                                pad_name('Derivative type', 8, quotes=False),
                                pad_name('Calc norm', 10),
                                pad_name('FD norm', 10),
                                pad_name('Abs error norm', 10),
                                pad_name('Rel error norm', 10),
                            )

        # out_buffer.write('\n' + header + '\n')
        print('\n' + '-' * len(header))
        print(header)

        # out_buffer.write('-' * len(header) + '\n' + '\n')
        print('-' * len(header) + '\n')

        deriv_line = "{0} | {1:.4e} | {2:.4e} | {3:.4e}     | {4:.4e}    "
        grad_line = deriv_line.format(
            pad_name('Gradient', 15, quotes=False),
            np.linalg.norm(grad_exact),
            np.linalg.norm(grad_fd),
            np.linalg.norm(grad_abs_error),
            np.linalg.norm(grad_rel_error),
        )

        # out_buffer.write(grad_line + '\n')
        print(grad_line)

        if constrained:
            if isinstance(jac_exact, np.ndarray):
                jac_exact_norm = np.linalg.norm(jac_exact)
            else:
                jac_exact_norm = sp.sparse.linalg.norm(jac_exact)

            jac_line = deriv_line.format(
                pad_name('Jacobian', 15, quotes=False),
                jac_exact_norm,
                # np.linalg.norm(jac_exact),
                # sp.sparse.linalg.norm(jac_exact),
                np.linalg.norm(jac_fd),
                np.linalg.norm(jac_abs_error),
                np.linalg.norm(jac_rel_error),
            )

            # out_buffer.write(jac_line + '\n')
            print(jac_line)

        print('-' * len(header) + '\n')