freqtrade / freqtrade / build 6181253459

08 Sep 2023 06:04AM UTC. Coverage: 94.614% (+0.06%) from 94.556%.

Trigger: push (github-actions, via web-flow)
Merge pull request #9159 from stash86/fix-adjust
"remove old codes when we only can do partial entries"

2 of 2 new or added lines in 1 file covered (100.0%)
19114 of 20202 relevant lines covered (94.61%)
0.95 hits per line

Source file: /freqtrade/optimize/hyperopt.py (96.43% of lines covered; lines not hit by tests are annotated "# not covered" below)
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement

"""
This module contains the hyperopt logic
"""

import logging
import random
import sys
import warnings
from datetime import datetime, timezone
from math import ceil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import rapidjson
from colorama import init as colorama_init
from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects
from joblib.externals import cloudpickle
from pandas import DataFrame
from rich.progress import (BarColumn, MofNCompleteColumn, Progress, TaskProgressColumn, TextColumn,
                           TimeElapsedColumn, TimeRemainingColumn)

from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.data.converter import trim_dataframes
from freqtrade.data.history import get_timerange
from freqtrade.data.metrics import calculate_market_change
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
from freqtrade.optimize.backtesting import Backtesting
# Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_tools import (HyperoptStateContainer, HyperoptTools,
                                               hyperopt_serializer)
from freqtrade.optimize.optimize_reports import generate_strategy_stats
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver


# Suppress scikit-learn FutureWarnings from skopt
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    from skopt import Optimizer
    from skopt.space import Dimension

logger = logging.getLogger(__name__)


INITIAL_POINTS = 30

# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10

MAX_LOSS = 100000  # just a big enough number to be a bad result in loss optimization


class Hyperopt:
    """
    Hyperopt class; this class contains all the logic to run a hyperopt simulation.

    To start a hyperopt run:
    hyperopt = Hyperopt(config)
    hyperopt.start()
    """

    def __init__(self, config: Config) -> None:
        self.buy_space: List[Dimension] = []
        self.sell_space: List[Dimension] = []
        self.protection_space: List[Dimension] = []
        self.roi_space: List[Dimension] = []
        self.stoploss_space: List[Dimension] = []
        self.trailing_space: List[Dimension] = []
        self.max_open_trades_space: List[Dimension] = []
        self.dimensions: List[Dimension] = []

        self.config = config
        self.min_date: datetime
        self.max_date: datetime

        self.backtesting = Backtesting(self.config)
        self.pairlist = self.backtesting.pairlists.whitelist
        self.custom_hyperopt: HyperOptAuto
        self.analyze_per_epoch = self.config.get('analyze_per_epoch', False)
        HyperoptStateContainer.set_state(HyperoptState.STARTUP)

        if not self.config.get('hyperopt'):
            self.custom_hyperopt = HyperOptAuto(self.config)
        else:
            raise OperationalException(
                "Using separate Hyperopt files has been removed in 2021.9. Please convert "
                "your existing Hyperopt file to the new Hyperoptable strategy interface")

        self.backtesting._set_strategy(self.backtesting.strategylist[0])
        self.custom_hyperopt.strategy = self.backtesting.strategy

        self.hyperopt_pickle_magic(self.backtesting.strategy.__class__.__bases__)
        self.custom_hyperoptloss: IHyperOptLoss = HyperOptLossResolver.load_hyperoptloss(
            self.config)
        self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
        time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        strategy = str(self.config['strategy'])
        self.results_file: Path = (self.config['user_data_dir'] / 'hyperopt_results' /
                                   f'strategy_{strategy}_{time_now}.fthypt')
        self.data_pickle_file = (self.config['user_data_dir'] /
                                 'hyperopt_results' / 'hyperopt_tickerdata.pkl')
        self.total_epochs = config.get('epochs', 0)

        self.current_best_loss = 100

        self.clean_hyperopt()

        self.market_change = 0.0
        self.num_epochs_saved = 0
        self.current_best_epoch: Optional[Dict[str, Any]] = None

        # Use max_open_trades for hyperopt as well, unless --disable-max-market-positions is set
        if not self.config.get('use_max_market_positions', True):
            logger.debug('Ignoring max_open_trades (--disable-max-market-positions was used) ...')  # not covered
            self.backtesting.strategy.max_open_trades = float('inf')  # not covered
            config.update({'max_open_trades': self.backtesting.strategy.max_open_trades})  # not covered

        if HyperoptTools.has_space(self.config, 'sell'):
            # Make sure use_exit_signal is enabled
            self.config['use_exit_signal'] = True

        self.print_all = self.config.get('print_all', False)
        self.hyperopt_table_header = 0
        self.print_colorized = self.config.get('print_colorized', False)
        self.print_json = self.config.get('print_json', False)

    @staticmethod
    def get_lock_filename(config: Config) -> str:

        return str(config['user_data_dir'] / 'hyperopt.lock')

    def clean_hyperopt(self) -> None:
        """
        Remove hyperopt pickle files to restart hyperopt.
        """
        for f in [self.data_pickle_file, self.results_file]:
            p = Path(f)
            if p.is_file():
                logger.info(f"Removing `{p}`.")
                p.unlink()

    def hyperopt_pickle_magic(self, bases) -> None:
        """
        Hyperopt magic to allow strategy inheritance across files.
        For this to work properly, we need to register the module of the imported class
        with pickle by value.
        """
        for modules in bases:
            if modules.__name__ != 'IStrategy':
                cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])
                self.hyperopt_pickle_magic(modules.__bases__)
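    # Illustrative sketch (hypothetical names): if MyStrategy inherits from a
    # BaseStrategy defined in another user file, pickling MyStrategy for the
    # joblib workers could fail to resolve BaseStrategy's module. Registering
    # that module by value makes cloudpickle embed its source, e.g.:
    #
    #     cloudpickle.register_pickle_by_value(sys.modules[BaseStrategy.__module__])
    #
    # which is what the recursion above does for every base class except IStrategy.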

    def _get_params_dict(self, dimensions: List[Dimension], raw_params: List[Any]) -> Dict:

        # Ensure the number of dimensions matches
        # the number of parameters in the list.
        if len(raw_params) != len(dimensions):
            raise ValueError('Mismatch in number of search-space dimensions.')  # not covered

        # Return a dict where the keys are the names of the dimensions
        # and the values are taken from the list of parameters.
        return {d.name: v for d, v in zip(dimensions, raw_params)}
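    # Example (hypothetical dimensions): with
    #     dimensions = [Integer(20, 40, name='buy_rsi'), Real(-0.35, -0.02, name='stoploss')]
    #     raw_params = [30, -0.05]
    # this returns {'buy_rsi': 30, 'stoploss': -0.05}.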

    def _save_result(self, epoch: Dict) -> None:
        """
        Save hyperopt results to file.
        Store one line per epoch.
        While the file as a whole is not a valid json object, this allows easy appending.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open('a') as f:
            rapidjson.dump(epoch, f, default=hyperopt_serializer,
                           number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN)
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
                     f"saved to '{self.results_file}'.")
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
        file_dump_json(latest_filename, {'latest_hyperopt': str(self.results_file.name)},
                       log=False)
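    # The .fthypt file written above is newline-delimited JSON (one epoch per
    # line). A minimal sketch of reading it back, assuming `results_file` points
    # at such a file:
    #
    #     with results_file.open('r') as f:
    #         epochs = [rapidjson.loads(line) for line in f]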

    def _get_params_details(self, params: Dict) -> Dict:
        """
        Return the params for each space
        """
        result: Dict = {}

        if HyperoptTools.has_space(self.config, 'buy'):
            result['buy'] = {p.name: params.get(p.name) for p in self.buy_space}
        if HyperoptTools.has_space(self.config, 'sell'):
            result['sell'] = {p.name: params.get(p.name) for p in self.sell_space}
        if HyperoptTools.has_space(self.config, 'protection'):
            result['protection'] = {p.name: params.get(p.name) for p in self.protection_space}
        if HyperoptTools.has_space(self.config, 'roi'):
            result['roi'] = {str(k): v for k, v in
                             self.custom_hyperopt.generate_roi_table(params).items()}
        if HyperoptTools.has_space(self.config, 'stoploss'):
            result['stoploss'] = {p.name: params.get(p.name) for p in self.stoploss_space}
        if HyperoptTools.has_space(self.config, 'trailing'):
            result['trailing'] = self.custom_hyperopt.generate_trailing_params(params)
        if HyperoptTools.has_space(self.config, 'trades'):
            result['max_open_trades'] = {
                'max_open_trades': self.backtesting.strategy.max_open_trades
                if self.backtesting.strategy.max_open_trades != float('inf') else -1}

        return result

    def _get_no_optimize_details(self) -> Dict[str, Any]:
        """
        Get non-optimized parameters
        """
        result: Dict[str, Any] = {}
        strategy = self.backtesting.strategy
        if not HyperoptTools.has_space(self.config, 'roi'):
            result['roi'] = {str(k): v for k, v in strategy.minimal_roi.items()}
        if not HyperoptTools.has_space(self.config, 'stoploss'):
            result['stoploss'] = {'stoploss': strategy.stoploss}
        if not HyperoptTools.has_space(self.config, 'trailing'):
            result['trailing'] = {
                'trailing_stop': strategy.trailing_stop,
                'trailing_stop_positive': strategy.trailing_stop_positive,
                'trailing_stop_positive_offset': strategy.trailing_stop_positive_offset,
                'trailing_only_offset_is_reached': strategy.trailing_only_offset_is_reached,
            }
        if not HyperoptTools.has_space(self.config, 'trades'):
            result['max_open_trades'] = {'max_open_trades': strategy.max_open_trades}
        return result

    def print_results(self, results) -> None:
        """
        Log results if they are better than any previous evaluation
        TODO: this should be moved to HyperoptTools too
        """
        is_best = results['is_best']

        if self.print_all or is_best:
            print(
                HyperoptTools.get_result_table(
                    self.config, results, self.total_epochs,
                    self.print_all, self.print_colorized,
                    self.hyperopt_table_header
                )
            )
            self.hyperopt_table_header = 2

    def init_spaces(self):
        """
        Assign the dimensions in the hyperoptimization space.
        """
        if HyperoptTools.has_space(self.config, 'protection'):
            # Protections can only be optimized when using the Parameter interface
            logger.debug("Hyperopt has 'protection' space")
            # Enable Protections if protection space is selected.
            self.config['enable_protections'] = True
            self.backtesting.enable_protections = True
            self.protection_space = self.custom_hyperopt.protection_space()

        if HyperoptTools.has_space(self.config, 'buy'):
            logger.debug("Hyperopt has 'buy' space")
            self.buy_space = self.custom_hyperopt.buy_indicator_space()

        if HyperoptTools.has_space(self.config, 'sell'):
            logger.debug("Hyperopt has 'sell' space")
            self.sell_space = self.custom_hyperopt.sell_indicator_space()

        if HyperoptTools.has_space(self.config, 'roi'):
            logger.debug("Hyperopt has 'roi' space")
            self.roi_space = self.custom_hyperopt.roi_space()

        if HyperoptTools.has_space(self.config, 'stoploss'):
            logger.debug("Hyperopt has 'stoploss' space")
            self.stoploss_space = self.custom_hyperopt.stoploss_space()

        if HyperoptTools.has_space(self.config, 'trailing'):
            logger.debug("Hyperopt has 'trailing' space")
            self.trailing_space = self.custom_hyperopt.trailing_space()

        if HyperoptTools.has_space(self.config, 'trades'):
            logger.debug("Hyperopt has 'trades' space")
            self.max_open_trades_space = self.custom_hyperopt.max_open_trades_space()

        self.dimensions = (self.buy_space + self.sell_space + self.protection_space
                           + self.roi_space + self.stoploss_space + self.trailing_space
                           + self.max_open_trades_space)

    def assign_params(self, params_dict: Dict, category: str) -> None:
        """
        Assign hyperoptable parameters
        """
        for attr_name, attr in self.backtesting.strategy.enumerate_parameters(category):
            if attr.optimize:
                # noinspection PyProtectedMember
                attr.value = params_dict[attr_name]

    def generate_optimizer(self, raw_params: List[Any]) -> Dict[str, Any]:
        """
        Objective function used by the optimizer.
        Called once per epoch to optimize whatever is configured.
        Keep this function as optimized as possible!
        """
        HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
        backtest_start_time = datetime.now(timezone.utc)
        params_dict = self._get_params_dict(self.dimensions, raw_params)

        # Apply parameters
        if HyperoptTools.has_space(self.config, 'buy'):
            self.assign_params(params_dict, 'buy')

        if HyperoptTools.has_space(self.config, 'sell'):
            self.assign_params(params_dict, 'sell')

        if HyperoptTools.has_space(self.config, 'protection'):
            self.assign_params(params_dict, 'protection')

        if HyperoptTools.has_space(self.config, 'roi'):
            self.backtesting.strategy.minimal_roi = (
                self.custom_hyperopt.generate_roi_table(params_dict))

        if HyperoptTools.has_space(self.config, 'stoploss'):
            self.backtesting.strategy.stoploss = params_dict['stoploss']

        if HyperoptTools.has_space(self.config, 'trailing'):
            d = self.custom_hyperopt.generate_trailing_params(params_dict)
            self.backtesting.strategy.trailing_stop = d['trailing_stop']
            self.backtesting.strategy.trailing_stop_positive = d['trailing_stop_positive']
            self.backtesting.strategy.trailing_stop_positive_offset = \
                d['trailing_stop_positive_offset']
            self.backtesting.strategy.trailing_only_offset_is_reached = \
                d['trailing_only_offset_is_reached']

        if HyperoptTools.has_space(self.config, 'trades'):
            if self.config["stake_amount"] == "unlimited" and \
                    (params_dict['max_open_trades'] == -1 or params_dict['max_open_trades'] == 0):
                # Ignore unlimited max open trades if stake amount is unlimited
                params_dict.update({'max_open_trades': self.config['max_open_trades']})

            updated_max_open_trades = int(params_dict['max_open_trades']) \
                if (params_dict['max_open_trades'] != -1
                    and params_dict['max_open_trades'] != 0) else float('inf')

            self.config.update({'max_open_trades': updated_max_open_trades})

            self.backtesting.strategy.max_open_trades = updated_max_open_trades

        with self.data_pickle_file.open('rb') as f:
            processed = load(f, mmap_mode='r')
            if self.analyze_per_epoch:
                # Data is not yet analyzed, rerun populate_indicators.
                processed = self.advise_and_trim(processed)  # not covered

        bt_results = self.backtesting.backtest(
            processed=processed,
            start_date=self.min_date,
            end_date=self.max_date
        )
        backtest_end_time = datetime.now(timezone.utc)
        bt_results.update({
            'backtest_start_time': int(backtest_start_time.timestamp()),
            'backtest_end_time': int(backtest_end_time.timestamp()),
        })

        return self._get_results_dict(bt_results, self.min_date, self.max_date,
                                      params_dict,
                                      processed=processed)

    def _get_results_dict(self, backtesting_results, min_date, max_date,
                          params_dict, processed: Dict[str, DataFrame]
                          ) -> Dict[str, Any]:
        params_details = self._get_params_details(params_dict)

        strat_stats = generate_strategy_stats(
            self.pairlist, self.backtesting.strategy.get_strategy_name(),
            backtesting_results, min_date, max_date, market_change=self.market_change,
            is_hyperopt=True,
        )
        results_explanation = HyperoptTools.format_results_explanation_string(
            strat_stats, self.config['stake_currency'])

        not_optimized = self.backtesting.strategy.get_no_optimize_params()
        not_optimized = deep_merge_dicts(not_optimized, self._get_no_optimize_details())

        trade_count = strat_stats['total_trades']
        total_profit = strat_stats['profit_total']

        # If this evaluation contains too few trades to be
        # interesting -- consider it 'bad' (assign the max. loss value)
        # in order to cast this hyperspace point away from the optimization
        # path. We do not want to optimize 'hodl' strategies.
        loss: float = MAX_LOSS
        if trade_count >= self.config['hyperopt_min_trades']:
            loss = self.calculate_loss(results=backtesting_results['results'],
                                       trade_count=trade_count,
                                       min_date=min_date, max_date=max_date,
                                       config=self.config, processed=processed,
                                       backtest_stats=strat_stats)
        return {
            'loss': loss,
            'params_dict': params_dict,
            'params_details': params_details,
            'params_not_optimized': not_optimized,
            'results_metrics': strat_stats,
            'results_explanation': results_explanation,
            'total_profit': total_profit,
        }

    def get_optimizer(self, dimensions: List[Dimension], cpu_count) -> Optimizer:
        estimator = self.custom_hyperopt.generate_estimator(dimensions=dimensions)

        acq_optimizer = "sampling"
        if isinstance(estimator, str):
            if estimator not in ("GP", "RF", "ET", "GBRT"):
                raise OperationalException(f"Estimator {estimator} not supported.")
            else:
                acq_optimizer = "auto"

        logger.info(f"Using estimator {estimator}.")
        return Optimizer(
            dimensions,
            base_estimator=estimator,
            acq_optimizer=acq_optimizer,
            n_initial_points=INITIAL_POINTS,
            acq_optimizer_kwargs={'n_jobs': cpu_count},
            random_state=self.random_state,
            model_queue_size=SKOPT_MODEL_QUEUE_SIZE,
        )
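    # A minimal sketch of the ask/tell protocol this Optimizer is driven with in
    # start() below (`opt` and `objective` are placeholders):
    #
    #     asked = opt.ask(n_points=4)             # propose candidate parameter sets
    #     losses = [objective(x) for x in asked]  # evaluate each candidate
    #     opt.tell(asked, losses)                 # feed losses back into the model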

    def run_optimizer_parallel(
            self, parallel: Parallel, asked: List[List]) -> List[Dict[str, Any]]:
        """ Start the optimizer in parallel """
        return parallel(delayed(
                        wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked)

    def _set_random_state(self, random_state: Optional[int]) -> int:
        return random_state or random.randint(1, 2**16 - 1)

    def advise_and_trim(self, data: Dict[str, DataFrame]) -> Dict[str, DataFrame]:
        preprocessed = self.backtesting.strategy.advise_all_indicators(data)

        # Trim startup period from analyzed dataframe to get correct dates for output.
        # This is only used to keep track of min/max date after trimming.
        # The result is NOT returned from this method; actual trimming happens in backtesting.
        trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
        self.min_date, self.max_date = get_timerange(trimmed)
        if not self.market_change:
            self.market_change = calculate_market_change(trimmed, 'close')

        # Real trimming will happen as part of backtesting.
        return preprocessed
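    # Illustrative note (hypothetical numbers): with required_startup = 30, the
    # first 30 warm-up candles are excluded from the min_date/max_date
    # calculation, so indicator warm-up does not inflate the reported range.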

    def prepare_hyperopt_data(self) -> None:
        HyperoptStateContainer.set_state(HyperoptState.DATALOAD)
        data, self.timerange = self.backtesting.load_bt_data()
        self.backtesting.load_bt_data_detail()
        logger.info("Dataload complete. Calculating indicators")

        if not self.analyze_per_epoch:
            HyperoptStateContainer.set_state(HyperoptState.INDICATORS)

            preprocessed = self.advise_and_trim(data)

            logger.info(f'Hyperopting with data from '
                        f'{self.min_date.strftime(DATETIME_PRINT_FORMAT)} '
                        f'up to {self.max_date.strftime(DATETIME_PRINT_FORMAT)} '
                        f'({(self.max_date - self.min_date).days} days)..')
            # Store non-trimmed data - will be trimmed after signal generation.
            dump(preprocessed, self.data_pickle_file)
        else:
            dump(data, self.data_pickle_file)

    def get_asked_points(self, n_points: int) -> Tuple[List[List[Any]], List[bool]]:
        """
        Enforce that points returned from `self.opt.ask` have not already been evaluated

        Steps:
        1. Try to get points using `self.opt.ask` first
        2. Discard the points that have already been evaluated
        3. Retry using `self.opt.ask` up to 3 times
        4. If some points are still missing with respect to `n_points`, randomly sample some points
        5. Repeat until at least `n_points` points are in the `asked_non_tried` list
        6. Return a list with length truncated at `n_points`
        """
        def unique_list(a_list):
            new_list = []
            for item in a_list:
                if item not in new_list:
                    new_list.append(item)
            return new_list
        i = 0
        asked_non_tried: List[List[Any]] = []
        is_random_non_tried: List[bool] = []
        while i < 5 and len(asked_non_tried) < n_points:
            if i < 3:
                self.opt.cache_ = {}
                asked = unique_list(self.opt.ask(n_points=n_points * 5))
                is_random = [False for _ in range(len(asked))]
            else:
                asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))  # not covered
                is_random = [True for _ in range(len(asked))]  # not covered
            is_random_non_tried += [rand for x, rand in zip(asked, is_random)
                                    if x not in self.opt.Xi
                                    and x not in asked_non_tried]
            asked_non_tried += [x for x in asked
                                if x not in self.opt.Xi
                                and x not in asked_non_tried]
            i += 1

        if asked_non_tried:
            return (
                asked_non_tried[:min(len(asked_non_tried), n_points)],
                is_random_non_tried[:min(len(asked_non_tried), n_points)]
            )
        else:
            return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]  # not covered
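    # Sketch of the deduplication intent (hypothetical values): if opt.ask()
    # proposes [[30, -0.05], [30, -0.05], [25, -0.10]] and [30, -0.05] is
    # already in opt.Xi, only [25, -0.10] survives into asked_non_tried.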

    def evaluate_result(self, val: Dict[str, Any], current: int, is_random: bool):
        """
        Evaluate results returned from generate_optimizer
        """
        val['current_epoch'] = current
        val['is_initial_point'] = current <= INITIAL_POINTS

        logger.debug("Optimizer epoch evaluated: %s", val)

        is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
        # This value is assigned here and not in the optimization method
        # to keep proper order in the list of results. That's because
        # evaluations can take different time. Here they are aligned in the
        # order they will be shown to the user.
        val['is_best'] = is_best
        val['is_random'] = is_random
        self.print_results(val)

        if is_best:
            self.current_best_loss = val['loss']
            self.current_best_epoch = val

        self._save_result(val)

    def start(self) -> None:
        self.random_state = self._set_random_state(self.config.get('hyperopt_random_state'))
        logger.info(f"Using optimizer random state: {self.random_state}")
        self.hyperopt_table_header = -1
        # Initialize spaces ...
        self.init_spaces()

        self.prepare_hyperopt_data()

        # We don't need the exchange instance anymore while running hyperopt
        self.backtesting.exchange.close()
        self.backtesting.exchange._api = None
        self.backtesting.exchange._api_async = None
        self.backtesting.exchange.loop = None  # type: ignore
        self.backtesting.exchange._loop_lock = None  # type: ignore
        self.backtesting.exchange._cache_lock = None  # type: ignore
        # self.backtesting.exchange = None  # type: ignore
        self.backtesting.pairlists = None  # type: ignore

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get('hyperopt_jobs', -1)
        logger.info(f'Number of parallel jobs set as: {config_jobs}')

        self.opt = self.get_optimizer(self.dimensions, config_jobs)

        if self.print_colorized:
            colorama_init(autoreset=True)  # not covered

        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(f'Effective number of parallel workers used: {jobs}')

                # Define progressbar
                with Progress(
                    TextColumn("[progress.description]{task.description}"),
                    BarColumn(bar_width=None),
                    MofNCompleteColumn(),
                    TaskProgressColumn(),
                    "•",
                    TimeElapsedColumn(),
                    "•",
                    TimeRemainingColumn(),
                    expand=True,
                ) as pbar:
                    task = pbar.add_task("Epochs", total=self.total_epochs)

                    start = 0

                    if self.analyze_per_epoch:
                        # The first analysis is not run in parallel mode when using
                        # --analyze-per-epoch. This allows dataprovider to load its
                        # informative cache.
                        asked, is_random = self.get_asked_points(n_points=1)
                        f_val0 = self.generate_optimizer(asked[0])
                        self.opt.tell(asked, [f_val0['loss']])
                        self.evaluate_result(f_val0, 1, is_random[0])
                        pbar.update(task, advance=1)
                        start += 1

                    evals = ceil((self.total_epochs - start) / jobs)
                    for i in range(evals):
                        # Correct the number of epochs to be processed for the last
                        # iteration (should not exceed self.total_epochs in total)
                        n_rest = (i + 1) * jobs - (self.total_epochs - start)
                        current_jobs = jobs - n_rest if n_rest > 0 else jobs

                        asked, is_random = self.get_asked_points(n_points=current_jobs)
                        f_val = self.run_optimizer_parallel(parallel, asked)
                        self.opt.tell(asked, [v['loss'] for v in f_val])

                        for j, val in enumerate(f_val):
                            # Use human-friendly indexes here (starting from 1)
                            current = i * jobs + j + 1 + start

                            self.evaluate_result(val, current, is_random[j])
                            pbar.update(task, advance=1)

        except KeyboardInterrupt:  # not covered
            print('User interrupted..')  # not covered

        logger.info(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
                    f"saved to '{self.results_file}'.")

        if self.current_best_epoch:
            HyperoptTools.try_export_params(
                self.config,
                self.backtesting.strategy.get_strategy_name(),
                self.current_best_epoch)

            HyperoptTools.show_epoch_details(self.current_best_epoch, self.total_epochs,
                                             self.print_json)
        else:
            # This is printed when Ctrl+C is pressed quickly, before first epochs have
            # a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")  # not covered