freqtrade / freqtrade · build 3645476382 (push, via github-actions on GitHub) · pending completion
Merge pull request #7665 from freqtrade/update_ci

15895 of 16625 relevant lines covered (95.61%) · 0.96 hits per line

Source file: /freqtrade/optimize/hyperopt.py · 96.36% of relevant lines covered
Lines not covered in this build: 123-124, 166, 335, 481-482, 497, 501, 573, 618-619, 635

# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement

"""
This module contains the hyperopt logic
"""

import logging
import random
import sys
import warnings
from datetime import datetime, timezone
from math import ceil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import progressbar
import rapidjson
from colorama import Fore, Style
from colorama import init as colorama_init
from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects
from joblib.externals import cloudpickle
from pandas import DataFrame

from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.data.converter import trim_dataframes
from freqtrade.data.history import get_timerange
from freqtrade.data.metrics import calculate_market_change
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
from freqtrade.optimize.backtesting import Backtesting
# Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_tools import (HyperoptStateContainer, HyperoptTools,
                                               hyperopt_serializer)
from freqtrade.optimize.optimize_reports import generate_strategy_stats
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver


# Suppress scikit-learn FutureWarnings from skopt
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    from skopt import Optimizer
    from skopt.space import Dimension

progressbar.streams.wrap_stderr()
progressbar.streams.wrap_stdout()
logger = logging.getLogger(__name__)


INITIAL_POINTS = 30

# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10

MAX_LOSS = 100000  # just a big enough number to be a bad result in loss optimization


class Hyperopt:
    """
    Hyperopt class; this class contains all the logic to run a hyperopt simulation

    To start a hyperopt run:
    hyperopt = Hyperopt(config)
    hyperopt.start()
    """

    def __init__(self, config: Config) -> None:
        self.buy_space: List[Dimension] = []
        self.sell_space: List[Dimension] = []
        self.protection_space: List[Dimension] = []
        self.roi_space: List[Dimension] = []
        self.stoploss_space: List[Dimension] = []
        self.trailing_space: List[Dimension] = []
        self.dimensions: List[Dimension] = []

        self.config = config
        self.min_date: datetime
        self.max_date: datetime

        self.backtesting = Backtesting(self.config)
        self.pairlist = self.backtesting.pairlists.whitelist
        self.custom_hyperopt: HyperOptAuto
        self.analyze_per_epoch = self.config.get('analyze_per_epoch', False)
        HyperoptStateContainer.set_state(HyperoptState.STARTUP)

        if not self.config.get('hyperopt'):
            self.custom_hyperopt = HyperOptAuto(self.config)
        else:
            raise OperationalException(
                "Using separate Hyperopt files has been removed in 2021.9. Please convert "
                "your existing Hyperopt file to the new Hyperoptable strategy interface")

        self.backtesting._set_strategy(self.backtesting.strategylist[0])
        self.custom_hyperopt.strategy = self.backtesting.strategy

        self.hyperopt_pickle_magic(self.backtesting.strategy.__class__.__bases__)
        self.custom_hyperoptloss: IHyperOptLoss = HyperOptLossResolver.load_hyperoptloss(
            self.config)
        self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
        time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        strategy = str(self.config['strategy'])
        self.results_file: Path = (self.config['user_data_dir'] / 'hyperopt_results' /
                                   f'strategy_{strategy}_{time_now}.fthypt')
        self.data_pickle_file = (self.config['user_data_dir'] /
                                 'hyperopt_results' / 'hyperopt_tickerdata.pkl')
        self.total_epochs = config.get('epochs', 0)

        self.current_best_loss = 100

        self.clean_hyperopt()

        self.market_change = 0.0
        self.num_epochs_saved = 0
        self.current_best_epoch: Optional[Dict[str, Any]] = None

        # Use max_open_trades for hyperopt as well, unless --disable-max-market-positions is set
        if self.config.get('use_max_market_positions', True):
            self.max_open_trades = self.config['max_open_trades']
        else:
            logger.debug('Ignoring max_open_trades (--disable-max-market-positions was used) ...')
            self.max_open_trades = 0

        if HyperoptTools.has_space(self.config, 'sell'):
            # Make sure use_exit_signal is enabled
            self.config['use_exit_signal'] = True

        self.print_all = self.config.get('print_all', False)
        self.hyperopt_table_header = 0
        self.print_colorized = self.config.get('print_colorized', False)
        self.print_json = self.config.get('print_json', False)

    @staticmethod
    def get_lock_filename(config: Config) -> str:

        return str(config['user_data_dir'] / 'hyperopt.lock')

    def clean_hyperopt(self) -> None:
        """
        Remove hyperopt pickle files to restart hyperopt.
        """
        for f in [self.data_pickle_file, self.results_file]:
            p = Path(f)
            if p.is_file():
                logger.info(f"Removing `{p}`.")
                p.unlink()

    def hyperopt_pickle_magic(self, bases) -> None:
        """
        Hyperopt magic to allow strategy inheritance across files.
        For this to work properly, we need to register the module of the imported class
        to be pickled by value.
        """
        for modules in bases:
            if modules.__name__ != 'IStrategy':
                cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])
                self.hyperopt_pickle_magic(modules.__bases__)

    def _get_params_dict(self, dimensions: List[Dimension], raw_params: List[Any]) -> Dict:

        # Ensure the number of dimensions matches
        # the number of parameters in the list.
        if len(raw_params) != len(dimensions):
            raise ValueError('Mismatch in number of search-space dimensions.')

        # Return a dict where the keys are the names of the dimensions
        # and the values are taken from the list of parameters.
        return {d.name: v for d, v in zip(dimensions, raw_params)}

    def _save_result(self, epoch: Dict) -> None:
        """
        Save hyperopt results to file
        Store one line per epoch.
        While not a valid json object - this allows appending easily.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open('a') as f:
            rapidjson.dump(epoch, f, default=hyperopt_serializer,
                           number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN)
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
                     f"saved to '{self.results_file}'.")
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
        file_dump_json(latest_filename, {'latest_hyperopt': str(self.results_file.name)},
                       log=False)

    def _get_params_details(self, params: Dict) -> Dict:
        """
        Return the params for each space
        """
        result: Dict = {}

        if HyperoptTools.has_space(self.config, 'buy'):
            result['buy'] = {p.name: params.get(p.name) for p in self.buy_space}
        if HyperoptTools.has_space(self.config, 'sell'):
            result['sell'] = {p.name: params.get(p.name) for p in self.sell_space}
        if HyperoptTools.has_space(self.config, 'protection'):
            result['protection'] = {p.name: params.get(p.name) for p in self.protection_space}
        if HyperoptTools.has_space(self.config, 'roi'):
            result['roi'] = {str(k): v for k, v in
                             self.custom_hyperopt.generate_roi_table(params).items()}
        if HyperoptTools.has_space(self.config, 'stoploss'):
            result['stoploss'] = {p.name: params.get(p.name) for p in self.stoploss_space}
        if HyperoptTools.has_space(self.config, 'trailing'):
            result['trailing'] = self.custom_hyperopt.generate_trailing_params(params)

        return result

    def _get_no_optimize_details(self) -> Dict[str, Any]:
        """
        Get non-optimized parameters
        """
        result: Dict[str, Any] = {}
        strategy = self.backtesting.strategy
        if not HyperoptTools.has_space(self.config, 'roi'):
            result['roi'] = {str(k): v for k, v in strategy.minimal_roi.items()}
        if not HyperoptTools.has_space(self.config, 'stoploss'):
            result['stoploss'] = {'stoploss': strategy.stoploss}
        if not HyperoptTools.has_space(self.config, 'trailing'):
            result['trailing'] = {
                'trailing_stop': strategy.trailing_stop,
                'trailing_stop_positive': strategy.trailing_stop_positive,
                'trailing_stop_positive_offset': strategy.trailing_stop_positive_offset,
                'trailing_only_offset_is_reached': strategy.trailing_only_offset_is_reached,
            }
        return result

    def print_results(self, results) -> None:
        """
        Log results if it is better than any previous evaluation
        TODO: this should be moved to HyperoptTools too
        """
        is_best = results['is_best']

        if self.print_all or is_best:
            print(
                HyperoptTools.get_result_table(
                    self.config, results, self.total_epochs,
                    self.print_all, self.print_colorized,
                    self.hyperopt_table_header
                )
            )
            self.hyperopt_table_header = 2

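    # init_spaces() below collects the per-space dimension lists and concatenates them into
    # self.dimensions, the complete search space that is later handed to the optimizer.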
    def init_spaces(self):
        """
        Assign the dimensions in the hyperoptimization space.
        """
        if HyperoptTools.has_space(self.config, 'protection'):
            # Protections can only be optimized when using the Parameter interface
            logger.debug("Hyperopt has 'protection' space")
            # Enable Protections if protection space is selected.
            self.config['enable_protections'] = True
            self.backtesting.enable_protections = True
            self.protection_space = self.custom_hyperopt.protection_space()

        if HyperoptTools.has_space(self.config, 'buy'):
            logger.debug("Hyperopt has 'buy' space")
            self.buy_space = self.custom_hyperopt.buy_indicator_space()

        if HyperoptTools.has_space(self.config, 'sell'):
            logger.debug("Hyperopt has 'sell' space")
            self.sell_space = self.custom_hyperopt.sell_indicator_space()

        if HyperoptTools.has_space(self.config, 'roi'):
            logger.debug("Hyperopt has 'roi' space")
            self.roi_space = self.custom_hyperopt.roi_space()

        if HyperoptTools.has_space(self.config, 'stoploss'):
            logger.debug("Hyperopt has 'stoploss' space")
            self.stoploss_space = self.custom_hyperopt.stoploss_space()

        if HyperoptTools.has_space(self.config, 'trailing'):
            logger.debug("Hyperopt has 'trailing' space")
            self.trailing_space = self.custom_hyperopt.trailing_space()

        self.dimensions = (self.buy_space + self.sell_space + self.protection_space
                           + self.roi_space + self.stoploss_space + self.trailing_space)

    def assign_params(self, params_dict: Dict, category: str) -> None:
        """
        Assign hyperoptable parameters
        """
        for attr_name, attr in self.backtesting.strategy.enumerate_parameters(category):
            if attr.optimize:
                # noinspection PyProtectedMember
                attr.value = params_dict[attr_name]

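    # Each call to generate_optimizer() evaluates one candidate parameter set: it applies the
    # parameters to the strategy, runs a backtest over the prepared data, and returns a dict
    # whose 'loss' value start() later feeds back to the optimizer via opt.tell().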
    def generate_optimizer(self, raw_params: List[Any]) -> Dict[str, Any]:
        """
        Objective function used by the optimizer.
        Called once per epoch to optimize whatever is configured.
        Keep this function as optimized as possible!
        """
        HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
        backtest_start_time = datetime.now(timezone.utc)
        params_dict = self._get_params_dict(self.dimensions, raw_params)

        # Apply parameters
        if HyperoptTools.has_space(self.config, 'buy'):
            self.assign_params(params_dict, 'buy')

        if HyperoptTools.has_space(self.config, 'sell'):
            self.assign_params(params_dict, 'sell')

        if HyperoptTools.has_space(self.config, 'protection'):
            self.assign_params(params_dict, 'protection')

        if HyperoptTools.has_space(self.config, 'roi'):
            self.backtesting.strategy.minimal_roi = (
                self.custom_hyperopt.generate_roi_table(params_dict))

        if HyperoptTools.has_space(self.config, 'stoploss'):
            self.backtesting.strategy.stoploss = params_dict['stoploss']

        if HyperoptTools.has_space(self.config, 'trailing'):
            d = self.custom_hyperopt.generate_trailing_params(params_dict)
            self.backtesting.strategy.trailing_stop = d['trailing_stop']
            self.backtesting.strategy.trailing_stop_positive = d['trailing_stop_positive']
            self.backtesting.strategy.trailing_stop_positive_offset = \
                d['trailing_stop_positive_offset']
            self.backtesting.strategy.trailing_only_offset_is_reached = \
                d['trailing_only_offset_is_reached']

        with self.data_pickle_file.open('rb') as f:
            processed = load(f, mmap_mode='r')
            if self.analyze_per_epoch:
                # Data is not yet analyzed, rerun populate_indicators.
                processed = self.advise_and_trim(processed)

        bt_results = self.backtesting.backtest(
            processed=processed,
            start_date=self.min_date,
            end_date=self.max_date,
            max_open_trades=self.max_open_trades,
        )
        backtest_end_time = datetime.now(timezone.utc)
        bt_results.update({
            'backtest_start_time': int(backtest_start_time.timestamp()),
            'backtest_end_time': int(backtest_end_time.timestamp()),
        })

        return self._get_results_dict(bt_results, self.min_date, self.max_date,
                                      params_dict,
                                      processed=processed)

    def _get_results_dict(self, backtesting_results, min_date, max_date,
                          params_dict, processed: Dict[str, DataFrame]
                          ) -> Dict[str, Any]:
        params_details = self._get_params_details(params_dict)

        strat_stats = generate_strategy_stats(
            self.pairlist, self.backtesting.strategy.get_strategy_name(),
            backtesting_results, min_date, max_date, market_change=self.market_change
        )
        results_explanation = HyperoptTools.format_results_explanation_string(
            strat_stats, self.config['stake_currency'])

        not_optimized = self.backtesting.strategy.get_no_optimize_params()
        not_optimized = deep_merge_dicts(not_optimized, self._get_no_optimize_details())

        trade_count = strat_stats['total_trades']
        total_profit = strat_stats['profit_total']

        # If this evaluation contains too few trades to be interesting,
        # consider it 'bad' (assign the maximum loss value) in order to push
        # this hyperspace point away from the optimization path.
        # We do not want to optimize 'hodl' strategies.
        loss: float = MAX_LOSS
        if trade_count >= self.config['hyperopt_min_trades']:
            loss = self.calculate_loss(results=backtesting_results['results'],
                                       trade_count=trade_count,
                                       min_date=min_date, max_date=max_date,
                                       config=self.config, processed=processed,
                                       backtest_stats=strat_stats)
        return {
            'loss': loss,
            'params_dict': params_dict,
            'params_details': params_details,
            'params_not_optimized': not_optimized,
            'results_metrics': strat_stats,
            'results_explanation': results_explanation,
            'total_profit': total_profit,
        }

    def get_optimizer(self, dimensions: List[Dimension], cpu_count) -> Optimizer:
        estimator = self.custom_hyperopt.generate_estimator(dimensions=dimensions)

        acq_optimizer = "sampling"
        if isinstance(estimator, str):
            if estimator not in ("GP", "RF", "ET", "GBRT"):
                raise OperationalException(f"Estimator {estimator} not supported.")
            else:
                acq_optimizer = "auto"

        logger.info(f"Using estimator {estimator}.")
        return Optimizer(
            dimensions,
            base_estimator=estimator,
            acq_optimizer=acq_optimizer,
            n_initial_points=INITIAL_POINTS,
            acq_optimizer_kwargs={'n_jobs': cpu_count},
            random_state=self.random_state,
            model_queue_size=SKOPT_MODEL_QUEUE_SIZE,
        )

    def run_optimizer_parallel(
            self, parallel: Parallel, asked: List[List]) -> List[Dict[str, Any]]:
        """ Start optimizer in a parallel way """
        return parallel(delayed(
                        wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked)

    def _set_random_state(self, random_state: Optional[int]) -> int:
        return random_state or random.randint(1, 2**16 - 1)

    def advise_and_trim(self, data: Dict[str, DataFrame]) -> Dict[str, DataFrame]:
        preprocessed = self.backtesting.strategy.advise_all_indicators(data)

        # Trim startup period from analyzed dataframe to get correct dates for output.
        trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
        self.min_date, self.max_date = get_timerange(trimmed)
        if not self.market_change:
            self.market_change = calculate_market_change(trimmed, 'close')

        # Real trimming will happen as part of backtesting.
        return preprocessed

    def prepare_hyperopt_data(self) -> None:
        HyperoptStateContainer.set_state(HyperoptState.DATALOAD)
        data, self.timerange = self.backtesting.load_bt_data()
        self.backtesting.load_bt_data_detail()
        logger.info("Dataload complete. Calculating indicators")

        if not self.analyze_per_epoch:
            HyperoptStateContainer.set_state(HyperoptState.INDICATORS)

            preprocessed = self.advise_and_trim(data)

            logger.info(f'Hyperopting with data from '
                        f'{self.min_date.strftime(DATETIME_PRINT_FORMAT)} '
                        f'up to {self.max_date.strftime(DATETIME_PRINT_FORMAT)} '
                        f'({(self.max_date - self.min_date).days} days)..')
            # Store non-trimmed data - will be trimmed after signal generation.
            dump(preprocessed, self.data_pickle_file)
        else:
            dump(data, self.data_pickle_file)

    def get_asked_points(self, n_points: int) -> Tuple[List[List[Any]], List[bool]]:
        """
        Ensure that points returned from `self.opt.ask` have not already been evaluated

        Steps:
        1. Try to get points using `self.opt.ask` first
        2. Discard the points that have already been evaluated
        3. Retry using `self.opt.ask` up to 3 times
        4. If some points are still missing with respect to `n_points`, randomly sample some points
        5. Repeat until there are at least `n_points` points in the `asked_non_tried` list
        6. Return a list with length truncated at `n_points`
        """
        def unique_list(a_list):
            new_list = []
            for item in a_list:
                if item not in new_list:
                    new_list.append(item)
            return new_list
        i = 0
        asked_non_tried: List[List[Any]] = []
        is_random_non_tried: List[bool] = []
        while i < 5 and len(asked_non_tried) < n_points:
            if i < 3:
                self.opt.cache_ = {}
                asked = unique_list(self.opt.ask(n_points=n_points * 5))
                is_random = [False for _ in range(len(asked))]
            else:
                asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
                is_random = [True for _ in range(len(asked))]
            is_random_non_tried += [rand for x, rand in zip(asked, is_random)
                                    if x not in self.opt.Xi
                                    and x not in asked_non_tried]
            asked_non_tried += [x for x in asked
                                if x not in self.opt.Xi
                                and x not in asked_non_tried]
            i += 1

        if asked_non_tried:
            return (
                asked_non_tried[:min(len(asked_non_tried), n_points)],
                is_random_non_tried[:min(len(asked_non_tried), n_points)]
            )
        else:
            return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]

    def get_progressbar_widgets(self):
        if self.print_colorized:
            widgets = [
                ' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs),
                ' (', progressbar.Percentage(), ')] ',
                progressbar.Bar(marker=progressbar.AnimatedMarker(
                    fill='\N{FULL BLOCK}',
                    fill_wrap=Fore.GREEN + '{}' + Fore.RESET,
                    marker_wrap=Style.BRIGHT + '{}' + Style.RESET_ALL,
                )),
                ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
            ]
        else:
            widgets = [
                ' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs),
                ' (', progressbar.Percentage(), ')] ',
                progressbar.Bar(marker=progressbar.AnimatedMarker(
                    fill='\N{FULL BLOCK}',
                )),
                ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
            ]
        return widgets

    def evaluate_result(self, val: Dict[str, Any], current: int, is_random: bool):
        """
        Evaluate results returned from generate_optimizer
        """
        val['current_epoch'] = current
        val['is_initial_point'] = current <= INITIAL_POINTS

        logger.debug("Optimizer epoch evaluated: %s", val)

        is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
        # This value is assigned here and not in the optimization method
        # to keep proper order in the list of results. That's because
        # evaluations can take different amounts of time. Here they are aligned
        # in the order they will be shown to the user.
        val['is_best'] = is_best
        val['is_random'] = is_random
        self.print_results(val)

        if is_best:
            self.current_best_loss = val['loss']
            self.current_best_epoch = val

        self._save_result(val)

    def start(self) -> None:
        self.random_state = self._set_random_state(self.config.get('hyperopt_random_state'))
        logger.info(f"Using optimizer random state: {self.random_state}")
        self.hyperopt_table_header = -1
        # Initialize spaces ...
        self.init_spaces()

        self.prepare_hyperopt_data()

        # We don't need the exchange instance anymore while running hyperopt
        self.backtesting.exchange.close()
        self.backtesting.exchange._api = None
        self.backtesting.exchange._api_async = None
        self.backtesting.exchange.loop = None  # type: ignore
        self.backtesting.exchange._loop_lock = None  # type: ignore
        self.backtesting.exchange._cache_lock = None  # type: ignore
        # self.backtesting.exchange = None  # type: ignore
        self.backtesting.pairlists = None  # type: ignore

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get('hyperopt_jobs', -1)
        logger.info(f'Number of parallel jobs set as: {config_jobs}')

        self.opt = self.get_optimizer(self.dimensions, config_jobs)

        if self.print_colorized:
            colorama_init(autoreset=True)

        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(f'Effective number of parallel workers used: {jobs}')

                # Define progressbar
                widgets = self.get_progressbar_widgets()
                with progressbar.ProgressBar(
                    max_value=self.total_epochs, redirect_stdout=False, redirect_stderr=False,
                    widgets=widgets
                ) as pbar:
                    start = 0

                    if self.analyze_per_epoch:
                        # First analysis not in parallel mode when using --analyze-per-epoch.
                        # This allows dataprovider to load its informative cache.
                        asked, is_random = self.get_asked_points(n_points=1)
                        f_val0 = self.generate_optimizer(asked[0])
                        self.opt.tell(asked, [f_val0['loss']])
                        self.evaluate_result(f_val0, 1, is_random[0])
                        pbar.update(1)
                        start += 1

                    evals = ceil((self.total_epochs - start) / jobs)
                    for i in range(evals):
                        # Correct the number of epochs to be processed for the last
                        # iteration (should not exceed self.total_epochs in total)
                        n_rest = (i + 1) * jobs - (self.total_epochs - start)
                        current_jobs = jobs - n_rest if n_rest > 0 else jobs

                        asked, is_random = self.get_asked_points(n_points=current_jobs)
                        f_val = self.run_optimizer_parallel(parallel, asked)
                        self.opt.tell(asked, [v['loss'] for v in f_val])

                        # Calculate progressbar outputs
                        for j, val in enumerate(f_val):
                            # Use human-friendly indexes here (starting from 1)
                            current = i * jobs + j + 1 + start

                            self.evaluate_result(val, current, is_random[j])

                            pbar.update(current)

        except KeyboardInterrupt:
            print('User interrupted..')

        logger.info(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
                    f"saved to '{self.results_file}'.")

        if self.current_best_epoch:
            HyperoptTools.try_export_params(
                self.config,
                self.backtesting.strategy.get_strategy_name(),
                self.current_best_epoch)

            HyperoptTools.show_epoch_details(self.current_best_epoch, self.total_epochs,
                                             self.print_json)
        else:
            # This is printed when Ctrl+C is pressed quickly, before the first epochs have
            # had a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")
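
For reference, the intended entry point matches the class docstring above; a minimal driver
sketch (assuming `config` is an already-validated freqtrade configuration dict with the
strategy, timerange, epochs and spaces prepared elsewhere) would look like:

    from freqtrade.optimize.hyperopt import Hyperopt

    hyperopt = Hyperopt(config)   # wires up Backtesting, the loss function and result paths
    hyperopt.start()              # init_spaces(), prepare_hyperopt_data(), then the epoch loop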