
freqtrade / freqtrade / 13826381362

31 Jan 2025 06:03AM UTC coverage: 94.401% (-0.04%) from 94.437%
Build 13826381362: push via github, by xmatthias
Commit: test: add test for exchange.features

21835 of 23130 relevant lines covered (94.4%)
0.94 hits per line
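
The summary figures above are related by simple ratios; a quick sanity check (illustrative, not part of the Coveralls output, and it assumes roughly one recorded hit per covered line):

covered_lines = 21_835
relevant_lines = 23_130
print(f"{covered_lines / relevant_lines:.3%}")   # 94.401% -> the reported coverage
# "hits per line" is total hits divided by relevant lines; with about one hit
# per covered line this comes out near the same ratio:
print(f"{covered_lines / relevant_lines:.2f}")   # 0.94 -> the reported hits per line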

Source File: /freqtrade/optimize/hyperopt/hyperopt.py (file coverage: 93.94%; lines Coveralls reports as uncovered are marked "# not covered" in the listing below)
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement

"""
This module contains the hyperopt logic
"""

import logging
import random
import sys
from datetime import datetime
from math import ceil
from multiprocessing import Manager
from pathlib import Path
from typing import Any

import rapidjson
from joblib import Parallel, cpu_count, delayed, wrap_non_picklable_objects
from joblib.externals import cloudpickle

from freqtrade.constants import FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import file_dump_json, plural
from freqtrade.optimize.hyperopt.hyperopt_logger import logging_mp_handle, logging_mp_setup
from freqtrade.optimize.hyperopt.hyperopt_optimizer import HyperOptimizer
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
    HyperoptStateContainer,
    HyperoptTools,
    hyperopt_serializer,
)
from freqtrade.util import get_progress_tracker


logger = logging.getLogger(__name__)


INITIAL_POINTS = 30

# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10

log_queue: Any

class Hyperopt:
    """
    Hyperopt class, this class contains all the logic to run a hyperopt simulation

    To start a hyperopt run:
    hyperopt = Hyperopt(config)
    hyperopt.start()
    """

    def __init__(self, config: Config) -> None:
        self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)

        self.config = config

        self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
        HyperoptStateContainer.set_state(HyperoptState.STARTUP)

        if self.config.get("hyperopt"):
            raise OperationalException(
                "Using separate Hyperopt files has been removed in 2021.9. Please convert "
                "your existing Hyperopt file to the new Hyperoptable strategy interface"
            )

        time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        strategy = str(self.config["strategy"])
        self.results_file: Path = (
            self.config["user_data_dir"]
            / "hyperopt_results"
            / f"strategy_{strategy}_{time_now}.fthypt"
        )
        self.data_pickle_file = (
            self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
        )
        self.total_epochs = config.get("epochs", 0)

        self.current_best_loss = 100

        self.clean_hyperopt()

        self.num_epochs_saved = 0
        self.current_best_epoch: dict[str, Any] | None = None

        if HyperoptTools.has_space(self.config, "sell"):
            # Make sure use_exit_signal is enabled
            self.config["use_exit_signal"] = True

        self.print_all = self.config.get("print_all", False)
        self.hyperopt_table_header = 0
        self.print_json = self.config.get("print_json", False)

        self.hyperopter = HyperOptimizer(self.config)

    @staticmethod
    def get_lock_filename(config: Config) -> str:
        return str(config["user_data_dir"] / "hyperopt.lock")

    def clean_hyperopt(self) -> None:
        """
        Remove hyperopt pickle files to restart hyperopt.
        """
        for f in [self.data_pickle_file, self.results_file]:
            p = Path(f)
            if p.is_file():
                logger.info(f"Removing `{p}`.")
                p.unlink()

    def hyperopt_pickle_magic(self, bases) -> None:
        """
        Hyperopt magic to allow strategy inheritance across files.
        For this to work properly, we need to register the module of the imported class
        for pickling by value.
        """
        for modules in bases:  # not covered
            if modules.__name__ != "IStrategy":  # not covered
                cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])  # not covered
                self.hyperopt_pickle_magic(modules.__bases__)  # not covered

    def _save_result(self, epoch: dict) -> None:
        """
        Save hyperopt results to file
        Store one line per epoch.
        While not a valid json object - this allows appending easily.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open("a") as f:
            rapidjson.dump(
                epoch,
                f,
                default=hyperopt_serializer,
                number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
            )
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
        file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)

    def print_results(self, results: dict[str, Any]) -> None:
        """
        Log results if it is better than any previous evaluation
        TODO: this should be moved to HyperoptTools too
        """
        is_best = results["is_best"]

        if self.print_all or is_best:
            self._hyper_out.add_data(
                self.config,
                [results],
                self.total_epochs,
                self.print_all,
            )

    def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
        """Start optimizer in a parallel way"""

        def optimizer_wrapper(*args, **kwargs):
            # global log queue. This must happen in the file that initializes Parallel
            logging_mp_setup(
                log_queue, logging.INFO if self.config["verbosity"] < 1 else logging.DEBUG
            )

            return self.hyperopter.generate_optimizer(*args, **kwargs)

        return parallel(delayed(wrap_non_picklable_objects(optimizer_wrapper))(v) for v in asked)

    def _set_random_state(self, random_state: int | None) -> int:
        return random_state or random.randint(1, 2**16 - 1)  # noqa: S311

    def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
        """
        Enforce that points returned from `self.opt.ask` have not already been evaluated

        Steps:
        1. Try to get points using `self.opt.ask` first
        2. Discard the points that have already been evaluated
        3. Retry using `self.opt.ask` up to 3 times
        4. If some points are still missing with respect to `n_points`, randomly sample some points
        5. Repeat until there are at least `n_points` points in the `asked_non_tried` list
        6. Return a list with length truncated at `n_points`
        """

        def unique_list(a_list):
            new_list = []
            for item in a_list:
                if item not in new_list:
                    new_list.append(item)
            return new_list

        i = 0
        asked_non_tried: list[list[Any]] = []
        is_random_non_tried: list[bool] = []
        while i < 5 and len(asked_non_tried) < n_points:
            if i < 3:
                self.opt.cache_ = {}
                asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
                is_random = [False for _ in range(len(asked))]
            else:
                asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))  # not covered
                is_random = [True for _ in range(len(asked))]  # not covered
            is_random_non_tried += [
                rand
                for x, rand in zip(asked, is_random, strict=False)
                if x not in self.opt.Xi and x not in asked_non_tried
            ]
            asked_non_tried += [
                x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
            ]
            i += 1

        if asked_non_tried:
            return (
                asked_non_tried[: min(len(asked_non_tried), n_points)],
                is_random_non_tried[: min(len(asked_non_tried), n_points)],
            )
        else:
            return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]  # not covered

    def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
        """
        Evaluate results returned from generate_optimizer
        """
        val["current_epoch"] = current
        val["is_initial_point"] = current <= INITIAL_POINTS

        logger.debug("Optimizer epoch evaluated: %s", val)

        is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
        # This value is assigned here and not in the optimization method
        # to keep proper order in the list of results. That's because
        # evaluations can take different time. Here they are aligned in the
        # order they will be shown to the user.
        val["is_best"] = is_best
        val["is_random"] = is_random
        self.print_results(val)

        if is_best:
            self.current_best_loss = val["loss"]
            self.current_best_epoch = val

        self._save_result(val)

    def _setup_logging_mp_workaround(self) -> None:
        """
        Workaround for logging in child processes.
        local_queue must be a global in the file that initializes Parallel.
        """
        global log_queue
        m = Manager()
        log_queue = m.Queue()

    def start(self) -> None:
        self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
        logger.info(f"Using optimizer random state: {self.random_state}")
        self.hyperopt_table_header = -1
        self.hyperopter.prepare_hyperopt()

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get("hyperopt_jobs", -1)
        logger.info(f"Number of parallel jobs set as: {config_jobs}")

        self.opt = self.hyperopter.get_optimizer(
            config_jobs, self.random_state, INITIAL_POINTS, SKOPT_MODEL_QUEUE_SIZE
        )
        self._setup_logging_mp_workaround()
        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(f"Effective number of parallel workers used: {jobs}")

                # Define progressbar
                with get_progress_tracker(cust_callables=[self._hyper_out]) as pbar:
                    task = pbar.add_task("Epochs", total=self.total_epochs)

                    start = 0

                    if self.analyze_per_epoch:
                        # First analysis not in parallel mode when using --analyze-per-epoch.
                        # This allows dataprovider to load its informative cache.
                        asked, is_random = self.get_asked_points(n_points=1)
                        f_val0 = self.hyperopter.generate_optimizer(asked[0])
                        self.opt.tell(asked, [f_val0["loss"]])
                        self.evaluate_result(f_val0, 1, is_random[0])
                        pbar.update(task, advance=1)
                        start += 1

                    evals = ceil((self.total_epochs - start) / jobs)
                    for i in range(evals):
                        # Correct the number of epochs to be processed for the last
                        # iteration (should not exceed self.total_epochs in total)
                        n_rest = (i + 1) * jobs - (self.total_epochs - start)
                        current_jobs = jobs - n_rest if n_rest > 0 else jobs

                        asked, is_random = self.get_asked_points(n_points=current_jobs)
                        f_val = self.run_optimizer_parallel(parallel, asked)
                        self.opt.tell(asked, [v["loss"] for v in f_val])

                        for j, val in enumerate(f_val):
                            # Use human-friendly indexes here (starting from 1)
                            current = i * jobs + j + 1 + start

                            self.evaluate_result(val, current, is_random[j])
                            pbar.update(task, advance=1)
                        logging_mp_handle(log_queue)

        except KeyboardInterrupt:  # not covered
            print("User interrupted..")  # not covered

        logger.info(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )

        if self.current_best_epoch:
            HyperoptTools.try_export_params(
                self.config,
                self.hyperopter.get_strategy_name(),
                self.current_best_epoch,
            )

            HyperoptTools.show_epoch_details(
                self.current_best_epoch, self.total_epochs, self.print_json
            )
        elif self.num_epochs_saved > 0:
            print(
                f"No good result found for given optimization function in {self.num_epochs_saved} "
                f"{plural(self.num_epochs_saved, 'epoch')}."
            )
        else:
            # This is printed when Ctrl+C is pressed quickly, before first epochs have
            # a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")  # not covered
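
As the class docstring notes, the intended entry point is simply constructing Hyperopt with a config and calling start(). A minimal driver sketch, assuming a fully resolved freqtrade configuration dict named config (not shown on this page) and that the import path mirrors the file path above:

from freqtrade.optimize.hyperopt.hyperopt import Hyperopt

# `config` is assumed to be a complete freqtrade Config dict, including at least
# "strategy", "user_data_dir", "epochs" and the hyperopt spaces; building it is
# out of scope here.
hyperopt = Hyperopt(config)
hyperopt.start()  # runs the configured epochs and appends one result line per epoch to the .fthypt file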
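
_save_result appends one rapidjson document per epoch rather than writing a single valid JSON array, which is what makes the file cheap to append to. A minimal sketch of reading such a results file back (the path below is illustrative; the real name is derived from the strategy and timestamp as shown in __init__):

import rapidjson

results_file = "user_data/hyperopt_results/strategy_MyStrategy_2025-01-31_06-03-00.fthypt"  # illustrative path

epochs = []
with open(results_file) as f:
    for line in f:  # one epoch per line, as written by Hyperopt._save_result
        if line.strip():
            epochs.append(rapidjson.loads(line, number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN))

best = min(epochs, key=lambda e: e["loss"])  # each epoch dict carries its "loss" value
print(best["current_epoch"], best["loss"])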