freqtrade / freqtrade · build 15036087736

14 May 2025 06:14PM UTC coverage: 94.366% (-0.02%) from 94.385%
Trigger: push (github) · Committer: xmatthias

    fix: default max_open_trades to inf instead of -1

    Without this, the auto-conversion doesn't backpopulate to the config

    closes #11752

22361 of 23696 relevant lines covered (94.37%)
0.94 hits per line

Source File

96.39% covered · /freqtrade/optimize/hyperopt/hyperopt.py
(uncovered lines are marked below with "# × not covered")
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement

"""
This module contains the hyperopt logic
"""

import gc
import logging
import random
from datetime import datetime
from math import ceil
from multiprocessing import Manager
from pathlib import Path
from typing import Any

import rapidjson
from joblib import Parallel, cpu_count

from freqtrade.constants import FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import file_dump_json, plural
from freqtrade.optimize.hyperopt.hyperopt_logger import logging_mp_handle, logging_mp_setup
from freqtrade.optimize.hyperopt.hyperopt_optimizer import HyperOptimizer
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
    HyperoptStateContainer,
    HyperoptTools,
    hyperopt_serializer,
)
from freqtrade.util import get_progress_tracker

logger = logging.getLogger(__name__)


INITIAL_POINTS = 30


log_queue: Any


class Hyperopt:
    """
    Hyperopt class, this class contains all the logic to run a hyperopt simulation

    To start a hyperopt run:
    hyperopt = Hyperopt(config)
    hyperopt.start()
    """

    def __init__(self, config: Config) -> None:
        self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)

        self.config = config

        self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
        HyperoptStateContainer.set_state(HyperoptState.STARTUP)

        if self.config.get("hyperopt"):
            raise OperationalException(
                "Using separate Hyperopt files has been removed in 2021.9. Please convert "
                "your existing Hyperopt file to the new Hyperoptable strategy interface"
            )

        time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        strategy = str(self.config["strategy"])
        self.results_file: Path = (
            self.config["user_data_dir"]
            / "hyperopt_results"
            / f"strategy_{strategy}_{time_now}.fthypt"
        )
        self.data_pickle_file = (
            self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
        )
        self.total_epochs = config.get("epochs", 0)

        self.current_best_loss = 100

        self.clean_hyperopt()

        self.num_epochs_saved = 0
        self.current_best_epoch: dict[str, Any] | None = None

        if HyperoptTools.has_space(self.config, "sell"):
            # Make sure use_exit_signal is enabled
            self.config["use_exit_signal"] = True

        self.print_all = self.config.get("print_all", False)
        self.hyperopt_table_header = 0
        self.print_json = self.config.get("print_json", False)

        self.hyperopter = HyperOptimizer(self.config, self.data_pickle_file)

    @staticmethod
    def get_lock_filename(config: Config) -> str:
        return str(config["user_data_dir"] / "hyperopt.lock")

    def clean_hyperopt(self) -> None:
        """
        Remove hyperopt pickle files to restart hyperopt.
        """
        for f in [self.data_pickle_file, self.results_file]:
            p = Path(f)
            if p.is_file():
                logger.info(f"Removing `{p}`.")
                p.unlink()

    def _save_result(self, epoch: dict) -> None:
        """
        Save hyperopt results to file
        Store one line per epoch.
        While not a valid json object - this allows appending easily.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open("a") as f:
            rapidjson.dump(
                epoch,
                f,
                default=hyperopt_serializer,
                number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
            )
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
        file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)

    def print_results(self, results: dict[str, Any]) -> None:
        """
        Log results if it is better than any previous evaluation
        TODO: this should be moved to HyperoptTools too
        """
        is_best = results["is_best"]

        if self.print_all or is_best:
            self._hyper_out.add_data(
                self.config,
                [results],
                self.total_epochs,
                self.print_all,
            )

    def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
        """Start optimizer in a parallel way"""

        def optimizer_wrapper(*args, **kwargs):
            # global log queue. This must happen in the file that initializes Parallel
            logging_mp_setup(
                log_queue, logging.INFO if self.config["verbosity"] < 1 else logging.DEBUG
            )

            return self.hyperopter.generate_optimizer_wrapped(*args, **kwargs)

        return parallel(optimizer_wrapper(v) for v in asked)

    def _set_random_state(self, random_state: int | None) -> int:
        return random_state or random.randint(1, 2**16 - 1)  # noqa: S311

    def get_optuna_asked_points(self, n_points: int, dimensions: dict) -> list[Any]:
        asked: list[list[Any]] = []
        for i in range(n_points):
            asked.append(self.opt.ask(dimensions))
        return asked

    def get_asked_points(self, n_points: int, dimensions: dict) -> tuple[list[Any], list[bool]]:
        """
        Enforce points returned from `self.opt.ask` have not been already evaluated

        Steps:
        1. Try to get points using `self.opt.ask` first
        2. Discard the points that have already been evaluated
        3. Retry using `self.opt.ask` up to 3 times
        4. If still some points are missing in respect to `n_points`, random sample some points
        5. Repeat until at least `n_points` points in the `asked_non_tried` list
        6. Return a list with length truncated at `n_points`
        """

        def unique_list(a_list):
            new_list = []
            for item in a_list:
                if item not in new_list:
                    new_list.append(item)
            return new_list

        i = 0
        asked_non_tried: list[list[Any]] = []
        is_random_non_tried: list[bool] = []
        while i < 5 and len(asked_non_tried) < n_points:
            if i < 3:
                self.opt.cache_ = {}
                asked = unique_list(
                    self.get_optuna_asked_points(
                        n_points=n_points * 5 if i > 0 else n_points, dimensions=dimensions
                    )
                )
                is_random = [False for _ in range(len(asked))]
            else:
                asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))  # × not covered
                is_random = [True for _ in range(len(asked))]  # × not covered
            is_random_non_tried += [
                rand for x, rand in zip(asked, is_random, strict=False) if x not in asked_non_tried
            ]
            asked_non_tried += [x for x in asked if x not in asked_non_tried]
            i += 1

        if asked_non_tried:
            return (
                asked_non_tried[: min(len(asked_non_tried), n_points)],
                is_random_non_tried[: min(len(asked_non_tried), n_points)],
            )
        else:
            return self.get_optuna_asked_points(n_points=n_points, dimensions=dimensions), [  # × not covered
                False for _ in range(n_points)
            ]

    def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
        """
        Evaluate results returned from generate_optimizer
        """
        val["current_epoch"] = current
        val["is_initial_point"] = current <= INITIAL_POINTS

        logger.debug("Optimizer epoch evaluated: %s", val)

        is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
        # This value is assigned here and not in the optimization method
        # to keep proper order in the list of results. That's because
        # evaluations can take different time. Here they are aligned in the
        # order they will be shown to the user.
        val["is_best"] = is_best
        val["is_random"] = is_random
        self.print_results(val)

        if is_best:
            self.current_best_loss = val["loss"]
            self.current_best_epoch = val

        self._save_result(val)

    def _setup_logging_mp_workaround(self) -> None:
        """
        Workaround for logging in child processes.
        local_queue must be a global in the file that initializes Parallel.
        """
        global log_queue
        m = Manager()
        log_queue = m.Queue()

    def start(self) -> None:
        self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
        logger.info(f"Using optimizer random state: {self.random_state}")
        self.hyperopt_table_header = -1
        self.hyperopter.prepare_hyperopt()

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get("hyperopt_jobs", -1)
        logger.info(f"Number of parallel jobs set as: {config_jobs}")

        self.opt = self.hyperopter.get_optimizer(self.random_state)
        self._setup_logging_mp_workaround()
        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(f"Effective number of parallel workers used: {jobs}")

                # Define progressbar
                with get_progress_tracker(cust_callables=[self._hyper_out]) as pbar:
                    task = pbar.add_task("Epochs", total=self.total_epochs)

                    start = 0

                    if self.analyze_per_epoch:
                        # First analysis not in parallel mode when using --analyze-per-epoch.
                        # This allows dataprovider to load it's informative cache.
                        asked, is_random = self.get_asked_points(
                            n_points=1, dimensions=self.hyperopter.o_dimensions
                        )
                        f_val0 = self.hyperopter.generate_optimizer(asked[0].params)
                        self.opt.tell(asked[0], [f_val0["loss"]])
                        self.evaluate_result(f_val0, 1, is_random[0])
                        pbar.update(task, advance=1)
                        start += 1

                    evals = ceil((self.total_epochs - start) / jobs)
                    for i in range(evals):
                        # Correct the number of epochs to be processed for the last
                        # iteration (should not exceed self.total_epochs in total)
                        n_rest = (i + 1) * jobs - (self.total_epochs - start)
                        current_jobs = jobs - n_rest if n_rest > 0 else jobs

                        asked, is_random = self.get_asked_points(
                            n_points=current_jobs, dimensions=self.hyperopter.o_dimensions
                        )

                        f_val = self.run_optimizer_parallel(
                            parallel,
                            [asked1.params for asked1 in asked],
                        )
                        f_val_loss = [v["loss"] for v in f_val]
                        for o_ask, v in zip(asked, f_val_loss, strict=False):
                            self.opt.tell(o_ask, v)

                        for j, val in enumerate(f_val):
                            # Use human-friendly indexes here (starting from 1)
                            current = i * jobs + j + 1 + start

                            self.evaluate_result(val, current, is_random[j])
                            pbar.update(task, advance=1)
                        logging_mp_handle(log_queue)
                        gc.collect()

        except KeyboardInterrupt:  # × not covered
            print("User interrupted..")  # × not covered

        logger.info(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )

        if self.current_best_epoch:
            HyperoptTools.try_export_params(
                self.config,
                self.hyperopter.get_strategy_name(),
                self.current_best_epoch,
            )

            HyperoptTools.show_epoch_details(
                self.current_best_epoch, self.total_epochs, self.print_json
            )
        elif self.num_epochs_saved > 0:
            print(
                f"No good result found for given optimization function in {self.num_epochs_saved} "
                f"{plural(self.num_epochs_saved, 'epoch')}."
            )
        else:
            # This is printed when Ctrl+C is pressed quickly, before first epochs have
            # a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")  # × not covered
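
For orientation, the class docstring above names the intended entry point: construct Hyperopt with a config and call start(). The sketch below shows that call path with an illustrative configuration; only the keys this file reads directly are listed, and a real run needs a complete, validated freqtrade configuration (strategy, exchange, pairs, loss function, spaces, and so on) produced by freqtrade's own configuration loading, so treat the dict contents as assumptions rather than a working setup.

from pathlib import Path

from freqtrade.optimize.hyperopt.hyperopt import Hyperopt

# Illustrative config sketch: only keys read directly by hyperopt.py are shown.
# A real run requires a complete, validated freqtrade configuration.
config = {
    "strategy": "SampleStrategy",        # assumed strategy name
    "user_data_dir": Path("user_data"),  # results_file / data_pickle_file live under here
    "epochs": 100,                       # becomes self.total_epochs
    "hyperopt_jobs": -1,                 # worker count passed to joblib Parallel in start()
    "verbosity": 0,                      # log level used by the multiprocessing workers
    "analyze_per_epoch": False,          # skip the single serial warm-up epoch
    "print_all": False,                  # only report new best epochs
    "print_json": False,
}

hyperopt = Hyperopt(config)  # raises OperationalException if a legacy "hyperopt" key is present
hyperopt.start()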