freqtrade / freqtrade / build 14507242113

02 Dec 2024 07:11PM UTC coverage: 94.422% (+0.05%) from 94.377%

Triggered by a push via GitHub, committed by web-flow:
Merge pull request #11028 from xzmeng/fix-none
fix: check if days is None before conversion
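The commit title describes a guard against None before a numeric conversion. The patched file is not part of this report, so the sketch below is purely illustrative of that pattern; the function and parameter names are hypothetical, not the actual freqtrade change.

from datetime import timedelta


def days_to_timedelta(days: int | None) -> timedelta | None:
    # Hypothetical illustration of the guard described by the commit title;
    # not the code changed in PR #11028.
    if days is None:
        return None
    return timedelta(days=days)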

1 of 1 new or added lines in 1 file covered (100.0%).
525 existing lines in 54 files are now uncovered.
21684 of 22965 relevant lines covered (94.42%).
0.94 hits per line.
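As a quick cross-check of the headline figure: 21684 / 22965 ≈ 0.9442, i.e. roughly 94.42% of relevant lines covered, consistent with the 94.422% overall coverage reported above.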

Source file: /freqtrade/optimize/hyperopt/hyperopt.py (94.05% of lines covered)

The uncovered lines in this file are the body of hyperopt_pickle_magic, the random-sampling and final fallback branches of get_asked_points, the KeyboardInterrupt handler in start(), and the closing "no epochs evaluated yet" branch.
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement

"""
This module contains the hyperopt logic
"""

import logging
import random
import sys
from datetime import datetime
from math import ceil
from multiprocessing import Manager
from pathlib import Path
from typing import Any

import rapidjson
from joblib import Parallel, cpu_count, delayed, wrap_non_picklable_objects
from joblib.externals import cloudpickle
from rich.console import Console

from freqtrade.constants import FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import file_dump_json, plural
from freqtrade.optimize.hyperopt.hyperopt_logger import logging_mp_handle, logging_mp_setup
from freqtrade.optimize.hyperopt.hyperopt_optimizer import HyperOptimizer
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
    HyperoptStateContainer,
    HyperoptTools,
    hyperopt_serializer,
)
from freqtrade.util import get_progress_tracker


logger = logging.getLogger(__name__)


INITIAL_POINTS = 30

# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10

log_queue: Any


class Hyperopt:
    """
    Hyperopt class, this class contains all the logic to run a hyperopt simulation

    To start a hyperopt run:
    hyperopt = Hyperopt(config)
    hyperopt.start()
    """

    def __init__(self, config: Config) -> None:
        self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)

        self.config = config

        self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
        HyperoptStateContainer.set_state(HyperoptState.STARTUP)

        if self.config.get("hyperopt"):
            raise OperationalException(
                "Using separate Hyperopt files has been removed in 2021.9. Please convert "
                "your existing Hyperopt file to the new Hyperoptable strategy interface"
            )

        time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        strategy = str(self.config["strategy"])
        self.results_file: Path = (
            self.config["user_data_dir"]
            / "hyperopt_results"
            / f"strategy_{strategy}_{time_now}.fthypt"
        )
        self.data_pickle_file = (
            self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
        )
        self.total_epochs = config.get("epochs", 0)

        self.current_best_loss = 100

        self.clean_hyperopt()

        self.num_epochs_saved = 0
        self.current_best_epoch: dict[str, Any] | None = None

        if HyperoptTools.has_space(self.config, "sell"):
            # Make sure use_exit_signal is enabled
            self.config["use_exit_signal"] = True

        self.print_all = self.config.get("print_all", False)
        self.hyperopt_table_header = 0
        self.print_colorized = self.config.get("print_colorized", False)
        self.print_json = self.config.get("print_json", False)

        self.hyperopter = HyperOptimizer(self.config)

    @staticmethod
    def get_lock_filename(config: Config) -> str:
        return str(config["user_data_dir"] / "hyperopt.lock")

    def clean_hyperopt(self) -> None:
        """
        Remove hyperopt pickle files to restart hyperopt.
        """
        for f in [self.data_pickle_file, self.results_file]:
            p = Path(f)
            if p.is_file():
                logger.info(f"Removing `{p}`.")
                p.unlink()

    def hyperopt_pickle_magic(self, bases) -> None:
        """
        Hyperopt magic to allow strategy inheritance across files.
        For this to work properly, we need to register the module of the imported class
        with pickle by value.
        """
        for modules in bases:
            if modules.__name__ != "IStrategy":
                cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])
                self.hyperopt_pickle_magic(modules.__bases__)

    def _save_result(self, epoch: dict) -> None:
        """
        Save hyperopt results to file
        Store one line per epoch.
        While not a valid json object - this allows appending easily.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open("a") as f:
            rapidjson.dump(
                epoch,
                f,
                default=hyperopt_serializer,
                number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
            )
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
        file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)

    def print_results(self, results: dict[str, Any]) -> None:
        """
        Log results if it is better than any previous evaluation
        TODO: this should be moved to HyperoptTools too
        """
        is_best = results["is_best"]

        if self.print_all or is_best:
            self._hyper_out.add_data(
                self.config,
                [results],
                self.total_epochs,
                self.print_all,
            )

    def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
        """Start optimizer in a parallel way"""

        def optimizer_wrapper(*args, **kwargs):
            # global log queue. This must happen in the file that initializes Parallel
            logging_mp_setup(
                log_queue, logging.INFO if self.config["verbosity"] < 1 else logging.DEBUG
            )

            return self.hyperopter.generate_optimizer(*args, **kwargs)

        return parallel(delayed(wrap_non_picklable_objects(optimizer_wrapper))(v) for v in asked)

    def _set_random_state(self, random_state: int | None) -> int:
        return random_state or random.randint(1, 2**16 - 1)  # noqa: S311

    def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
        """
        Enforce that points returned from `self.opt.ask` have not already been evaluated

        Steps:
        1. Try to get points using `self.opt.ask` first
        2. Discard the points that have already been evaluated
        3. Retry using `self.opt.ask` up to 3 times
        4. If some points are still missing with respect to `n_points`, randomly sample some points
        5. Repeat until there are at least `n_points` points in the `asked_non_tried` list
        6. Return a list with length truncated at `n_points`
        """

        def unique_list(a_list):
            new_list = []
            for item in a_list:
                if item not in new_list:
                    new_list.append(item)
            return new_list

        i = 0
        asked_non_tried: list[list[Any]] = []
        is_random_non_tried: list[bool] = []
        while i < 5 and len(asked_non_tried) < n_points:
            if i < 3:
                self.opt.cache_ = {}
                asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
                is_random = [False for _ in range(len(asked))]
            else:
                asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
                is_random = [True for _ in range(len(asked))]
            is_random_non_tried += [
                rand
                for x, rand in zip(asked, is_random, strict=False)
                if x not in self.opt.Xi and x not in asked_non_tried
            ]
            asked_non_tried += [
                x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
            ]
            i += 1

        if asked_non_tried:
            return (
                asked_non_tried[: min(len(asked_non_tried), n_points)],
                is_random_non_tried[: min(len(asked_non_tried), n_points)],
            )
        else:
            return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]

    def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
        """
        Evaluate results returned from generate_optimizer
        """
        val["current_epoch"] = current
        val["is_initial_point"] = current <= INITIAL_POINTS

        logger.debug("Optimizer epoch evaluated: %s", val)

        is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
        # This value is assigned here and not in the optimization method
        # to keep proper order in the list of results. That's because
        # evaluations can take different time. Here they are aligned in the
        # order they will be shown to the user.
        val["is_best"] = is_best
        val["is_random"] = is_random
        self.print_results(val)

        if is_best:
            self.current_best_loss = val["loss"]
            self.current_best_epoch = val

        self._save_result(val)

    def _setup_logging_mp_workaround(self) -> None:
        """
        Workaround for logging in child processes.
        log_queue must be a global in the file that initializes Parallel.
        """
        global log_queue
        m = Manager()
        log_queue = m.Queue()

    def start(self) -> None:
        self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
        logger.info(f"Using optimizer random state: {self.random_state}")
        self.hyperopt_table_header = -1
        self.hyperopter.prepare_hyperopt()

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get("hyperopt_jobs", -1)
        logger.info(f"Number of parallel jobs set as: {config_jobs}")

        self.opt = self.hyperopter.get_optimizer(
            config_jobs, self.random_state, INITIAL_POINTS, SKOPT_MODEL_QUEUE_SIZE
        )
        self._setup_logging_mp_workaround()
        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(f"Effective number of parallel workers used: {jobs}")
                console = Console(
                    color_system="auto" if self.print_colorized else None,
                )

                # Define progressbar
                with get_progress_tracker(
                    console=console,
                    cust_callables=[self._hyper_out],
                ) as pbar:
                    task = pbar.add_task("Epochs", total=self.total_epochs)

                    start = 0

                    if self.analyze_per_epoch:
                        # First analysis not in parallel mode when using --analyze-per-epoch.
                        # This allows dataprovider to load its informative cache.
                        asked, is_random = self.get_asked_points(n_points=1)
                        f_val0 = self.hyperopter.generate_optimizer(asked[0])
                        self.opt.tell(asked, [f_val0["loss"]])
                        self.evaluate_result(f_val0, 1, is_random[0])
                        pbar.update(task, advance=1)
                        start += 1

                    evals = ceil((self.total_epochs - start) / jobs)
                    for i in range(evals):
                        # Correct the number of epochs to be processed for the last
                        # iteration (should not exceed self.total_epochs in total)
                        n_rest = (i + 1) * jobs - (self.total_epochs - start)
                        current_jobs = jobs - n_rest if n_rest > 0 else jobs

                        asked, is_random = self.get_asked_points(n_points=current_jobs)
                        f_val = self.run_optimizer_parallel(parallel, asked)
                        self.opt.tell(asked, [v["loss"] for v in f_val])

                        for j, val in enumerate(f_val):
                            # Use human-friendly indexes here (starting from 1)
                            current = i * jobs + j + 1 + start

                            self.evaluate_result(val, current, is_random[j])
                            pbar.update(task, advance=1)
                        logging_mp_handle(log_queue)

        except KeyboardInterrupt:
            print("User interrupted..")

        logger.info(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )

        if self.current_best_epoch:
            HyperoptTools.try_export_params(
                self.config,
                self.hyperopter.get_strategy_name(),
                self.current_best_epoch,
            )

            HyperoptTools.show_epoch_details(
                self.current_best_epoch, self.total_epochs, self.print_json
            )
        elif self.num_epochs_saved > 0:
            print(
                f"No good result found for given optimization function in {self.num_epochs_saved} "
                f"{plural(self.num_epochs_saved, 'epoch')}."
            )
        else:
            # This is printed when Ctrl+C is pressed quickly, before first epochs have
            # a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")
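As the class docstring shows, a run boils down to constructing Hyperopt(config) and calling start(), and _save_result then appends each evaluated epoch as one JSON object per line to the .fthypt results file (so the file as a whole is not valid JSON, but every line is). Below is a minimal standalone sketch of reading such a file back, assuming only keys the code above demonstrably writes ("loss", "current_epoch"); the helper name and command-line handling are illustrative, not part of freqtrade.

import sys
from pathlib import Path

import rapidjson


def read_fthypt(path: Path) -> list[dict]:
    """Return one dict per epoch from a .fthypt results file (one JSON object per line)."""
    epochs = []
    with path.open("r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # Mirror the writer's number handling (native numbers, NaN allowed).
            epochs.append(
                rapidjson.loads(line, number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN)
            )
    return epochs


if __name__ == "__main__":
    results = read_fthypt(Path(sys.argv[1]))
    best = min(results, key=lambda e: e["loss"])
    print(f"{len(results)} epochs read; best loss {best['loss']} at epoch {best['current_epoch']}")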