freqtrade / freqtrade / build 6181253459

08 Sep 2023 06:04AM UTC, coverage: 94.614% (+0.06%) from 94.556%

Trigger: push (github-actions / web-flow)
Commit: Merge pull request #9159 from stash86/fix-adjust
"remove old codes when we only can do partial entries"

2 of 2 new or added lines in 1 file covered. (100.0%)
19114 of 20202 relevant lines covered (94.61%)
0.95 hits per line

Source File

/freqtrade/data/btanalysis.py: 98.48% of lines covered

"""
Helpers when analyzing backtest data
"""
import logging
from copy import copy
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union

import numpy as np
import pandas as pd

from freqtrade.constants import LAST_BT_RESULT_FN, IntOrInf
from freqtrade.exceptions import OperationalException
from freqtrade.misc import file_dump_json, json_load
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
from freqtrade.persistence import LocalTrade, Trade, init_db
from freqtrade.types import BacktestHistoryEntryType, BacktestResultType


logger = logging.getLogger(__name__)

# Newest format
BT_DATA_COLUMNS = ['pair', 'stake_amount', 'max_stake_amount', 'amount',
                   'open_date', 'close_date', 'open_rate', 'close_rate',
                   'fee_open', 'fee_close', 'trade_duration',
                   'profit_ratio', 'profit_abs', 'exit_reason',
                   'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs',
                   'stop_loss_ratio', 'min_rate', 'max_rate', 'is_open', 'enter_tag',
                   'leverage', 'is_short', 'open_timestamp', 'close_timestamp', 'orders'
                   ]

def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> str:
    """
    Get latest backtest export based on '.last_result.json'.
    :param directory: Directory to search for last result
    :param variant: 'backtest' or 'hyperopt' - the method to return
    :return: string containing the filename of the latest backtest result
    :raises: ValueError in the following cases:
        * Directory does not exist
        * `directory/.last_result.json` does not exist
        * `directory/.last_result.json` has the wrong content
    """
    if isinstance(directory, str):
        directory = Path(directory)
    if not directory.is_dir():
        raise ValueError(f"Directory '{directory}' does not exist.")
    filename = directory / LAST_BT_RESULT_FN

    if not filename.is_file():
        raise ValueError(
            f"Directory '{directory}' does not seem to contain backtest statistics yet.")

    with filename.open() as file:
        data = json_load(file)

    if f'latest_{variant}' not in data:
        raise ValueError(f"Invalid '{LAST_BT_RESULT_FN}' format.")

    return data[f'latest_{variant}']


def get_latest_backtest_filename(directory: Union[Path, str]) -> str:
    """
    Get latest backtest export based on '.last_result.json'.
    :param directory: Directory to search for last result
    :return: string containing the filename of the latest backtest result
    :raises: ValueError in the following cases:
        * Directory does not exist
        * `directory/.last_result.json` does not exist
        * `directory/.last_result.json` has the wrong content
    """
    return get_latest_optimize_filename(directory, 'backtest')


def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str:
    """
    Get latest hyperopt export based on '.last_result.json'.
    :param directory: Directory to search for last result
    :return: string containing the filename of the latest hyperopt result
    :raises: ValueError in the following cases:
        * Directory does not exist
        * `directory/.last_result.json` does not exist
        * `directory/.last_result.json` has the wrong content
    """
    try:
        return get_latest_optimize_filename(directory, 'hyperopt')
    except ValueError:
        # Return default (legacy) pickle filename
        return 'hyperopt_results.pickle'


def get_latest_hyperopt_file(
        directory: Union[Path, str], predef_filename: Optional[str] = None) -> Path:
    """
    Get latest hyperopt export based on '.last_result.json'.
    :param directory: Directory to search for last result
    :return: string containing the filename of the latest hyperopt result
    :raises: ValueError in the following cases:
        * Directory does not exist
        * `directory/.last_result.json` does not exist
        * `directory/.last_result.json` has the wrong content
    """
    if isinstance(directory, str):
        directory = Path(directory)
    if predef_filename:
        if Path(predef_filename).is_absolute():
            raise OperationalException(
                "--hyperopt-filename expects only the filename, not an absolute path.")
        return directory / predef_filename
    return directory / get_latest_hyperopt_filename(directory)

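These helpers only resolve filenames recorded in '.last_result.json'; they do not load results themselves. A minimal usage sketch follows; the results directory path is a hypothetical example, not something taken from this file.

# --- usage sketch (not part of btanalysis.py) ---
from pathlib import Path

from freqtrade.data.btanalysis import (get_latest_backtest_filename,
                                        get_latest_hyperopt_file)

results_dir = Path("user_data/backtest_results")  # hypothetical directory

# Name of the most recent backtest export, as recorded in '.last_result.json'
latest_bt = get_latest_backtest_filename(results_dir)
print(f"Latest backtest export: {latest_bt}")

# Full path to the latest hyperopt result (falls back to the legacy pickle name)
latest_hyperopt = get_latest_hyperopt_file(results_dir)
print(f"Latest hyperopt result: {latest_hyperopt}")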
def load_backtest_metadata(filename: Union[Path, str]) -> Dict[str, Any]:
    """
    Read metadata dictionary from backtest results file without reading and deserializing entire
    file.
    :param filename: path to backtest results file.
    :return: metadata dict or None if metadata is not present.
    """
    filename = get_backtest_metadata_filename(filename)
    try:
        with filename.open() as fp:
            return json_load(fp)
    except FileNotFoundError:
        return {}
    except Exception as e:
        raise OperationalException('Unexpected error while loading backtest metadata.') from e


def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType:
    """
    Load backtest statistics file.
    :param filename: pathlib.Path object, or string pointing to the file.
    :return: a dictionary containing the backtest statistics.
    """
    if isinstance(filename, str):
        filename = Path(filename)
    if filename.is_dir():
        filename = filename / get_latest_backtest_filename(filename)
    if not filename.is_file():
        raise ValueError(f"File {filename} does not exist.")
    logger.info(f"Loading backtest result from {filename}")
    with filename.open() as file:
        data = json_load(file)

    # Legacy list format does not contain metadata.
    if isinstance(data, dict):
        data['metadata'] = load_backtest_metadata(filename)
    return data

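For the current dict-based export format, the loaded statistics are keyed by 'strategy', 'metadata' and 'strategy_comparison'. A small sketch of inspecting a result follows; the path is hypothetical and the per-strategy field names ('total_trades', 'profit_total') are an assumption about the export format, not something defined in this file.

# --- usage sketch (not part of btanalysis.py) ---
from freqtrade.data.btanalysis import load_backtest_stats

# Passing a directory resolves the latest export via get_latest_backtest_filename().
stats = load_backtest_stats("user_data/backtest_results")  # hypothetical path

for name, strat_stats in stats['strategy'].items():
    # 'total_trades' / 'profit_total' are assumed summary fields of the export
    print(name, strat_stats['total_trades'], strat_stats['profit_total'])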
def load_and_merge_backtest_result(strategy_name: str, filename: Path, results: Dict[str, Any]):
    """
    Load one strategy from multi-strategy result and merge it with results
    :param strategy_name: Name of the strategy contained in the result
    :param filename: Backtest-result-filename to load
    :param results: dict to merge the result to.
    """
    bt_data = load_backtest_stats(filename)
    k: Literal['metadata', 'strategy']
    for k in ('metadata', 'strategy'):  # type: ignore
        results[k][strategy_name] = bt_data[k][strategy_name]
    results['metadata'][strategy_name]['filename'] = filename.stem
    comparison = bt_data['strategy_comparison']
    for i in range(len(comparison)):
        if comparison[i]['key'] == strategy_name:
            results['strategy_comparison'].append(comparison[i])
            break


def _get_backtest_files(dirname: Path) -> List[Path]:
    # Weird glob expression here avoids including .meta.json files.
    return list(reversed(sorted(dirname.glob('backtest-result-*-[0-9][0-9].json'))))


def get_backtest_result(filename: Path) -> List[BacktestHistoryEntryType]:
    """
    Get backtest result read from metadata file
    """
    return [
        {
            'filename': filename.stem,
            'strategy': s,
            'notes': v.get('notes', ''),
            'run_id': v['run_id'],
            'backtest_start_time': v['backtest_start_time'],
        } for s, v in load_backtest_metadata(filename).items()
    ]


def get_backtest_resultlist(dirname: Path) -> List[BacktestHistoryEntryType]:
    """
    Get list of backtest results read from metadata files
    """
    return [
        {
            'filename': filename.stem,
            'strategy': s,
            'run_id': v['run_id'],
            'notes': v.get('notes', ''),
            'backtest_start_time': v['backtest_start_time'],
        }
        for filename in _get_backtest_files(dirname)
        for s, v in load_backtest_metadata(filename).items()
        if v
    ]


def delete_backtest_result(file_abs: Path):
    """
    Delete backtest result file and corresponding metadata file.
    """
    # *.meta.json
    logger.info(f"Deleting backtest result file: {file_abs.name}")
    file_abs_meta = file_abs.with_suffix('.meta.json')
    file_abs.unlink()
    file_abs_meta.unlink()


def update_backtest_metadata(filename: Path, strategy: str, content: Dict[str, Any]):
    """
    Updates backtest metadata file with new content.
    :raises: ValueError if metadata file does not exist, or strategy is not in this file.
    """
    metadata = load_backtest_metadata(filename)
    if not metadata:
        raise ValueError("File does not exist.")  # not covered in this report
    if strategy not in metadata:
        raise ValueError("Strategy not in metadata.")
    metadata[strategy].update(content)
    # Write data again.
    file_dump_json(get_backtest_metadata_filename(filename), metadata)

def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, str],
                                 min_backtest_date: Optional[datetime] = None) -> Dict[str, Any]:
    """
    Find existing backtest stats that match specified run IDs and load them.
    :param dirname: pathlib.Path object, or string pointing to the file.
    :param run_ids: {strategy_name: id_string} dictionary.
    :param min_backtest_date: do not load a backtest older than specified date.
    :return: results dict.
    """
    # Copy so we can modify this dict without affecting parent scope.
    run_ids = copy(run_ids)
    dirname = Path(dirname)
    results: Dict[str, Any] = {
        'metadata': {},
        'strategy': {},
        'strategy_comparison': [],
    }

    for filename in _get_backtest_files(dirname):
        metadata = load_backtest_metadata(filename)
        if not metadata:
            # Files are sorted from newest to oldest. When file without metadata is encountered it
            # is safe to assume older files will also not have any metadata.
            break  # not covered in this report

        for strategy_name, run_id in list(run_ids.items()):
            strategy_metadata = metadata.get(strategy_name, None)
            if not strategy_metadata:
                # This strategy is not present in analyzed backtest.
                continue  # not covered in this report

            if min_backtest_date is not None:
                backtest_date = strategy_metadata['backtest_start_time']
                backtest_date = datetime.fromtimestamp(backtest_date, tz=timezone.utc)
                if backtest_date < min_backtest_date:
                    # Do not use a cached result for this strategy as first result is too old.
                    del run_ids[strategy_name]
                    continue

            if strategy_metadata['run_id'] == run_id:
                del run_ids[strategy_name]
                load_and_merge_backtest_result(strategy_name, filename, results)

        if len(run_ids) == 0:
            break
    return results


def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
    """
    Compatibility support for older backtest data.
    """
    df['open_date'] = pd.to_datetime(df['open_date'], utc=True)
    df['close_date'] = pd.to_datetime(df['close_date'], utc=True)
    # Compatibility support for pre short Columns
    if 'is_short' not in df.columns:
        df['is_short'] = False
    if 'leverage' not in df.columns:
        df['leverage'] = 1.0
    if 'enter_tag' not in df.columns:
        df['enter_tag'] = df['buy_tag']
        df = df.drop(['buy_tag'], axis=1)
    if 'max_stake_amount' not in df.columns:
        df['max_stake_amount'] = df['stake_amount']
    if 'orders' not in df.columns:
        df['orders'] = None
    return df


def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = None) -> pd.DataFrame:
    """
    Load backtest data file.
    :param filename: pathlib.Path object, or string pointing to a file or directory
    :param strategy: Strategy to load - mainly relevant for multi-strategy backtests
                     Can also serve as protection to load the correct result.
    :return: a dataframe with the analysis results
    :raise: ValueError if loading goes wrong.
    """
    data = load_backtest_stats(filename)
    if not isinstance(data, list):
        # new, nested format
        if 'strategy' not in data:
            raise ValueError("Unknown dataformat.")

        if not strategy:
            if len(data['strategy']) == 1:
                strategy = list(data['strategy'].keys())[0]
            else:
                raise ValueError("Detected backtest result with more than one strategy. "
                                 "Please specify a strategy.")

        if strategy not in data['strategy']:
            raise ValueError(f"Strategy {strategy} not available in the backtest result.")

        data = data['strategy'][strategy]['trades']
        df = pd.DataFrame(data)
        if not df.empty:
            df = _load_backtest_data_df_compatibility(df)

    else:
        # old format - only with lists.
        raise OperationalException(
            "Backtest-results with only trades data are no longer supported.")
    if not df.empty:
        df = df.sort_values("open_date").reset_index(drop=True)
    return df

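The trades DataFrame returned by load_backtest_data() uses BT_DATA_COLUMNS, so per-pair profit columns such as 'profit_abs' are available directly. A brief sketch, assuming a hypothetical results directory and strategy name:

# --- usage sketch (not part of btanalysis.py) ---
from freqtrade.data.btanalysis import load_backtest_data

# Load trades for one strategy from the latest export in the directory (hypothetical names).
trades = load_backtest_data("user_data/backtest_results", strategy="SampleStrategy")

if not trades.empty:
    # Aggregate trade count and absolute profit per pair
    summary = trades.groupby("pair")["profit_abs"].agg(["count", "sum"])
    print(summary.sort_values("sum", ascending=False).head())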
def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataFrame:
    """
    Find overlapping trades by expanding each trade once per period it was open
    and then counting overlaps.
    :param results: Results Dataframe - can be loaded
    :param timeframe: Timeframe used for backtest
    :return: dataframe with open-counts per time-period in timeframe
    """
    from freqtrade.exchange import timeframe_to_minutes
    timeframe_min = timeframe_to_minutes(timeframe)
    dates = [pd.Series(pd.date_range(row[1]['open_date'], row[1]['close_date'],
                                     freq=f"{timeframe_min}min"))
             for row in results[['open_date', 'close_date']].iterrows()]
    deltas = [len(x) for x in dates]
    dates = pd.Series(pd.concat(dates).values, name='date')
    df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)

    df2 = pd.concat([dates, df2], axis=1)
    df2 = df2.set_index('date')
    df_final = df2.resample(f"{timeframe_min}min")[['pair']].count()
    df_final = df_final.rename({'pair': 'open_trades'}, axis=1)
    return df_final


def evaluate_result_multi(results: pd.DataFrame, timeframe: str,
                          max_open_trades: IntOrInf) -> pd.DataFrame:
    """
    Find overlapping trades by expanding each trade once per period it was open
    and then counting overlaps
    :param results: Results Dataframe - can be loaded
    :param timeframe: Frequency used for the backtest
    :param max_open_trades: parameter max_open_trades used during backtest run
    :return: dataframe with open-counts per time-period in freq
    """
    df_final = analyze_trade_parallelism(results, timeframe)
    return df_final[df_final['open_trades'] > max_open_trades]

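Both functions operate on the trades DataFrame from load_backtest_data(); evaluate_result_multi() simply filters the parallelism counts down to periods that exceeded max_open_trades. A sketch, assuming a 5m backtest and the same hypothetical inputs as above:

# --- usage sketch (not part of btanalysis.py) ---
from freqtrade.data.btanalysis import (analyze_trade_parallelism,
                                        evaluate_result_multi, load_backtest_data)

trades = load_backtest_data("user_data/backtest_results")  # hypothetical path

# Number of simultaneously open trades per 5-minute period
parallelism = analyze_trade_parallelism(trades, "5m")
print(parallelism["open_trades"].max())

# Periods where the backtest held more trades than the configured limit
violations = evaluate_result_multi(trades, "5m", max_open_trades=3)
print(violations)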
def trade_list_to_dataframe(trades: Union[List[Trade], List[LocalTrade]]) -> pd.DataFrame:
    """
    Convert list of Trade objects to pandas Dataframe
    :param trades: List of trade objects
    :return: Dataframe with BT_DATA_COLUMNS
    """
    df = pd.DataFrame.from_records([t.to_json(True) for t in trades], columns=BT_DATA_COLUMNS)
    if len(df) > 0:
        df['close_date'] = pd.to_datetime(df['close_date'], utc=True)
        df['open_date'] = pd.to_datetime(df['open_date'], utc=True)
        df['close_rate'] = df['close_rate'].astype('float64')
    return df


def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataFrame:
    """
    Load trades from a DB (using dburl)
    :param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)
    :param strategy: Strategy to load - mainly relevant for multi-strategy backtests
                     Can also serve as protection to load the correct result.
    :return: Dataframe containing Trades
    """
    init_db(db_url)

    filters = []
    if strategy:
        filters.append(Trade.strategy == strategy)
    trades = trade_list_to_dataframe(list(Trade.get_trades(filters).all()))

    return trades


def load_trades(source: str, db_url: str, exportfilename: Path,
                no_trades: bool = False, strategy: Optional[str] = None) -> pd.DataFrame:
    """
    Based on configuration option 'trade_source':
    * loads data from DB (using `db_url`)
    * loads data from backtestfile (using `exportfilename`)
    :param source: "DB" or "file" - specify source to load from
    :param db_url: sqlalchemy formatted url to a database
    :param exportfilename: Json file generated by backtesting
    :param no_trades: Skip using trades, only return backtesting data columns
    :param strategy: Strategy to load - mainly relevant for multi-strategy backtests
    :return: DataFrame containing trades
    """
    if no_trades:
        df = pd.DataFrame(columns=BT_DATA_COLUMNS)
        return df

    if source == "DB":
        return load_trades_from_db(db_url)
    elif source == "file":
        return load_backtest_data(exportfilename, strategy)


def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame,
                             date_index=False) -> pd.DataFrame:
    """
    Compare trades and backtested pair DataFrames to get trades performed on backtested period
    :return: the DataFrame of trades for the period
    """
    if date_index:
        trades_start = dataframe.index[0]
        trades_stop = dataframe.index[-1]
    else:
        trades_start = dataframe.iloc[0]['date']
        trades_stop = dataframe.iloc[-1]['date']
    trades = trades.loc[(trades['open_date'] >= trades_start) &
                        (trades['close_date'] <= trades_stop)]
    return trades
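A final sketch tying the loaders together: fetch trades either from a database or from a backtest export, then restrict them to the candle range of an OHLCV DataFrame. All paths, the strategy name and the `candles` DataFrame below are hypothetical stand-ins, not values taken from this file.

# --- usage sketch (not part of btanalysis.py) ---
from pathlib import Path

import pandas as pd

from freqtrade.data.btanalysis import extract_trades_of_period, load_trades

# Source "DB" reads live/dry-run trades; source "file" reads a backtest export.
trades = load_trades(
    source="file",
    db_url="sqlite:///tradesv3.dry-run.sqlite",          # ignored for source="file"
    exportfilename=Path("user_data/backtest_results"),   # hypothetical directory
    strategy="SampleStrategy",                           # hypothetical strategy name
)

# 'candles' stands in for an OHLCV DataFrame with a 'date' column (hypothetical data).
candles = pd.DataFrame({"date": pd.date_range("2023-01-01", periods=100, freq="5min", tz="UTC")})
trades_in_period = extract_trades_of_period(candles, trades)
print(len(trades_in_period))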