• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

spesmilo / electrum / 4773218692628480

07 Apr 2025 11:18AM UTC coverage: 60.965% (-0.04%) from 61.0%
4773218692628480

Pull #9708

CirrusCI

accumulator
qml: when opening channel, validate and show user feedback if amount outside acceptable range
Pull Request #9708: qml: when opening channel, validate and show user feedback if amount outside acceptable range

21398 of 35099 relevant lines covered (60.96%)

0.61 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

61.54
/electrum/util.py
1
# Electrum - lightweight Bitcoin client
2
# Copyright (C) 2011 Thomas Voegtlin
3
#
4
# Permission is hereby granted, free of charge, to any person
5
# obtaining a copy of this software and associated documentation files
6
# (the "Software"), to deal in the Software without restriction,
7
# including without limitation the rights to use, copy, modify, merge,
8
# publish, distribute, sublicense, and/or sell copies of the Software,
9
# and to permit persons to whom the Software is furnished to do so,
10
# subject to the following conditions:
11
#
12
# The above copyright notice and this permission notice shall be
13
# included in all copies or substantial portions of the Software.
14
#
15
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
# SOFTWARE.
23
import binascii
1✔
24
import concurrent.futures
1✔
25
import logging
1✔
26
import os, sys, re
1✔
27
from collections import defaultdict, OrderedDict
1✔
28
from concurrent.futures.process import ProcessPoolExecutor
1✔
29
from typing import (NamedTuple, Union, TYPE_CHECKING, Tuple, Optional, Callable, Any,
1✔
30
                    Sequence, Dict, Generic, TypeVar, List, Iterable, Set, Awaitable)
31
from datetime import datetime, timezone
1✔
32
import decimal
1✔
33
from decimal import Decimal
1✔
34
import urllib
1✔
35
import threading
1✔
36
import hmac
1✔
37
import hashlib
1✔
38
import stat
1✔
39
import locale
1✔
40
import asyncio
1✔
41
import urllib.request, urllib.parse, urllib.error
1✔
42
import builtins
1✔
43
import json
1✔
44
import time
1✔
45
import ssl
1✔
46
import ipaddress
1✔
47
from ipaddress import IPv4Address, IPv6Address
1✔
48
import random
1✔
49
import secrets
1✔
50
import functools
1✔
51
from functools import partial
1✔
52
from abc import abstractmethod, ABC
1✔
53
import socket
1✔
54
import enum
1✔
55
from contextlib import nullcontext
1✔
56

57
import attr
1✔
58
import aiohttp
1✔
59
from aiohttp_socks import ProxyConnector, ProxyType
1✔
60
import aiorpcx
1✔
61
import certifi
1✔
62
import dns.resolver
1✔
63

64
from .i18n import _
1✔
65
from .logging import get_logger, Logger
1✔
66

67
if TYPE_CHECKING:
1✔
68
    from .network import Network, ProxySettings
×
69
    from .interface import Interface
×
70
    from .simple_config import SimpleConfig
×
71
    from .paymentrequest import PaymentRequest
×
72

73

74
# module-level logger for this file
_logger = get_logger(__name__)
75

76

77
def inv_dict(d):
    """Return a new dict with the keys and values of *d* swapped.

    If values are not unique, later entries win.
    """
    inverted = {}
    for key, value in d.items():
        inverted[value] = key
    return inverted
79

80

81
def all_subclasses(cls) -> Set:
    """Return all (transitive) subclasses of cls."""
    # direct subclasses first, then recurse into each of them
    found = set(cls.__subclasses__())
    for direct in list(found):
        found.update(all_subclasses(direct))
    return found
87

88

89
ca_path = certifi.where()  # path to the CA certificate bundle used for TLS verification


# map: base unit name -> number of decimal places relative to satoshis
base_units = {'BTC':8, 'mBTC':5, 'bits':2, 'sat':0}
base_units_inverse = inv_dict(base_units)  # decimal places -> unit name
base_units_list = ['BTC', 'mBTC', 'bits', 'sat']  # explicit display order (do not rely on dict order)

DECIMAL_POINT_DEFAULT = 5  # mBTC


class UnknownBaseUnit(Exception):
    """Raised for a unit name / decimal-point value not present in base_units."""
    pass
100

101

102
def decimal_point_to_base_unit_name(dp: int) -> str:
    """Map a decimal-point position to its unit name, e.g. 8 -> "BTC".

    Raises UnknownBaseUnit for unmapped values.
    """
    unit_name = base_units_inverse.get(dp)
    if unit_name is None:
        raise UnknownBaseUnit(dp)
    return unit_name
108

109

110
def base_unit_name_to_decimal_point(unit_name: str) -> int:
    """Returns the max number of digits allowed after the decimal point."""
    # e.g. "BTC" -> 8; raises UnknownBaseUnit for unknown names
    if unit_name not in base_units:
        raise UnknownBaseUnit(unit_name)
    return base_units[unit_name]
117

118
def parse_max_spend(amt: Any) -> Optional[int]:
    """Checks if given amount is "spend-max"-like.
    Returns None or the positive integer weight for "max". Never raises.

    When creating invoices and on-chain txs, the user can specify to send "max".
    This is done by setting the amount to '!'. Splitting max between multiple
    tx outputs is also possible, and custom weights (positive ints) can also be used.
    For example, to send 40% of all coins to address1, and 60% to address2:
    ```
    address1, 2!
    address2, 3!
    ```
    """
    if not isinstance(amt, str) or not amt.endswith('!'):
        return None
    weight_text = amt[:-1]
    if not weight_text:
        return 1  # a bare '!' means weight 1
    try:
        weight = int(weight_text)
    except ValueError:
        return None
    return weight if weight > 0 else None
143

144
class NotEnoughFunds(Exception):
    """The wallet cannot fund the requested amount."""
    def __str__(self):
        return _("Insufficient funds")


class UneconomicFee(Exception):
    """The fee would exceed the funds gained by the transaction."""
    def __str__(self):
        return _("The fee for the transaction is higher than the funds gained from it.")


class NoDynamicFeeEstimates(Exception):
    """Dynamic fee estimates are not available (e.g. no server data yet)."""
    def __str__(self):
        return _('Dynamic fee estimates not available')


class BelowDustLimit(Exception):
    """An output value is below the dust threshold."""
    pass


class InvalidPassword(Exception):
    """Wrong wallet/keystore password; an optional custom message may be given."""
    def __init__(self, message: Optional[str] = None):
        self.message = message

    def __str__(self):
        if self.message is None:
            return _("Incorrect password")
        else:
            return str(self.message)


class AddTransactionException(Exception):
    """Base class for errors while adding a transaction to the wallet."""
    pass


class UnrelatedTransactionException(AddTransactionException):
    """The transaction does not touch any address of this wallet."""
    def __str__(self):
        return _("Transaction is unrelated to this wallet.")


class FileImportFailed(Exception):
    """Importing data from a file failed; message carries detail for the user."""
    def __init__(self, message=''):
        self.message = str(message)

    def __str__(self):
        return _("Failed to import from file.") + "\n" + self.message


class FileExportFailed(Exception):
    """Exporting data to a file failed; message carries detail for the user."""
    def __init__(self, message=''):
        self.message = str(message)

    def __str__(self):
        return _("Failed to export to file.") + "\n" + self.message


class WalletFileException(Exception):
    """Problem with the wallet file.

    should_report_crash: if True, the UI should offer sending a crash report.
    """
    def __init__(self, message='', *, should_report_crash: bool = False):
        Exception.__init__(self, message)
        self.should_report_crash = should_report_crash


class BitcoinException(Exception): pass


class UserFacingException(Exception):
    """Exception that contains information intended to be shown to the user."""


class InvoiceError(UserFacingException): pass


class NetworkOfflineException(UserFacingException):
    """Can be raised if we are running in offline mode (--offline flag)
    and the user requests an operation that requires the network.
    """
    def __str__(self):
        return _("You are offline.")


# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass
228

229

230
def to_decimal(x: Union[str, float, int, Decimal]) -> Decimal:
    """Convert x to Decimal, going through str() to avoid float binary artifacts:
        >>> Decimal(41754.681)
        Decimal('41754.680999999996856786310672760009765625')
        >>> Decimal("41754.681")
        Decimal('41754.681')
    Decimals are returned unchanged.
    """
    return x if isinstance(x, Decimal) else Decimal(str(x))
239

240

241
# note: this is not a NamedTuple as then its json encoding cannot be customized
class Satoshis(object):
    """Thin wrapper for an amount in satoshis (sometimes with msat precision)."""

    __slots__ = ('value',)

    def __new__(cls, value):
        instance = super().__new__(cls)
        # note: 'value' sometimes has msat precision
        assert isinstance(value, (int, Decimal)), f"unexpected type for {value=!r}"
        instance.value = value
        return instance

    def __repr__(self):
        return f'Satoshis({self.value})'

    def __str__(self):
        # note: precision is truncated to satoshis here
        return format_satoshis(self.value)

    def __eq__(self, other):
        return self.value == other.value

    def __ne__(self, other):
        return not (self == other)

    def __add__(self, other):
        return Satoshis(self.value + other.value)
267

268

269
# note: this is not a NamedTuple as then its json encoding cannot be customized
class Fiat(object):
    """A fiat amount (Decimal or None for "no data") tagged with a currency code."""

    __slots__ = ('value', 'ccy')

    def __new__(cls, value: Optional[Decimal], ccy: str):
        instance = super().__new__(cls)
        instance.ccy = ccy
        if not isinstance(value, (Decimal, type(None))):
            raise TypeError(f"value should be Decimal or None, not {type(value)}")
        instance.value = value
        return instance

    def __repr__(self):
        return 'Fiat(%s)'% self.__str__()

    def __str__(self):
        if self.value is None or self.value.is_nan():
            return _('No Data')
        return "{:.2f}".format(self.value)

    def to_ui_string(self):
        if self.value is None or self.value.is_nan():
            return _('No Data')
        return "{:.2f}".format(self.value) + ' ' + self.ccy

    def __eq__(self, other):
        if not isinstance(other, Fiat):
            return False
        if self.ccy != other.ccy:
            return False
        # NaN != NaN in Decimal, but two "no data" values should compare equal
        both_nan = (isinstance(self.value, Decimal) and isinstance(other.value, Decimal)
                    and self.value.is_nan() and other.value.is_nan())
        return both_nan or self.value == other.value

    def __ne__(self, other):
        return not (self == other)

    def __add__(self, other):
        assert self.ccy == other.ccy
        return Fiat(self.value + other.value, self.ccy)
312

313

314
class MyEncoder(json.JSONEncoder):
    """JSON encoder aware of electrum-specific types (Transaction, Satoshis, Fiat, ...)."""
    def default(self, obj):
        # note: this does not get called for namedtuples :(  https://bugs.python.org/issue30343
        # local import — presumably to avoid a circular import; TODO confirm
        from .transaction import Transaction, TxOutput
        if isinstance(obj, Transaction):
            return obj.serialize()
        if isinstance(obj, TxOutput):
            return obj.to_legacy_tuple()
        if isinstance(obj, Satoshis):
            return str(obj)
        if isinstance(obj, Fiat):
            return str(obj)
        if isinstance(obj, Decimal):
            return str(obj)  # as string, to avoid float precision loss
        if isinstance(obj, datetime):
            return obj.isoformat(' ')[:-3]  # drops the last 3 chars of the ISO form
        if isinstance(obj, set):
            return list(obj)
        if isinstance(obj, bytes): # for namedtuples in lnchannel
            return obj.hex()
        if hasattr(obj, 'to_json') and callable(obj.to_json):
            return obj.to_json()
        return super(MyEncoder, self).default(obj)
337

338

339
class ThreadJob(Logger):
    """A job that is run periodically from a thread's main loop.  run() is
    called from that thread's context.
    """

    def __init__(self):
        Logger.__init__(self)

    def run(self):
        """Called periodically from the thread"""
        # subclasses override this; the base implementation is a no-op
        pass
350

351
class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''

    def __init__(self, classes, interval=30):
        ThreadJob.__init__(self)
        self.next_time = 0        # next wall-clock time a scan is due
        self.classes = classes    # classes whose instances we count
        self.interval = interval  # seconds between scans

    def mem_stats(self):
        """Count live instances of each tracked class and log the totals."""
        import gc
        self.logger.info("Start memscan")
        gc.collect()
        buckets = defaultdict(list)
        for candidate in gc.get_objects():
            for klass in self.classes:
                if isinstance(candidate, klass):
                    buckets[klass].append(candidate)
        for klass, instances in buckets.items():
            self.logger.info(f"{klass.__name__}: {len(instances)}")
        self.logger.info("Finish memscan")

    def run(self):
        # rate-limit: only scan once per interval
        if time.time() <= self.next_time:
            return
        self.mem_stats()
        self.next_time = time.time() + self.interval
376

377
class DaemonThread(threading.Thread, Logger):
    """ daemon thread that terminates cleanly """

    LOGGING_SHORTCUT = 'd'

    def __init__(self):
        threading.Thread.__init__(self)
        Logger.__init__(self)
        # thread that created us; used so we wind down if the parent dies
        self.parent_thread = threading.current_thread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []  # ThreadJob-like objects serviced by run_jobs()
        self.stopped_event = threading.Event()        # set when fully stopped
        self.stopped_event_async = asyncio.Event()    # set when fully stopped
        self.wake_up_event = threading.Event()  # for perf optimisation of polling in run()

    def add_jobs(self, jobs):
        """Register jobs to be run periodically by this thread."""
        with self.job_lock:
            self.jobs.extend(jobs)

    def run_jobs(self):
        # Don't let a throwing job disrupt the thread, future runs of
        # itself, or other jobs.  This is useful protection against
        # malformed or malicious server responses
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except Exception as e:
                    self.logger.exception('')

    def remove_jobs(self, jobs):
        """Unregister jobs; raises ValueError if a job was never added."""
        with self.job_lock:
            for job in jobs:
                self.jobs.remove(job)

    def start(self):
        # mark running before the OS thread starts, so is_running() is already True
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)

    def is_running(self):
        # also False once the parent thread died, so orphaned daemons stop
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()

    def stop(self):
        """Request termination; wakes a run() loop that polls wake_up_event."""
        with self.running_lock:
            self.running = False
            # pulse the event so a waiting run() loop notices immediately
            self.wake_up_event.set()
            self.wake_up_event.clear()

    def on_stop(self):
        """Called by subclasses at the end of run(); signals full shutdown."""
        if 'ANDROID_DATA' in os.environ:
            # detach this thread from the JVM before it exits (kivy/jnius requirement)
            import jnius
            jnius.detach()
            self.logger.info("jnius detach")
        self.logger.info("stopped")
        self.stopped_event.set()
        # get_asyncio_loop is defined elsewhere in this module; the asyncio event
        # must be set from inside that loop, hence call_soon_threadsafe
        loop = get_asyncio_loop()
        loop.call_soon_threadsafe(self.stopped_event_async.set)
438

439

440
def print_stderr(*args):
    """Write args (stringified, space-separated) to stderr and flush."""
    sys.stderr.write(" ".join(str(a) for a in args) + "\n")
    sys.stderr.flush()
444

445
def print_msg(*args):
    """Write args (stringified, space-separated) to stdout and flush."""
    sys.stdout.write(" ".join(str(a) for a in args) + "\n")
    sys.stdout.flush()
450

451
def json_encode(obj):
    """Pretty-print obj as JSON via MyEncoder; fall back to repr() if unserializable."""
    try:
        return json.dumps(obj, sort_keys=True, indent=4, cls=MyEncoder)
    except TypeError:
        return repr(obj)
457

458
def json_decode(x):
    """Parse x as JSON with floats decoded as Decimal; on any failure return x unchanged."""
    try:
        return json.loads(x, parse_float=Decimal)
    except Exception:
        # best-effort: callers rely on getting the input back verbatim if it isn't JSON
        return x
463

464
def json_normalize(x):
    """Round-trip x through our JSON encoder/decoder so only plain JSON types remain."""
    # note: The return value of commands, when going through the JSON-RPC interface,
    #       is json-encoded. The encoder used there cannot handle some types, e.g. electrum.util.Satoshis.
    # note: We should not simply do "json_encode(x)" here, as then later x would get doubly json-encoded.
    # see #5868
    return json_decode(json_encode(x))
470

471

472
# taken from Django Source Code
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise."""
    # hmac.compare_digest runs in constant time, avoiding timing side-channels
    return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
476

477

478
# dedicated child logger for profiler() output
_profiler_logger = _logger.getChild('profiler')
479
def profiler(func=None, *, min_threshold: Union[int, float, None] = None):
    """Function decorator that logs execution time.

    min_threshold: if set, only log if time taken is higher than threshold
    NOTE: does not work with async methods.
    """
    if func is None:  # to make "@profiler(...)" work. (in addition to bare "@profiler")
        return partial(profiler, min_threshold=min_threshold)

    # fix: without functools.wraps the wrapper hid the wrapped function's
    # __name__/__doc__/__module__, hurting introspection and debugging
    @functools.wraps(func)
    def do_profile(*args, **kw_args):
        name = func.__qualname__
        t0 = time.time()
        o = func(*args, **kw_args)
        t = time.time() - t0
        if min_threshold is None or t > min_threshold:
            _profiler_logger.debug(f"{name} {t:,.4f} sec")
        return o
    return do_profile
496

497

498
class AsyncHangDetector:
    """Context manager that logs every `n` seconds if encapsulated context still has not exited."""

    def __init__(
        self,
        *,
        period_sec: int = 15,            # seconds between "still hanging" log lines
        message: str,                    # text logged on each period
        logger: Optional[logging.Logger] = None,  # defaults to this module's logger
    ):
        self.period_sec = period_sec
        self.message = message
        self.logger = logger or _logger

    async def _monitor(self):
        # note: this assumes that the event loop itself is not blocked
        t0 = time.monotonic()
        while True:
            await asyncio.sleep(self.period_sec)
            t1 = time.monotonic()
            self.logger.info(f"{self.message} (after {t1 - t0:.2f} sec)")

    async def __aenter__(self):
        # start the watchdog task; it is cancelled when the context exits
        self.mtask = asyncio.create_task(self._monitor())

    async def __aexit__(self, exc_type, exc, tb):
        self.mtask.cancel()
525

526

527
def android_ext_dir():
    """Path to the primary external storage directory (Android-only)."""
    from android.storage import primary_external_storage_path
    return primary_external_storage_path()
530

531
def android_backup_dir():
    """Return (creating it if needed) the wallet-backup directory on Android
    external storage: <external storage>/<package name>.
    """
    pkgname = get_android_package_name()
    d = os.path.join(android_ext_dir(), pkgname)
    # fix: exists()+mkdir() was racy (TOCTOU) — makedirs with exist_ok is atomic
    # with respect to concurrent creation and also creates missing parents
    os.makedirs(d, exist_ok=True)
    return d
537

538
def android_data_dir():
    """App-private data directory of the running Android app (Android-only)."""
    import jnius
    PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
    return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
542

543
def ensure_sparse_file(filename):
    """Best-effort: mark the file as sparse. No-op outside Windows; never raises."""
    # On modern Linux, no need to do anything.
    # On Windows, need to explicitly mark file.
    if os.name == "nt":
        try:
            # NOTE(review): this goes through a shell; filename is quoted but not
            # escaped — only call with trusted, well-formed paths.
            os.system('fsutil sparse setflag "{}" 1'.format(filename))
        except Exception as e:
            # fix: the log line literally said "(unknown)" instead of the filename
            _logger.info(f'error marking file {filename} as sparse: {e}')
551

552

553
def get_headers_dir(config):
    """Directory where blockchain headers are kept: the config's data path."""
    return config.path
555

556

557
def assert_datadir_available(config_path):
    """Raise FileNotFoundError if the datadir at config_path no longer exists."""
    if not os.path.exists(config_path):
        raise FileNotFoundError(
            'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
            'Should be at {}'.format(config_path))
565

566

567
def assert_file_in_datadir_available(path, config_path):
    """Raise FileNotFoundError if path is missing; blame the datadir if it is gone too."""
    if not os.path.exists(path):
        # if the whole datadir vanished, raise that (more informative) error instead
        assert_datadir_available(config_path)
        raise FileNotFoundError(
            'Cannot find file but datadir is there.' + '\n' +
            'Should be at {}'.format(path))
575

576

577
def standardize_path(path):
    """Canonicalize a path: expanduser, then abspath, then normcase.

    note: os.path.realpath() is not used, as on Windows it can return non-working
          paths (see #8495). This means that we don't resolve symlinks!
    """
    return os.path.normcase(os.path.abspath(os.path.expanduser(path)))
585

586

587
def get_new_wallet_name(wallet_folder: str) -> str:
    """Returns a file basename for a new wallet to be used.
    Can raise OSError.
    """
    counter = 1
    candidate = "wallet_1"
    # re-list the folder on each probe, so concurrent creations are noticed
    while candidate in os.listdir(wallet_folder):
        counter += 1
        candidate = "wallet_%d" % counter
    return candidate
599

600

601
def is_android_debug_apk() -> bool:
    """True iff running inside an Android APK built with BuildConfig.DEBUG set."""
    is_android = 'ANDROID_DATA' in os.environ
    if not is_android:
        return False
    from jnius import autoclass
    pkgname = get_android_package_name()
    # the generated BuildConfig class lives in the app's package
    build_config = autoclass(f"{pkgname}.BuildConfig")
    return bool(build_config.DEBUG)
609

610

611
def get_android_package_name() -> str:
    """Java package name of the running Android app (must be called on Android)."""
    is_android = 'ANDROID_DATA' in os.environ
    assert is_android
    from jnius import autoclass
    from android.config import ACTIVITY_CLASS_NAME
    activity = autoclass(ACTIVITY_CLASS_NAME).mActivity
    pkgname = str(activity.getPackageName())
    return pkgname
619

620

621
def assert_bytes(*args):
    """porting helper, assert args type (bytes/bytearray); prints types on failure"""
    try:
        for item in args:
            assert isinstance(item, (bytes, bytearray))
    except Exception:
        print('assert bytes failed', list(map(type, args)))
        raise
631

632

633
def assert_str(*args):
    """porting helper, assert args type (str)"""
    for item in args:
        assert isinstance(item, str)
639

640

641
def to_string(x, enc) -> str:
    """Decode bytes/bytearray with enc; pass str through unchanged; else TypeError."""
    if isinstance(x, str):
        return x
    if isinstance(x, (bytes, bytearray)):
        return x.decode(enc)
    raise TypeError("Not a string or bytes like object")
648

649

650
def to_bytes(something, encoding='utf8') -> bytes:
    """
    cast string to bytes() like object, but for python2 support it's bytearray copy
    """
    if isinstance(something, bytes):
        return something
    if isinstance(something, str):
        return something.encode(encoding)
    if isinstance(something, bytearray):
        return bytes(something)
    raise TypeError("Not a string or bytes like object")
662

663

664
# shorthand: parse a hex string into bytes ("bytes from hex")
bfh = bytes.fromhex
665

666

667
def xor_bytes(a: bytes, b: bytes) -> bytes:
    """XOR two byte strings, truncated to the length of the shorter one."""
    # zip stops at the shorter input, giving the same truncation as min(len(a), len(b))
    return bytes(x ^ y for x, y in zip(a, b))
671

672

673
def user_dir():
    """Electrum data directory for this platform; None if it cannot be determined.

    Precedence: ELECTRUMDIR override, Android, posix $HOME, Windows app-data vars.
    """
    env = os.environ
    if "ELECTRUMDIR" in env:
        return env["ELECTRUMDIR"]
    if 'ANDROID_DATA' in env:
        return android_data_dir()
    if os.name == 'posix':
        return os.path.join(env["HOME"], ".electrum")
    if "APPDATA" in env:
        return os.path.join(env["APPDATA"], "Electrum")
    if "LOCALAPPDATA" in env:
        return os.path.join(env["LOCALAPPDATA"], "Electrum")
    # no home directory found in environment variables
    return None
687

688

689
def resource_path(*parts):
    """Join parts onto the electrum package directory (pkg_dir below)."""
    return os.path.join(pkg_dir, *parts)


# absolute path to python package folder of electrum ("lib")
pkg_dir = os.path.split(os.path.realpath(__file__))[0]
695

696

697
def is_valid_email(s):
    """Loose syntactic check: something@something.something."""
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", s))
700

701

702
def is_hash256_str(text: Any) -> bool:
    """True iff text is a 64-char hex string (e.g. a sha256 digest)."""
    return isinstance(text, str) and len(text) == 64 and is_hex_str(text)
706

707

708
def is_hex_str(text: Any) -> bool:
    """True iff text is a string of strict, whitespace-free hex."""
    if not isinstance(text, str):
        return False
    try:
        raw = bytes.fromhex(text)
    except Exception:
        return False
    # bytes.fromhex tolerates whitespace; the length check rejects it
    return len(text) == 2 * len(raw)
718

719

720
def is_integer(val: Any) -> bool:
    """True iff val is an int (note: bools are ints and also pass)."""
    return isinstance(val, int)


def is_non_negative_integer(val: Any) -> bool:
    """True iff val is an int and >= 0."""
    if is_integer(val):
        return val >= 0
    return False


def is_int_or_float(val: Any) -> bool:
    """True iff val is an int or a float (bools pass, as bool subclasses int)."""
    return isinstance(val, (int, float))


def is_non_negative_int_or_float(val: Any) -> bool:
    """True iff val is an int or float and >= 0."""
    if is_int_or_float(val):
        return val >= 0
    return False
738

739

740
def chunks(items, size: int):
    """Break up items into chunks of length size (last may be shorter).

    note: requires a sequence (len + slicing), not an arbitrary iterable.
    """
    if size < 1:
        raise ValueError(f"size must be positive, not {repr(size)}")
    for start in range(0, len(items), size):
        yield items[start: start + size]
746

747

748
def format_satoshis_plain(
        x: Union[int, float, Decimal, str],  # amount in satoshis,
        *,
        decimal_point: int = 8,  # how much to shift decimal point to left (default: sat->BTC)
) -> str:
    """Display a satoshi amount scaled.  Always uses a '.' as a decimal
    point and has no thousands separator"""
    # "spend-max" amounts like '!' or '2!' are rendered symbolically
    if parse_max_spend(x):
        return f'max({x})'
    assert isinstance(x, (int, float, Decimal)), f"{x!r} should be a number"
    scale_factor = pow(10, decimal_point)
    # trailing zeros and a bare trailing '.' are stripped
    return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
760

761

762
# Check that Decimal precision is sufficient.
# We need at the very least ~20, as we deal with msat amounts, and
# log10(21_000_000 * 10**8 * 1000) ~= 18.3
# decimal.DefaultContext.prec == 28 by default, but it is mutable.
# We enforce that we have at least that available.
assert decimal.getcontext().prec >= 28, f"PyDecimal precision too low: {decimal.getcontext().prec}"

# DECIMAL_POINT = locale.localeconv()['decimal_point']  # type: str
DECIMAL_POINT = "."
THOUSANDS_SEP = " "
# format_satoshis() below assumes both separators are exactly one char wide
assert len(DECIMAL_POINT) == 1, f"DECIMAL_POINT has unexpected len. {DECIMAL_POINT!r}"
assert len(THOUSANDS_SEP) == 1, f"THOUSANDS_SEP has unexpected len. {THOUSANDS_SEP!r}"
774

775

776
def format_satoshis(
        x: Union[int, float, Decimal, str, None],  # amount in satoshis
        *,
        num_zeros: int = 0,
        decimal_point: int = 8,  # how much to shift decimal point to left (default: sat->BTC)
        precision: int = 0,  # extra digits after satoshi precision
        is_diff: bool = False,  # if True, enforce a leading sign (+/-)
        whitespaces: bool = False,  # if True, add whitespaces, to align numbers in a column
        add_thousands_sep: bool = False,  # if True, add whitespaces, for better readability of the numbers
) -> str:
    """Render a satoshi amount as a human-readable, unit-scaled string.

    Returns 'unknown' for None and 'max(...)' for spend-max ('!') amounts.
    """
    if x is None:
        return 'unknown'
    if parse_max_spend(x):
        return f'max({x})'
    assert isinstance(x, (int, float, Decimal)), f"{x!r} should be a number"
    # lose redundant precision
    x = Decimal(x).quantize(Decimal(10) ** (-precision))
    # format string
    overall_precision = decimal_point + precision  # max digits after final decimal point
    decimal_format = "." + str(overall_precision) if overall_precision > 0 else ""
    if is_diff:
        decimal_format = '+' + decimal_format
    # initial result
    scale_factor = pow(10, decimal_point)
    result = ("{:" + decimal_format + "f}").format(x / scale_factor)
    # ensure there is always a "." so the split below cannot fail
    if "." not in result: result += "."
    result = result.rstrip('0')
    # add extra decimal places (zeros)
    integer_part, fract_part = result.split(".")
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    # add whitespaces as thousands' separator for better readability of numbers
    if add_thousands_sep:
        sign = integer_part[0] if integer_part[0] in ("+", "-") else ""
        if sign == "-":
            integer_part = integer_part[1:]
        integer_part = "{:,}".format(int(integer_part)).replace(',', THOUSANDS_SEP)
        integer_part = sign + integer_part
        # group the fractional digits in threes as well
        fract_part = THOUSANDS_SEP.join(fract_part[i:i+3] for i in range(0, len(fract_part), 3))
    result = integer_part + DECIMAL_POINT + fract_part
    # add leading/trailing whitespaces so that numbers can be aligned in a column
    if whitespaces:
        target_fract_len = overall_precision
        target_integer_len = 14 - decimal_point  # should be enough for up to unsigned 999999 BTC
        if add_thousands_sep:
            # account for the separator chars inserted above
            target_fract_len += max(0, (target_fract_len - 1) // 3)
            target_integer_len += max(0, (target_integer_len - 1) // 3)
        # add trailing whitespaces
        result += " " * (target_fract_len - len(fract_part))
        # add leading whitespaces
        target_total_len = target_integer_len + 1 + target_fract_len
        result = " " * (target_total_len - len(result)) + result
    return result
829

830

831
FEERATE_PRECISION = 1  # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)  # quantum used by quantize_feerate()
# UI label strings for feerate / tx-size / mempool units:
UI_UNIT_NAME_FEERATE_SAT_PER_VBYTE = "sat/vbyte"
UI_UNIT_NAME_FEERATE_SAT_PER_VB = "sat/vB"
UI_UNIT_NAME_TXSIZE_VBYTES = "vbytes"
UI_UNIT_NAME_MEMPOOL_MB = "vMB"
837

838

839
def format_fee_satoshis(fee, *, num_zeros=0, precision=None):
    """Format a fee rate (sat/byte); precision defaults to FEERATE_PRECISION."""
    if precision is None:
        precision = FEERATE_PRECISION
    return format_satoshis(
        fee,
        # no more zeroes than the available feerate precision
        num_zeros=min(num_zeros, FEERATE_PRECISION),
        decimal_point=0,
        precision=precision,
    )
844

845

846
def quantize_feerate(fee) -> Union[None, Decimal, int]:
    """Strip sat/byte fee rate of excess precision; None passes through."""
    if fee is None:
        return None
    quantized = Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
    return quantized
851

852

853
def timestamp_to_datetime(timestamp: Union[int, float, None], *, utc: bool = False) -> Optional[datetime]:
    """Convert a POSIX timestamp to a datetime.

    Returns a naive local-time datetime by default, an aware UTC datetime if
    utc=True, or None if the timestamp is None.
    """
    if timestamp is None:
        return None
    tzinfo = timezone.utc if utc else None
    return datetime.fromtimestamp(timestamp, tz=tzinfo)

861

862
def format_time(timestamp: Union[int, float, None]) -> str:
    """Render a POSIX timestamp as 'YYYY-MM-DD HH:MM' (local time),
    or a translated 'Unknown' for None."""
    dt = timestamp_to_datetime(timestamp)
    if not dt:
        return _("Unknown")
    return dt.isoformat(' ', timespec="minutes")

866

867
def age(
    from_date: Union[int, float, None],  # POSIX timestamp
    *,
    since_date: datetime = None,
    target_tz=None,
    include_seconds: bool = False,
) -> str:
    """Takes a timestamp and returns a string with the approximation of the age.

    Each message below is wrapped in a literal `_()` call so that gettext
    string extraction finds it; keep the branch structure literal.
    Minute thresholds: <45 minutes, <90 "1 hour", <1440 hours, <2880 "1 day",
    <43220 days, <86400 "1 month", <525600 months, <1051200 "1 year", else years.
    """
    if from_date is None:
        return _("Unknown")

    # NOTE(review): from_date becomes a naive local-time datetime here, while
    # datetime.now(target_tz) is tz-aware when target_tz is given; comparing
    # naive with aware datetimes raises TypeError -- presumably callers pass
    # naive since_date / no target_tz. TODO confirm.
    from_date = datetime.fromtimestamp(from_date)
    if since_date is None:
        since_date = datetime.now(target_tz)

    distance_in_time = from_date - since_date
    is_in_past = from_date < since_date
    # days*86400+seconds keeps sub-day resolution while discarding microseconds
    distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    distance_in_minutes = int(round(distance_in_seconds / 60))

    if distance_in_minutes == 0:
        if include_seconds:
            if is_in_past:
                return _("{} seconds ago").format(distance_in_seconds)
            else:
                return _("in {} seconds").format(distance_in_seconds)
        else:
            if is_in_past:
                return _("less than a minute ago")
            else:
                return _("in less than a minute")
    elif distance_in_minutes < 45:
        if is_in_past:
            return _("about {} minutes ago").format(distance_in_minutes)
        else:
            return _("in about {} minutes").format(distance_in_minutes)
    elif distance_in_minutes < 90:
        if is_in_past:
            return _("about 1 hour ago")
        else:
            return _("in about 1 hour")
    elif distance_in_minutes < 1440:
        if is_in_past:
            return _("about {} hours ago").format(round(distance_in_minutes / 60.0))
        else:
            return _("in about {} hours").format(round(distance_in_minutes / 60.0))
    elif distance_in_minutes < 2880:
        if is_in_past:
            return _("about 1 day ago")
        else:
            return _("in about 1 day")
    # NOTE(review): 30 days is 43200 minutes; 43220 looks like a longstanding
    # off-by-20 quirk -- kept as-is to preserve behavior.
    elif distance_in_minutes < 43220:
        if is_in_past:
            return _("about {} days ago").format(round(distance_in_minutes / 1440))
        else:
            return _("in about {} days").format(round(distance_in_minutes / 1440))
    elif distance_in_minutes < 86400:
        if is_in_past:
            return _("about 1 month ago")
        else:
            return _("in about 1 month")
    elif distance_in_minutes < 525600:
        if is_in_past:
            return _("about {} months ago").format(round(distance_in_minutes / 43200))
        else:
            return _("in about {} months").format(round(distance_in_minutes / 43200))
    elif distance_in_minutes < 1051200:
        if is_in_past:
            return _("about 1 year ago")
        else:
            return _("in about 1 year")
    else:
        if is_in_past:
            return _("over {} years ago").format(round(distance_in_minutes / 525600))
        else:
            return _("in over {} years").format(round(distance_in_minutes / 525600))
943

944
# Hardcoded block explorers, one dict per network.
# Maps explorer name -> (base_url, {kind: path_prefix}) where kind is
# 'tx' or 'addr'; see block_explorer_URL() for how full URLs are assembled.
mainnet_block_explorers = {
    '3xpl.com': ('https://3xpl.com/bitcoin/',
                        {'tx': 'transaction/', 'addr': 'address/'}),
    'Bitflyer.jp': ('https://chainflyer.bitflyer.jp/',
                        {'tx': 'Transaction/', 'addr': 'Address/'}),
    'Blockchain.info': ('https://blockchain.com/btc/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'Blockstream.info': ('https://blockstream.info/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'Bitaps.com': ('https://btc.bitaps.com/',
                        {'tx': '', 'addr': ''}),
    'BTC.com': ('https://btc.com/',
                        {'tx': '', 'addr': ''}),
    'Chain.so': ('https://www.chain.so/',
                        {'tx': 'tx/BTC/', 'addr': 'address/BTC/'}),
    'Insight.is': ('https://insight.bitpay.com/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'BlockCypher.com': ('https://live.blockcypher.com/btc/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'Blockchair.com': ('https://blockchair.com/bitcoin/',
                        {'tx': 'transaction/', 'addr': 'address/'}),
    'blockonomics.co': ('https://www.blockonomics.co/',
                        {'tx': 'api/tx?txid=', 'addr': '#/search?q='}),
    'mempool.space': ('https://mempool.space/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'mempool.emzy.de': ('https://mempool.emzy.de/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'OXT.me': ('https://oxt.me/',
                        {'tx': 'transaction/', 'addr': 'address/'}),
    'mynode.local': ('http://mynode.local:3002/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    # 'system default' hands the URI to the OS handler for blockchain: URIs
    'system default': ('blockchain:/',
                        {'tx': 'tx/', 'addr': 'address/'}),
}

testnet_block_explorers = {
    'Bitaps.com': ('https://tbtc.bitaps.com/',
                       {'tx': '', 'addr': ''}),
    'BlockCypher.com': ('https://live.blockcypher.com/btc-testnet/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'Blockchain.info': ('https://www.blockchain.com/btc-testnet/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'Blockstream.info': ('https://blockstream.info/testnet/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'mempool.space': ('https://mempool.space/testnet/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'smartbit.com.au': ('https://testnet.smartbit.com.au/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}

testnet4_block_explorers = {
    'mempool.space': ('https://mempool.space/testnet4/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'wakiyamap.dev': ('https://testnet4-explorer.wakiyamap.dev/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}

signet_block_explorers = {
    'bc-2.jp': ('https://explorer.bc-2.jp/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'mempool.space': ('https://mempool.space/signet/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'bitcoinexplorer.org': ('https://signet.bitcoinexplorer.org/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'wakiyamap.dev': ('https://signet-explorer.wakiyamap.dev/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'ex.signet.bublina.eu.org': ('https://ex.signet.bublina.eu.org/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'system default': ('blockchain:/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}

# API paths assumed when a custom explorer is configured as a bare base URL.
_block_explorer_default_api_loc = {'tx': 'tx/', 'addr': 'address/'}
1019

1020

1021
def block_explorer_info():
    """Return the dict of hardcoded block explorers for the active network."""
    from . import constants
    explorers_by_net = {
        "testnet": testnet_block_explorers,
        "testnet4": testnet4_block_explorers,
        "signet": signet_block_explorers,
    }
    # any other network name (i.e. mainnet/regtest) falls back to mainnet
    return explorers_by_net.get(constants.net.NET_NAME, mainnet_block_explorers)

1031

1032
def block_explorer(config: 'SimpleConfig') -> Optional[str]:
    """Returns name of selected block explorer,
    or None if a custom one (not among hardcoded ones) is configured.
    """
    if config.BLOCK_EXPLORER_CUSTOM is not None:
        return None
    be_key = config.BLOCK_EXPLORER
    if block_explorer_info().get(be_key) is None:
        # configured name is unknown for this network -> fall back to default
        be_key = config.cv.BLOCK_EXPLORER.get_default_value()
    assert isinstance(be_key, str), f"{be_key!r} should be str"
    return be_key

1045

1046
def block_explorer_tuple(config: 'SimpleConfig') -> Optional[Tuple[str, dict]]:
    """Return (base_url, api_paths) for the configured block explorer,
    or None if the configuration is unusable."""
    custom_be = config.BLOCK_EXPLORER_CUSTOM
    if not custom_be:
        # using one of the hardcoded block explorers
        return block_explorer_info().get(block_explorer(config))
    # custom explorer: either a bare base URL, or an explicit (url, paths) pair
    if isinstance(custom_be, str):
        return custom_be, _block_explorer_default_api_loc
    if isinstance(custom_be, (tuple, list)) and len(custom_be) == 2:
        return tuple(custom_be)
    _logger.warning(f"not using {config.cv.BLOCK_EXPLORER_CUSTOM.key()!r} from config. "
                    f"expected a str or a pair but got {custom_be!r}")
    return None

1060

1061
def block_explorer_URL(config: 'SimpleConfig', kind: str, item: str) -> Optional[str]:
    """Build a block-explorer URL for `item` of the given `kind`
    ('tx' or 'addr'), or None if no usable explorer is configured."""
    be_tuple = block_explorer_tuple(config)
    if not be_tuple:
        return
    base_url, api_paths = be_tuple
    path_prefix = api_paths.get(kind)
    if path_prefix is None:
        # this explorer does not support the requested kind
        return
    if base_url[-1] != "/":
        base_url += "/"
    return base_url + path_prefix + item

1074

1075

1076

1077

1078
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
    # Write the prompt explicitly to stdout before reading, to work around
    # the stdin/stderr redirection bug referenced above.
    if prompt:
        sys.stdout.write(prompt)
    return builtin_raw_input()

# Save the original builtin BEFORE installing the replacement -- the order of
# these two assignments is load-bearing.
builtin_raw_input = builtins.input
builtins.input = raw_input

1089

1090
def parse_json(message):
    """Split one newline-terminated JSON value off the front of `message` (bytes).

    Returns (obj, remainder): obj is None if no complete line is buffered yet
    or if the line is not valid JSON; remainder is the unconsumed tail.
    """
    # TODO: check \r\n pattern
    newline_pos = message.find(b'\n')
    if newline_pos == -1:
        # no complete line buffered yet
        return None, message
    line = message[:newline_pos]
    remainder = message[newline_pos + 1:]
    try:
        obj = json.loads(line.decode('utf8'))
    except Exception:
        obj = None
    return obj, remainder

1101

1102
def setup_thread_excepthook():
    """
    Workaround for `sys.excepthook` thread bug from:
    http://bugs.python.org/issue1230540

    Call once from the main thread before creating any threads.
    """

    # keep the original so every Thread still gets normally initialised
    init_original = threading.Thread.__init__

    # replacement __init__: wraps each thread's run() so that uncaught
    # exceptions are routed through sys.excepthook (plain threading does not
    # do this -- see the bug linked above)
    def init(self, *args, **kwargs):

        init_original(self, *args, **kwargs)
        run_original = self.run

        def run_with_except_hook(*args2, **kwargs2):
            try:
                run_original(*args2, **kwargs2)
            except Exception:
                sys.excepthook(*sys.exc_info())

        self.run = run_with_except_hook

    # monkey-patch: affects all Thread subclasses created after this call
    threading.Thread.__init__ = init

1127

1128
def send_exception_to_crash_reporter(e: BaseException):
    """Forward an exception to the crash reporter.

    Thin wrapper so that base_crash_reporter is only imported on demand;
    the local import deliberately shadows this function's own name.
    """
    from .base_crash_reporter import send_exception_to_crash_reporter
    send_exception_to_crash_reporter(e)

1132

1133
def versiontuple(v):
    """Parse a dotted version string like '4.1.5' into a tuple of ints (4, 1, 5).

    Raises ValueError if any component is not an integer.
    """
    return tuple(int(part) for part in v.split("."))

1136

1137
def read_json_file(path):
    """Read and parse a UTF-8 JSON file.

    Raises FileImportFailed on invalid JSON or any I/O error.
    """
    try:
        with open(path, 'r', encoding='utf-8') as f:
            raw = f.read()
        data = json.loads(raw)
    except json.JSONDecodeError:
        _logger.exception('')
        raise FileImportFailed(_("Invalid JSON code."))
    except BaseException as e:
        # any other failure (I/O, decoding, ...) is wrapped for the UI
        _logger.exception('')
        raise FileImportFailed(e)
    return data

1149

1150
def write_json_file(path, data):
    """Serialize `data` to `path` as pretty-printed, key-sorted JSON.

    Uses MyEncoder (defined elsewhere in this module) for non-standard types.
    Raises FileExportFailed on I/O errors; serialization errors propagate
    unchanged.
    """
    try:
        with open(path, 'w+', encoding='utf-8') as f:
            json.dump(data, f, indent=4, sort_keys=True, cls=MyEncoder)
    except (IOError, os.error) as e:
        _logger.exception('')
        raise FileExportFailed(e)

1158

1159
def os_chmod(path, mode):
    """os.chmod aware of tmpfs"""
    try:
        os.chmod(path, mode)
    except OSError as e:
        # chmod can fail on some tmpfs mounts; if the path lives under
        # XDG_RUNTIME_DIR, treat that as benign and just log it
        runtime_dir = os.environ.get("XDG_RUNTIME_DIR", None)
        if not (runtime_dir and is_subpath(path, runtime_dir)):
            raise
        _logger.info(f"Tried to chmod in tmpfs. Skipping... {e!r}")

1170

1171
def make_dir(path, allow_symlink=True):
    """Make directory if it does not yet exist.

    New directories are restricted to owner read/write/execute.
    Raises if `allow_symlink` is False and `path` is a dangling symlink.
    """
    if os.path.exists(path):
        return
    if not allow_symlink and os.path.islink(path):
        raise Exception('Dangling link: ' + path)
    os.mkdir(path)
    os_chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

1179

1180
def is_subpath(long_path: str, short_path: str) -> bool:
    """Returns whether long_path is a sub-path of short_path."""
    try:
        common = os.path.commonpath([long_path, short_path])
    except ValueError:
        # e.g. mixing absolute and relative paths, or different drives on Windows
        return False
    # they share a common prefix equal to short_path  <=>  long_path is inside it
    return standardize_path(common) == standardize_path(short_path)

1190

1191
def log_exceptions(func):
    """Decorator for coroutines: log any exception (except cancellation) and
    re-raise it. Uses the instance's own logger when decorating a method whose
    instance exposes `.logger`, else the module logger."""
    assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        # when decorating a bound method, args[0] is the instance
        maybe_self = args[0] if len(args) > 0 else None
        try:
            return await func(*args, **kwargs)
        except asyncio.CancelledError as e:
            # cancellation is normal control flow -- never log it
            raise
        except BaseException as e:
            mylogger = maybe_self.logger if hasattr(maybe_self, 'logger') else _logger
            try:
                mylogger.exception(f"Exception in {func.__name__}: {repr(e)}")
            except BaseException as e2:
                # logging must never mask the original error
                print(f"logging exception raised: {repr(e2)}... orig exc: {repr(e)} in {func.__name__}")
            raise
    return wrapper

1210

1211
def ignore_exceptions(func):
    """Decorator to silently swallow all exceptions."""
    assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception:
            # deliberately best-effort: failures are ignored (result is None);
            # note CancelledError is a BaseException and still propagates
            return None
    return wrapper

1222

1223
def with_lock(func):
    """Decorator to enforce a lock on a function call.

    The decorated callable must be a method whose instance exposes a `.lock`
    attribute usable as a context manager (e.g. threading.Lock).
    """
    def func_wrapper(self, *args, **kwargs):
        # hold the instance's lock for the duration of the call
        with self.lock:
            result = func(self, *args, **kwargs)
        return result
    return func_wrapper

1230

1231
class TxMinedInfo(NamedTuple):
    """Mining/confirmation status of a transaction, as seen by the wallet."""
    height: int                        # height of block that mined tx
    conf: Optional[int] = None         # number of confirmations, SPV verified. >=0, or None (None means unknown)
    timestamp: Optional[int] = None    # timestamp of block that mined tx
    txpos: Optional[int] = None        # position of tx in serialized block
    header_hash: Optional[str] = None  # hash of block that mined tx
    wanted_height: Optional[int] = None  # in case of timelock, min abs block height

    def short_id(self) -> Optional[str]:
        """Return the 'HEIGHTxPOS' short id, or None when the position is unknown."""
        if self.txpos is None or self.txpos < 0:
            return None
        assert self.height > 0
        return f"{self.height}x{self.txpos}"

    def is_local_like(self) -> bool:
        """Returns whether the tx is local-like (LOCAL/FUTURE)."""
        from .address_synchronizer import TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT
        confirmed = self.height > 0
        mempool_like = self.height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT)
        return not confirmed and not mempool_like

1254

1255
class ShortID(bytes):
    """8-byte short channel id: 3 bytes block height, 3 bytes tx position,
    2 bytes output index, all big-endian."""

    def __repr__(self):
        return f"<ShortID: {format_short_id(self)}>"

    def __str__(self):
        return format_short_id(self)

    @classmethod
    def from_components(cls, block_height: int, tx_pos_in_block: int, output_index: int) -> 'ShortID':
        """Pack the three components into the 8-byte wire encoding."""
        packed = (
            block_height.to_bytes(3, byteorder='big')
            + tx_pos_in_block.to_bytes(3, byteorder='big')
            + output_index.to_bytes(2, byteorder='big')
        )
        return ShortID(packed)

    @classmethod
    def from_str(cls, scid: str) -> 'ShortID':
        """Parses a formatted scid str, e.g. '643920x356x0'."""
        parts = scid.split("x")
        if len(parts) != 3:
            raise ValueError(f"failed to parse ShortID: {scid!r}")
        try:
            numbers = [int(part) for part in parts]
        except ValueError:
            raise ValueError(f"failed to parse ShortID: {scid!r}") from None
        return ShortID.from_components(*numbers)

    @classmethod
    def normalize(cls, data: Union[None, str, bytes, 'ShortID']) -> Optional['ShortID']:
        """Coerce a 16-char hex str / 8 raw bytes / ShortID / None to Optional[ShortID]."""
        if data is None or isinstance(data, ShortID):
            return data
        if isinstance(data, str):
            assert len(data) == 16
            return ShortID.fromhex(data)
        if isinstance(data, (bytes, bytearray)):
            assert len(data) == 8
            return ShortID(data)
        # any other type falls through -> None (matches historical behavior)

    @property
    def block_height(self) -> int:
        return int.from_bytes(self[:3], byteorder='big')

    @property
    def txpos(self) -> int:
        return int.from_bytes(self[3:6], byteorder='big')

    @property
    def output_index(self) -> int:
        return int.from_bytes(self[6:8], byteorder='big')

1305

1306
def format_short_id(short_channel_id: Optional[bytes]):
1✔
1307
    if not short_channel_id:
1✔
1308
        return _('Not yet available')
×
1309
    return str(int.from_bytes(short_channel_id[:3], 'big')) \
1✔
1310
        + 'x' + str(int.from_bytes(short_channel_id[3:6], 'big')) \
1311
        + 'x' + str(int.from_bytes(short_channel_id[6:], 'big'))
1312

1313

1314
def make_aiohttp_proxy_connector(proxy: 'ProxySettings', ssl_context: Optional[ssl.SSLContext] = None) -> ProxyConnector:
    """Build an aiohttp_socks ProxyConnector from our proxy settings.

    Any mode other than 'socks5' is treated as SOCKS4.
    """
    return ProxyConnector(
        proxy_type=ProxyType.SOCKS5 if proxy.mode == 'socks5' else ProxyType.SOCKS4,
        host=proxy.host,
        port=int(proxy.port),
        username=proxy.user,
        password=proxy.password,
        rdns=True,  # needed to prevent DNS leaks over proxy
        ssl=ssl_context,
    )

1325

1326
def make_aiohttp_session(proxy: Optional['ProxySettings'], headers=None, timeout=None):
    """Create an aiohttp ClientSession, optionally routed through `proxy`.

    `timeout` may be None (generous 45s default), a number of seconds, or an
    aiohttp.ClientTimeout instance.
    """
    if headers is None:
        headers = {'User-Agent': 'Electrum'}
    if timeout is None:
        # The default timeout is high intentionally.
        # DNS on some systems can be really slow, see e.g. #5337
        timeout = aiohttp.ClientTimeout(total=45)
    elif isinstance(timeout, (int, float)):
        timeout = aiohttp.ClientTimeout(total=timeout)
    # verify server certs against the CA bundle at ca_path (module-level)
    ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path)

    if proxy and proxy.enabled:
        connector = make_aiohttp_proxy_connector(proxy, ssl_context)
    else:
        connector = aiohttp.TCPConnector(ssl=ssl_context)

    return aiohttp.ClientSession(headers=headers, timeout=timeout, connector=connector)

1344

1345
class OldTaskGroup(aiorpcx.TaskGroup):
    """Automatically raises exceptions on join; as in aiorpcx prior to version 0.20.
    That is, when using TaskGroup as a context manager, if any task encounters an exception,
    we would like that exception to be re-raised (propagated out). For the wait=all case,
    the OldTaskGroup class is emulating the following code-snippet:
    ```
    async with TaskGroup() as group:
        await group.spawn(task1())
        await group.spawn(task2())

        async for task in group:
            if not task.cancelled():
                task.result()
    ```
    So instead of the above, one can just write:
    ```
    async with OldTaskGroup() as group:
        await group.spawn(task1())
        await group.spawn(task2())
    ```
    # TODO see if we can migrate to asyncio.timeout, introduced in python 3.11, and use stdlib instead of aiorpcx.curio...
    """
    async def join(self):
        if self._wait is all:
            exc = False
            try:
                # iterate tasks as they complete; task.result() re-raises the
                # first task exception encountered
                async for task in self:
                    if not task.cancelled():
                        task.result()
            except BaseException:  # including asyncio.CancelledError
                exc = True
                raise
            finally:
                if exc:
                    # a task failed (or join was cancelled): tear down the rest
                    await self.cancel_remaining()
                await super().join()
        else:
            # wait=any/object semantics: defer to aiorpcx, then surface the
            # completed task's exception, if it raised
            await super().join()
            if self.completed:
                self.completed.result()

1386
# We monkey-patch aiorpcx TimeoutAfter (used by timeout_after and ignore_after API),
1387
# to fix a timing issue present in asyncio as a whole re timing out tasks.
1388
# To see the issue we are trying to fix, consider example:
1389
#     async def outer_task():
1390
#         async with timeout_after(0.1):
1391
#             await inner_task()
1392
# When the 0.1 sec timeout expires, inner_task will get cancelled by timeout_after (=internal cancellation).
1393
# If around the same time (in terms of event loop iterations) another coroutine
1394
# cancels outer_task (=external cancellation), there will be a race.
1395
# Both cancellations work by propagating a CancelledError out to timeout_after, which then
1396
# needs to decide (in TimeoutAfter.__aexit__) whether it's due to an internal or external cancellation.
1397
# AFAICT asyncio provides no reliable way of distinguishing between the two.
1398
# This patch tries to always give priority to external cancellations.
1399
# see https://github.com/kyuupichan/aiorpcX/issues/44
1400
# see https://github.com/aio-libs/async-timeout/issues/229
1401
# see https://bugs.python.org/issue42130 and https://bugs.python.org/issue45098
1402
# TODO see if we can migrate to asyncio.timeout, introduced in python 3.11, and use stdlib instead of aiorpcx.curio...
1403
def _aiorpcx_monkeypatched_set_new_deadline(task, deadline):
    """Patched aiorpcx deadline-setter that lets TimeoutAfter distinguish an
    internal timeout-cancellation from an external task.cancel().
    External cancellations always win (see comment block above).
    """
    def timeout_task():
        # deadline fired: cancel internally, but only mark the task as timed
        # out if it was NOT already cancelled externally
        task._orig_cancel()
        task._timed_out = None if getattr(task, "_externally_cancelled", False) else deadline
    def mycancel(*args, **kwargs):
        # external cancellation: record it and clear any timeout marker
        task._orig_cancel(*args, **kwargs)
        task._externally_cancelled = True
        task._timed_out = None
    if not hasattr(task, "_orig_cancel"):
        # wrap task.cancel only once; keep the original for later restore
        task._orig_cancel = task.cancel
        task.cancel = mycancel
    task._deadline_handle = task._loop.call_at(deadline, timeout_task)

1416

1417
def _aiorpcx_monkeypatched_set_task_deadline(task, deadline):
    """Patched wrapper: also initialise the external-cancellation marker."""
    ret = _aiorpcx_orig_set_task_deadline(task, deadline)
    task._externally_cancelled = None
    return ret

1422

1423
def _aiorpcx_monkeypatched_unset_task_deadline(task):
    """Patched wrapper: restore the original task.cancel before unsetting."""
    if hasattr(task, "_orig_cancel"):
        task.cancel = task._orig_cancel
        del task._orig_cancel
    return _aiorpcx_orig_unset_task_deadline(task)

1429

1430
# Keep references to the original aiorpcx internals, then install the patched
# versions defined above. This runs at import time, before any
# timeout_after/ignore_after usage.
_aiorpcx_orig_set_task_deadline    = aiorpcx.curio._set_task_deadline
_aiorpcx_orig_unset_task_deadline  = aiorpcx.curio._unset_task_deadline

aiorpcx.curio._set_new_deadline    = _aiorpcx_monkeypatched_set_new_deadline
aiorpcx.curio._set_task_deadline   = _aiorpcx_monkeypatched_set_task_deadline
aiorpcx.curio._unset_task_deadline = _aiorpcx_monkeypatched_unset_task_deadline

1437

1438
async def wait_for2(fut: Awaitable, timeout: Union[int, float, None]):
    """Replacement for asyncio.wait_for,
     due to bugs: https://bugs.python.org/issue42130 and https://github.com/python/cpython/issues/86296 ,
     which are only fixed in python 3.12+.
     """
    if sys.version_info[:3] >= (3, 12):
        # native asyncio.wait_for is fixed on 3.12+
        return await asyncio.wait_for(fut, timeout)
    else:
        # older pythons: use our async_timeout shim (defined below)
        async with async_timeout(timeout):
            return await asyncio.ensure_future(fut, loop=get_running_loop())

1449

1450
if hasattr(asyncio, 'timeout'):  # python 3.11+
    # stdlib asyncio.timeout has the semantics we need
    async_timeout = asyncio.timeout
else:
    class TimeoutAfterAsynciolike(aiorpcx.curio.TimeoutAfter):
        """aiorpcx TimeoutAfter adapted to raise asyncio-style exceptions."""
        async def __aexit__(self, exc_type, exc_value, tb):
            try:
                await super().__aexit__(exc_type, exc_value, tb)
            except (aiorpcx.TaskTimeout, aiorpcx.UncaughtTimeoutError):
                raise asyncio.TimeoutError from None
            except aiorpcx.TimeoutCancellationError:
                raise asyncio.CancelledError from None

    def async_timeout(delay: Union[int, float, None]):
        # delay=None mimics asyncio.timeout(None): no deadline at all
        if delay is None:
            return nullcontext()
        return TimeoutAfterAsynciolike(delay)

1467

1468
class NetworkJobOnDefaultServer(Logger, ABC):
    """An abstract base class for a job that runs on the main network
    interface. Every time the main interface changes, the job is
    restarted, and some of its internals are reset.
    """
    def __init__(self, network: 'Network'):
        Logger.__init__(self)
        self.network = network
        self.interface = None  # type: Interface
        self._restart_lock = asyncio.Lock()
        # Ensure fairness between NetworkJobs. e.g. if multiple wallets
        # are open, a large wallet's Synchronizer should not starve the small wallets:
        self._network_request_semaphore = asyncio.Semaphore(100)

        self._reset()
        # every time the main interface changes, restart:
        register_callback(self._restart, ['default_server_changed'])
        # also schedule a one-off restart now, as there might already be a main interface:
        asyncio.run_coroutine_threadsafe(self._restart(), network.asyncio_loop)

    def _reset(self):
        """Initialise fields. Called every time the underlying
        server connection changes.
        """
        # a fresh taskgroup per server connection; _run_tasks compares against
        # this to detect stale starts
        self.taskgroup = OldTaskGroup()
        self.reset_request_counters()

    async def _start(self, interface: 'Interface'):
        """Bind to `interface` and spawn this job's tasks inside the
        interface's own taskgroup (so they die with the connection)."""
        self.logger.debug(f"starting. interface.server={repr(str(interface.server))}")
        self.interface = interface

        # capture the current taskgroup: if we get restarted concurrently,
        # _run_tasks will notice the mismatch and bail out
        taskgroup = self.taskgroup
        async def run_tasks_wrapper():
            self.logger.debug(f"starting taskgroup ({hex(id(taskgroup))}).")
            try:
                await self._run_tasks(taskgroup=taskgroup)
            except Exception as e:
                self.logger.error(f"taskgroup died ({hex(id(taskgroup))}). exc={e!r}")
                raise
            finally:
                self.logger.debug(f"taskgroup stopped ({hex(id(taskgroup))}).")
        await interface.taskgroup.spawn(run_tasks_wrapper)

    @abstractmethod
    async def _run_tasks(self, *, taskgroup: OldTaskGroup) -> None:
        """Start tasks in taskgroup. Called every time the underlying
        server connection changes.
        """
        # If self.taskgroup changed, don't start tasks. This can happen if we have
        # been restarted *just now*, i.e. after the _run_tasks coroutine object was created.
        if taskgroup != self.taskgroup:
            raise asyncio.CancelledError()

    async def stop(self, *, full_shutdown: bool = True):
        """Cancel this job's tasks; with full_shutdown, also stop reacting
        to server changes (the job will not be restarted again)."""
        self.logger.debug(f"stopping. {full_shutdown=}")
        if full_shutdown:
            unregister_callback(self._restart)
        await self.taskgroup.cancel_remaining()

    @log_exceptions
    async def _restart(self, *args):
        """Stop, reset and re-start the job on the current main interface."""
        interface = self.network.interface
        if interface is None:
            return  # we should get called again soon

        # serialize concurrent restarts (e.g. rapid server changes)
        async with self._restart_lock:
            await self.stop(full_shutdown=False)
            self._reset()
            await self._start(interface)

    def reset_request_counters(self):
        """Zero the sent/answered request counters (stats only)."""
        self._requests_sent = 0
        self._requests_answered = 0

    def num_requests_sent_and_answered(self) -> Tuple[int, int]:
        """Return (requests_sent, requests_answered) since the last reset."""
        return self._requests_sent, self._requests_answered

    @property
    def session(self):
        # the current interface's session; only valid while connected
        s = self.interface.session
        assert s is not None
        return s

1551

1552
async def detect_tor_socks_proxy() -> Optional[Tuple[str, int]]:
    """Probe common localhost ports concurrently and return the (host, port)
    of a Tor SOCKS proxy if one is found, else None. The first positive hit
    cancels the remaining probes.
    """
    # Probable ports for Tor to listen at
    candidates = [
        ("127.0.0.1", 9050),
        ("127.0.0.1", 9051),
        ("127.0.0.1", 9150),
    ]

    proxy_addr = None
    # NOTE: closes over `group`, which is only bound below -- safe because no
    # probe task runs before the `async with` block assigns it
    async def test_net_addr(net_addr):
        is_tor = await is_tor_socks_port(*net_addr)
        # set result, and cancel remaining probes
        if is_tor:
            nonlocal proxy_addr
            proxy_addr = net_addr
            await group.cancel_remaining()

    async with OldTaskGroup() as group:
        for net_addr in candidates:
            await group.spawn(test_net_addr(net_addr))
    return proxy_addr

1574

1575
@log_exceptions
async def is_tor_socks_port(host: str, port: int) -> bool:
    """Return whether a Tor SOCKS daemon appears to be listening at (host, port)."""
    # mimic "tor-resolve 0.0.0.0".
    # see https://github.com/spesmilo/electrum/issues/7317#issuecomment-1369281075
    # > this is a socks5 handshake, followed by a socks RESOLVE request as defined in
    # > [tor's socks extension spec](https://github.com/torproject/torspec/blob/7116c9cdaba248aae07a3f1d0e15d9dd102f62c5/socks-extensions.txt#L63),
    # > resolving 0.0.0.0, which being an IP, tor resolves itself without needing to ask a relay.
    writer = None
    try:
        async with async_timeout(10):
            reader, writer = await asyncio.open_connection(host, port)
            # SOCKS5 greeting (no-auth) immediately followed by the tor-only
            # RESOLVE request for "0.0.0.0"
            writer.write(b'\x05\x01\x00\x05\xf0\x00\x03\x070.0.0.0\x00\x00')
            await writer.drain()
            data = await reader.read(1024)
            # exact reply a real tor SOCKS port gives for this request
            if data == b'\x05\x00\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00':
                return True
            return False
    except (OSError, asyncio.TimeoutError):
        # refused / unreachable / too slow: not a usable tor port
        return False
    finally:
        if writer:
            writer.close()

1598

1599
# If True, get_asyncio_loop() may fall back to the loop running in the
# current thread instead of requiring our global loop to exist.
AS_LIB_USER_I_WANT_TO_MANAGE_MY_OWN_ASYNCIO_LOOP = False  # used by unit tests

# The single global event loop, set by create_and_start_event_loop().
_asyncio_event_loop = None  # type: Optional[asyncio.AbstractEventLoop]
def get_asyncio_loop() -> asyncio.AbstractEventLoop:
    """Returns the global asyncio event loop we use."""
    if loop := _asyncio_event_loop:
        return loop
    if AS_LIB_USER_I_WANT_TO_MANAGE_MY_OWN_ASYNCIO_LOOP:
        if loop := get_running_loop():
            return loop
    raise Exception("event loop not created yet")
1610

1611

1612
def create_and_start_event_loop() -> Tuple[asyncio.AbstractEventLoop,
                                           asyncio.Future,
                                           threading.Thread]:
    """Create the global event loop and run it on a dedicated daemon-like thread.

    Returns (loop, stopping_fut, loop_thread). Completing stopping_fut makes
    run_event_loop() return and thereby stops the loop thread.
    Raises if the global loop already exists. Blocks (up to 5s) until the loop
    is actually running before returning.
    """
    global _asyncio_event_loop
    if _asyncio_event_loop is not None:
        raise Exception("there is already a running event loop")

    # asyncio.get_event_loop() became deprecated in python3.10. (see https://github.com/python/cpython/issues/83710)
    # We set a custom event loop policy purely to be compatible with code that
    # relies on asyncio.get_event_loop().
    # - in python 3.8-3.9, asyncio.Event.__init__, asyncio.Lock.__init__,
    #   and similar, calls get_event_loop. see https://github.com/python/cpython/pull/23420
    class MyEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
        def get_event_loop(self):
            # In case electrum is being used as a library, there might be other
            # event loops in use besides ours. To minimise interfering with those,
            # if there is a loop running in the current thread, return that:
            running_loop = get_running_loop()
            if running_loop is not None:
                return running_loop
            # Otherwise, return our global loop:
            return get_asyncio_loop()
    asyncio.set_event_loop_policy(MyEventLoopPolicy())

    loop = asyncio.new_event_loop()
    _asyncio_event_loop = loop

    def on_exception(loop, context):
        """Suppress spurious messages it appears we cannot control."""
        SUPPRESS_MESSAGE_REGEX = re.compile('SSL handshake|Fatal read error on|'
                                            'SSL error in data received')
        message = context.get('message')
        if message and SUPPRESS_MESSAGE_REGEX.match(message):
            return
        loop.default_exception_handler(context)

    def run_event_loop():
        # thread target: runs the loop until stopping_fut completes
        try:
            loop.run_until_complete(stopping_fut)
        finally:
            # clean-up
            global _asyncio_event_loop
            _asyncio_event_loop = None

    loop.set_exception_handler(on_exception)
    _set_custom_task_factory(loop)
    # loop.set_debug(True)
    stopping_fut = loop.create_future()
    loop_thread = threading.Thread(
        target=run_event_loop,
        name='EventLoop',
    )
    loop_thread.start()
    # Wait until the loop actually starts.
    # On a slow PC, or with a debugger attached, this can take a few dozens of ms,
    # and if we returned without a running loop, weird things can happen...
    t0 = time.monotonic()
    while not loop.is_running():
        time.sleep(0.01)
        if time.monotonic() - t0 > 5:
            raise Exception("been waiting for 5 seconds but asyncio loop would not start!")
    return loop, stopping_fut, loop_thread
1674

1675

1676
# Strong references to in-flight tasks; see _set_custom_task_factory below.
_running_asyncio_tasks = set()  # type: Set[asyncio.Future]
def _set_custom_task_factory(loop: asyncio.AbstractEventLoop):
    """Wrap task creation to track pending and running tasks.
    When tasks are created, asyncio only maintains a weak reference to them.
    Hence, the garbage collector might destroy the task mid-execution.
    To avoid this, we store a strong reference for the task until it completes.

    Without this, a lot of APIs are basically Heisenbug-generators... e.g.:
    - "asyncio.create_task"
    - "loop.create_task"
    - "asyncio.ensure_future"
    - what about "asyncio.run_coroutine_threadsafe"? not sure if that is safe.

    related:
        - https://bugs.python.org/issue44665
        - https://github.com/python/cpython/issues/88831
        - https://github.com/python/cpython/issues/91887
        - https://textual.textualize.io/blog/2023/02/11/the-heisenbug-lurking-in-your-async-code/
        - https://github.com/python/cpython/issues/91887#issuecomment-1434816045
        - "Task was destroyed but it is pending!"
    """

    # preserve whatever factory the platform/integration may have installed
    platform_task_factory = loop.get_task_factory()

    def factory(loop_, coro, **kwargs):
        if platform_task_factory is not None:
            task = platform_task_factory(loop_, coro, **kwargs)
        else:
            task = asyncio.Task(coro, loop=loop_, **kwargs)
        # hold a strong reference until the task completes
        _running_asyncio_tasks.add(task)
        task.add_done_callback(_running_asyncio_tasks.discard)
        return task

    loop.set_task_factory(factory)
1710

1711

1712
class OrderedDictWithIndex(OrderedDict):
    """An OrderedDict that keeps track of the positions of keys.

    Note: very inefficient to modify contents, except to add new items.
    """

    def __init__(self):
        super().__init__()
        self._key_to_pos = {}
        self._pos_to_key = {}

    def _recalc_index(self):
        # Rebuild both lookup tables from scratch (O(n)).
        self._key_to_pos = {}
        self._pos_to_key = {}
        for pos, key in enumerate(self.keys()):
            self._key_to_pos[key] = pos
            self._pos_to_key[pos] = key

    def _reindexed(self, ret):
        # helper: recompute the index after a structural mutation, pass value through
        self._recalc_index()
        return ret

    def pos_from_key(self, key):
        """Return the position (insertion index) of *key*."""
        return self._key_to_pos[key]

    def value_from_pos(self, pos):
        """Return the value stored at position *pos*."""
        return self[self._pos_to_key[pos]]

    def popitem(self, *args, **kwargs):
        return self._reindexed(super().popitem(*args, **kwargs))

    def move_to_end(self, *args, **kwargs):
        return self._reindexed(super().move_to_end(*args, **kwargs))

    def clear(self):
        return self._reindexed(super().clear())

    def pop(self, *args, **kwargs):
        return self._reindexed(super().pop(*args, **kwargs))

    def update(self, *args, **kwargs):
        return self._reindexed(super().update(*args, **kwargs))

    def __delitem__(self, *args, **kwargs):
        return self._reindexed(super().__delitem__(*args, **kwargs))

    def __setitem__(self, key, *args, **kwargs):
        # fast path: a brand-new key is always appended at the end,
        # so the index can be extended without a full recalc
        fresh = key not in self
        result = super().__setitem__(key, *args, **kwargs)
        if fresh:
            new_pos = len(self) - 1
            self._key_to_pos[key] = new_pos
            self._pos_to_key[new_pos] = key
        return result
1772

1773

1774
def multisig_type(wallet_type):
    """If wallet_type is mofn multi-sig, return [m, n], otherwise return None."""
    if not wallet_type:
        return None
    m = re.match(r'(\d+)of(\d+)', wallet_type)
    if m is None:
        return None
    return [int(m.group(1)), int(m.group(2))]
1783

1784

1785
def is_ip_address(x: Union[str, bytes]) -> bool:
    """Return whether *x* is a syntactically valid IPv4 or IPv6 address.

    Accepts str or utf-8 encoded bytes. Never raises: malformed input,
    including bytes that do not decode as utf-8, yields False.
    """
    try:
        if isinstance(x, bytes):
            # decode inside the try-block: UnicodeDecodeError is a ValueError
            # subclass, so undecodable bytes now return False instead of raising
            x = x.decode("utf-8")
        ipaddress.ip_address(x)
        return True
    except ValueError:
        return False
1793

1794

1795
def is_localhost(host: str) -> bool:
    """Return whether *host* refers to the local machine (loopback)."""
    if str(host) in ('localhost', 'localhost.',):
        return True
    if not host:
        return False  # empty string previously raised IndexError below
    if host[0] == '[' and host[-1] == ']':  # IPv6
        host = host[1:-1]
    try:
        ip_addr = ipaddress.ip_address(host)  # type: Union[IPv4Address, IPv6Address]
        return ip_addr.is_loopback
    except ValueError:
        pass  # not an IP
    return False


def is_private_netaddress(host: str) -> bool:
    """Return whether *host* is loopback or in a private IP range."""
    if is_localhost(host):
        return True
    if not host:
        return False  # empty string previously raised IndexError below
    if host[0] == '[' and host[-1] == ']':  # IPv6
        host = host[1:-1]
    try:
        ip_addr = ipaddress.ip_address(host)  # type: Union[IPv4Address, IPv6Address]
        return ip_addr.is_private
    except ValueError:
        pass  # not an IP
    return False
1819

1820

1821
def list_enabled_bits(x: int) -> Sequence[int]:
    """e.g. 77 (0b1001101) --> (0, 2, 3, 6)"""
    # binary digits, least-significant bit first
    bits_lsb_first = bin(x)[2:][::-1]
    return tuple(pos for pos, bit in enumerate(bits_lsb_first) if bit == '1')
1826

1827

1828
def resolve_dns_srv(host: str):
    """Resolve the SRV records of *host* and return them as a list of
    {'host': ..., 'port': ...} dicts, best candidate first.
    """
    # FIXME this method is not using the network proxy. (although the proxy might not support UDP?)
    srv_records = dns.resolver.resolve(host, 'SRV')
    # priority: prefer lower
    # weight: tie breaker; prefer higher
    srv_records = sorted(srv_records, key=lambda x: (x.priority, -x.weight))

    def dict_from_srv_record(srv):
        return {
            'host': str(srv.target),
            'port': srv.port,
        }
    return [dict_from_srv_record(srv) for srv in srv_records]
1841

1842

1843
def randrange(bound: int) -> int:
    """Return a random integer k such that 1 <= k < bound, uniformly
    distributed across that range.
    This is guaranteed to be cryptographically strong.
    """
    # secrets.randbelow(n) yields 0 <= r < n; shift the range up by one
    return 1 + secrets.randbelow(bound - 1)
1851

1852

1853
class CallbackManager(Logger):
    # callbacks set by the GUI or any thread
    # guarantee: the callbacks will always get triggered from the asyncio thread.

    def __init__(self):
        Logger.__init__(self)
        self.callback_lock = threading.Lock()
        self.callbacks = defaultdict(list)      # note: needs self.callback_lock
        # strong references to futures of in-flight async callbacks (GC protection)
        self._running_cb_futs = set()

    def register_callback(self, func, events):
        """Register *func* to be triggered for each event name in *events*."""
        with self.callback_lock:
            for event in events:
                self.callbacks[event].append(func)

    def unregister_callback(self, callback):
        """Remove *callback* from all events it was registered for."""
        with self.callback_lock:
            for callbacks in self.callbacks.values():
                if callback in callbacks:
                    callbacks.remove(callback)

    def trigger_callback(self, event, *args):
        """Trigger a callback with given arguments.
        Can be called from any thread. The callback itself will get scheduled
        on the event loop.
        """
        loop = get_asyncio_loop()
        assert loop.is_running(), "event loop not running"
        # copy under the lock so callbacks can (un)register during iteration
        with self.callback_lock:
            callbacks = self.callbacks[event][:]
        for callback in callbacks:
            if asyncio.iscoroutinefunction(callback):  # async cb
                fut = asyncio.run_coroutine_threadsafe(callback(*args), loop)
                # keep strong references around to avoid GC issues:
                self._running_cb_futs.add(fut)
                def on_done(fut_: concurrent.futures.Future):
                    assert fut_.done()
                    self._running_cb_futs.remove(fut_)
                    if fut_.cancelled():
                        self.logger.debug(f"cb cancelled. {event=}.")
                    elif exc := fut_.exception():
                        self.logger.error(f"cb errored. {event=}. {exc=}", exc_info=exc)
                fut.add_done_callback(on_done)
            else:  # non-async cb
                # note: the cb needs to run in the asyncio thread
                if get_running_loop() == loop:
                    # run callback immediately, so that it is guaranteed
                    # to have been executed when this method returns
                    callback(*args)
                else:
                    # note: if cb raises, asyncio will log the exception
                    loop.call_soon_threadsafe(callback, *args)
1905

1906

1907
# Global singleton callback manager, with module-level convenience aliases.
callback_mgr = CallbackManager()
trigger_callback = callback_mgr.trigger_callback
register_callback = callback_mgr.register_callback
unregister_callback = callback_mgr.unregister_callback
# classpath -> set of "on_event_*" method names; populated by @event_listener
_event_listeners = defaultdict(set)  # type: Dict[str, Set[str]]
1912

1913

1914
class EventListener:
    """Use as a mixin for a class that has methods to be triggered on events.
    - Methods that receive the callbacks should be named "on_event_*" and decorated with @event_listener.
    - register_callbacks() should be called exactly once per instance of EventListener, e.g. in __init__
    - unregister_callbacks() should be called at least once, e.g. when the instance is destroyed
    """

    def _list_callbacks(self):
        # Walk the MRO so decorated methods of base classes are found too.
        # Yields (event_name, bound_method) pairs.
        for c in self.__class__.__mro__:
            classpath = f"{c.__module__}.{c.__name__}"
            for method_name in _event_listeners[classpath]:
                method = getattr(self, method_name)
                assert callable(method)
                assert method_name.startswith('on_event_')
                yield method_name[len('on_event_'):], method

    def register_callbacks(self):
        """Register all @event_listener methods of this instance."""
        for name, method in self._list_callbacks():
            #_logger.debug(f'registering callback {method}')
            register_callback(method, [name])

    def unregister_callbacks(self):
        """Unregister all @event_listener methods of this instance."""
        for name, method in self._list_callbacks():
            #_logger.debug(f'unregistering callback {method}')
            unregister_callback(method)
1939

1940

1941
def event_listener(func):
    """To be used in subclasses of EventListener only. (how to enforce this programmatically?)"""
    # NOTE: the split('.') assumes a qualname with exactly one dot, i.e. the
    # decorated method must live directly on a module-level class (no nested
    # classes / local classes) — otherwise the unpacking below raises.
    classname, method_name = func.__qualname__.split('.')
    assert method_name.startswith('on_event_')
    # must match the classpath format used in EventListener._list_callbacks
    classpath = f"{func.__module__}.{classname}"
    _event_listeners[classpath].add(method_name)
    return func
1948

1949

1950
_NetAddrType = TypeVar("_NetAddrType")
# requirements for _NetAddrType:
# - reasonable __hash__() implementation (e.g. based on host/port of remote endpoint)

class NetworkRetryManager(Generic[_NetAddrType]):
    """Truncated Exponential Backoff for network connections."""

    def __init__(
            self, *,
            max_retry_delay_normal: float,
            init_retry_delay_normal: float,
            max_retry_delay_urgent: float = None,
            init_retry_delay_urgent: float = None,
    ):
        # address -> (unix timestamp of last attempt, number of attempts so far)
        self._last_tried_addr = {}  # type: Dict[_NetAddrType, Tuple[float, int]]

        # note: these all use "seconds" as unit
        # the "urgent" parameters fall back to their "normal" counterparts
        self._max_retry_delay_normal = max_retry_delay_normal
        self._init_retry_delay_normal = init_retry_delay_normal
        self._max_retry_delay_urgent = (
            max_retry_delay_normal if max_retry_delay_urgent is None else max_retry_delay_urgent)
        self._init_retry_delay_urgent = (
            init_retry_delay_normal if init_retry_delay_urgent is None else init_retry_delay_urgent)

    def _trying_addr_now(self, addr: _NetAddrType) -> None:
        """Record that we are attempting *addr* right now."""
        _prev_time, attempts = self._last_tried_addr.get(addr, (0, 0))
        # we add up to 1 second of noise to the time, so that clients are less likely
        # to get synchronised and bombard the remote in connection waves:
        self._last_tried_addr[addr] = (time.time() + random.random(), attempts + 1)

    def _on_connection_successfully_established(self, addr: _NetAddrType) -> None:
        """Reset the backoff for *addr* after a successful connection."""
        self._last_tried_addr[addr] = (time.time(), 0)

    def _can_retry_addr(self, addr: _NetAddrType, *,
                        now: float = None, urgent: bool = False) -> bool:
        """Return whether enough time has passed to retry *addr*."""
        now = time.time() if now is None else now
        last_time, attempts = self._last_tried_addr.get(addr, (0, 0))
        if urgent:
            init_delay, max_delay = self._init_retry_delay_urgent, self._max_retry_delay_urgent
        else:
            init_delay, max_delay = self._init_retry_delay_normal, self._max_retry_delay_normal
        wait = self.__calc_delay(multiplier=init_delay, max_delay=max_delay, num_attempts=attempts)
        return last_time + wait < now

    @classmethod
    def __calc_delay(cls, *, multiplier: float, max_delay: float,
                     num_attempts: int) -> float:
        # exponential backoff, truncated at max_delay (and never negative)
        num_attempts = min(num_attempts, 100_000)
        try:
            delay = multiplier * 2 ** num_attempts
        except OverflowError:
            return max_delay
        return max(0, min(max_delay, delay))

    def _clear_addr_retry_times(self) -> None:
        """Forget all recorded attempts."""
        self._last_tried_addr.clear()
2013

2014

2015
class ESocksProxy(aiorpcx.SOCKSProxy):
    # note: proxy will not leak DNS as create_connection()
    # sets (local DNS) resolve=False by default

    async def open_connection(self, host=None, port=None, **kwargs):
        """asyncio.open_connection() equivalent that tunnels through the proxy.
        Returns a (StreamReader, StreamWriter) pair.
        """
        loop = asyncio.get_running_loop()
        reader = asyncio.StreamReader(loop=loop)
        protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
        transport, _ = await self.create_connection(
            lambda: protocol, host, port, **kwargs)
        writer = asyncio.StreamWriter(transport, protocol, reader, loop)
        return reader, writer

    @classmethod
    def from_network_settings(cls, network: Optional['Network']) -> Optional['ESocksProxy']:
        """Build an ESocksProxy from the network's proxy settings,
        or return None if no proxy is configured/enabled.
        Raises NotImplementedError for unsupported proxy modes (e.g. http).
        """
        if not network or not network.proxy or not network.proxy.enabled:
            return None
        proxy = network.proxy
        username, pw = proxy.user, proxy.password
        if not username or not pw:
            # is_proxy_tor is tri-state; None indicates it is still probing the proxy to test for TOR
            if network.is_proxy_tor:
                # random credentials make Tor isolate our circuits from other apps
                auth = aiorpcx.socks.SOCKSRandomAuth()
            else:
                auth = None
        else:
            auth = aiorpcx.socks.SOCKSUserAuth(username, pw)
        addr = aiorpcx.NetAddress(proxy.host, proxy.port)
        if proxy.mode == "socks4":
            ret = cls(addr, aiorpcx.socks.SOCKS4a, auth)
        elif proxy.mode == "socks5":
            ret = cls(addr, aiorpcx.socks.SOCKS5, auth)
        else:
            raise NotImplementedError  # http proxy not available with aiorpcx
        return ret
2050

2051

2052
class JsonRPCError(Exception):
    """Error object returned by a JSON-RPC server (code/message/data fields)."""

    class Codes(enum.IntEnum):
        # application-specific error codes
        USERFACING = 1
        INTERNAL = 2

    def __init__(self, *, code: int, message: str, data: Optional[dict] = None):
        super().__init__()
        # mirror the JSON-RPC "error" object members
        self.code = code
        self.message = message
        self.data = data
2064

2065

2066
class JsonRPCClient:
    """Minimal JSON-RPC 2.0 client on top of an aiohttp session."""

    def __init__(self, session: aiohttp.ClientSession, url: str):
        self.session = session
        self.url = url
        self._id = 0  # monotonically increasing request id

    async def request(self, endpoint, *args):
        """Send request to server, parse and return result.
        note: parsing code is naive, the server is assumed to be well-behaved.
              Up to the caller to handle exceptions, including those arising from parsing errors.
        """
        self._id += 1
        # fix: build the payload with json.dumps instead of %-interpolation,
        # so a method name containing quotes/backslashes cannot corrupt the JSON.
        # The id stays a string of the counter, as before.
        data = json.dumps({
            "jsonrpc": "2.0",
            "id": str(self._id),
            "method": endpoint,
            "params": list(args),
        })
        async with self.session.post(self.url, data=data) as resp:
            if resp.status == 200:
                r = await resp.json()
                result = r.get('result')
                error = r.get('error')
                if error:
                    raise JsonRPCError(code=error["code"], message=error["message"], data=error.get("data"))
                else:
                    return result
            else:
                text = await resp.text()
                return 'Error: ' + str(text)

    def add_method(self, endpoint):
        """Attach a coroutine attribute `self.<endpoint>(*args)` that proxies to request()."""
        async def coro(*args):
            return await self.request(endpoint, *args)
        setattr(self, endpoint, coro)
2098

2099

2100
T = TypeVar('T')

def random_shuffled_copy(x: Iterable[T]) -> List[T]:
    """Returns a shuffled copy of the input."""
    # materialize first so the input (possibly an iterator) is never mutated
    shuffled = list(x)
    random.shuffle(shuffled)
    return shuffled
2107

2108

2109
def test_read_write_permissions(path) -> None:
1✔
2110
    # note: There might already be a file at 'path'.
2111
    #       Make sure we do NOT overwrite/corrupt that!
2112
    temp_path = "%s.tmptest.%s" % (path, os.getpid())
1✔
2113
    echo = "fs r/w test"
1✔
2114
    try:
1✔
2115
        # test READ permissions for actual path
2116
        if os.path.exists(path):
1✔
2117
            with open(path, "rb") as f:
1✔
2118
                f.read(1)  # read 1 byte
1✔
2119
        # test R/W sanity for "similar" path
2120
        with open(temp_path, "w", encoding='utf-8') as f:
1✔
2121
            f.write(echo)
1✔
2122
        with open(temp_path, "r", encoding='utf-8') as f:
1✔
2123
            echo2 = f.read()
1✔
2124
        os.remove(temp_path)
1✔
2125
    except Exception as e:
×
2126
        raise IOError(e) from e
×
2127
    if echo != echo2:
1✔
2128
        raise IOError('echo sanity-check failed')
×
2129

2130

2131
class classproperty(property):
    """~read-only class-level @property
    from https://stackoverflow.com/a/13624858 by denis-ryzhkov
    """
    def __get__(self, owner_self, owner_cls):
        # ignore the instance (owner_self); evaluate the getter against the class
        return self.fget(owner_cls)
2137

2138

2139
def get_running_loop() -> Optional[asyncio.AbstractEventLoop]:
    """Returns the asyncio event loop that is *running in this thread*, if any."""
    try:
        return asyncio.get_running_loop()
    except RuntimeError:
        # no loop is running in the current thread
        return None
2145

2146

2147
def error_text_str_to_safe_str(err: str, *, max_len: Optional[int] = 500) -> str:
    """Converts an untrusted error string to a sane printable ascii str.
    Never raises.
    """
    # non-ascii chars get backslash-escaped first, then the bytes variant
    # does the repr()-based sanitisation; truncation happens once, at the end
    text = error_text_bytes_to_safe_str(
        err.encode("ascii", errors='backslashreplace'),
        max_len=None)
    return truncate_text(text, max_len=max_len)
2155

2156

2157
def error_text_bytes_to_safe_str(err: bytes, *, max_len: Optional[int] = 500) -> str:
    """Converts an untrusted error bytes text to a sane printable ascii str.
    Never raises.

    Note that naive ascii conversion would be insufficient. Fun stuff:
    >>> b = b"my_long_prefix_blabla" + 21 * b"\x08" + b"malicious_stuff"
    >>> s = b.decode("ascii")
    >>> print(s)
    malicious_stuffblabla
    """
    # convert to ascii, to get rid of unicode stuff
    ascii_text = err.decode("ascii", errors='backslashreplace')
    # do repr to handle ascii special chars (especially when printing/logging the str)
    text = repr(ascii_text)
    return truncate_text(text, max_len=max_len)
2172

2173

2174
def truncate_text(text: str, *, max_len: Optional[int]) -> str:
    """Truncate *text* to max_len chars, appending a marker that records the
    original length. A max_len of None disables truncation.
    """
    if max_len is not None and len(text) > max_len:
        return text[:max_len] + f"... (truncated. orig_len={len(text)})"
    return text
2179

2180

2181
def nostr_pow_worker(nonce, nostr_pubk, target_bits, hash_function, hash_len_bits, shutdown):
    """Function to generate PoW for Nostr, to be spawned in a ProcessPoolExecutor.

    Scans nonces starting at *nonce*, looking for
    hash_function(b'electrum-' + nostr_pubk + nonce) with at least *target_bits*
    leading zero bits. Returns (digest, nonce) on success (and sets *shutdown*
    to stop the other workers); returns (None, None) once *shutdown* is set by
    another worker.
    """
    hash_preimage = b'electrum-' + nostr_pubk
    # hoisted loop-invariant: digest must be below this to have enough leading zero bits
    threshold = 1 << (hash_len_bits - target_bits)
    while True:
        # we cannot check is_set on each iteration as it has a lot of overhead, this way we can check
        # it with low overhead (just the additional range counter)
        for _ in range(1000000):
            digest = hash_function(hash_preimage + nonce.to_bytes(32, 'big')).digest()
            if int.from_bytes(digest, 'big') < threshold:
                shutdown.set()
                # fix: previously returned the *builtin* `hash` function
                # instead of the computed digest
                return digest, nonce
            nonce += 1
        if shutdown.is_set():
            return None, None
2195

2196

2197
async def gen_nostr_ann_pow(nostr_pubk: bytes, target_bits: int) -> Tuple[int, int]:
    """Generate a PoW for a Nostr announcement. The PoW is hash[b'electrum-'+pubk+nonce]

    Spawns one worker process per available CPU (minus one), each scanning a
    different slice of the nonce space; returns (nonce, achieved pow bits).
    """
    import multiprocessing  # not available on Android, so we import it here
    hash_function = hashlib.sha256
    hash_len_bits = 256
    max_nonce: int = (1 << (32 * 8)) - 1  # 32-byte nonce
    start_nonce = 0

    max_workers = max(multiprocessing.cpu_count() - 1, 1)  # use all but one CPU
    # manager.Event() is picklable/shared across processes; workers use it to
    # signal each other to stop once a solution is found
    manager = multiprocessing.Manager()
    shutdown = manager.Event()
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        tasks = []
        loop = asyncio.get_running_loop()
        for task in range(0, max_workers):
            task = loop.run_in_executor(
                executor,
                nostr_pow_worker,
                start_nonce,
                nostr_pubk,
                target_bits,
                hash_function,
                hash_len_bits,
                shutdown
            )
            tasks.append(task)
            start_nonce += max_nonce // max_workers  # split the nonce range between the processes
            if start_nonce > max_nonce:  # make sure we don't go over the max_nonce
                start_nonce = random.randint(0, int(max_nonce * 0.75))

        # first finished worker carries the winning (digest, nonce)
        done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        hash_res, nonce_res = done.pop().result()
        executor.shutdown(wait=False, cancel_futures=True)

    return nonce_res, get_nostr_ann_pow_amount(nostr_pubk, nonce_res)
2232

2233

2234
def get_nostr_ann_pow_amount(nostr_pubk: bytes, nonce: Optional[int]) -> int:
    """Return the amount of leading zero bits for a nostr announcement PoW."""
    if not nonce:
        # no nonce (None or 0) -> no proof of work
        return 0
    # same preimage scheme as nostr_pow_worker: sha256(b'electrum-' + pubk + nonce)
    preimage = b'electrum-' + nostr_pubk + nonce.to_bytes(32, 'big')
    digest_int = int.from_bytes(hashlib.sha256(preimage).digest(), 'big')
    return 256 - digest_int.bit_length()
2245

2246

2247
class OnchainHistoryItem(NamedTuple):
    """A single on-chain transaction in the wallet history, with running balance."""
    txid: str
    amount_sat: int
    fee_sat: int
    balance_sat: int
    tx_mined_status: TxMinedInfo
    # fix: group_id was accidentally annotated twice (duplicate declaration removed;
    # the first occurrence determined the runtime field order, which is preserved)
    group_id: Optional[str]
    label: str
    monotonic_timestamp: int
    def to_dict(self):
        """Return a JSON-ish dict representation for GUI/RPC consumption."""
        return {
            'txid': self.txid,
            'amount_sat': self.amount_sat,
            'fee_sat': self.fee_sat,
            'height': self.tx_mined_status.height,
            'confirmations': self.tx_mined_status.conf,
            'timestamp': self.tx_mined_status.timestamp,
            'monotonic_timestamp': self.monotonic_timestamp,
            'incoming': True if self.amount_sat>0 else False,
            'bc_value': Satoshis(self.amount_sat),
            'bc_balance': Satoshis(self.balance_sat),
            'date': timestamp_to_datetime(self.tx_mined_status.timestamp),
            'txpos_in_block': self.tx_mined_status.txpos,
            'wanted_height': self.tx_mined_status.wanted_height,
            'label': self.label,
            'group_id': self.group_id,
        }
2275

2276
class LightningHistoryItem(NamedTuple):
    """A single Lightning payment in the wallet history."""
    payment_hash: str
    preimage: str
    amount_msat: int
    fee_msat: Optional[int]
    type: str
    group_id: Optional[str]
    timestamp: int
    label: str
    def to_dict(self):
        """Return a JSON-ish dict representation for GUI/RPC consumption."""
        return {
            'type': self.type,
            'label': self.label,
            'timestamp': self.timestamp or 0,
            'date': timestamp_to_datetime(self.timestamp),
            'amount_msat': self.amount_msat,
            'fee_msat': self.fee_msat,
            'payment_hash': self.payment_hash,
            'preimage': self.preimage,
            'group_id': self.group_id,
            # msat -> sat via Decimal to avoid float rounding
            'ln_value': Satoshis(Decimal(self.amount_msat) / 1000),
        }
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc