• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13236757158

10 Feb 2025 08:39AM UTC coverage: 57.649% (-1.2%) from 58.815%
13236757158

Pull #9493

github

ziggie1984
lncli: for some cmds we don't replace the data of the response.

For some cmds it is not very practical to replace the json output
because we might pipe it into other commands. For example when
creating the route we want to pipe it into sendtoRoute.
Pull Request #9493: For some lncli cmds we should not replace the content with other data

0 of 9 new or added lines in 2 files covered. (0.0%)

19535 existing lines in 252 files now uncovered.

103517 of 179563 relevant lines covered (57.65%)

24878.49 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

69.62
/graph/builder.go
1
package graph
2

3
import (
4
        "bytes"
5
        "fmt"
6
        "strings"
7
        "sync"
8
        "sync/atomic"
9
        "time"
10

11
        "github.com/btcsuite/btcd/btcec/v2"
12
        "github.com/btcsuite/btcd/btcutil"
13
        "github.com/btcsuite/btcd/chaincfg/chainhash"
14
        "github.com/btcsuite/btcd/wire"
15
        "github.com/go-errors/errors"
16
        "github.com/lightningnetwork/lnd/batch"
17
        "github.com/lightningnetwork/lnd/chainntnfs"
18
        "github.com/lightningnetwork/lnd/fn/v2"
19
        graphdb "github.com/lightningnetwork/lnd/graph/db"
20
        "github.com/lightningnetwork/lnd/graph/db/models"
21
        "github.com/lightningnetwork/lnd/input"
22
        "github.com/lightningnetwork/lnd/kvdb"
23
        "github.com/lightningnetwork/lnd/lnutils"
24
        "github.com/lightningnetwork/lnd/lnwallet"
25
        "github.com/lightningnetwork/lnd/lnwallet/btcwallet"
26
        "github.com/lightningnetwork/lnd/lnwallet/chanvalidate"
27
        "github.com/lightningnetwork/lnd/lnwire"
28
        "github.com/lightningnetwork/lnd/multimutex"
29
        "github.com/lightningnetwork/lnd/netann"
30
        "github.com/lightningnetwork/lnd/routing/chainview"
31
        "github.com/lightningnetwork/lnd/routing/route"
32
        "github.com/lightningnetwork/lnd/ticker"
33
)
34

35
const (
	// DefaultChannelPruneExpiry is the default duration used to determine
	// if a channel should be pruned or not.
	DefaultChannelPruneExpiry = time.Hour * 24 * 14

	// DefaultFirstTimePruneDelay is the time we'll wait after startup
	// before attempting to prune the graph for zombie channels. We don't
	// do it immediately after startup to allow lnd to start up without
	// getting blocked by this job.
	DefaultFirstTimePruneDelay = 30 * time.Second

	// defaultStatInterval governs how often the router will log non-empty
	// stats related to processing new channels, updates, or node
	// announcements.
	defaultStatInterval = time.Minute
)
51

52
var (
	// ErrGraphBuilderShuttingDown is returned if the graph builder is in
	// the process of shutting down.
	ErrGraphBuilderShuttingDown = fmt.Errorf("graph builder shutting down")
)
57

58
// Config holds the configuration required by the Builder.
type Config struct {
	// SelfNode is the public key of the node that this channel router
	// belongs to.
	SelfNode route.Vertex

	// Graph is the channel graph that the ChannelRouter will use to gather
	// metrics from and also to carry out path finding queries.
	Graph DB

	// Chain is the router's source to the most up-to-date blockchain data.
	// All incoming advertised channels will be checked against the chain
	// to ensure that the channels advertised are still open.
	Chain lnwallet.BlockChainIO

	// ChainView is an instance of a FilteredChainView which is used to
	// watch the sub-set of the UTXO set (the set of active channels) that
	// we need in order to properly maintain the channel graph.
	ChainView chainview.FilteredChainView

	// Notifier is a reference to the ChainNotifier, used to grab
	// the latest blocks if the router is missing any.
	Notifier chainntnfs.ChainNotifier

	// ChannelPruneExpiry is the duration used to determine if a channel
	// should be pruned or not. If the delta between now and when the
	// channel was last updated is greater than ChannelPruneExpiry, then
	// the channel is marked as a zombie channel eligible for pruning.
	ChannelPruneExpiry time.Duration

	// GraphPruneInterval is used as an interval to determine how often we
	// should examine the channel graph to garbage collect zombie channels.
	GraphPruneInterval time.Duration

	// FirstTimePruneDelay is the time we'll wait after startup before
	// attempting to prune the graph for zombie channels. We don't do it
	// immediately after startup to allow lnd to start up without getting
	// blocked by this job.
	FirstTimePruneDelay time.Duration

	// AssumeChannelValid toggles whether the router will check for
	// spentness of channel outpoints. For neutrino, this saves long rescans
	// from blocking initial usage of the daemon.
	AssumeChannelValid bool

	// StrictZombiePruning determines if we attempt to prune zombie
	// channels according to a stricter criteria. If true, then we'll prune
	// a channel if only *one* of the edges is considered a zombie.
	// Otherwise, we'll only prune the channel when both edges have a very
	// dated last update.
	StrictZombiePruning bool

	// IsAlias returns whether a passed ShortChannelID is an alias. This is
	// only used for our local channels.
	IsAlias func(scid lnwire.ShortChannelID) bool
}
114

115
// Builder builds and maintains a view of the Lightning Network graph.
type Builder struct {
	// started/stopped guard Start/Stop so each runs its body exactly once.
	started atomic.Bool
	stopped atomic.Bool

	// ntfnClientCounter issues unique IDs for topology notification
	// clients.
	ntfnClientCounter atomic.Uint64

	// bestHeight is the latest chain height the builder has synced the
	// graph to.
	bestHeight atomic.Uint32

	cfg *Config

	// newBlocks is a channel in which new blocks connected to the end of
	// the main chain are sent over, and blocks updated after a call to
	// UpdateFilter.
	newBlocks <-chan *chainview.FilteredBlock

	// staleBlocks is a channel in which blocks disconnected from the end
	// of our currently known best chain are sent over.
	staleBlocks <-chan *chainview.FilteredBlock

	// topologyClients maps a client's unique notification ID to a
	// topologyClient client that contains its notification dispatch
	// channel.
	topologyClients *lnutils.SyncMap[uint64, *topologyClient]

	// ntfnClientUpdates is a channel that's used to send new updates to
	// topology notification clients to the Builder. Updates either
	// add a new notification client, or cancel notifications for an
	// existing client.
	ntfnClientUpdates chan *topologyClientUpdate

	// channelEdgeMtx is a mutex we use to make sure we process only one
	// ChannelEdgePolicy at a time for a given channelID, to ensure
	// consistency between the various database accesses.
	channelEdgeMtx *multimutex.Mutex[uint64]

	// statTicker is a resumable ticker that logs the router's progress as
	// it discovers channels or receives updates.
	statTicker ticker.Ticker

	// stats tracks newly processed channels, updates, and node
	// announcements over a window of defaultStatInterval.
	stats *builderStats

	quit chan struct{}
	wg   sync.WaitGroup
}
161

162
// A compile time check to ensure Builder implements the
// ChannelGraphSource interface.
var _ ChannelGraphSource = (*Builder)(nil)
165

166
// NewBuilder constructs a new Builder.
167
func NewBuilder(cfg *Config) (*Builder, error) {
21✔
168
        return &Builder{
21✔
169
                cfg:               cfg,
21✔
170
                topologyClients:   &lnutils.SyncMap[uint64, *topologyClient]{},
21✔
171
                ntfnClientUpdates: make(chan *topologyClientUpdate),
21✔
172
                channelEdgeMtx:    multimutex.NewMutex[uint64](),
21✔
173
                statTicker:        ticker.New(defaultStatInterval),
21✔
174
                stats:             new(builderStats),
21✔
175
                quit:              make(chan struct{}),
21✔
176
        }, nil
21✔
177
}
21✔
178

179
// Start launches all the goroutines the Builder requires to carry out its
// duties. If the builder has already been started, then this method is a noop.
func (b *Builder) Start() error {
	if !b.started.CompareAndSwap(false, true) {
		return nil
	}

	log.Info("Builder starting")

	bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
	if err != nil {
		return err
	}

	// If the graph has never been pruned, or hasn't fully been created yet,
	// then we don't treat this as an explicit error.
	if _, _, err := b.cfg.Graph.PruneTip(); err != nil {
		switch {
		case errors.Is(err, graphdb.ErrGraphNeverPruned):
			fallthrough

		case errors.Is(err, graphdb.ErrGraphNotFound):
			// If the graph has never been pruned, then we'll set
			// the prune height to the current best height of the
			// chain backend.
			_, err = b.cfg.Graph.PruneGraph(
				nil, bestHash, uint32(bestHeight),
			)
			if err != nil {
				return err
			}

		default:
			return err
		}
	}

	// If AssumeChannelValid is present, then we won't rely on pruning
	// channels from the graph based on their spentness, but whether they
	// are considered zombies or not. We will start zombie pruning after a
	// small delay, to avoid slowing down startup of lnd.
	if b.cfg.AssumeChannelValid { //nolint:nestif
		time.AfterFunc(b.cfg.FirstTimePruneDelay, func() {
			// Skip the prune entirely if the builder was stopped
			// before the delay elapsed.
			select {
			case <-b.quit:
				return
			default:
			}

			log.Info("Initial zombie prune starting")
			if err := b.pruneZombieChans(); err != nil {
				log.Errorf("Unable to prune zombies: %v", err)
			}
		})
	} else {
		// Otherwise, we'll use our filtered chain view to prune
		// channels as soon as they are detected as spent on-chain.
		if err := b.cfg.ChainView.Start(); err != nil {
			return err
		}

		// Once the instance is active, we'll fetch the channel we'll
		// receive notifications over.
		b.newBlocks = b.cfg.ChainView.FilteredBlocks()
		b.staleBlocks = b.cfg.ChainView.DisconnectedBlocks()

		// Before we perform our manual block pruning, we'll construct
		// and apply a fresh chain filter to the active
		// FilteredChainView instance.  We do this before, as otherwise
		// we may miss on-chain events as the filter hasn't properly
		// been applied.
		channelView, err := b.cfg.Graph.ChannelView()
		if err != nil && !errors.Is(
			err, graphdb.ErrGraphNoEdgesFound,
		) {

			return err
		}

		log.Infof("Filtering chain using %v channels active",
			len(channelView))

		if len(channelView) != 0 {
			err = b.cfg.ChainView.UpdateFilter(
				channelView, uint32(bestHeight),
			)
			if err != nil {
				return err
			}
		}

		// The graph pruning might have taken a while and there could be
		// new blocks available.
		_, bestHeight, err = b.cfg.Chain.GetBestBlock()
		if err != nil {
			return err
		}
		b.bestHeight.Store(uint32(bestHeight))

		// Before we begin normal operation of the router, we first need
		// to synchronize the channel graph to the latest state of the
		// UTXO set.
		if err := b.syncGraphWithChain(); err != nil {
			return err
		}

		// Finally, before we proceed, we'll prune any unconnected nodes
		// from the graph in order to ensure we maintain a tight graph
		// of "useful" nodes.
		err = b.cfg.Graph.PruneGraphNodes()
		if err != nil &&
			!errors.Is(err, graphdb.ErrGraphNodesNotFound) {

			return err
		}
	}

	b.wg.Add(1)
	go b.networkHandler()

	log.Debug("Builder started")

	return nil
}
303

304
// Stop signals to the Builder that it should halt all routines. This method
// will *block* until all goroutines have exited. If the builder has already
// stopped then this method will return immediately.
func (b *Builder) Stop() error {
	if !b.stopped.CompareAndSwap(false, true) {
		return nil
	}

	log.Info("Builder shutting down...")

	// Our filtered chain view could've only been started if
	// AssumeChannelValid isn't present.
	if !b.cfg.AssumeChannelValid {
		if err := b.cfg.ChainView.Stop(); err != nil {
			return err
		}
	}

	close(b.quit)
	b.wg.Wait()

	log.Debug("Builder shutdown complete")

	return nil
}
329

330
// syncGraphWithChain attempts to synchronize the current channel graph with
// the latest UTXO set state. This process involves pruning from the channel
// graph any channels which have been closed by spending their funding output
// since we've been down.
func (b *Builder) syncGraphWithChain() error {
	// First, we'll need to check to see if we're already in sync with the
	// latest state of the UTXO set.
	bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
	if err != nil {
		return err
	}
	b.bestHeight.Store(uint32(bestHeight))

	pruneHash, pruneHeight, err := b.cfg.Graph.PruneTip()
	if err != nil {
		switch {
		// If the graph has never been pruned, or hasn't fully been
		// created yet, then we don't treat this as an explicit error.
		case errors.Is(err, graphdb.ErrGraphNeverPruned):
		case errors.Is(err, graphdb.ErrGraphNotFound):
		default:
			return err
		}
	}

	log.Infof("Prune tip for Channel Graph: height=%v, hash=%v",
		pruneHeight, pruneHash)

	switch {
	// If the graph has never been pruned, then we can exit early as this
	// entails it's being created for the first time and hasn't seen any
	// block or created channels.
	case pruneHeight == 0 || pruneHash == nil:
		return nil

	// If the block hashes and heights match exactly, then we don't need to
	// prune the channel graph as we're already fully in sync.
	case bestHash.IsEqual(pruneHash) && uint32(bestHeight) == pruneHeight:
		return nil
	}

	// If the main chain blockhash at prune height is different from the
	// prune hash, this might indicate the database is on a stale branch.
	mainBlockHash, err := b.cfg.Chain.GetBlockHash(int64(pruneHeight))
	if err != nil {
		return err
	}

	// While we are on a stale branch of the chain, walk backwards to find
	// first common block.
	for !pruneHash.IsEqual(mainBlockHash) {
		log.Infof("channel graph is stale. Disconnecting block %v "+
			"(hash=%v)", pruneHeight, pruneHash)
		// Prune the graph for every channel that was opened at height
		// >= pruneHeight.
		_, err := b.cfg.Graph.DisconnectBlockAtHeight(pruneHeight)
		if err != nil {
			return err
		}

		pruneHash, pruneHeight, err = b.cfg.Graph.PruneTip()
		switch {
		// If at this point the graph has never been pruned, we can exit
		// as this entails we are back to the point where it hasn't seen
		// any block or created channels, alas there's nothing left to
		// prune.
		case errors.Is(err, graphdb.ErrGraphNeverPruned):
			return nil

		case errors.Is(err, graphdb.ErrGraphNotFound):
			return nil

		case err != nil:
			return err

		default:
		}

		mainBlockHash, err = b.cfg.Chain.GetBlockHash(
			int64(pruneHeight),
		)
		if err != nil {
			return err
		}
	}

	log.Infof("Syncing channel graph from height=%v (hash=%v) to "+
		"height=%v (hash=%v)", pruneHeight, pruneHash, bestHeight,
		bestHash)

	// If we're not yet caught up, then we'll walk forward in the chain
	// pruning the channel graph with each new block that hasn't yet been
	// consumed by the channel graph.
	var spentOutputs []*wire.OutPoint
	for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { //nolint:ll
		// Break out of the rescan early if a shutdown has been
		// requested, otherwise long rescans will block the daemon from
		// shutting down promptly.
		select {
		case <-b.quit:
			return ErrGraphBuilderShuttingDown
		default:
		}

		// Using the next height, request a manual block pruning from
		// the chainview for the particular block hash.
		log.Infof("Filtering block for closed channels, at height: %v",
			int64(nextHeight))
		nextHash, err := b.cfg.Chain.GetBlockHash(int64(nextHeight))
		if err != nil {
			return err
		}
		log.Tracef("Running block filter on block with hash: %v",
			nextHash)
		filterBlock, err := b.cfg.ChainView.FilterBlock(nextHash)
		if err != nil {
			return err
		}

		// We're only interested in all prior outputs that have been
		// spent in the block, so collate all the referenced previous
		// outpoints within each tx and input.
		for _, tx := range filterBlock.Transactions {
			for _, txIn := range tx.TxIn {
				spentOutputs = append(spentOutputs,
					&txIn.PreviousOutPoint)
			}
		}
	}

	// With the spent outputs gathered, attempt to prune the channel graph,
	// also passing in the best hash+height so the prune tip can be updated.
	closedChans, err := b.cfg.Graph.PruneGraph(
		spentOutputs, bestHash, uint32(bestHeight),
	)
	if err != nil {
		return err
	}

	log.Infof("Graph pruning complete: %v channels were closed since "+
		"height %v", len(closedChans), pruneHeight)

	return nil
}
474

475
// isZombieChannel takes two edge policy updates and determines if the
476
// corresponding channel should be considered a zombie. The first boolean is
477
// true if the policy update from node 1 is considered a zombie, the second
478
// boolean is that of node 2, and the final boolean is true if the channel
479
// is considered a zombie.
480
func (b *Builder) isZombieChannel(e1,
481
        e2 *models.ChannelEdgePolicy) (bool, bool, bool) {
6✔
482

6✔
483
        chanExpiry := b.cfg.ChannelPruneExpiry
6✔
484

6✔
485
        e1Zombie := e1 == nil || time.Since(e1.LastUpdate) >= chanExpiry
6✔
486
        e2Zombie := e2 == nil || time.Since(e2.LastUpdate) >= chanExpiry
6✔
487

6✔
488
        var e1Time, e2Time time.Time
6✔
489
        if e1 != nil {
10✔
490
                e1Time = e1.LastUpdate
4✔
491
        }
4✔
492
        if e2 != nil {
12✔
493
                e2Time = e2.LastUpdate
6✔
494
        }
6✔
495

496
        return e1Zombie, e2Zombie, b.IsZombieChannel(e1Time, e2Time)
6✔
497
}
498

499
// IsZombieChannel takes the timestamps of the latest channel updates for a
500
// channel and returns true if the channel should be considered a zombie based
501
// on these timestamps.
502
func (b *Builder) IsZombieChannel(updateTime1,
503
        updateTime2 time.Time) bool {
6✔
504

6✔
505
        chanExpiry := b.cfg.ChannelPruneExpiry
6✔
506

6✔
507
        e1Zombie := updateTime1.IsZero() ||
6✔
508
                time.Since(updateTime1) >= chanExpiry
6✔
509

6✔
510
        e2Zombie := updateTime2.IsZero() ||
6✔
511
                time.Since(updateTime2) >= chanExpiry
6✔
512

6✔
513
        // If we're using strict zombie pruning, then a channel is only
6✔
514
        // considered live if both edges have a recent update we know of.
6✔
515
        if b.cfg.StrictZombiePruning {
9✔
516
                return e1Zombie || e2Zombie
3✔
517
        }
3✔
518

519
        // Otherwise, if we're using the less strict variant, then a channel is
520
        // considered live if either of the edges have a recent update.
521
        return e1Zombie && e2Zombie
3✔
522
}
523

524
// pruneZombieChans is a method that will be called periodically to prune out
// any "zombie" channels. We consider channels zombies if *both* edges haven't
// been updated since our zombie horizon. If AssumeChannelValid is present,
// we'll also consider channels zombies if *both* edges are disabled. This
// usually signals that a channel has been closed on-chain. We do this
// periodically to keep a healthy, lively routing table.
func (b *Builder) pruneZombieChans() error {
	chansToPrune := make(map[uint64]struct{})
	chanExpiry := b.cfg.ChannelPruneExpiry

	log.Infof("Examining channel graph for zombie channels")

	// A helper method to detect if the channel belongs to this node.
	isSelfChannelEdge := func(info *models.ChannelEdgeInfo) bool {
		return info.NodeKey1Bytes == b.cfg.SelfNode ||
			info.NodeKey2Bytes == b.cfg.SelfNode
	}

	// First, we'll collect all the channels which are eligible for garbage
	// collection due to being zombies.
	filterPruneChans := func(info *models.ChannelEdgeInfo,
		e1, e2 *models.ChannelEdgePolicy) error {

		// Exit early in case this channel is already marked to be
		// pruned.
		_, markedToPrune := chansToPrune[info.ChannelID]
		if markedToPrune {
			return nil
		}

		// We'll ensure that we don't attempt to prune our *own*
		// channels from the graph, as in any case this should be
		// re-advertised by the sub-system above us.
		if isSelfChannelEdge(info) {
			return nil
		}

		e1Zombie, e2Zombie, isZombieChan := b.isZombieChannel(e1, e2)

		if e1Zombie {
			log.Tracef("Node1 pubkey=%x of chan_id=%v is zombie",
				info.NodeKey1Bytes, info.ChannelID)
		}

		if e2Zombie {
			log.Tracef("Node2 pubkey=%x of chan_id=%v is zombie",
				info.NodeKey2Bytes, info.ChannelID)
		}

		// If either edge hasn't been updated for a period of
		// chanExpiry, then we'll mark the channel itself as eligible
		// for graph pruning.
		if !isZombieChan {
			return nil
		}

		log.Debugf("ChannelID(%v) is a zombie, collecting to prune",
			info.ChannelID)

		// TODO(roasbeef): add ability to delete single directional edge
		chansToPrune[info.ChannelID] = struct{}{}

		return nil
	}

	// If AssumeChannelValid is present we'll look at the disabled bit for
	// both edges. If they're both disabled, then we can interpret this as
	// the channel being closed and can prune it from our graph.
	if b.cfg.AssumeChannelValid {
		disabledChanIDs, err := b.cfg.Graph.DisabledChannelIDs()
		if err != nil {
			return fmt.Errorf("unable to get disabled channels "+
				"ids chans: %v", err)
		}

		disabledEdges, err := b.cfg.Graph.FetchChanInfos(
			disabledChanIDs,
		)
		if err != nil {
			return fmt.Errorf("unable to fetch disabled channels "+
				"edges chans: %v", err)
		}

		// Ensuring we won't prune our own channel from the graph.
		for _, disabledEdge := range disabledEdges {
			if !isSelfChannelEdge(disabledEdge.Info) {
				chansToPrune[disabledEdge.Info.ChannelID] =
					struct{}{}
			}
		}
	}

	// Next, sweep the channel update horizon from the beginning of time up
	// to the zombie expiry cutoff, collecting stale channels.
	startTime := time.Unix(0, 0)
	endTime := time.Now().Add(-1 * chanExpiry)
	oldEdges, err := b.cfg.Graph.ChanUpdatesInHorizon(startTime, endTime)
	if err != nil {
		return fmt.Errorf("unable to fetch expired channel updates "+
			"chans: %v", err)
	}

	for _, u := range oldEdges {
		err = filterPruneChans(u.Info, u.Policy1, u.Policy2)
		if err != nil {
			return fmt.Errorf("error filtering channels to "+
				"prune: %w", err)
		}
	}

	log.Infof("Pruning %v zombie channels", len(chansToPrune))
	if len(chansToPrune) == 0 {
		return nil
	}

	// With the set of zombie-like channels obtained, we'll do another pass
	// to delete them from the channel graph.
	toPrune := make([]uint64, 0, len(chansToPrune))
	for chanID := range chansToPrune {
		toPrune = append(toPrune, chanID)
		log.Tracef("Pruning zombie channel with ChannelID(%v)", chanID)
	}
	err = b.cfg.Graph.DeleteChannelEdges(
		b.cfg.StrictZombiePruning, true, toPrune...,
	)
	if err != nil {
		return fmt.Errorf("unable to delete zombie channels: %w", err)
	}

	// With the channels pruned, we'll also attempt to prune any nodes that
	// were a part of them.
	err = b.cfg.Graph.PruneGraphNodes()
	if err != nil && !errors.Is(err, graphdb.ErrGraphNodesNotFound) {
		return fmt.Errorf("unable to prune graph nodes: %w", err)
	}

	return nil
}
660

661
// handleNetworkUpdate is responsible for processing the update message and
662
// notifies topology changes, if any.
663
//
664
// NOTE: must be run inside goroutine.
665
func (b *Builder) handleNetworkUpdate(update *routingMsg) {
30✔
666
        defer b.wg.Done()
30✔
667

30✔
668
        // Process the routing update to determine if this is either a new
30✔
669
        // update from our PoV or an update to a prior vertex/edge we
30✔
670
        // previously accepted.
30✔
671
        var err error
30✔
672
        switch msg := update.msg.(type) {
30✔
673
        case *models.LightningNode:
7✔
674
                err = b.addNode(msg, update.op...)
7✔
675

676
        case *models.ChannelEdgeInfo:
17✔
677
                err = b.addEdge(msg, update.op...)
17✔
678

679
        case *models.ChannelEdgePolicy:
6✔
680
                err = b.updateEdge(msg, update.op...)
6✔
681

682
        default:
×
683
                err = errors.Errorf("wrong routing update message type")
×
684
        }
685
        update.err <- err
30✔
686

30✔
687
        // If the error is not nil here, there's no need to send topology
30✔
688
        // change.
30✔
689
        if err != nil {
35✔
690
                // Log as a debug message if this is not an error we need to be
5✔
691
                // concerned about.
5✔
692
                if IsError(err, ErrIgnored, ErrOutdated) {
7✔
693
                        log.Debugf("process network updates got: %v", err)
2✔
694
                } else {
5✔
695
                        log.Errorf("process network updates got: %v", err)
3✔
696
                }
3✔
697

698
                return
5✔
699
        }
700

701
        // Otherwise, we'll send off a new notification for the newly accepted
702
        // update, if any.
703
        topChange := &TopologyChange{}
25✔
704
        err = addToTopologyChange(b.cfg.Graph, topChange, update.msg)
25✔
705
        if err != nil {
25✔
706
                log.Errorf("unable to update topology change notification: %v",
×
707
                        err)
×
708
                return
×
709
        }
×
710

711
        if !topChange.isEmpty() {
36✔
712
                b.notifyTopologyChange(topChange)
11✔
713
        }
11✔
714
}
715

716
// networkHandler is the primary goroutine for the Builder. The roles of
// this goroutine include answering queries related to the state of the
// network, pruning the graph on new block notification and registering new
// topology clients.
//
// NOTE: This MUST be run as a goroutine.
func (b *Builder) networkHandler() {
	defer b.wg.Done()

	// Periodically sweep the graph for zombie channels; the interval is
	// operator-configurable.
	graphPruneTicker := time.NewTicker(b.cfg.GraphPruneInterval)
	defer graphPruneTicker.Stop()

	defer b.statTicker.Stop()

	b.stats.Reset()

	for {
		// If there are stats, resume the statTicker.
		if !b.stats.Empty() {
			b.statTicker.Resume()
		}

		select {
		case chainUpdate, ok := <-b.staleBlocks:
			// If the channel has been closed, then this indicates
			// the daemon is shutting down, so we exit ourselves.
			if !ok {
				return
			}

			// Since this block is stale, we update our best height
			// to the previous block.
			blockHeight := chainUpdate.Height
			b.bestHeight.Store(blockHeight - 1)

			// Update the channel graph to reflect that this block
			// was disconnected.
			_, err := b.cfg.Graph.DisconnectBlockAtHeight(
				blockHeight,
			)
			if err != nil {
				log.Errorf("unable to prune graph with stale "+
					"block: %v", err)
				continue
			}

			// TODO(halseth): notify client about the reorg?

		// A new block has arrived, so we can prune the channel graph
		// of any channels which were closed in the block.
		case chainUpdate, ok := <-b.newBlocks:
			// If the channel has been closed, then this indicates
			// the daemon is shutting down, so we exit ourselves.
			if !ok {
				return
			}

			// We'll ensure that any new blocks received attach
			// directly to the end of our main chain. If not, then
			// we've somehow missed some blocks. Here we'll catch
			// up the chain with the latest blocks.
			currentHeight := b.bestHeight.Load()
			switch {
			case chainUpdate.Height == currentHeight+1:
				err := b.updateGraphWithClosedChannels(
					chainUpdate,
				)
				if err != nil {
					log.Errorf("unable to prune graph "+
						"with closed channels: %v", err)
				}

			case chainUpdate.Height > currentHeight+1:
				// We skipped ahead: fetch and process every
				// block between our tip and the new one.
				log.Errorf("out of order block: expecting "+
					"height=%v, got height=%v",
					currentHeight+1, chainUpdate.Height)

				err := b.getMissingBlocks(
					currentHeight, chainUpdate,
				)
				if err != nil {
					log.Errorf("unable to retrieve missing"+
						"blocks: %v", err)
				}

			case chainUpdate.Height < currentHeight+1:
				// Duplicate/old block: already handled, so no
				// pruning is performed.
				log.Errorf("out of order block: expecting "+
					"height=%v, got height=%v",
					currentHeight+1, chainUpdate.Height)

				log.Infof("Skipping channel pruning since "+
					"received block height %v was already"+
					" processed.", chainUpdate.Height)
			}

		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-b.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID

			if ntfnUpdate.cancel {
				// Stop the client's goroutine and wait for it
				// to exit before closing its notification
				// channel, so we never send on a closed chan.
				client, ok := b.topologyClients.LoadAndDelete(
					clientID,
				)
				if ok {
					close(client.exit)
					client.wg.Wait()

					close(client.ntfnChan)
				}

				continue
			}

			b.topologyClients.Store(clientID, &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			})

		// The graph prune ticker has ticked, so we'll examine the
		// state of the known graph to filter out any zombie channels
		// for pruning.
		case <-graphPruneTicker.C:
			if err := b.pruneZombieChans(); err != nil {
				log.Errorf("Unable to prune zombies: %v", err)
			}

		// Log any stats if we've processed a non-empty number of
		// channels, updates, or nodes. We'll only pause the ticker if
		// the last window contained no updates to avoid resuming and
		// pausing while consecutive windows contain new info.
		case <-b.statTicker.Ticks():
			if !b.stats.Empty() {
				log.Infof(b.stats.String())
			} else {
				b.statTicker.Pause()
			}
			b.stats.Reset()

		// The router has been signalled to exit, to we exit our main
		// loop so the wait group can be decremented.
		case <-b.quit:
			return
		}
	}
}
863

864
// getMissingBlocks walks through all missing blocks and updates the graph
// closed channels accordingly.
func (b *Builder) getMissingBlocks(currentHeight uint32,
	chainUpdate *chainview.FilteredBlock) error {

	// Look up the hash of the last block we did process so we can
	// register for block epoch notifications starting right after it.
	outdatedHash, err := b.cfg.Chain.GetBlockHash(int64(currentHeight))
	if err != nil {
		return err
	}

	outdatedBlock := &chainntnfs.BlockEpoch{
		Height: int32(currentHeight),
		Hash:   outdatedHash,
	}

	epochClient, err := b.cfg.Notifier.RegisterBlockEpochNtfn(
		outdatedBlock,
	)
	if err != nil {
		return err
	}
	defer epochClient.Cancel()

	// Number of blocks between our stale tip and the newly announced
	// block, i.e. how many epochs we expect to receive below.
	blockDifference := int(chainUpdate.Height - currentHeight)

	// We'll walk through all the outdated blocks and make sure we're able
	// to update the graph with any closed channels from them.
	for i := 0; i < blockDifference; i++ {
		var (
			missingBlock *chainntnfs.BlockEpoch
			ok           bool
		)

		select {
		case missingBlock, ok = <-epochClient.Epochs:
			// A closed epoch stream means the notifier is going
			// away; treat this as a clean exit rather than an
			// error.
			if !ok {
				return nil
			}

		case <-b.quit:
			return nil
		}

		filteredBlock, err := b.cfg.ChainView.FilterBlock(
			missingBlock.Hash,
		)
		if err != nil {
			return err
		}

		err = b.updateGraphWithClosedChannels(
			filteredBlock,
		)
		if err != nil {
			return err
		}
	}

	return nil
}
924

925
// updateGraphWithClosedChannels prunes the channel graph of closed channels
926
// that are no longer needed.
927
func (b *Builder) updateGraphWithClosedChannels(
928
        chainUpdate *chainview.FilteredBlock) error {
67✔
929

67✔
930
        // Once a new block arrives, we update our running track of the height
67✔
931
        // of the chain tip.
67✔
932
        blockHeight := chainUpdate.Height
67✔
933

67✔
934
        b.bestHeight.Store(blockHeight)
67✔
935
        log.Infof("Pruning channel graph using block %v (height=%v)",
67✔
936
                chainUpdate.Hash, blockHeight)
67✔
937

67✔
938
        // We're only interested in all prior outputs that have been spent in
67✔
939
        // the block, so collate all the referenced previous outpoints within
67✔
940
        // each tx and input.
67✔
941
        var spentOutputs []*wire.OutPoint
67✔
942
        for _, tx := range chainUpdate.Transactions {
68✔
943
                for _, txIn := range tx.TxIn {
2✔
944
                        spentOutputs = append(spentOutputs,
1✔
945
                                &txIn.PreviousOutPoint)
1✔
946
                }
1✔
947
        }
948

949
        // With the spent outputs gathered, attempt to prune the channel graph,
950
        // also passing in the hash+height of the block being pruned so the
951
        // prune tip can be updated.
952
        chansClosed, err := b.cfg.Graph.PruneGraph(spentOutputs,
67✔
953
                &chainUpdate.Hash, chainUpdate.Height)
67✔
954
        if err != nil {
67✔
955
                log.Errorf("unable to prune routing table: %v", err)
×
956
                return err
×
957
        }
×
958

959
        log.Infof("Block %v (height=%v) closed %v channels", chainUpdate.Hash,
67✔
960
                blockHeight, len(chansClosed))
67✔
961

67✔
962
        if len(chansClosed) == 0 {
133✔
963
                return err
66✔
964
        }
66✔
965

966
        // Notify all currently registered clients of the newly closed channels.
967
        closeSummaries := createCloseSummaries(blockHeight, chansClosed...)
1✔
968
        b.notifyTopologyChange(&TopologyChange{
1✔
969
                ClosedChannels: closeSummaries,
1✔
970
        })
1✔
971

1✔
972
        return nil
1✔
973
}
974

975
// assertNodeAnnFreshness returns a non-nil error if we have an announcement in
976
// the database for the passed node with a timestamp newer than the passed
977
// timestamp. ErrIgnored will be returned if we already have the node, and
978
// ErrOutdated will be returned if we have a timestamp that's after the new
979
// timestamp.
980
func (b *Builder) assertNodeAnnFreshness(node route.Vertex,
981
        msgTimestamp time.Time) error {
10✔
982

10✔
983
        // If we are not already aware of this node, it means that we don't
10✔
984
        // know about any channel using this node. To avoid a DoS attack by
10✔
985
        // node announcements, we will ignore such nodes. If we do know about
10✔
986
        // this node, check that this update brings info newer than what we
10✔
987
        // already have.
10✔
988
        lastUpdate, exists, err := b.cfg.Graph.HasLightningNode(node)
10✔
989
        if err != nil {
10✔
990
                return errors.Errorf("unable to query for the "+
×
991
                        "existence of node: %v", err)
×
992
        }
×
993
        if !exists {
11✔
994
                return NewErrf(ErrIgnored, "Ignoring node announcement"+
1✔
995
                        " for node not found in channel graph (%x)",
1✔
996
                        node[:])
1✔
997
        }
1✔
998

999
        // If we've reached this point then we're aware of the vertex being
1000
        // advertised. So we now check if the new message has a new time stamp,
1001
        // if not then we won't accept the new data as it would override newer
1002
        // data.
1003
        if !lastUpdate.Before(msgTimestamp) {
10✔
1004
                return NewErrf(ErrOutdated, "Ignoring outdated "+
1✔
1005
                        "announcement for %x", node[:])
1✔
1006
        }
1✔
1007

1008
        return nil
8✔
1009
}
1010

1011
// addZombieEdge adds a channel that failed complete validation into the zombie
1012
// index so we can avoid having to re-validate it in the future.
1013
func (b *Builder) addZombieEdge(chanID uint64) error {
3✔
1014
        // If the edge fails validation we'll mark the edge itself as a zombie
3✔
1015
        // so we don't continue to request it. We use the "zero key" for both
3✔
1016
        // node pubkeys so this edge can't be resurrected.
3✔
1017
        var zeroKey [33]byte
3✔
1018
        err := b.cfg.Graph.MarkEdgeZombie(chanID, zeroKey, zeroKey)
3✔
1019
        if err != nil {
3✔
1020
                return fmt.Errorf("unable to mark spent chan(id=%v) as a "+
×
1021
                        "zombie: %w", chanID, err)
×
1022
        }
×
1023

1024
        return nil
3✔
1025
}
1026

1027
// makeFundingScript is used to make the funding script for both segwit v0 and
1028
// segwit v1 (taproot) channels.
1029
//
1030
// TODO(roasbeef: export and use elsewhere?
1031
func makeFundingScript(bitcoinKey1, bitcoinKey2 []byte, chanFeatures []byte,
1032
        tapscriptRoot fn.Option[chainhash.Hash]) ([]byte, error) {
16✔
1033

16✔
1034
        legacyFundingScript := func() ([]byte, error) {
32✔
1035
                witnessScript, err := input.GenMultiSigScript(
16✔
1036
                        bitcoinKey1, bitcoinKey2,
16✔
1037
                )
16✔
1038
                if err != nil {
16✔
1039
                        return nil, err
×
1040
                }
×
1041
                pkScript, err := input.WitnessScriptHash(witnessScript)
16✔
1042
                if err != nil {
16✔
1043
                        return nil, err
×
1044
                }
×
1045

1046
                return pkScript, nil
16✔
1047
        }
1048

1049
        if len(chanFeatures) == 0 {
32✔
1050
                return legacyFundingScript()
16✔
1051
        }
16✔
1052

1053
        // In order to make the correct funding script, we'll need to parse the
1054
        // chanFeatures bytes into a feature vector we can interact with.
UNCOV
1055
        rawFeatures := lnwire.NewRawFeatureVector()
×
UNCOV
1056
        err := rawFeatures.Decode(bytes.NewReader(chanFeatures))
×
UNCOV
1057
        if err != nil {
×
1058
                return nil, fmt.Errorf("unable to parse chan feature "+
×
1059
                        "bits: %w", err)
×
1060
        }
×
1061

UNCOV
1062
        chanFeatureBits := lnwire.NewFeatureVector(
×
UNCOV
1063
                rawFeatures, lnwire.Features,
×
UNCOV
1064
        )
×
UNCOV
1065
        if chanFeatureBits.HasFeature(
×
UNCOV
1066
                lnwire.SimpleTaprootChannelsOptionalStaging,
×
UNCOV
1067
        ) {
×
UNCOV
1068

×
UNCOV
1069
                pubKey1, err := btcec.ParsePubKey(bitcoinKey1)
×
UNCOV
1070
                if err != nil {
×
1071
                        return nil, err
×
1072
                }
×
UNCOV
1073
                pubKey2, err := btcec.ParsePubKey(bitcoinKey2)
×
UNCOV
1074
                if err != nil {
×
1075
                        return nil, err
×
1076
                }
×
1077

UNCOV
1078
                fundingScript, _, err := input.GenTaprootFundingScript(
×
UNCOV
1079
                        pubKey1, pubKey2, 0, tapscriptRoot,
×
UNCOV
1080
                )
×
UNCOV
1081
                if err != nil {
×
1082
                        return nil, err
×
1083
                }
×
1084

1085
                // TODO(roasbeef): add tapscript root to gossip v1.5
1086

UNCOV
1087
                return fundingScript, nil
×
1088
        }
1089

UNCOV
1090
        return legacyFundingScript()
×
1091
}
1092

1093
// routingMsg couples a routing topology update to the error channel used to
// report the outcome of processing it.
type routingMsg struct {
	// msg is the update itself: a *models.LightningNode,
	// *models.ChannelEdgeInfo or *models.ChannelEdgePolicy.
	msg interface{}

	// op carries optional batch scheduler options forwarded to the graph
	// database write.
	op  []batch.SchedulerOption

	// err receives the result of processing the update (buffered by the
	// senders so the handler never blocks on it).
	err chan error
}
1100

1101
// ApplyChannelUpdate validates a channel update and if valid, applies it to the
// database. It returns a bool indicating whether the updates were successful.
func (b *Builder) ApplyChannelUpdate(msg *lnwire.ChannelUpdate1) bool {
	ch, _, _, err := b.GetChannelByID(msg.ShortChannelID)
	if err != nil {
		log.Errorf("Unable to retrieve channel by id: %v", err)
		return false
	}

	var pubKey *btcec.PublicKey

	// The direction bit of the channel flags identifies which of the two
	// channel endpoints produced (and signed) this update.
	switch msg.ChannelFlags & lnwire.ChanUpdateDirection {
	case 0:
		pubKey, _ = ch.NodeKey1()

	case 1:
		pubKey, _ = ch.NodeKey2()
	}

	// Exit early if the pubkey cannot be decided.
	if pubKey == nil {
		log.Errorf("Unable to decide pubkey with ChannelFlags=%v",
			msg.ChannelFlags)
		return false
	}

	// Validate the announcement (including its signature) against the
	// advertising node's key and the channel's capacity.
	err = netann.ValidateChannelUpdateAnn(pubKey, ch.Capacity, msg)
	if err != nil {
		log.Errorf("Unable to validate channel update: %v", err)
		return false
	}

	err = b.UpdateEdge(&models.ChannelEdgePolicy{
		SigBytes:                  msg.Signature.ToSignatureBytes(),
		ChannelID:                 msg.ShortChannelID.ToUint64(),
		LastUpdate:                time.Unix(int64(msg.Timestamp), 0),
		MessageFlags:              msg.MessageFlags,
		ChannelFlags:              msg.ChannelFlags,
		TimeLockDelta:             msg.TimeLockDelta,
		MinHTLC:                   msg.HtlcMinimumMsat,
		MaxHTLC:                   msg.HtlcMaximumMsat,
		FeeBaseMSat:               lnwire.MilliSatoshi(msg.BaseFee),
		FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
		ExtraOpaqueData:           msg.ExtraOpaqueData,
	})
	// Ignored/outdated results still count as success: the database
	// already holds equivalent or newer data for this edge.
	if err != nil && !IsError(err, ErrIgnored, ErrOutdated) {
		log.Errorf("Unable to apply channel update: %v", err)
		return false
	}

	return true
}
1153

1154
// AddNode is used to add information about a node to the router database. If
1155
// the node with this pubkey is not present in an existing channel, it will
1156
// be ignored.
1157
//
1158
// NOTE: This method is part of the ChannelGraphSource interface.
1159
func (b *Builder) AddNode(node *models.LightningNode,
1160
        op ...batch.SchedulerOption) error {
7✔
1161

7✔
1162
        rMsg := &routingMsg{
7✔
1163
                msg: node,
7✔
1164
                op:  op,
7✔
1165
                err: make(chan error, 1),
7✔
1166
        }
7✔
1167

7✔
1168
        b.wg.Add(1)
7✔
1169
        go b.handleNetworkUpdate(rMsg)
7✔
1170

7✔
1171
        select {
7✔
1172
        case err := <-rMsg.err:
7✔
1173
                return err
7✔
1174
        case <-b.quit:
×
1175
                return ErrGraphBuilderShuttingDown
×
1176
        }
1177
}
1178

1179
// addNode does some basic checks on the given LightningNode against what we
1180
// currently have persisted in the graph, and then adds it to the graph. If we
1181
// already know about the node, then we only update our DB if the new update
1182
// has a newer timestamp than the last one we received.
1183
func (b *Builder) addNode(node *models.LightningNode,
1184
        op ...batch.SchedulerOption) error {
7✔
1185

7✔
1186
        // Before we add the node to the database, we'll check to see if the
7✔
1187
        // announcement is "fresh" or not. If it isn't, then we'll return an
7✔
1188
        // error.
7✔
1189
        err := b.assertNodeAnnFreshness(node.PubKeyBytes, node.LastUpdate)
7✔
1190
        if err != nil {
8✔
1191
                return err
1✔
1192
        }
1✔
1193

1194
        if err := b.cfg.Graph.AddLightningNode(node, op...); err != nil {
6✔
1195
                return errors.Errorf("unable to add node %x to the "+
×
1196
                        "graph: %v", node.PubKeyBytes, err)
×
1197
        }
×
1198

1199
        log.Tracef("Updated vertex data for node=%x", node.PubKeyBytes)
6✔
1200
        b.stats.incNumNodeUpdates()
6✔
1201

6✔
1202
        return nil
6✔
1203
}
1204

1205
// AddEdge is used to add edge/channel to the topology of the router, after all
1206
// information about channel will be gathered this edge/channel might be used
1207
// in construction of payment path.
1208
//
1209
// NOTE: This method is part of the ChannelGraphSource interface.
1210
func (b *Builder) AddEdge(edge *models.ChannelEdgeInfo,
1211
        op ...batch.SchedulerOption) error {
17✔
1212

17✔
1213
        rMsg := &routingMsg{
17✔
1214
                msg: edge,
17✔
1215
                op:  op,
17✔
1216
                err: make(chan error, 1),
17✔
1217
        }
17✔
1218

17✔
1219
        b.wg.Add(1)
17✔
1220
        go b.handleNetworkUpdate(rMsg)
17✔
1221

17✔
1222
        select {
17✔
1223
        case err := <-rMsg.err:
17✔
1224
                return err
17✔
1225
        case <-b.quit:
×
1226
                return ErrGraphBuilderShuttingDown
×
1227
        }
1228
}
1229

1230
// addEdge does some validation on the new channel edge against what we
// currently have persisted in the graph, and then adds it to the graph. The
// Chain View is updated with the new edge if it is successfully added to the
// graph. We only persist the channel if we currently dont have it at all in
// our graph.
//
// TODO(elle): this currently also does funding-transaction validation. But this
// should be moved to the gossiper instead.
func (b *Builder) addEdge(edge *models.ChannelEdgeInfo,
	op ...batch.SchedulerOption) error {

	log.Debugf("Received ChannelEdgeInfo for channel %v", edge.ChannelID)

	// Prior to processing the announcement we first check if we
	// already know of this channel, if so, then we can exit early.
	_, _, exists, isZombie, err := b.cfg.Graph.HasChannelEdge(
		edge.ChannelID,
	)
	if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
		return errors.Errorf("unable to check for edge existence: %v",
			err)
	}
	if isZombie {
		return NewErrf(ErrIgnored, "ignoring msg for zombie chan_id=%v",
			edge.ChannelID)
	}
	if exists {
		return NewErrf(ErrIgnored, "ignoring msg for known chan_id=%v",
			edge.ChannelID)
	}

	// If AssumeChannelValid is present, then we are unable to perform any
	// of the expensive checks below, so we'll short-circuit our path
	// straight to adding the edge to our graph. If the passed
	// ShortChannelID is an alias, then we'll skip validation as it will
	// not map to a legitimate tx. This is not a DoS vector as only we can
	// add an alias ChannelAnnouncement from the gossiper.
	scid := lnwire.NewShortChanIDFromInt(edge.ChannelID)
	if b.cfg.AssumeChannelValid || b.cfg.IsAlias(scid) {
		err := b.cfg.Graph.AddChannelEdge(edge, op...)
		if err != nil {
			return fmt.Errorf("unable to add edge: %w", err)
		}
		log.Tracef("New channel discovered! Link connects %x and %x "+
			"with ChannelID(%v)", edge.NodeKey1Bytes,
			edge.NodeKey2Bytes, edge.ChannelID)
		b.stats.incNumEdgesDiscovered()

		return nil
	}

	// Before we can add the channel to the channel graph, we need to obtain
	// the full funding outpoint that's encoded within the channel ID.
	channelID := lnwire.NewShortChanIDFromInt(edge.ChannelID)
	fundingTx, err := lnwallet.FetchFundingTxWrapper(
		b.cfg.Chain, &channelID, b.quit,
	)
	if err != nil {
		//nolint:ll
		//
		// In order to ensure we don't erroneously mark a channel as a
		// zombie due to an RPC failure, we'll attempt to string match
		// for the relevant errors.
		//
		// * btcd:
		//    * https://github.com/btcsuite/btcd/blob/master/rpcserver.go#L1316
		//    * https://github.com/btcsuite/btcd/blob/master/rpcserver.go#L1086
		// * bitcoind:
		//    * https://github.com/bitcoin/bitcoin/blob/7fcf53f7b4524572d1d0c9a5fdc388e87eb02416/src/rpc/blockchain.cpp#L770
		//     * https://github.com/bitcoin/bitcoin/blob/7fcf53f7b4524572d1d0c9a5fdc388e87eb02416/src/rpc/blockchain.cpp#L954
		switch {
		case strings.Contains(err.Error(), "not found"):
			fallthrough

		case strings.Contains(err.Error(), "out of range"):
			// If the funding transaction isn't found at all, then
			// we'll mark the edge itself as a zombie so we don't
			// continue to request it. We use the "zero key" for
			// both node pubkeys so this edge can't be resurrected.
			zErr := b.addZombieEdge(edge.ChannelID)
			if zErr != nil {
				return zErr
			}

		default:
		}

		return NewErrf(ErrNoFundingTransaction, "unable to "+
			"locate funding tx: %v", err)
	}

	// Recreate witness output to be sure that declared in channel edge
	// bitcoin keys and channel value corresponds to the reality.
	fundingPkScript, err := makeFundingScript(
		edge.BitcoinKey1Bytes[:], edge.BitcoinKey2Bytes[:],
		edge.Features, edge.TapscriptRoot,
	)
	if err != nil {
		return err
	}

	// Next we'll validate that this channel is actually well formed. If
	// this check fails, then this channel either doesn't exist, or isn't
	// the one that was meant to be created according to the passed channel
	// proofs.
	fundingPoint, err := chanvalidate.Validate(
		&chanvalidate.Context{
			Locator: &chanvalidate.ShortChanIDChanLocator{
				ID: channelID,
			},
			MultiSigPkScript: fundingPkScript,
			FundingTx:        fundingTx,
		},
	)
	if err != nil {
		// Mark the edge as a zombie so we won't try to re-validate it
		// on start up.
		if err := b.addZombieEdge(edge.ChannelID); err != nil {
			return err
		}

		return NewErrf(ErrInvalidFundingOutput, "output failed "+
			"validation: %w", err)
	}

	// Now that we have the funding outpoint of the channel, ensure
	// that it hasn't yet been spent. If so, then this channel has
	// been closed so we'll ignore it.
	chanUtxo, err := b.cfg.Chain.GetUtxo(
		fundingPoint, fundingPkScript, channelID.BlockHeight, b.quit,
	)
	if err != nil {
		// A spent output is a definitive close, so the edge is also
		// marked as a zombie; any other lookup error is reported
		// without zombifying the edge.
		if errors.Is(err, btcwallet.ErrOutputSpent) {
			zErr := b.addZombieEdge(edge.ChannelID)
			if zErr != nil {
				return zErr
			}
		}

		return NewErrf(ErrChannelSpent, "unable to fetch utxo for "+
			"chan_id=%v, chan_point=%v: %v", edge.ChannelID,
			fundingPoint, err)
	}

	// TODO(roasbeef): this is a hack, needs to be removed after commitment
	// fees are dynamic.
	edge.Capacity = btcutil.Amount(chanUtxo.Value)
	edge.ChannelPoint = *fundingPoint
	if err := b.cfg.Graph.AddChannelEdge(edge, op...); err != nil {
		return errors.Errorf("unable to add edge: %v", err)
	}

	log.Debugf("New channel discovered! Link connects %x and %x with "+
		"ChannelPoint(%v): chan_id=%v, capacity=%v", edge.NodeKey1Bytes,
		edge.NodeKey2Bytes, fundingPoint, edge.ChannelID, edge.Capacity)
	b.stats.incNumEdgesDiscovered()

	// As a new edge has been added to the channel graph, we'll update the
	// current UTXO filter within our active FilteredChainView so we are
	// notified if/when this channel is closed.
	filterUpdate := []graphdb.EdgePoint{
		{
			FundingPkScript: fundingPkScript,
			OutPoint:        *fundingPoint,
		},
	}

	err = b.cfg.ChainView.UpdateFilter(filterUpdate, b.bestHeight.Load())
	if err != nil {
		return errors.Errorf("unable to update chain "+
			"view: %v", err)
	}

	return nil
}
1405

1406
// UpdateEdge is used to update edge information, without this message edge
1407
// considered as not fully constructed.
1408
//
1409
// NOTE: This method is part of the ChannelGraphSource interface.
1410
func (b *Builder) UpdateEdge(update *models.ChannelEdgePolicy,
1411
        op ...batch.SchedulerOption) error {
6✔
1412

6✔
1413
        rMsg := &routingMsg{
6✔
1414
                msg: update,
6✔
1415
                op:  op,
6✔
1416
                err: make(chan error, 1),
6✔
1417
        }
6✔
1418

6✔
1419
        b.wg.Add(1)
6✔
1420
        go b.handleNetworkUpdate(rMsg)
6✔
1421

6✔
1422
        select {
6✔
1423
        case err := <-rMsg.err:
6✔
1424
                return err
6✔
1425
        case <-b.quit:
×
1426
                return ErrGraphBuilderShuttingDown
×
1427
        }
1428
}
1429

1430
// updateEdge validates the new edge policy against what we currently have
1431
// persisted in the graph, and then applies it to the graph if the update is
1432
// considered fresh enough and if we actually have a channel persisted for the
1433
// given update.
1434
func (b *Builder) updateEdge(policy *models.ChannelEdgePolicy,
1435
        op ...batch.SchedulerOption) error {
6✔
1436

6✔
1437
        log.Debugf("Received ChannelEdgePolicy for channel %v",
6✔
1438
                policy.ChannelID)
6✔
1439

6✔
1440
        // We make sure to hold the mutex for this channel ID, such that no
6✔
1441
        // other goroutine is concurrently doing database accesses for the same
6✔
1442
        // channel ID.
6✔
1443
        b.channelEdgeMtx.Lock(policy.ChannelID)
6✔
1444
        defer b.channelEdgeMtx.Unlock(policy.ChannelID)
6✔
1445

6✔
1446
        edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
6✔
1447
                b.cfg.Graph.HasChannelEdge(policy.ChannelID)
6✔
1448
        if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
6✔
1449
                return errors.Errorf("unable to check for edge existence: %v",
×
1450
                        err)
×
1451
        }
×
1452

1453
        // If the channel is marked as a zombie in our database, and
1454
        // we consider this a stale update, then we should not apply the
1455
        // policy.
1456
        isStaleUpdate := time.Since(policy.LastUpdate) >
6✔
1457
                b.cfg.ChannelPruneExpiry
6✔
1458

6✔
1459
        if isZombie && isStaleUpdate {
6✔
1460
                return NewErrf(ErrIgnored, "ignoring stale update "+
×
1461
                        "(flags=%v|%v) for zombie chan_id=%v",
×
1462
                        policy.MessageFlags, policy.ChannelFlags,
×
1463
                        policy.ChannelID)
×
1464
        }
×
1465

1466
        // If the channel doesn't exist in our database, we cannot apply the
1467
        // updated policy.
1468
        if !exists {
7✔
1469
                return NewErrf(ErrIgnored, "ignoring update (flags=%v|%v) for "+
1✔
1470
                        "unknown chan_id=%v", policy.MessageFlags,
1✔
1471
                        policy.ChannelFlags, policy.ChannelID)
1✔
1472
        }
1✔
1473

1474
        log.Debugf("Found edge1Timestamp=%v, edge2Timestamp=%v",
5✔
1475
                edge1Timestamp, edge2Timestamp)
5✔
1476

5✔
1477
        // As edges are directional edge node has a unique policy for the
5✔
1478
        // direction of the edge they control. Therefore, we first check if we
5✔
1479
        // already have the most up-to-date information for that edge. If this
5✔
1480
        // message has a timestamp not strictly newer than what we already know
5✔
1481
        // of we can exit early.
5✔
1482
        switch policy.ChannelFlags & lnwire.ChanUpdateDirection {
5✔
1483
        // A flag set of 0 indicates this is an announcement for the "first"
1484
        // node in the channel.
1485
        case 0:
3✔
1486
                // Ignore outdated message.
3✔
1487
                if !edge1Timestamp.Before(policy.LastUpdate) {
3✔
UNCOV
1488
                        return NewErrf(ErrOutdated, "Ignoring "+
×
UNCOV
1489
                                "outdated update (flags=%v|%v) for "+
×
UNCOV
1490
                                "known chan_id=%v", policy.MessageFlags,
×
UNCOV
1491
                                policy.ChannelFlags, policy.ChannelID)
×
UNCOV
1492
                }
×
1493

1494
        // Similarly, a flag set of 1 indicates this is an announcement
1495
        // for the "second" node in the channel.
1496
        case 1:
2✔
1497
                // Ignore outdated message.
2✔
1498
                if !edge2Timestamp.Before(policy.LastUpdate) {
2✔
UNCOV
1499
                        return NewErrf(ErrOutdated, "Ignoring "+
×
UNCOV
1500
                                "outdated update (flags=%v|%v) for "+
×
UNCOV
1501
                                "known chan_id=%v", policy.MessageFlags,
×
UNCOV
1502
                                policy.ChannelFlags, policy.ChannelID)
×
UNCOV
1503
                }
×
1504
        }
1505

1506
        // Now that we know this isn't a stale update, we'll apply the new edge
1507
        // policy to the proper directional edge within the channel graph.
1508
        if err = b.cfg.Graph.UpdateEdgePolicy(policy, op...); err != nil {
5✔
1509
                err := errors.Errorf("unable to add channel: %v", err)
×
1510
                log.Error(err)
×
1511
                return err
×
1512
        }
×
1513

1514
        log.Tracef("New channel update applied: %v",
5✔
1515
                lnutils.SpewLogClosure(policy))
5✔
1516
        b.stats.incNumChannelUpdates()
5✔
1517

5✔
1518
        return nil
5✔
1519
}
1520

1521
// CurrentBlockHeight returns the block height from POV of the router subsystem.
1522
//
1523
// NOTE: This method is part of the ChannelGraphSource interface.
UNCOV
1524
func (b *Builder) CurrentBlockHeight() (uint32, error) {
×
UNCOV
1525
        _, height, err := b.cfg.Chain.GetBestBlock()
×
UNCOV
1526
        return uint32(height), err
×
UNCOV
1527
}
×
1528

1529
// SyncedHeight returns the block height to which the router subsystem currently
1530
// is synced to. This can differ from the above chain height if the goroutine
1531
// responsible for processing the blocks isn't yet up to speed.
UNCOV
1532
func (b *Builder) SyncedHeight() uint32 {
×
UNCOV
1533
        return b.bestHeight.Load()
×
UNCOV
1534
}
×
1535

1536
// GetChannelByID return the channel by the channel id.
1537
//
1538
// NOTE: This method is part of the ChannelGraphSource interface.
1539
func (b *Builder) GetChannelByID(chanID lnwire.ShortChannelID) (
1540
        *models.ChannelEdgeInfo,
1541
        *models.ChannelEdgePolicy,
1542
        *models.ChannelEdgePolicy, error) {
1✔
1543

1✔
1544
        return b.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
1✔
1545
}
1✔
1546

1547
// FetchLightningNode attempts to look up a target node by its identity public
1548
// key. graphdb.ErrGraphNodeNotFound is returned if the node doesn't exist
1549
// within the graph.
1550
//
1551
// NOTE: This method is part of the ChannelGraphSource interface.
1552
func (b *Builder) FetchLightningNode(
UNCOV
1553
        node route.Vertex) (*models.LightningNode, error) {
×
UNCOV
1554

×
UNCOV
1555
        return b.cfg.Graph.FetchLightningNode(node)
×
UNCOV
1556
}
×
1557

1558
// ForEachNode is used to iterate over every node in router topology.
1559
//
1560
// NOTE: This method is part of the ChannelGraphSource interface.
1561
func (b *Builder) ForEachNode(
1562
        cb func(*models.LightningNode) error) error {
×
1563

×
1564
        return b.cfg.Graph.ForEachNode(
×
1565
                func(_ kvdb.RTx, n *models.LightningNode) error {
×
1566
                        return cb(n)
×
1567
                })
×
1568
}
1569

1570
// ForAllOutgoingChannels is used to iterate over all outgoing channels owned by
1571
// the router.
1572
//
1573
// NOTE: This method is part of the ChannelGraphSource interface.
1574
func (b *Builder) ForAllOutgoingChannels(cb func(*models.ChannelEdgeInfo,
UNCOV
1575
        *models.ChannelEdgePolicy) error) error {
×
UNCOV
1576

×
UNCOV
1577
        return b.cfg.Graph.ForEachNodeChannel(b.cfg.SelfNode,
×
UNCOV
1578
                func(_ kvdb.RTx, c *models.ChannelEdgeInfo,
×
UNCOV
1579
                        e *models.ChannelEdgePolicy,
×
UNCOV
1580
                        _ *models.ChannelEdgePolicy) error {
×
UNCOV
1581

×
UNCOV
1582
                        if e == nil {
×
1583
                                return fmt.Errorf("channel from self node " +
×
1584
                                        "has no policy")
×
1585
                        }
×
1586

UNCOV
1587
                        return cb(c, e)
×
1588
                },
1589
        )
1590
}
1591

1592
// AddProof updates the channel edge info with proof which is needed to
1593
// properly announce the edge to the rest of the network.
1594
//
1595
// NOTE: This method is part of the ChannelGraphSource interface.
1596
func (b *Builder) AddProof(chanID lnwire.ShortChannelID,
1597
        proof *models.ChannelAuthProof) error {
1✔
1598

1✔
1599
        info, _, _, err := b.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
1✔
1600
        if err != nil {
1✔
1601
                return err
×
1602
        }
×
1603

1604
        info.AuthProof = proof
1✔
1605

1✔
1606
        return b.cfg.Graph.UpdateChannelEdge(info)
1✔
1607
}
1608

1609
// IsStaleNode returns true if the graph source has a node announcement for the
1610
// target node with a more recent timestamp.
1611
//
1612
// NOTE: This method is part of the ChannelGraphSource interface.
1613
func (b *Builder) IsStaleNode(node route.Vertex,
1614
        timestamp time.Time) bool {
3✔
1615

3✔
1616
        // If our attempt to assert that the node announcement is fresh fails,
3✔
1617
        // then we know that this is actually a stale announcement.
3✔
1618
        err := b.assertNodeAnnFreshness(node, timestamp)
3✔
1619
        if err != nil {
4✔
1620
                log.Debugf("Checking stale node %x got %v", node, err)
1✔
1621
                return true
1✔
1622
        }
1✔
1623

1624
        return false
2✔
1625
}
1626

1627
// IsPublicNode determines whether the given vertex is seen as a public node in
1628
// the graph from the graph's source node's point of view.
1629
//
1630
// NOTE: This method is part of the ChannelGraphSource interface.
UNCOV
1631
func (b *Builder) IsPublicNode(node route.Vertex) (bool, error) {
×
UNCOV
1632
        return b.cfg.Graph.IsPublicNode(node)
×
UNCOV
1633
}
×
1634

1635
// IsKnownEdge returns true if the graph source already knows of the passed
1636
// channel ID either as a live or zombie edge.
1637
//
1638
// NOTE: This method is part of the ChannelGraphSource interface.
1639
func (b *Builder) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
1✔
1640
        _, _, exists, isZombie, _ := b.cfg.Graph.HasChannelEdge(
1✔
1641
                chanID.ToUint64(),
1✔
1642
        )
1✔
1643

1✔
1644
        return exists || isZombie
1✔
1645
}
1✔
1646

1647
// IsStaleEdgePolicy returns true if the graph source has a channel edge for
1648
// the passed channel ID (and flags) that have a more recent timestamp.
1649
//
1650
// NOTE: This method is part of the ChannelGraphSource interface.
1651
func (b *Builder) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
1652
        timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {
6✔
1653

6✔
1654
        edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
6✔
1655
                b.cfg.Graph.HasChannelEdge(chanID.ToUint64())
6✔
1656
        if err != nil {
6✔
1657
                log.Debugf("Check stale edge policy got error: %v", err)
×
1658
                return false
×
1659
        }
×
1660

1661
        // If we know of the edge as a zombie, then we'll make some additional
1662
        // checks to determine if the new policy is fresh.
1663
        if isZombie {
6✔
1664
                // When running with AssumeChannelValid, we also prune channels
×
1665
                // if both of their edges are disabled. We'll mark the new
×
1666
                // policy as stale if it remains disabled.
×
1667
                if b.cfg.AssumeChannelValid {
×
1668
                        isDisabled := flags&lnwire.ChanUpdateDisabled ==
×
1669
                                lnwire.ChanUpdateDisabled
×
1670
                        if isDisabled {
×
1671
                                return true
×
1672
                        }
×
1673
                }
1674

1675
                // Otherwise, we'll fall back to our usual ChannelPruneExpiry.
1676
                return time.Since(timestamp) > b.cfg.ChannelPruneExpiry
×
1677
        }
1678

1679
        // If we don't know of the edge, then it means it's fresh (thus not
1680
        // stale).
1681
        if !exists {
8✔
1682
                return false
2✔
1683
        }
2✔
1684

1685
        // As edges are directional edge node has a unique policy for the
1686
        // direction of the edge they control. Therefore, we first check if we
1687
        // already have the most up-to-date information for that edge. If so,
1688
        // then we can exit early.
1689
        switch {
4✔
1690
        // A flag set of 0 indicates this is an announcement for the "first"
1691
        // node in the channel.
1692
        case flags&lnwire.ChanUpdateDirection == 0:
2✔
1693
                return !edge1Timestamp.Before(timestamp)
2✔
1694

1695
        // Similarly, a flag set of 1 indicates this is an announcement for the
1696
        // "second" node in the channel.
1697
        case flags&lnwire.ChanUpdateDirection == 1:
2✔
1698
                return !edge2Timestamp.Before(timestamp)
2✔
1699
        }
1700

1701
        return false
×
1702
}
1703

1704
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
1705
//
1706
// NOTE: This method is part of the ChannelGraphSource interface.
1707
func (b *Builder) MarkEdgeLive(chanID lnwire.ShortChannelID) error {
×
1708
        return b.cfg.Graph.MarkEdgeLive(chanID.ToUint64())
×
1709
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc