Coveralls logo
Coveralls logo
  • Home
  • Features
  • Pricing
  • Docs
  • Sign In

lightningnetwork / lnd / 11827

11 Oct 2019 - 20:47 coverage increased (+0.02%) to 63.216%
11827

Pull #3595

travis-ci

9181eb84f9c35729a3bad740fb7f9d93?size=18&default=identiconweb-flow
htlcswitch: raise max cltv limit to 2016 blocks

The previous limit of 1008 proved to be low, given that almost 50% of
the network still advertises CLTV deltas of 144 blocks, possibly
resulting in routes with many hops failing.
Pull Request #3595: routing+routerrpc: take max cltv limit into account within path finding

3 of 9 new or added lines in 3 files covered. (33.33%)

16 existing lines in 8 files now uncovered.

45935 of 72663 relevant lines covered (63.22%)

68288.37 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/rpcserver.go
1
package lnd
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/tls"
7
        "encoding/hex"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "net/http"
14
        "sort"
15
        "strings"
16
        "sync"
17
        "sync/atomic"
18
        "time"
19

20
        "github.com/lightningnetwork/lnd/chanacceptor"
21
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
22
        "github.com/lightningnetwork/lnd/routing/route"
23
        "github.com/lightningnetwork/lnd/tlv"
24
        "github.com/lightningnetwork/lnd/watchtower"
25

26
        "github.com/btcsuite/btcd/blockchain"
27
        "github.com/btcsuite/btcd/btcec"
28
        "github.com/btcsuite/btcd/chaincfg/chainhash"
29
        "github.com/btcsuite/btcd/txscript"
30
        "github.com/btcsuite/btcd/wire"
31
        "github.com/btcsuite/btcutil"
32
        "github.com/btcsuite/btcwallet/wallet/txauthor"
33
        "github.com/coreos/bbolt"
34
        "github.com/davecgh/go-spew/spew"
35
        grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
36
        proxy "github.com/grpc-ecosystem/grpc-gateway/runtime"
37
        "github.com/lightningnetwork/lnd/autopilot"
38
        "github.com/lightningnetwork/lnd/build"
39
        "github.com/lightningnetwork/lnd/chanbackup"
40
        "github.com/lightningnetwork/lnd/channeldb"
41
        "github.com/lightningnetwork/lnd/channelnotifier"
42
        "github.com/lightningnetwork/lnd/discovery"
43
        "github.com/lightningnetwork/lnd/htlcswitch"
44
        "github.com/lightningnetwork/lnd/input"
45
        "github.com/lightningnetwork/lnd/invoices"
46
        "github.com/lightningnetwork/lnd/lncfg"
47
        "github.com/lightningnetwork/lnd/lnrpc"
48
        "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
49
        "github.com/lightningnetwork/lnd/lntypes"
50
        "github.com/lightningnetwork/lnd/lnwallet"
51
        "github.com/lightningnetwork/lnd/lnwire"
52
        "github.com/lightningnetwork/lnd/macaroons"
53
        "github.com/lightningnetwork/lnd/monitoring"
54
        "github.com/lightningnetwork/lnd/routing"
55
        "github.com/lightningnetwork/lnd/signal"
56
        "github.com/lightningnetwork/lnd/sweep"
57
        "github.com/lightningnetwork/lnd/zpay32"
58
        "github.com/tv42/zbase32"
59
        "google.golang.org/grpc"
60
        "gopkg.in/macaroon-bakery.v2/bakery"
61
)
62

63
const (
	// maxBtcPaymentMSat is the maximum allowed Bitcoin payment currently
	// permitted as defined in BOLT-0002.
	maxBtcPaymentMSat = lnwire.MilliSatoshi(math.MaxUint32)

	// maxLtcPaymentMSat is the maximum allowed Litecoin payment currently
	// permitted. It scales the Bitcoin limit by the chain conversion
	// rate, as Litecoin amounts are denominated differently.
	maxLtcPaymentMSat = lnwire.MilliSatoshi(math.MaxUint32) *
		btcToLtcConversionRate
)
73

74
var (
	// MaxPaymentMSat is the maximum allowed payment currently permitted as
	// defined in BOLT-0002. This value depends on which chain is active.
	// It is set to the value under the Bitcoin chain as default.
	MaxPaymentMSat = maxBtcPaymentMSat

	// defaultAcceptorTimeout is the time after which an RPCAcceptor will
	// time out and return false if it hasn't yet received a response.
	//
	// TODO: Make this configurable
	defaultAcceptorTimeout = 15 * time.Second

	// readPermissions is a slice of all entities that allow read
	// permissions for authorization purposes, all lowercase.
	readPermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "read",
		},
		{
			Entity: "offchain",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "message",
			Action: "read",
		},
		{
			Entity: "peers",
			Action: "read",
		},
		{
			Entity: "info",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "read",
		},
	}

	// writePermissions is a slice of all entities that allow write
	// permissions for authorization purposes, all lowercase.
	writePermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "write",
		},
		{
			Entity: "offchain",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "write",
		},
		{
			Entity: "message",
			Action: "write",
		},
		{
			Entity: "peers",
			Action: "write",
		},
		{
			Entity: "info",
			Action: "write",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
		{
			Entity: "signer",
			Action: "generate",
		},
	}

	// invoicePermissions is a slice of all the entities that allows a user
	// to only access calls that are related to invoices, so: streaming
	// RPCs, generating, and listing invoices.
	invoicePermissions = []bakery.Op{
		{
			Entity: "invoices",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "write",
		},
	}
)
178

179
// mainRPCServerPermissions returns a mapping of the main RPC server calls to
180
// the permissions they require.
181
func mainRPCServerPermissions() map[string][]bakery.Op {
!
182
        return map[string][]bakery.Op{
!
183
                "/lnrpc.Lightning/SendCoins": {{
!
184
                        Entity: "onchain",
!
185
                        Action: "write",
!
186
                }},
!
187
                "/lnrpc.Lightning/ListUnspent": {{
!
188
                        Entity: "onchain",
!
189
                        Action: "read",
!
190
                }},
!
191
                "/lnrpc.Lightning/SendMany": {{
!
192
                        Entity: "onchain",
!
193
                        Action: "write",
!
194
                }},
!
195
                "/lnrpc.Lightning/NewAddress": {{
!
196
                        Entity: "address",
!
197
                        Action: "write",
!
198
                }},
!
199
                "/lnrpc.Lightning/SignMessage": {{
!
200
                        Entity: "message",
!
201
                        Action: "write",
!
202
                }},
!
203
                "/lnrpc.Lightning/VerifyMessage": {{
!
204
                        Entity: "message",
!
205
                        Action: "read",
!
206
                }},
!
207
                "/lnrpc.Lightning/ConnectPeer": {{
!
208
                        Entity: "peers",
!
209
                        Action: "write",
!
210
                }},
!
211
                "/lnrpc.Lightning/DisconnectPeer": {{
!
212
                        Entity: "peers",
!
213
                        Action: "write",
!
214
                }},
!
215
                "/lnrpc.Lightning/OpenChannel": {{
!
216
                        Entity: "onchain",
!
217
                        Action: "write",
!
218
                }, {
!
219
                        Entity: "offchain",
!
220
                        Action: "write",
!
221
                }},
!
222
                "/lnrpc.Lightning/OpenChannelSync": {{
!
223
                        Entity: "onchain",
!
224
                        Action: "write",
!
225
                }, {
!
226
                        Entity: "offchain",
!
227
                        Action: "write",
!
228
                }},
!
229
                "/lnrpc.Lightning/CloseChannel": {{
!
230
                        Entity: "onchain",
!
231
                        Action: "write",
!
232
                }, {
!
233
                        Entity: "offchain",
!
234
                        Action: "write",
!
235
                }},
!
236
                "/lnrpc.Lightning/AbandonChannel": {{
!
237
                        Entity: "offchain",
!
238
                        Action: "write",
!
239
                }},
!
240
                "/lnrpc.Lightning/GetInfo": {{
!
241
                        Entity: "info",
!
242
                        Action: "read",
!
243
                }},
!
244
                "/lnrpc.Lightning/ListPeers": {{
!
245
                        Entity: "peers",
!
246
                        Action: "read",
!
247
                }},
!
248
                "/lnrpc.Lightning/WalletBalance": {{
!
249
                        Entity: "onchain",
!
250
                        Action: "read",
!
251
                }},
!
252
                "/lnrpc.Lightning/EstimateFee": {{
!
253
                        Entity: "onchain",
!
254
                        Action: "read",
!
255
                }},
!
256
                "/lnrpc.Lightning/ChannelBalance": {{
!
257
                        Entity: "offchain",
!
258
                        Action: "read",
!
259
                }},
!
260
                "/lnrpc.Lightning/PendingChannels": {{
!
261
                        Entity: "offchain",
!
262
                        Action: "read",
!
263
                }},
!
264
                "/lnrpc.Lightning/ListChannels": {{
!
265
                        Entity: "offchain",
!
266
                        Action: "read",
!
267
                }},
!
268
                "/lnrpc.Lightning/SubscribeChannelEvents": {{
!
269
                        Entity: "offchain",
!
270
                        Action: "read",
!
271
                }},
!
272
                "/lnrpc.Lightning/ClosedChannels": {{
!
273
                        Entity: "offchain",
!
274
                        Action: "read",
!
275
                }},
!
276
                "/lnrpc.Lightning/SendPayment": {{
!
277
                        Entity: "offchain",
!
278
                        Action: "write",
!
279
                }},
!
280
                "/lnrpc.Lightning/SendPaymentSync": {{
!
281
                        Entity: "offchain",
!
282
                        Action: "write",
!
283
                }},
!
284
                "/lnrpc.Lightning/SendToRoute": {{
!
285
                        Entity: "offchain",
!
286
                        Action: "write",
!
287
                }},
!
288
                "/lnrpc.Lightning/SendToRouteSync": {{
!
289
                        Entity: "offchain",
!
290
                        Action: "write",
!
291
                }},
!
292
                "/lnrpc.Lightning/AddInvoice": {{
!
293
                        Entity: "invoices",
!
294
                        Action: "write",
!
295
                }},
!
296
                "/lnrpc.Lightning/LookupInvoice": {{
!
297
                        Entity: "invoices",
!
298
                        Action: "read",
!
299
                }},
!
300
                "/lnrpc.Lightning/ListInvoices": {{
!
301
                        Entity: "invoices",
!
302
                        Action: "read",
!
303
                }},
!
304
                "/lnrpc.Lightning/SubscribeInvoices": {{
!
305
                        Entity: "invoices",
!
306
                        Action: "read",
!
307
                }},
!
308
                "/lnrpc.Lightning/SubscribeTransactions": {{
!
309
                        Entity: "onchain",
!
310
                        Action: "read",
!
311
                }},
!
312
                "/lnrpc.Lightning/GetTransactions": {{
!
313
                        Entity: "onchain",
!
314
                        Action: "read",
!
315
                }},
!
316
                "/lnrpc.Lightning/DescribeGraph": {{
!
317
                        Entity: "info",
!
318
                        Action: "read",
!
319
                }},
!
320
                "/lnrpc.Lightning/GetChanInfo": {{
!
321
                        Entity: "info",
!
322
                        Action: "read",
!
323
                }},
!
324
                "/lnrpc.Lightning/GetNodeInfo": {{
!
325
                        Entity: "info",
!
326
                        Action: "read",
!
327
                }},
!
328
                "/lnrpc.Lightning/QueryRoutes": {{
!
329
                        Entity: "info",
!
330
                        Action: "read",
!
331
                }},
!
332
                "/lnrpc.Lightning/GetNetworkInfo": {{
!
333
                        Entity: "info",
!
334
                        Action: "read",
!
335
                }},
!
336
                "/lnrpc.Lightning/StopDaemon": {{
!
337
                        Entity: "info",
!
338
                        Action: "write",
!
339
                }},
!
340
                "/lnrpc.Lightning/SubscribeChannelGraph": {{
!
341
                        Entity: "info",
!
342
                        Action: "read",
!
343
                }},
!
344
                "/lnrpc.Lightning/ListPayments": {{
!
345
                        Entity: "offchain",
!
346
                        Action: "read",
!
347
                }},
!
348
                "/lnrpc.Lightning/DeleteAllPayments": {{
!
349
                        Entity: "offchain",
!
350
                        Action: "write",
!
351
                }},
!
352
                "/lnrpc.Lightning/DebugLevel": {{
!
353
                        Entity: "info",
!
354
                        Action: "write",
!
355
                }},
!
356
                "/lnrpc.Lightning/DecodePayReq": {{
!
357
                        Entity: "offchain",
!
358
                        Action: "read",
!
359
                }},
!
360
                "/lnrpc.Lightning/FeeReport": {{
!
361
                        Entity: "offchain",
!
362
                        Action: "read",
!
363
                }},
!
364
                "/lnrpc.Lightning/UpdateChannelPolicy": {{
!
365
                        Entity: "offchain",
!
366
                        Action: "write",
!
367
                }},
!
368
                "/lnrpc.Lightning/ForwardingHistory": {{
!
369
                        Entity: "offchain",
!
370
                        Action: "read",
!
371
                }},
!
372
                "/lnrpc.Lightning/RestoreChannelBackups": {{
!
373
                        Entity: "offchain",
!
374
                        Action: "write",
!
375
                }},
!
376
                "/lnrpc.Lightning/ExportChannelBackup": {{
!
377
                        Entity: "offchain",
!
378
                        Action: "read",
!
379
                }},
!
380
                "/lnrpc.Lightning/VerifyChanBackup": {{
!
381
                        Entity: "offchain",
!
382
                        Action: "read",
!
383
                }},
!
384
                "/lnrpc.Lightning/ExportAllChannelBackups": {{
!
385
                        Entity: "offchain",
!
386
                        Action: "read",
!
387
                }},
!
388
                "/lnrpc.Lightning/SubscribeChannelBackups": {{
!
389
                        Entity: "offchain",
!
390
                        Action: "read",
!
391
                }},
!
392
                "/lnrpc.Lightning/ChannelAcceptor": {{
!
393
                        Entity: "onchain",
!
394
                        Action: "write",
!
395
                }, {
!
396
                        Entity: "offchain",
!
397
                        Action: "write",
!
398
                }},
!
399
        }
!
400
}
!
401

402
// rpcServer is a gRPC, RPC front end to the lnd daemon.
// TODO(roasbeef): pagination support for the list-style calls
type rpcServer struct {
	// started and shutdown are flipped atomically in Start/Stop so both
	// methods are idempotent across concurrent callers.
	started  int32 // To be used atomically.
	shutdown int32 // To be used atomically.

	// server is the main lnd daemon instance this RPC server fronts.
	server *server

	// subServers are a set of sub-RPC servers that use the same gRPC and
	// listening sockets as the main RPC server, but which maintain their
	// own independent service. This allows us to expose a set of
	// micro-service like abstractions to the outside world for users to
	// consume.
	subServers []lnrpc.SubServer

	// grpcServer is the main gRPC server that this RPC server, and all the
	// sub-servers will use to register themselves and accept client
	// requests from.
	grpcServer *grpc.Server

	// listeners is a list of listeners to use when starting the grpc
	// server. We make it configurable such that the grpc server can listen
	// on custom interfaces.
	listeners []net.Listener

	// listenerCleanUp are a set of closures functions that will allow this
	// main RPC server to clean up all the listening socket created for the
	// server.
	listenerCleanUp []func()

	// restDialOpts are a set of gRPC dial options that the REST server
	// proxy will use to connect to the main gRPC server.
	restDialOpts []grpc.DialOption

	// restProxyDest is the address to forward REST requests to.
	restProxyDest string

	// tlsCfg is the TLS config that allows the REST server proxy to
	// connect to the main gRPC server to proxy all incoming requests.
	tlsCfg *tls.Config

	// routerBackend contains the backend implementation of the router
	// rpc sub server.
	routerBackend *routerrpc.RouterBackend

	// chanPredicate is used in the bidirectional ChannelAcceptor streaming
	// method.
	chanPredicate *chanacceptor.ChainedAcceptor

	// quit is closed by Stop to signal any active goroutines to exit.
	quit chan struct{}
}

// A compile time check to ensure that rpcServer fully implements the
// LightningServer gRPC service.
var _ lnrpc.LightningServer = (*rpcServer)(nil)
457

458
// newRPCServer creates and returns a new instance of the rpcServer. The
459
// rpcServer will handle creating all listening sockets needed by it, and any
460
// of the sub-servers that it maintains. The set of serverOpts should be the
461
// base level options passed to the grPC server. This typically includes things
462
// like requiring TLS, etc.
463
func newRPCServer(s *server, macService *macaroons.Service,
464
        subServerCgs *subRPCServerConfigs, restDialOpts []grpc.DialOption,
465
        restProxyDest string, atpl *autopilot.Manager,
466
        invoiceRegistry *invoices.InvoiceRegistry, tower *watchtower.Standalone,
467
        tlsCfg *tls.Config, getListeners rpcListeners,
468
        chanPredicate *chanacceptor.ChainedAcceptor) (*rpcServer, error) {
!
469

!
470
        // Set up router rpc backend.
!
471
        channelGraph := s.chanDB.ChannelGraph()
!
472
        selfNode, err := channelGraph.SourceNode()
!
473
        if err != nil {
!
474
                return nil, err
!
475
        }
!
476
        graph := s.chanDB.ChannelGraph()
!
477
        routerBackend := &routerrpc.RouterBackend{
!
478
                MaxPaymentMSat: MaxPaymentMSat,
!
479
                SelfNode:       selfNode.PubKeyBytes,
!
480
                FetchChannelCapacity: func(chanID uint64) (btcutil.Amount,
!
481
                        error) {
!
482

!
483
                        info, _, _, err := graph.FetchChannelEdgesByID(chanID)
!
484
                        if err != nil {
!
485
                                return 0, err
!
486
                        }
!
487
                        return info.Capacity, nil
!
488
                },
489
                FetchChannelEndpoints: func(chanID uint64) (route.Vertex,
490
                        route.Vertex, error) {
!
491

!
492
                        info, _, _, err := graph.FetchChannelEdgesByID(
!
493
                                chanID,
!
494
                        )
!
495
                        if err != nil {
!
496
                                return route.Vertex{}, route.Vertex{},
!
497
                                        fmt.Errorf("unable to fetch channel "+
!
498
                                                "edges by channel ID %d: %v",
!
499
                                                chanID, err)
!
500
                        }
!
501

502
                        return info.NodeKey1Bytes, info.NodeKey2Bytes, nil
!
503
                },
504
                FindRoute:        s.chanRouter.FindRoute,
505
                MissionControl:   s.missionControl,
506
                ActiveNetParams:  activeNetParams.Params,
507
                Tower:            s.controlTower,
508
                MaxTotalTimelock: cfg.MaxOutgoingCltvExpiry,
509
        }
510

511
        var (
!
512
                subServers     []lnrpc.SubServer
!
513
                subServerPerms []lnrpc.MacaroonPerms
!
514
        )
!
515

!
516
        // Before we create any of the sub-servers, we need to ensure that all
!
517
        // the dependencies they need are properly populated within each sub
!
518
        // server configuration struct.
!
519
        err = subServerCgs.PopulateDependencies(
!
520
                s.cc, networkDir, macService, atpl, invoiceRegistry,
!
521
                s.htlcSwitch, activeNetParams.Params, s.chanRouter,
!
522
                routerBackend, s.nodeSigner, s.chanDB, s.sweeper, tower,
!
523
                s.towerClient, cfg.net.ResolveTCPAddr,
!
524
        )
!
525
        if err != nil {
!
526
                return nil, err
!
527
        }
!
528

529
        // Now that the sub-servers have all their dependencies in place, we
530
        // can create each sub-server!
531
        registeredSubServers := lnrpc.RegisteredSubServers()
!
532
        for _, subServer := range registeredSubServers {
!
533
                subServerInstance, macPerms, err := subServer.New(subServerCgs)
!
534
                if err != nil {
!
535
                        return nil, err
!
536
                }
!
537

538
                // We'll collect the sub-server, and also the set of
539
                // permissions it needs for macaroons so we can apply the
540
                // interceptors below.
541
                subServers = append(subServers, subServerInstance)
!
542
                subServerPerms = append(subServerPerms, macPerms)
!
543
        }
544

545
        // Next, we need to merge the set of sub server macaroon permissions
546
        // with the main RPC server permissions so we can unite them under a
547
        // single set of interceptors.
548
        permissions := mainRPCServerPermissions()
!
549
        for _, subServerPerm := range subServerPerms {
!
550
                for method, ops := range subServerPerm {
!
551
                        // For each new method:ops combo, we also ensure that
!
552
                        // non of the sub-servers try to override each other.
!
553
                        if _, ok := permissions[method]; ok {
!
554
                                return nil, fmt.Errorf("detected duplicate "+
!
555
                                        "macaroon constraints for path: %v",
!
556
                                        method)
!
557
                        }
!
558

559
                        permissions[method] = ops
!
560
                }
561
        }
562

563
        // If macaroons aren't disabled (a non-nil service), then we'll set up
564
        // our set of interceptors which will allow us to handle the macaroon
565
        // authentication in a single location.
566
        macUnaryInterceptors := []grpc.UnaryServerInterceptor{}
!
567
        macStrmInterceptors := []grpc.StreamServerInterceptor{}
!
568
        if macService != nil {
!
569
                unaryInterceptor := macService.UnaryServerInterceptor(permissions)
!
570
                macUnaryInterceptors = append(macUnaryInterceptors, unaryInterceptor)
!
571

!
572
                strmInterceptor := macService.StreamServerInterceptor(permissions)
!
573
                macStrmInterceptors = append(macStrmInterceptors, strmInterceptor)
!
574
        }
!
575

576
        // Get interceptors for Prometheus to gather gRPC performance metrics.
577
        // If monitoring is disabled, GetPromInterceptors() will return empty
578
        // slices.
579
        promUnaryInterceptors, promStrmInterceptors := monitoring.GetPromInterceptors()
!
580

!
581
        // Concatenate the slices of unary and stream interceptors respectively.
!
582
        unaryInterceptors := append(macUnaryInterceptors, promUnaryInterceptors...)
!
583
        strmInterceptors := append(macStrmInterceptors, promStrmInterceptors...)
!
584

!
585
        // We'll also add our logging interceptors as well, so we can
!
586
        // automatically log all errors that happen during RPC calls.
!
587
        unaryInterceptors = append(
!
588
                unaryInterceptors, errorLogUnaryServerInterceptor(rpcsLog),
!
589
        )
!
590
        strmInterceptors = append(
!
591
                strmInterceptors, errorLogStreamServerInterceptor(rpcsLog),
!
592
        )
!
593

!
594
        // Get the listeners and server options to use for this rpc server.
!
595
        listeners, cleanup, serverOpts, err := getListeners()
!
596
        if err != nil {
!
597
                return nil, err
!
598
        }
!
599

600
        // If any interceptors have been set up, add them to the server options.
601
        if len(unaryInterceptors) != 0 && len(strmInterceptors) != 0 {
!
602
                chainedUnary := grpc_middleware.WithUnaryServerChain(
!
603
                        unaryInterceptors...,
!
604
                )
!
605
                chainedStream := grpc_middleware.WithStreamServerChain(
!
606
                        strmInterceptors...,
!
607
                )
!
608
                serverOpts = append(serverOpts, chainedUnary, chainedStream)
!
609
        }
!
610

611
        // Finally, with all the pre-set up complete,  we can create the main
612
        // gRPC server, and register the main lnrpc server along side.
613
        grpcServer := grpc.NewServer(serverOpts...)
!
614
        rootRPCServer := &rpcServer{
!
615
                restDialOpts:    restDialOpts,
!
616
                listeners:       listeners,
!
617
                listenerCleanUp: []func(){cleanup},
!
618
                restProxyDest:   restProxyDest,
!
619
                subServers:      subServers,
!
620
                tlsCfg:          tlsCfg,
!
621
                grpcServer:      grpcServer,
!
622
                server:          s,
!
623
                routerBackend:   routerBackend,
!
624
                chanPredicate:   chanPredicate,
!
625
                quit:            make(chan struct{}, 1),
!
626
        }
!
627
        lnrpc.RegisterLightningServer(grpcServer, rootRPCServer)
!
628

!
629
        // Now the main RPC server has been registered, we'll iterate through
!
630
        // all the sub-RPC servers and register them to ensure that requests
!
631
        // are properly routed towards them.
!
632
        for _, subServer := range subServers {
!
633
                err := subServer.RegisterWithRootServer(grpcServer)
!
634
                if err != nil {
!
635
                        return nil, fmt.Errorf("unable to register "+
!
636
                                "sub-server %v with root: %v",
!
637
                                subServer.Name(), err)
!
638
                }
!
639
        }
640

641
        return rootRPCServer, nil
!
642
}
643

644
// Start launches any helper goroutines required for the rpcServer to function.
645
func (r *rpcServer) Start() error {
!
646
        if atomic.AddInt32(&r.started, 1) != 1 {
!
647
                return nil
!
648
        }
!
649

650
        // First, we'll start all the sub-servers to ensure that they're ready
651
        // to take new requests in.
652
        //
653
        // TODO(roasbeef): some may require that the entire daemon be started
654
        // at that point
655
        for _, subServer := range r.subServers {
!
656
                rpcsLog.Debugf("Starting sub RPC server: %v", subServer.Name())
!
657

!
658
                if err := subServer.Start(); err != nil {
!
659
                        return err
!
660
                }
!
661
        }
662

663
        // With all the sub-servers started, we'll spin up the listeners for
664
        // the main RPC server itself.
665
        for _, lis := range r.listeners {
!
666
                go func(lis net.Listener) {
!
667
                        rpcsLog.Infof("RPC server listening on %s", lis.Addr())
!
668
                        r.grpcServer.Serve(lis)
!
669
                }(lis)
!
670
        }
671

672
        // If Prometheus monitoring is enabled, start the Prometheus exporter.
673
        if cfg.Prometheus.Enabled() {
!
674
                err := monitoring.ExportPrometheusMetrics(
!
675
                        r.grpcServer, cfg.Prometheus,
!
676
                )
!
677
                if err != nil {
!
678
                        return err
!
679
                }
!
680
        }
681

682
        // Finally, start the REST proxy for our gRPC server above. We'll ensure
683
        // we direct LND to connect to its loopback address rather than a
684
        // wildcard to prevent certificate issues when accessing the proxy
685
        // externally.
686
        //
687
        // TODO(roasbeef): eventually also allow the sub-servers to themselves
688
        // have a REST proxy.
689
        mux := proxy.NewServeMux()
!
690

!
691
        err := lnrpc.RegisterLightningHandlerFromEndpoint(
!
692
                context.Background(), mux, r.restProxyDest,
!
693
                r.restDialOpts,
!
694
        )
!
695
        if err != nil {
!
696
                return err
!
697
        }
!
698
        for _, restEndpoint := range cfg.RESTListeners {
!
699
                lis, err := lncfg.TLSListenOnAddress(restEndpoint, r.tlsCfg)
!
700
                if err != nil {
!
701
                        ltndLog.Errorf(
!
702
                                "gRPC proxy unable to listen on %s",
!
703
                                restEndpoint,
!
704
                        )
!
705
                        return err
!
706
                }
!
707

708
                r.listenerCleanUp = append(r.listenerCleanUp, func() {
!
709
                        lis.Close()
!
710
                })
!
711

712
                go func() {
!
713
                        rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())
!
714
                        http.Serve(lis, mux)
!
715
                }()
!
716
        }
717

718
        return nil
!
719
}
720

721
// Stop signals any active goroutines for a graceful closure.
722
func (r *rpcServer) Stop() error {
!
723
        if atomic.AddInt32(&r.shutdown, 1) != 1 {
!
724
                return nil
!
725
        }
!
726

727
        rpcsLog.Infof("Stopping RPC Server")
!
728

!
729
        close(r.quit)
!
730

!
731
        // After we've signalled all of our active goroutines to exit, we'll
!
732
        // then do the same to signal a graceful shutdown of all the sub
!
733
        // servers.
!
734
        for _, subServer := range r.subServers {
!
735
                rpcsLog.Infof("Stopping %v Sub-RPC Server",
!
736
                        subServer.Name())
!
737

!
738
                if err := subServer.Stop(); err != nil {
!
739
                        rpcsLog.Errorf("unable to stop sub-server %v: %v",
!
740
                                subServer.Name(), err)
!
741
                        continue
!
742
                }
743
        }
744

745
        // Finally, we can clean up all the listening sockets to ensure that we
746
        // give the file descriptors back to the OS.
747
        for _, cleanUp := range r.listenerCleanUp {
!
748
                cleanUp()
!
749
        }
!
750

751
        return nil
!
752
}
753

754
// addrPairsToOutputs converts a map describing a set of outputs to be created,
755
// the outputs themselves. The passed map pairs up an address, to a desired
756
// output value amount. Each address is converted to its corresponding pkScript
757
// to be used within the constructed output(s).
758
func addrPairsToOutputs(addrPairs map[string]int64) ([]*wire.TxOut, error) {
!
759
        outputs := make([]*wire.TxOut, 0, len(addrPairs))
!
760
        for addr, amt := range addrPairs {
!
761
                addr, err := btcutil.DecodeAddress(addr, activeNetParams.Params)
!
762
                if err != nil {
!
763
                        return nil, err
!
764
                }
!
765

766
                pkscript, err := txscript.PayToAddrScript(addr)
!
767
                if err != nil {
!
768
                        return nil, err
!
769
                }
!
770

771
                outputs = append(outputs, wire.NewTxOut(amt, pkscript))
!
772
        }
773

774
        return outputs, nil
!
775
}
776

777
// sendCoinsOnChain makes an on-chain transaction in or to send coins to one or
778
// more addresses specified in the passed payment map. The payment map maps an
779
// address to a specified output value to be sent to that address.
780
func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
781
        feeRate lnwallet.SatPerKWeight) (*chainhash.Hash, error) {
!
782

!
783
        outputs, err := addrPairsToOutputs(paymentMap)
!
784
        if err != nil {
!
785
                return nil, err
!
786
        }
!
787

788
        tx, err := r.server.cc.wallet.SendOutputs(outputs, feeRate)
!
789
        if err != nil {
!
790
                return nil, err
!
791
        }
!
792

793
        txHash := tx.TxHash()
!
794
        return &txHash, nil
!
795
}
796

797
// ListUnspent returns useful information about each unspent output owned by
// the wallet, as reported by the underlying `ListUnspentWitness`; the
// information returned is: outpoint, amount in satoshis, address, address
// type, scriptPubKey in hex and number of confirmations.  The result is
// filtered to contain outputs whose number of confirmations is between a
// minimum and maximum number of confirmations specified by the user, with 0
// meaning unconfirmed.
func (r *rpcServer) ListUnspent(ctx context.Context,
	in *lnrpc.ListUnspentRequest) (*lnrpc.ListUnspentResponse, error) {

	minConfs := in.MinConfs
	maxConfs := in.MaxConfs

	switch {
	// Ensure that the user didn't attempt to specify a negative number of
	// confirmations, as that isn't possible.
	case minConfs < 0:
		return nil, fmt.Errorf("min confirmations must be >= 0")

	// We'll also ensure that the min number of confs is strictly less than
	// or equal to the max number of confs for sanity.
	case minConfs > maxConfs:
		return nil, fmt.Errorf("max confirmations must be >= min " +
			"confirmations")
	}

	// With our arguments validated, we'll query the internal wallet for
	// the set of UTXOs that match our query.
	utxos, err := r.server.cc.wallet.ListUnspentWitness(minConfs, maxConfs)
	if err != nil {
		return nil, err
	}

	// Pre-size the response to avoid re-allocating while appending below.
	resp := &lnrpc.ListUnspentResponse{
		Utxos: make([]*lnrpc.Utxo, 0, len(utxos)),
	}

	for _, utxo := range utxos {
		// Translate lnwallet address type to the proper gRPC proto
		// address type.
		var addrType lnrpc.AddressType
		switch utxo.AddressType {

		case lnwallet.WitnessPubKey:
			addrType = lnrpc.AddressType_WITNESS_PUBKEY_HASH

		case lnwallet.NestedWitnessPubKey:
			addrType = lnrpc.AddressType_NESTED_PUBKEY_HASH

		// Unknown address types are skipped (with a warning) rather
		// than failing the whole listing.
		case lnwallet.UnknownAddressType:
			rpcsLog.Warnf("[listunspent] utxo with address of "+
				"unknown type ignored: %v",
				utxo.OutPoint.String())
			continue

		default:
			return nil, fmt.Errorf("invalid utxo address type")
		}

		// Now that we know we have a proper mapping to an address,
		// we'll convert the regular outpoint to an lnrpc variant.
		outpoint := &lnrpc.OutPoint{
			TxidBytes:   utxo.OutPoint.Hash[:],
			TxidStr:     utxo.OutPoint.Hash.String(),
			OutputIndex: utxo.OutPoint.Index,
		}

		utxoResp := lnrpc.Utxo{
			Type:          addrType,
			AmountSat:     int64(utxo.Value),
			PkScript:      hex.EncodeToString(utxo.PkScript),
			Outpoint:      outpoint,
			Confirmations: utxo.Confirmations,
		}

		// Finally, we'll attempt to extract the raw address from the
		// script so we can display a human friendly address to the end
		// user.
		_, outAddresses, _, err := txscript.ExtractPkScriptAddrs(
			utxo.PkScript, activeNetParams.Params,
		)
		if err != nil {
			return nil, err
		}

		// If we can't properly locate a single address, then this was
		// an error in our mapping, and we'll return an error back to
		// the user.
		if len(outAddresses) != 1 {
			return nil, fmt.Errorf("an output was unexpectedly " +
				"multisig")
		}

		utxoResp.Address = outAddresses[0].String()

		resp.Utxos = append(resp.Utxos, &utxoResp)
	}

	// Only mention the max bound in the debug log when the caller actually
	// constrained it (MaxInt32 acts as "no upper bound").
	maxStr := ""
	if maxConfs != math.MaxInt32 {
		maxStr = " max=" + fmt.Sprintf("%d", maxConfs)
	}

	rpcsLog.Debugf("[listunspent] min=%v%v, generated utxos: %v", minConfs,
		maxStr, utxos)

	return resp, nil
}
905

906
// EstimateFee handles a request for estimating the fee for sending a
907
// transaction spending to multiple specified outputs in parallel.
908
func (r *rpcServer) EstimateFee(ctx context.Context,
909
        in *lnrpc.EstimateFeeRequest) (*lnrpc.EstimateFeeResponse, error) {
!
910

!
911
        // Create the list of outputs we are spending to.
!
912
        outputs, err := addrPairsToOutputs(in.AddrToAmount)
!
913
        if err != nil {
!
914
                return nil, err
!
915
        }
!
916

917
        // Query the fee estimator for the fee rate for the given confirmation
918
        // target.
919
        target := in.TargetConf
!
920
        feePerKw, err := sweep.DetermineFeePerKw(
!
921
                r.server.cc.feeEstimator, sweep.FeePreference{
!
922
                        ConfTarget: uint32(target),
!
923
                },
!
924
        )
!
925
        if err != nil {
!
926
                return nil, err
!
927
        }
!
928

929
        // We will ask the wallet to create a tx using this fee rate. We set
930
        // dryRun=true to avoid inflating the change addresses in the db.
931
        var tx *txauthor.AuthoredTx
!
932
        wallet := r.server.cc.wallet
!
933
        err = wallet.WithCoinSelectLock(func() error {
!
934
                tx, err = wallet.CreateSimpleTx(outputs, feePerKw, true)
!
935
                return err
!
936
        })
!
937
        if err != nil {
!
938
                return nil, err
!
939
        }
!
940

941
        // Use the created tx to calculate the total fee.
942
        totalOutput := int64(0)
!
943
        for _, out := range tx.Tx.TxOut {
!
944
                totalOutput += out.Value
!
945
        }
!
946
        totalFee := int64(tx.TotalInput) - totalOutput
!
947

!
948
        resp := &lnrpc.EstimateFeeResponse{
!
949
                FeeSat:            totalFee,
!
950
                FeerateSatPerByte: int64(feePerKw.FeePerKVByte() / 1000),
!
951
        }
!
952

!
953
        rpcsLog.Debugf("[estimatefee] fee estimate for conf target %d: %v",
!
954
                target, resp)
!
955

!
956
        return resp, nil
!
957
}
958

959
// SendCoins executes a request to send coins to a particular address. Unlike
// SendMany, this RPC call only allows creating a single output at a time.
func (r *rpcServer) SendCoins(ctx context.Context,
	in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {

	// Based on the passed fee related parameters, we'll determine an
	// appropriate fee rate for this transaction.
	satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
	feePerKw, err := sweep.DetermineFeePerKw(
		r.server.cc.feeEstimator, sweep.FeePreference{
			ConfTarget: uint32(in.TargetConf),
			FeeRate:    satPerKw,
		},
	)
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[sendcoins] addr=%v, amt=%v, sat/kw=%v, sweep_all=%v",
		in.Addr, btcutil.Amount(in.Amount), int64(feePerKw),
		in.SendAll)

	// Decode the address receiving the coins, we need to check whether the
	// address is valid for this network.
	targetAddr, err := btcutil.DecodeAddress(in.Addr, activeNetParams.Params)
	if err != nil {
		return nil, err
	}

	// Make the check on the decoded address according to the active network.
	if !targetAddr.IsForNet(activeNetParams.Params) {
		return nil, fmt.Errorf("address: %v is not valid for this "+
			"network: %v", targetAddr.String(),
			activeNetParams.Params.Name)
	}

	// If the destination address parses to a valid pubkey, we assume the user
	// accidentally tried to send funds to a bare pubkey address. This check is
	// here to prevent unintended transfers.
	decodedAddr, _ := hex.DecodeString(in.Addr)
	_, err = btcec.ParsePubKey(decodedAddr, btcec.S256())
	if err == nil {
		return nil, fmt.Errorf("cannot send coins to pubkeys")
	}

	// txid holds the hash of whichever transaction (sweep or regular
	// send) ends up being broadcast below.
	var txid *chainhash.Hash

	wallet := r.server.cc.wallet

	// If the send all flag is active, then we'll attempt to sweep all the
	// coins in the wallet in a single transaction (if possible),
	// otherwise, we'll respect the amount, and attempt a regular 2-output
	// send.
	if in.SendAll {
		// At this point, the amount shouldn't be set since we've been
		// instructed to sweep all the coins from the wallet.
		if in.Amount != 0 {
			return nil, fmt.Errorf("amount set while SendAll is " +
				"active")
		}

		_, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
		if err != nil {
			return nil, err
		}

		// With the sweeper instance created, we can now generate a
		// transaction that will sweep ALL outputs from the wallet in a
		// single transaction. This will be generated in a concurrent
		// safe manner, so no need to worry about locking.
		sweepTxPkg, err := sweep.CraftSweepAllTx(
			feePerKw, uint32(bestHeight), targetAddr, wallet,
			wallet.WalletController, wallet.WalletController,
			r.server.cc.feeEstimator, r.server.cc.signer,
		)
		if err != nil {
			return nil, err
		}

		rpcsLog.Debugf("Sweeping all coins from wallet to addr=%v, "+
			"with tx=%v", in.Addr, spew.Sdump(sweepTxPkg.SweepTx))

		// As our sweep transaction was created, successfully, we'll
		// now attempt to publish it, cancelling the sweep pkg to
		// return all outputs if it fails.
		err = wallet.PublishTransaction(sweepTxPkg.SweepTx)
		if err != nil {
			sweepTxPkg.CancelSweepAttempt()

			return nil, fmt.Errorf("unable to broadcast sweep "+
				"transaction: %v", err)
		}

		sweepTXID := sweepTxPkg.SweepTx.TxHash()
		txid = &sweepTXID
	} else {

		// We'll now construct out payment map, and use the wallet's
		// coin selection synchronization method to ensure that no coin
		// selection (funding, sweep alls, other sends) can proceed
		// while we instruct the wallet to send this transaction.
		paymentMap := map[string]int64{targetAddr.String(): in.Amount}
		err := wallet.WithCoinSelectLock(func() error {
			newTXID, err := r.sendCoinsOnChain(paymentMap, feePerKw)
			if err != nil {
				return err
			}

			txid = newTXID

			return nil
		})
		if err != nil {
			return nil, err
		}
	}

	rpcsLog.Infof("[sendcoins] spend generated txid: %v", txid.String())

	return &lnrpc.SendCoinsResponse{Txid: txid.String()}, nil
}
1080

1081
// SendMany handles a request for a transaction create multiple specified
1082
// outputs in parallel.
1083
func (r *rpcServer) SendMany(ctx context.Context,
1084
        in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {
!
1085

!
1086
        // Based on the passed fee related parameters, we'll determine an
!
1087
        // appropriate fee rate for this transaction.
!
1088
        satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
!
1089
        feePerKw, err := sweep.DetermineFeePerKw(
!
1090
                r.server.cc.feeEstimator, sweep.FeePreference{
!
1091
                        ConfTarget: uint32(in.TargetConf),
!
1092
                        FeeRate:    satPerKw,
!
1093
                },
!
1094
        )
!
1095
        if err != nil {
!
1096
                return nil, err
!
1097
        }
!
1098

1099
        rpcsLog.Infof("[sendmany] outputs=%v, sat/kw=%v",
!
1100
                spew.Sdump(in.AddrToAmount), int64(feePerKw))
!
1101

!
1102
        var txid *chainhash.Hash
!
1103

!
1104
        // We'll attempt to send to the target set of outputs, ensuring that we
!
1105
        // synchronize with any other ongoing coin selection attempts which
!
1106
        // happen to also be concurrently executing.
!
1107
        wallet := r.server.cc.wallet
!
1108
        err = wallet.WithCoinSelectLock(func() error {
!
1109
                sendManyTXID, err := r.sendCoinsOnChain(
!
1110
                        in.AddrToAmount, feePerKw,
!
1111
                )
!
1112
                if err != nil {
!
1113
                        return err
!
1114
                }
!
1115

1116
                txid = sendManyTXID
!
1117

!
1118
                return nil
!
1119
        })
1120
        if err != nil {
!
1121
                return nil, err
!
1122
        }
!
1123

1124
        rpcsLog.Infof("[sendmany] spend generated txid: %v", txid.String())
!
1125

!
1126
        return &lnrpc.SendManyResponse{Txid: txid.String()}, nil
!
1127
}
1128

1129
// NewAddress creates a new address under control of the local wallet.
1130
func (r *rpcServer) NewAddress(ctx context.Context,
1131
        in *lnrpc.NewAddressRequest) (*lnrpc.NewAddressResponse, error) {
!
1132

!
1133
        // Translate the gRPC proto address type to the wallet controller's
!
1134
        // available address types.
!
1135
        var (
!
1136
                addr btcutil.Address
!
1137
                err  error
!
1138
        )
!
1139
        switch in.Type {
!
1140
        case lnrpc.AddressType_WITNESS_PUBKEY_HASH:
!
1141
                addr, err = r.server.cc.wallet.NewAddress(
!
1142
                        lnwallet.WitnessPubKey, false,
!
1143
                )
!
1144
                if err != nil {
!
1145
                        return nil, err
!
1146
                }
!
1147

1148
        case lnrpc.AddressType_NESTED_PUBKEY_HASH:
!
1149
                addr, err = r.server.cc.wallet.NewAddress(
!
1150
                        lnwallet.NestedWitnessPubKey, false,
!
1151
                )
!
1152
                if err != nil {
!
1153
                        return nil, err
!
1154
                }
!
1155

1156
        case lnrpc.AddressType_UNUSED_WITNESS_PUBKEY_HASH:
!
1157
                addr, err = r.server.cc.wallet.LastUnusedAddress(
!
1158
                        lnwallet.WitnessPubKey,
!
1159
                )
!
1160
                if err != nil {
!
1161
                        return nil, err
!
1162
                }
!
1163

1164
        case lnrpc.AddressType_UNUSED_NESTED_PUBKEY_HASH:
!
1165
                addr, err = r.server.cc.wallet.LastUnusedAddress(
!
1166
                        lnwallet.NestedWitnessPubKey,
!
1167
                )
!
1168
                if err != nil {
!
1169
                        return nil, err
!
1170
                }
!
1171
        }
1172

1173
        rpcsLog.Debugf("[newaddress] type=%v addr=%v", in.Type, addr.String())
!
1174
        return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
!
1175
}
1176

1177
var (
	// signedMsgPrefix is a special prefix that we'll prepend to any
	// messages we sign/verify. We do this to ensure that we don't
	// accidentally sign a sighash, or other sensitive material. By
	// prepending this fragment, we bind message signing to our particular
	// context.
	signedMsgPrefix = []byte("Lightning Signed Message:")
)
1185

1186
// SignMessage signs a message with the resident node's private key. The
1187
// returned signature string is zbase32 encoded and pubkey recoverable, meaning
1188
// that only the message digest and signature are needed for verification.
1189
func (r *rpcServer) SignMessage(ctx context.Context,
1190
        in *lnrpc.SignMessageRequest) (*lnrpc.SignMessageResponse, error) {
!
1191

!
1192
        if in.Msg == nil {
!
1193
                return nil, fmt.Errorf("need a message to sign")
!
1194
        }
!
1195

1196
        in.Msg = append(signedMsgPrefix, in.Msg...)
!
1197
        sigBytes, err := r.server.nodeSigner.SignCompact(in.Msg)
!
1198
        if err != nil {
!
1199
                return nil, err
!
1200
        }
!
1201

1202
        sig := zbase32.EncodeToString(sigBytes)
!
1203
        return &lnrpc.SignMessageResponse{Signature: sig}, nil
!
1204
}
1205

1206
// VerifyMessage verifies a signature over a msg. The signature must be zbase32
// encoded and signed by an active node in the resident node's channel
// database. In addition to returning the validity of the signature,
// VerifyMessage also returns the recovered pubkey from the signature.
func (r *rpcServer) VerifyMessage(ctx context.Context,
	in *lnrpc.VerifyMessageRequest) (*lnrpc.VerifyMessageResponse, error) {

	if in.Msg == nil {
		return nil, fmt.Errorf("need a message to verify")
	}

	// The signature should be zbase32 encoded
	sig, err := zbase32.DecodeString(in.Signature)
	if err != nil {
		return nil, fmt.Errorf("failed to decode signature: %v", err)
	}

	// The signature is over the double-sha256 hash of the message.
	in.Msg = append(signedMsgPrefix, in.Msg...)
	digest := chainhash.DoubleHashB(in.Msg)

	// RecoverCompact both recovers the pubkey and validates the signature.
	// Note that a recovery failure is reported as an invalid signature,
	// not as an RPC error.
	pubKey, _, err := btcec.RecoverCompact(btcec.S256(), sig, digest)
	if err != nil {
		return &lnrpc.VerifyMessageResponse{Valid: false}, nil
	}
	pubKeyHex := hex.EncodeToString(pubKey.SerializeCompressed())

	// Copy the 33-byte compressed pubkey into the fixed-size array the
	// graph lookup expects.
	var pub [33]byte
	copy(pub[:], pubKey.SerializeCompressed())

	// Query the channel graph to ensure a node in the network with active
	// channels signed the message.
	//
	// TODO(phlip9): Require valid nodes to have capital in active channels.
	graph := r.server.chanDB.ChannelGraph()
	_, active, err := graph.HasLightningNode(pub)
	if err != nil {
		return nil, fmt.Errorf("failed to query graph: %v", err)
	}

	return &lnrpc.VerifyMessageResponse{
		Valid:  active,
		Pubkey: pubKeyHex,
	}, nil
}
1252

1253
// ConnectPeer attempts to establish a connection to a remote peer.
func (r *rpcServer) ConnectPeer(ctx context.Context,
	in *lnrpc.ConnectPeerRequest) (*lnrpc.ConnectPeerResponse, error) {

	// The server hasn't yet started, so it won't be able to service any of
	// our requests, so we'll bail early here.
	if !r.server.Started() {
		return nil, fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	if in.Addr == nil {
		return nil, fmt.Errorf("need: lnc pubkeyhash@hostname")
	}

	// Validate that the pubkey is a well-formed hex-encoded compressed
	// public key before attempting any connection.
	pubkeyHex, err := hex.DecodeString(in.Addr.Pubkey)
	if err != nil {
		return nil, err
	}
	pubKey, err := btcec.ParsePubKey(pubkeyHex, btcec.S256())
	if err != nil {
		return nil, err
	}

	// Connections to ourselves are disallowed for obvious reasons.
	if pubKey.IsEqual(r.server.identityPriv.PubKey()) {
		return nil, fmt.Errorf("cannot make connection to self")
	}

	addr, err := parseAddr(in.Addr.Host)
	if err != nil {
		return nil, err
	}

	peerAddr := &lnwire.NetAddress{
		IdentityKey: pubKey,
		Address:     addr,
		ChainNet:    activeNetParams.Net,
	}

	rpcsLog.Debugf("[connectpeer] requested connection to %x@%s",
		peerAddr.IdentityKey.SerializeCompressed(), peerAddr.Address)

	// in.Perm indicates whether the server should treat this as a
	// persistent connection attempt.
	if err := r.server.ConnectToPeer(peerAddr, in.Perm); err != nil {
		rpcsLog.Errorf("[connectpeer]: error connecting to peer: %v", err)
		return nil, err
	}

	rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())
	return &lnrpc.ConnectPeerResponse{}, nil
}
1304

1305
// DisconnectPeer attempts to disconnect one peer from another identified by a
// given pubKey. In the case that we currently have a pending or active channel
// with the target peer, this action will be disallowed.
func (r *rpcServer) DisconnectPeer(ctx context.Context,
	in *lnrpc.DisconnectPeerRequest) (*lnrpc.DisconnectPeerResponse, error) {

	rpcsLog.Debugf("[disconnectpeer] from peer(%s)", in.PubKey)

	if !r.server.Started() {
		return nil, fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// First we'll validate the string passed in within the request to
	// ensure that it's a valid hex-string, and also a valid compressed
	// public key.
	pubKeyBytes, err := hex.DecodeString(in.PubKey)
	if err != nil {
		return nil, fmt.Errorf("unable to decode pubkey bytes: %v", err)
	}
	peerPubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
	if err != nil {
		return nil, fmt.Errorf("unable to parse pubkey: %v", err)
	}

	// Next, we'll fetch the pending/active channels we have with a
	// particular peer.
	nodeChannels, err := r.server.chanDB.FetchOpenChannels(peerPubKey)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch channels for peer: %v", err)
	}

	// In order to avoid erroneously disconnecting from a peer that we have
	// an active channel with, if we have any channels active with this
	// peer, then we'll disallow disconnecting from them. The
	// UnsafeDisconnect config flag overrides this safety check.
	if len(nodeChannels) > 0 && !cfg.UnsafeDisconnect {
		return nil, fmt.Errorf("cannot disconnect from peer(%x), "+
			"all active channels with the peer need to be closed "+
			"first", pubKeyBytes)
	}

	// With all initial validation complete, we'll now request that the
	// server disconnects from the peer.
	if err := r.server.DisconnectPeer(peerPubKey); err != nil {
		return nil, fmt.Errorf("unable to disconnect peer: %v", err)
	}

	return &lnrpc.DisconnectPeerResponse{}, nil
}
1354

1355
// extractOpenChannelMinConfs extracts the minimum number of confirmations from
1356
// the OpenChannelRequest that each output used to fund the channel's funding
1357
// transaction should satisfy.
1358
func extractOpenChannelMinConfs(in *lnrpc.OpenChannelRequest) (int32, error) {
!
1359
        switch {
!
1360
        // Ensure that the MinConfs parameter is non-negative.
1361
        case in.MinConfs < 0:
!
1362
                return 0, errors.New("minimum number of confirmations must " +
!
1363
                        "be a non-negative number")
!
1364

1365
        // The funding transaction should not be funded with unconfirmed outputs
1366
        // unless explicitly specified by SpendUnconfirmed. We do this to
1367
        // provide sane defaults to the OpenChannel RPC, as otherwise, if the
1368
        // MinConfs field isn't explicitly set by the caller, we'll use
1369
        // unconfirmed outputs without the caller being aware.
1370
        case in.MinConfs == 0 && !in.SpendUnconfirmed:
!
1371
                return 1, nil
!
1372

1373
        // In the event that the caller set MinConfs > 0 and SpendUnconfirmed to
1374
        // true, we'll return an error to indicate the conflict.
1375
        case in.MinConfs > 0 && in.SpendUnconfirmed:
!
1376
                return 0, errors.New("SpendUnconfirmed set to true with " +
!
1377
                        "MinConfs > 0")
!
1378

1379
        // The funding transaction of the new channel to be created can be
1380
        // funded with unconfirmed outputs.
1381
        case in.SpendUnconfirmed:
!
1382
                return 0, nil
!
1383

1384
        // If none of the above cases matched, we'll return the value set
1385
        // explicitly by the caller.
1386
        default:
!
1387
                return in.MinConfs, nil
!
1388
        }
1389
}
1390

1391
// OpenChannel attempts to open a singly funded channel specified in the
// request to a remote peer. Updates on the state of the pending channel are
// streamed back to the caller via updateStream until the channel is fully
// open, the server quits, or the funding flow fails.
func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
	updateStream lnrpc.Lightning_OpenChannelServer) error {

	rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+
		"allocation(us=%v, them=%v)", in.NodePubkeyString,
		in.LocalFundingAmount, in.PushSat)

	// Channels can't be opened before the chain backend has finished
	// syncing, as the relevant notifications wouldn't be reliable yet.
	if !r.server.Started() {
		return fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
	remoteInitialBalance := btcutil.Amount(in.PushSat)
	minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat)
	remoteCsvDelay := uint16(in.RemoteCsvDelay)

	// Ensure that the initial balance of the remote party (if pushing
	// satoshis) does not exceed the amount the local party has requested
	// for funding.
	//
	// TODO(roasbeef): incorporate base fee?
	if remoteInitialBalance >= localFundingAmt {
		return fmt.Errorf("amount pushed to remote peer for initial " +
			"state must be below the local funding amount")
	}

	// Ensure that the user doesn't exceed the current soft-limit for
	// channel size. If the funding amount is above the soft-limit, then
	// we'll reject the request.
	if localFundingAmt > MaxFundingAmount {
		return fmt.Errorf("funding amount is too large, the max "+
			"channel size is: %v", MaxFundingAmount)
	}

	// Restrict the size of the channel we'll actually open. At a later
	// level, we'll ensure that the output we create after accounting for
	// fees that a dust output isn't created.
	if localFundingAmt < minChanFundingSize {
		return fmt.Errorf("channel is too small, the minimum channel "+
			"size is: %v SAT", int64(minChanFundingSize))
	}

	// Then, we'll extract the minimum number of confirmations that each
	// output we use to fund the channel's funding transaction should
	// satisfy.
	minConfs, err := extractOpenChannelMinConfs(in)
	if err != nil {
		return err
	}

	var (
		nodePubKey      *btcec.PublicKey
		nodePubKeyBytes []byte
	)

	// TODO(roasbeef): also return channel ID?

	// Ensure that the NodePubKey is set before attempting to use it. Note
	// that the streaming variant requires the raw-bytes NodePubkey field,
	// not the hex NodePubkeyString used by OpenChannelSync.
	if len(in.NodePubkey) == 0 {
		return fmt.Errorf("NodePubKey is not set")
	}

	// Parse the raw bytes of the node key into a pubkey object so we
	// can easily manipulate it.
	nodePubKey, err = btcec.ParsePubKey(in.NodePubkey, btcec.S256())
	if err != nil {
		return err
	}

	// Making a channel to ourselves wouldn't be of any use, so we
	// explicitly disallow them.
	if nodePubKey.IsEqual(r.server.identityPriv.PubKey()) {
		return fmt.Errorf("cannot open channel to self")
	}

	// Keep the serialized form around for logging below.
	nodePubKeyBytes = nodePubKey.SerializeCompressed()

	// Based on the passed fee related parameters, we'll determine an
	// appropriate fee rate for the funding transaction.
	satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
	feeRate, err := sweep.DetermineFeePerKw(
		r.server.cc.feeEstimator, sweep.FeePreference{
			ConfTarget: uint32(in.TargetConf),
			FeeRate:    satPerKw,
		},
	)
	if err != nil {
		return err
	}

	rpcsLog.Debugf("[openchannel]: using fee of %v sat/kw for funding tx",
		int64(feeRate))

	// Instruct the server to trigger the necessary events to attempt to
	// open a new channel. A stream is returned in place, this stream will
	// be used to consume updates of the state of the pending channel.
	req := &openChanReq{
		targetPubkey:    nodePubKey,
		chainHash:       *activeNetParams.GenesisHash,
		localFundingAmt: localFundingAmt,
		pushAmt:         lnwire.NewMSatFromSatoshis(remoteInitialBalance),
		minHtlc:         minHtlc,
		fundingFeePerKw: feeRate,
		private:         in.Private,
		remoteCsvDelay:  remoteCsvDelay,
		minConfs:        minConfs,
	}

	updateChan, errChan := r.server.OpenChannel(req)

	var outpoint wire.OutPoint
out:
	// Relay every funding update to the RPC stream until the final
	// ChanOpen update arrives, an error occurs, or the server shuts down.
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
				nodePubKeyBytes, err)
			return err
		case fundingUpdate := <-updateChan:
			rpcsLog.Tracef("[openchannel] sending update: %v",
				fundingUpdate)
			if err := updateStream.Send(fundingUpdate); err != nil {
				return err
			}

			// If a final channel open update is being sent, then
			// we can break out of our recv loop as we no longer
			// need to process any further updates.
			switch update := fundingUpdate.Update.(type) {
			case *lnrpc.OpenStatusUpdate_ChanOpen:
				chanPoint := update.ChanOpen.ChannelPoint
				txid, err := GetChanPointFundingTxid(chanPoint)
				if err != nil {
					return err
				}
				outpoint = wire.OutPoint{
					Hash:  *txid,
					Index: chanPoint.OutputIndex,
				}

				break out
			}
		case <-r.quit:
			// Server shutdown: end the stream without error.
			return nil
		}
	}

	rpcsLog.Tracef("[openchannel] success NodeKey(%x), ChannelPoint(%v)",
		nodePubKeyBytes, outpoint)
	return nil
}
1545

1546
// OpenChannelSync is a synchronous version of the OpenChannel RPC call. This
1547
// call is meant to be consumed by clients to the REST proxy. As with all other
1548
// sync calls, all byte slices are instead to be populated as hex encoded
1549
// strings.
1550
func (r *rpcServer) OpenChannelSync(ctx context.Context,
1551
        in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) {
!
1552

!
1553
        rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+
!
1554
                "allocation(us=%v, them=%v)", in.NodePubkeyString,
!
1555
                in.LocalFundingAmount, in.PushSat)
!
1556

!
1557
        // We don't allow new channels to be open while the server is still
!
1558
        // syncing, as otherwise we may not be able to obtain the relevant
!
1559
        // notifications.
!
1560
        if !r.server.Started() {
!
1561
                return nil, fmt.Errorf("chain backend is still syncing, server " +
!
1562
                        "not active yet")
!
1563
        }
!
1564

1565
        // Creation of channels before the wallet syncs up is currently
1566
        // disallowed.
1567
        isSynced, _, err := r.server.cc.wallet.IsSynced()
!
1568
        if err != nil {
!
1569
                return nil, err
!
1570
        }
!
1571
        if !isSynced {
!
1572
                return nil, errors.New("channels cannot be created before the " +
!
1573
                        "wallet is fully synced")
!
1574
        }
!
1575

1576
        // Decode the provided target node's public key, parsing it into a pub
1577
        // key object. For all sync call, byte slices are expected to be
1578
        // encoded as hex strings.
1579
        keyBytes, err := hex.DecodeString(in.NodePubkeyString)
!
1580
        if err != nil {
!
1581
                return nil, err
!
1582
        }
!
1583
        nodepubKey, err := btcec.ParsePubKey(keyBytes, btcec.S256())
!
1584
        if err != nil {
!
1585
                return nil, err
!
1586
        }
!
1587

1588
        localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
!
1589
        remoteInitialBalance := btcutil.Amount(in.PushSat)
!
1590
        minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat)
!
1591
        remoteCsvDelay := uint16(in.RemoteCsvDelay)
!
1592

!
1593
        // Ensure that the initial balance of the remote party (if pushing
!
1594
        // satoshis) does not exceed the amount the local party has requested
!
1595
        // for funding.
!
1596
        if remoteInitialBalance >= localFundingAmt {
!
1597
                return nil, fmt.Errorf("amount pushed to remote peer for " +
!
1598
                        "initial state must be below the local funding amount")
!
1599
        }
!
1600

1601
        // Restrict the size of the channel we'll actually open. At a later
1602
        // level, we'll ensure that the output we create after accounting for
1603
        // fees that a dust output isn't created.
1604
        if localFundingAmt < minChanFundingSize {
!
1605
                return nil, fmt.Errorf("channel is too small, the minimum channel "+
!
1606
                        "size is: %v SAT", int64(minChanFundingSize))
!
1607
        }
!
1608

1609
        // Then, we'll extract the minimum number of confirmations that each
1610
        // output we use to fund the channel's funding transaction should
1611
        // satisfy.
1612
        minConfs, err := extractOpenChannelMinConfs(in)
!
1613
        if err != nil {
!
1614
                return nil, err
!
1615
        }
!
1616

1617
        // Based on the passed fee related parameters, we'll determine an
1618
        // appropriate fee rate for the funding transaction.
1619
        satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
!
1620
        feeRate, err := sweep.DetermineFeePerKw(
!
1621
                r.server.cc.feeEstimator, sweep.FeePreference{
!
1622
                        ConfTarget: uint32(in.TargetConf),
!
1623
                        FeeRate:    satPerKw,
!
1624
                },
!
1625
        )
!
1626
        if err != nil {
!
1627
                return nil, err
!
1628
        }
!
1629

1630
        rpcsLog.Tracef("[openchannel] target sat/kw for funding tx: %v",
!
1631
                int64(feeRate))
!
1632

!
1633
        req := &openChanReq{
!
1634
                targetPubkey:    nodepubKey,
!
1635
                chainHash:       *activeNetParams.GenesisHash,
!
1636
                localFundingAmt: localFundingAmt,
!
1637
                pushAmt:         lnwire.NewMSatFromSatoshis(remoteInitialBalance),
!
1638
                minHtlc:         minHtlc,
!
1639
                fundingFeePerKw: feeRate,
!
1640
                private:         in.Private,
!
1641
                remoteCsvDelay:  remoteCsvDelay,
!
1642
                minConfs:        minConfs,
!
1643
        }
!
1644

!
1645
        updateChan, errChan := r.server.OpenChannel(req)
!
1646
        select {
!
1647
        // If an error occurs them immediately return the error to the client.
1648
        case err := <-errChan:
!
1649
                rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
!
1650
                        nodepubKey, err)
!
1651
                return nil, err
!
1652

1653
        // Otherwise, wait for the first channel update. The first update sent
1654
        // is when the funding transaction is broadcast to the network.
1655
        case fundingUpdate := <-updateChan:
!
1656
                rpcsLog.Tracef("[openchannel] sending update: %v",
!
1657
                        fundingUpdate)
!
1658

!
1659
                // Parse out the txid of the pending funding transaction. The
!
1660
                // sync client can use this to poll against the list of
!
1661
                // PendingChannels.
!
1662
                openUpdate := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
!
1663
                chanUpdate := openUpdate.ChanPending
!
1664

!
1665
                return &lnrpc.ChannelPoint{
!
1666
                        FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
!
1667
                                FundingTxidBytes: chanUpdate.Txid,
!
1668
                        },
!
1669
                        OutputIndex: chanUpdate.OutputIndex,
!
1670
                }, nil
!
1671
        case <-r.quit:
!
1672
                return nil, nil
!
1673
        }
1674
}
1675

1676
// GetChanPointFundingTxid returns the given channel point's funding txid in
1677
// raw bytes.
1678
func GetChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) (*chainhash.Hash, error) {
!
1679
        var txid []byte
!
1680

!
1681
        // A channel point's funding txid can be get/set as a byte slice or a
!
1682
        // string. In the case it is a string, decode it.
!
1683
        switch chanPoint.GetFundingTxid().(type) {
!
1684
        case *lnrpc.ChannelPoint_FundingTxidBytes:
!
1685
                txid = chanPoint.GetFundingTxidBytes()
!
1686
        case *lnrpc.ChannelPoint_FundingTxidStr:
!
1687
                s := chanPoint.GetFundingTxidStr()
!
1688
                h, err := chainhash.NewHashFromStr(s)
!
1689
                if err != nil {
!
1690
                        return nil, err
!
1691
                }
!
1692

1693
                txid = h[:]
!
1694
        }
1695

1696
        return chainhash.NewHash(txid)
!
1697
}
1698

1699
// CloseChannel attempts to close an active channel identified by its channel
// point. The actions of this method can additionally be augmented to attempt
// a force close after a timeout period in the case of an inactive peer.
// Close-progress updates are streamed back to the caller via updateStream
// until the closing transaction confirms, an error occurs, or the server
// shuts down.
func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
	updateStream lnrpc.Lightning_CloseChannelServer) error {

	// If the user didn't specify a channel point, then we'll reject this
	// request all together.
	if in.GetChannelPoint() == nil {
		return fmt.Errorf("must specify channel point in close channel")
	}

	// If force closing a channel, the fee set in the commitment transaction
	// is used, so caller-supplied fee parameters are rejected.
	if in.Force && (in.SatPerByte != 0 || in.TargetConf != 0) {
		return fmt.Errorf("force closing a channel uses a pre-defined fee")
	}

	force := in.Force
	index := in.ChannelPoint.OutputIndex
	txid, err := GetChanPointFundingTxid(in.GetChannelPoint())
	if err != nil {
		rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
		return err
	}
	chanPoint := wire.NewOutPoint(txid, index)

	rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v), force=%v",
		chanPoint, force)

	// Both branches below populate these channels; the dispatch loop at
	// the bottom consumes from whichever pair was set up.
	var (
		updateChan chan interface{}
		errChan    chan error
	)

	// TODO(roasbeef): if force and peer online then don't force?

	// First, we'll fetch the channel as is, as we'll need to examine it
	// regardless of if this is a force close or not.
	channel, err := r.fetchActiveChannel(*chanPoint)
	if err != nil {
		return err
	}

	// If a force closure was requested, then we'll handle all the details
	// around the creation and broadcast of the unilateral closure
	// transaction here rather than going to the switch as we don't require
	// interaction from the peer.
	if force {
		_, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
		if err != nil {
			return err
		}

		// As we're force closing this channel, as a precaution, we'll
		// ensure that the switch doesn't continue to see this channel
		// as eligible for forwarding HTLC's. If the peer is online,
		// then we'll also purge all of its indexes.
		remotePub := &channel.StateSnapshot().RemoteIdentity
		if peer, err := r.server.FindPeer(remotePub); err == nil {
			// TODO(roasbeef): actually get the active channel
			// instead too?
			//  * so only need to grab from database
			peer.WipeChannel(channel.ChannelPoint())
		} else {
			chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())
			r.server.htlcSwitch.RemoveLink(chanID)
		}

		// With the necessary indexes cleaned up, we'll now force close
		// the channel.
		chainArbitrator := r.server.chainArb
		closingTx, err := chainArbitrator.ForceCloseContract(
			*chanPoint,
		)
		if err != nil {
			rpcsLog.Errorf("unable to force close transaction: %v", err)
			return err
		}

		closingTxid := closingTx.TxHash()

		// With the transaction broadcast, we send our first update to
		// the client. Capacity 2 lets both the pending and final
		// updates be queued without blocking the sender.
		updateChan = make(chan interface{}, 2)
		updateChan <- &pendingUpdate{
			Txid: closingTxid[:],
		}

		// The confirmation watcher runs in its own goroutine and
		// reports either an error or the final close update.
		errChan = make(chan error, 1)
		notifier := r.server.cc.chainNotifier
		go waitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint,
			&closingTxid, closingTx.TxOut[0].PkScript, func() {
				// Respond to the local subsystem which
				// requested the channel closure.
				updateChan <- &channelCloseUpdate{
					ClosingTxid: closingTxid[:],
					Success:     true,
				}
			})
	} else {
		// If the link is not known by the switch, we cannot gracefully close
		// the channel.
		channelID := lnwire.NewChanIDFromOutPoint(chanPoint)
		if _, err := r.server.htlcSwitch.GetLink(channelID); err != nil {
			rpcsLog.Debugf("Trying to non-force close offline channel with "+
				"chan_point=%v", chanPoint)
			return fmt.Errorf("unable to gracefully close channel while peer "+
				"is offline (try force closing it instead): %v", err)
		}

		// Based on the passed fee related parameters, we'll determine
		// an appropriate fee rate for the cooperative closure
		// transaction.
		satPerKw := lnwallet.SatPerKVByte(
			in.SatPerByte * 1000,
		).FeePerKWeight()
		feeRate, err := sweep.DetermineFeePerKw(
			r.server.cc.feeEstimator, sweep.FeePreference{
				ConfTarget: uint32(in.TargetConf),
				FeeRate:    satPerKw,
			},
		)
		if err != nil {
			return err
		}

		rpcsLog.Debugf("Target sat/kw for closing transaction: %v",
			int64(feeRate))

		// Before we attempt the cooperative channel closure, we'll
		// examine the channel to ensure that it doesn't have a
		// lingering HTLC.
		if len(channel.ActiveHtlcs()) != 0 {
			return fmt.Errorf("cannot co-op close channel " +
				"with active htlcs")
		}

		// Otherwise, the caller has requested a regular interactive
		// cooperative channel closure. So we'll forward the request to
		// the htlc switch which will handle the negotiation and
		// broadcast details.
		updateChan, errChan = r.server.htlcSwitch.CloseLink(
			chanPoint, htlcswitch.CloseRegular, feeRate,
		)
	}
out:
	// Forward every closing update to the RPC stream until the terminal
	// channelCloseUpdate arrives, an error occurs, or the server quits.
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("[closechannel] unable to close "+
				"ChannelPoint(%v): %v", chanPoint, err)
			return err
		case closingUpdate := <-updateChan:
			rpcClosingUpdate, err := createRPCCloseUpdate(
				closingUpdate,
			)
			if err != nil {
				return err
			}

			rpcsLog.Tracef("[closechannel] sending update: %v",
				rpcClosingUpdate)

			if err := updateStream.Send(rpcClosingUpdate); err != nil {
				return err
			}

			// If a final channel closing updates is being sent,
			// then we can break out of our dispatch loop as we no
			// longer need to process any further updates.
			switch closeUpdate := closingUpdate.(type) {
			case *channelCloseUpdate:
				h, _ := chainhash.NewHash(closeUpdate.ClosingTxid)
				rpcsLog.Infof("[closechannel] close completed: "+
					"txid(%v)", h)
				break out
			}
		case <-r.quit:
			// Server shutdown: end the stream without error.
			return nil
		}
	}

	return nil
}
1884

1885
func createRPCCloseUpdate(update interface{}) (
1886
        *lnrpc.CloseStatusUpdate, error) {
!
1887

!
1888
        switch u := update.(type) {
!
1889
        case *channelCloseUpdate:
!
1890
                return &lnrpc.CloseStatusUpdate{
!
1891
                        Update: &lnrpc.CloseStatusUpdate_ChanClose{
!
1892
                                ChanClose: &lnrpc.ChannelCloseUpdate{
!
1893
                                        ClosingTxid: u.ClosingTxid,
!
1894
                                },
!
1895
                        },
!
1896
                }, nil
!
1897
        case *pendingUpdate:
!
1898
                return &lnrpc.CloseStatusUpdate{
!
1899
                        Update: &lnrpc.CloseStatusUpdate_ClosePending{
!
1900
                                ClosePending: &lnrpc.PendingUpdate{
!
1901
                                        Txid:        u.Txid,
!
1902
                                        OutputIndex: u.OutputIndex,
!
1903
                                },
!
1904
                        },
!
1905
                }, nil
!
1906
        }
1907

1908
        return nil, errors.New("unknown close status update")
!
1909
}
1910

1911
// AbandonChannel removes all channel state from the database except for a
// close summary. This method can be used to get rid of permanently unusable
// channels due to bugs fixed in newer versions of lnd.
func (r *rpcServer) AbandonChannel(ctx context.Context,
	in *lnrpc.AbandonChannelRequest) (*lnrpc.AbandonChannelResponse, error) {

	// If this isn't the dev build, then we won't allow the RPC to be
	// executed, as it's an advanced feature and won't be activated in
	// regular production/release builds.
	if !build.IsDevBuild() {
		return nil, fmt.Errorf("AbandonChannel RPC call only " +
			"available in dev builds")
	}

	// We'll parse out the arguments so we can obtain the chanPoint of the
	// target channel.
	txid, err := GetChanPointFundingTxid(in.GetChannelPoint())
	if err != nil {
		return nil, err
	}
	index := in.ChannelPoint.OutputIndex
	chanPoint := wire.NewOutPoint(txid, index)

	// With the chanPoint constructed, we'll attempt to find the target
	// channel in the database. If we can't find the channel, then we'll
	// return the error back to the caller.
	dbChan, err := r.server.chanDB.FetchChannel(*chanPoint)
	if err != nil {
		return nil, err
	}

	// Now that we've found the channel, we'll populate a close summary for
	// the channel, so we can store as much information for this abandoned
	// channel as possible. We also ensure that we set Pending to false, to
	// indicate that this channel has been "fully" closed.
	_, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, err
	}
	summary := &channeldb.ChannelCloseSummary{
		CloseType:               channeldb.Abandoned,
		ChanPoint:               *chanPoint,
		ChainHash:               dbChan.ChainHash,
		CloseHeight:             uint32(bestHeight),
		RemotePub:               dbChan.IdentityPub,
		Capacity:                dbChan.Capacity,
		SettledBalance:          dbChan.LocalCommitment.LocalBalance.ToSatoshis(),
		ShortChanID:             dbChan.ShortChanID(),
		RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation,
		RemoteNextRevocation:    dbChan.RemoteNextRevocation,
		LocalChanConfig:         dbChan.LocalChanCfg,
	}

	// Finally, we'll close the channel in the DB, and return back to the
	// caller.
	err = dbChan.CloseChannel(summary)
	if err != nil {
		return nil, err
	}

	return &lnrpc.AbandonChannelResponse{}, nil
}
1973

1974
// fetchActiveChannel attempts to locate a channel identified by its channel
1975
// point from the database's set of all currently opened channels and
1976
// return it as a fully populated state machine
1977
func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) (
1978
        *lnwallet.LightningChannel, error) {
!
1979

!
1980
        dbChan, err := r.server.chanDB.FetchChannel(chanPoint)
!
1981
        if err != nil {
!
1982
                return nil, err
!
1983
        }
!
1984

1985
        // If the channel is successfully fetched from the database,
1986
        // we create a fully populated channel state machine which
1987
        // uses the db channel as backing storage.
1988
        return lnwallet.NewLightningChannel(
!
1989
                r.server.cc.wallet.Cfg.Signer, dbChan, nil,
!
1990
        )
!
1991
}
1992

1993
// GetInfo returns general information concerning the lightning node including
// its identity pubkey, alias, the chains it is connected to, and information
// concerning the number of open+pending channels.
func (r *rpcServer) GetInfo(ctx context.Context,
	in *lnrpc.GetInfoRequest) (*lnrpc.GetInfoResponse, error) {

	serverPeers := r.server.Peers()

	openChannels, err := r.server.chanDB.FetchAllOpenChannels()
	if err != nil {
		return nil, err
	}

	// A channel counts as active only if the switch currently has a live
	// link for it; everything else in the open set is inactive.
	var activeChannels uint32
	for _, channel := range openChannels {
		chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
		if r.server.htlcSwitch.HasActiveLink(chanID) {
			activeChannels++
		}
	}

	inactiveChannels := uint32(len(openChannels)) - activeChannels

	pendingChannels, err := r.server.chanDB.FetchPendingChannels()
	if err != nil {
		return nil, fmt.Errorf("unable to get retrieve pending "+
			"channels: %v", err)
	}
	nPendingChannels := uint32(len(pendingChannels))

	// The node's identity key is reported as a compressed hex string.
	idPub := r.server.identityPriv.PubKey().SerializeCompressed()
	encodedIDPub := hex.EncodeToString(idPub)

	bestHash, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, fmt.Errorf("unable to get best block info: %v", err)
	}

	isSynced, bestHeaderTimestamp, err := r.server.cc.wallet.IsSynced()
	if err != nil {
		return nil, fmt.Errorf("unable to sync PoV of the wallet "+
			"with current best block in the main chain: %v", err)
	}

	// Build the list of active chains; all chains share the same network
	// name (e.g. mainnet/testnet) for this node.
	network := normalizeNetwork(activeNetParams.Name)
	activeChains := make([]*lnrpc.Chain, registeredChains.NumActiveChains())
	for i, chain := range registeredChains.ActiveChains() {
		activeChains[i] = &lnrpc.Chain{
			Chain:   chain.String(),
			Network: network,
		}
	}

	// Check if external IP addresses were provided to lnd and use them
	// to set the URIs, formatted as pubkey@address.
	nodeAnn, err := r.server.genNodeAnnouncement(false)
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve current fully signed "+
			"node announcement: %v", err)
	}
	addrs := nodeAnn.Addresses
	uris := make([]string, len(addrs))
	for i, addr := range addrs {
		uris[i] = fmt.Sprintf("%s@%s", encodedIDPub, addr.String())
	}

	isGraphSynced := r.server.authGossiper.SyncManager().IsGraphSynced()

	// TODO(roasbeef): add synced height n stuff
	return &lnrpc.GetInfoResponse{
		IdentityPubkey:      encodedIDPub,
		NumPendingChannels:  nPendingChannels,
		NumActiveChannels:   activeChannels,
		NumInactiveChannels: inactiveChannels,
		NumPeers:            uint32(len(serverPeers)),
		BlockHeight:         uint32(bestHeight),
		BlockHash:           bestHash.String(),
		SyncedToChain:       isSynced,
		Testnet:             isTestnet(&activeNetParams),
		Chains:              activeChains,
		Uris:                uris,
		Alias:               nodeAnn.Alias.String(),
		Color:               routing.EncodeHexColor(nodeAnn.RGBColor),
		BestHeaderTimestamp: int64(bestHeaderTimestamp),
		Version:             build.Version(),
		SyncedToGraph:       isGraphSynced,
	}, nil
}
2082

2083
// ListPeers returns a verbose listing of all currently active peers.
2084
func (r *rpcServer) ListPeers(ctx context.Context,
2085
        in *lnrpc.ListPeersRequest) (*lnrpc.ListPeersResponse, error) {
!
2086

!
2087
        rpcsLog.Tracef("[listpeers] request")
!
2088

!
2089
        serverPeers := r.server.Peers()
!
2090
        resp := &lnrpc.ListPeersResponse{
!
2091
                Peers: make([]*lnrpc.Peer, 0, len(serverPeers)),
!
2092
        }
!
2093

!
2094
        for _, serverPeer := range serverPeers {
!
2095
                var (
!
2096
                        satSent int64
!
2097
                        satRecv int64
!
2098
                )
!
2099

!
2100
                // In order to display the total number of satoshis of outbound
!
2101
                // (sent) and inbound (recv'd) satoshis that have been
!
2102
                // transported through this peer, we'll sum up the sent/recv'd
!
2103
                // values for each of the active channels we have with the
!
2104
                // peer.
!
2105
                chans := serverPeer.ChannelSnapshots()
!
2106
                for _, c := range chans {
!
2107
                        satSent += int64(c.TotalMSatSent.ToSatoshis())
!
2108
                        satRecv += int64(c.TotalMSatReceived.ToSatoshis())
!
2109
                }
!
2110

2111
                nodePub := serverPeer.PubKey()
!
2112

!
2113
                // Retrieve the peer's sync type. If we don't currently have a
!
2114
                // syncer for the peer, then we'll default to a passive sync.
!
2115
                // This can happen if the RPC is called while a peer is
!
2116
                // initializing.
!
2117
                syncer, ok := r.server.authGossiper.SyncManager().GossipSyncer(
!
2118
                        nodePub,
!
2119
                )
!
2120

!
2121
                var lnrpcSyncType lnrpc.Peer_SyncType
!
2122
                if !ok {
!
2123
                        rpcsLog.Warnf("Gossip syncer for peer=%x not found",
!
2124
                                nodePub)
!
2125
                        lnrpcSyncType = lnrpc.Peer_UNKNOWN_SYNC
!
2126
                } else {
!
2127
                        syncType := syncer.SyncType()
!
2128
                        switch syncType {
!
2129
                        case discovery.ActiveSync:
!
2130
                                lnrpcSyncType = lnrpc.Peer_ACTIVE_SYNC
!
2131
                        case discovery.PassiveSync:
!
2132
                                lnrpcSyncType = lnrpc.Peer_PASSIVE_SYNC
!
2133
                        default:
!
2134
                                return nil, fmt.Errorf("unhandled sync type %v",
!
2135
                                        syncType)
!
2136
                        }
2137
                }
2138

2139
                peer := &lnrpc.Peer{
!
2140
                        PubKey:    hex.EncodeToString(nodePub[:]),
!
2141
                        Address:   serverPeer.conn.RemoteAddr().String(),
!
2142
                        Inbound:   serverPeer.inbound,
!
2143
                        BytesRecv: atomic.LoadUint64(&serverPeer.bytesReceived),
!
2144
                        BytesSent: atomic.LoadUint64(&serverPeer.bytesSent),
!
2145
                        SatSent:   satSent,
!
2146
                        SatRecv:   satRecv,
!
2147
                        PingTime:  serverPeer.PingTime(),
!
2148
                        SyncType:  lnrpcSyncType,
!
2149
                }
!
2150

!
2151
                resp.Peers = append(resp.Peers, peer)
!
2152
        }
2153

2154
        rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers)
!
2155

!
2156
        return resp, nil
!
2157
}
2158

2159
// WalletBalance returns total unspent outputs(confirmed and unconfirmed), all
2160
// confirmed unspent outputs and all unconfirmed unspent outputs under control
2161
// by the wallet. This method can be modified by having the request specify
2162
// only witness outputs should be factored into the final output sum.
2163
// TODO(roasbeef): add async hooks into wallet balance changes
2164
func (r *rpcServer) WalletBalance(ctx context.Context,
2165
        in *lnrpc.WalletBalanceRequest) (*lnrpc.WalletBalanceResponse, error) {
!
2166

!
2167
        // Get total balance, from txs that have >= 0 confirmations.
!
2168
        totalBal, err := r.server.cc.wallet.ConfirmedBalance(0)
!
2169
        if err != nil {
!
2170
                return nil, err
!
2171
        }
!
2172

2173
        // Get confirmed balance, from txs that have >= 1 confirmations.
2174
        // TODO(halseth): get both unconfirmed and confirmed balance in one
2175
        // call, as this is racy.
2176
        confirmedBal, err := r.server.cc.wallet.ConfirmedBalance(1)
!
2177
        if err != nil {
!
2178
                return nil, err
!
2179
        }
!
2180

2181
        // Get unconfirmed balance, from txs with 0 confirmations.
2182
        unconfirmedBal := totalBal - confirmedBal
!
2183

!
2184
        rpcsLog.Debugf("[walletbalance] Total balance=%v (confirmed=%v, "+
!
2185
                "unconfirmed=%v)", totalBal, confirmedBal, unconfirmedBal)
!
2186

!
2187
        return &lnrpc.WalletBalanceResponse{
!
2188
                TotalBalance:       int64(totalBal),
!
2189
                ConfirmedBalance:   int64(confirmedBal),
!
2190
                UnconfirmedBalance: int64(unconfirmedBal),
!
2191
        }, nil
!
2192
}
2193

2194
// ChannelBalance returns the total available channel flow across all open
2195
// channels in satoshis.
2196
func (r *rpcServer) ChannelBalance(ctx context.Context,
2197
        in *lnrpc.ChannelBalanceRequest) (*lnrpc.ChannelBalanceResponse, error) {
!
2198

!
2199
        openChannels, err := r.server.chanDB.FetchAllOpenChannels()
!
2200
        if err != nil {
!
2201
                return nil, err
!
2202
        }
!
2203

2204
        var balance btcutil.Amount
!
2205
        for _, channel := range openChannels {
!
2206
                balance += channel.LocalCommitment.LocalBalance.ToSatoshis()
!
2207
        }
!
2208

2209
        pendingChannels, err := r.server.chanDB.FetchPendingChannels()
!
2210
        if err != nil {
!
2211
                return nil, err
!
2212
        }
!
2213

2214
        var pendingOpenBalance btcutil.Amount
!
2215
        for _, channel := range pendingChannels {
!
2216
                pendingOpenBalance += channel.LocalCommitment.LocalBalance.ToSatoshis()
!
2217
        }
!
2218

2219
        rpcsLog.Debugf("[channelbalance] balance=%v pending-open=%v",
!
2220
                balance, pendingOpenBalance)
!
2221

!
2222
        return &lnrpc.ChannelBalanceResponse{
!
2223
                Balance:            int64(balance),
!
2224
                PendingOpenBalance: int64(pendingOpenBalance),
!
2225
        }, nil
!
2226
}
2227

2228
// PendingChannels returns a list of all the channels that are currently
// considered "pending". A channel is pending if it has finished the funding
// workflow and is waiting for confirmations for the funding txn, or is in the
// process of closure, either initiated cooperatively or non-cooperatively.
//
// The response is assembled in three phases: pending-open channels from the
// database, pending-close channels (cooperative and force close) augmented
// with nursery/arbitrator resolution data, and channels merely waiting for
// their broadcast closing transaction to confirm. TotalLimboBalance is
// accumulated across all closing categories.
func (r *rpcServer) PendingChannels(ctx context.Context,
	in *lnrpc.PendingChannelsRequest) (*lnrpc.PendingChannelsResponse, error) {

	rpcsLog.Debugf("[pendingchannels]")

	resp := &lnrpc.PendingChannelsResponse{}

	// First, we'll populate the response with all the channels that are
	// soon to be opened. We can easily fetch this data from the database
	// and map the db struct to the proto response.
	pendingOpenChannels, err := r.server.chanDB.FetchPendingChannels()
	if err != nil {
		rpcsLog.Errorf("unable to fetch pending channels: %v", err)
		return nil, err
	}
	resp.PendingOpenChannels = make([]*lnrpc.PendingChannelsResponse_PendingOpenChannel,
		len(pendingOpenChannels))
	for i, pendingChan := range pendingOpenChannels {
		pub := pendingChan.IdentityPub.SerializeCompressed()

		// As this is required for display purposes, we'll calculate
		// the weight of the commitment transaction. We also add on the
		// estimated weight of the witness to calculate the weight of
		// the transaction if it were to be immediately unilaterally
		// broadcast.
		// TODO(roasbeef): query for funding tx from wallet, display
		// that also?
		localCommitment := pendingChan.LocalCommitment
		utx := btcutil.NewTx(localCommitment.CommitTx)
		commitBaseWeight := blockchain.GetTransactionWeight(utx)
		commitWeight := commitBaseWeight + input.WitnessCommitmentTxWeight

		resp.PendingOpenChannels[i] = &lnrpc.PendingChannelsResponse_PendingOpenChannel{
			Channel: &lnrpc.PendingChannelsResponse_PendingChannel{
				RemoteNodePub:        hex.EncodeToString(pub),
				ChannelPoint:         pendingChan.FundingOutpoint.String(),
				Capacity:             int64(pendingChan.Capacity),
				LocalBalance:         int64(localCommitment.LocalBalance.ToSatoshis()),
				RemoteBalance:        int64(localCommitment.RemoteBalance.ToSatoshis()),
				LocalChanReserveSat:  int64(pendingChan.LocalChanCfg.ChanReserve),
				RemoteChanReserveSat: int64(pendingChan.RemoteChanCfg.ChanReserve),
			},
			CommitWeight: commitWeight,
			CommitFee:    int64(localCommitment.CommitFee),
			FeePerKw:     int64(localCommitment.FeePerKw),
			// TODO(roasbeef): need to track confirmation height
		}
	}

	// The current best height is needed below to compute how many blocks
	// remain until time-locked outputs of force closed channels mature.
	_, currentHeight, err := r.server.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, err
	}

	// Next, we'll examine the channels that are soon to be closed so we
	// can populate these fields within the response.
	pendingCloseChannels, err := r.server.chanDB.FetchClosedChannels(true)
	if err != nil {
		rpcsLog.Errorf("unable to fetch closed channels: %v", err)
		return nil, err
	}
	for _, pendingClose := range pendingCloseChannels {
		// First construct the channel struct itself, this will be
		// needed regardless of how this channel was closed.
		pub := pendingClose.RemotePub.SerializeCompressed()
		chanPoint := pendingClose.ChanPoint
		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
			RemoteNodePub: hex.EncodeToString(pub),
			ChannelPoint:  chanPoint.String(),
			Capacity:      int64(pendingClose.Capacity),
			LocalBalance:  int64(pendingClose.SettledBalance),
		}

		closeTXID := pendingClose.ClosingTXID.String()

		switch pendingClose.CloseType {

		// If the channel was closed cooperatively, then we'll only
		// need to tack on the closing txid.
		// TODO(halseth): remove. After recent changes, a coop closed
		// channel should never be in the "pending close" state.
		// Keeping for now to let someone that upgraded in the middle
		// of a close let their closing tx confirm.
		case channeldb.CooperativeClose:
			resp.PendingClosingChannels = append(
				resp.PendingClosingChannels,
				&lnrpc.PendingChannelsResponse_ClosedChannel{
					Channel:     channel,
					ClosingTxid: closeTXID,
				},
			)

			resp.TotalLimboBalance += channel.LocalBalance

		// If the channel was force closed, then we'll need to query
		// the utxoNursery for additional information.
		// TODO(halseth): distinguish remote and local case?
		case channeldb.LocalForceClose, channeldb.RemoteForceClose:
			forceClose := &lnrpc.PendingChannelsResponse_ForceClosedChannel{
				Channel:     channel,
				ClosingTxid: closeTXID,
			}

			// Fetch reports from both nursery and resolvers. At the
			// moment this is not an atomic snapshot. This is
			// planned to be resolved when the nursery is removed
			// and channel arbitrator will be the single source for
			// these kind of reports.
			err := r.nurseryPopulateForceCloseResp(
				&chanPoint, currentHeight, forceClose,
			)
			if err != nil {
				return nil, err
			}

			err = r.arbitratorPopulateForceCloseResp(
				&chanPoint, currentHeight, forceClose,
			)
			if err != nil {
				return nil, err
			}

			resp.TotalLimboBalance += int64(forceClose.LimboBalance)

			resp.PendingForceClosingChannels = append(
				resp.PendingForceClosingChannels,
				forceClose,
			)
		}
	}

	// We'll also fetch all channels that are open, but have had their
	// commitment broadcasted, meaning they are waiting for the closing
	// transaction to confirm.
	waitingCloseChans, err := r.server.chanDB.FetchWaitingCloseChannels()
	if err != nil {
		rpcsLog.Errorf("unable to fetch channels waiting close: %v",
			err)
		return nil, err
	}

	for _, waitingClose := range waitingCloseChans {
		pub := waitingClose.IdentityPub.SerializeCompressed()
		chanPoint := waitingClose.FundingOutpoint
		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
			RemoteNodePub:        hex.EncodeToString(pub),
			ChannelPoint:         chanPoint.String(),
			Capacity:             int64(waitingClose.Capacity),
			LocalBalance:         int64(waitingClose.LocalCommitment.LocalBalance.ToSatoshis()),
			RemoteBalance:        int64(waitingClose.LocalCommitment.RemoteBalance.ToSatoshis()),
			LocalChanReserveSat:  int64(waitingClose.LocalChanCfg.ChanReserve),
			RemoteChanReserveSat: int64(waitingClose.RemoteChanCfg.ChanReserve),
		}

		// A close tx has been broadcasted, all our balance will be in
		// limbo until it confirms.
		resp.WaitingCloseChannels = append(
			resp.WaitingCloseChannels,
			&lnrpc.PendingChannelsResponse_WaitingCloseChannel{
				Channel:      channel,
				LimboBalance: channel.LocalBalance,
			},
		)

		resp.TotalLimboBalance += channel.LocalBalance
	}

	return resp, nil
}
2401

2402
// arbitratorPopulateForceCloseResp populates the pending channels response
2403
// message with channel resolution information from the contract resolvers.
2404
func (r *rpcServer) arbitratorPopulateForceCloseResp(chanPoint *wire.OutPoint,
2405
        currentHeight int32,
2406
        forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {
!
2407

!
2408
        // Query for contract resolvers state.
!
2409
        arbitrator, err := r.server.chainArb.GetChannelArbitrator(*chanPoint)
!
2410
        if err != nil {
!
2411
                return err
!
2412
        }
!
2413
        reports := arbitrator.Report()
!
2414

!
2415
        for _, report := range reports {
!
2416
                htlc := &lnrpc.PendingHTLC{
!
2417
                        Incoming:       report.Incoming,
!
2418
                        Amount:         int64(report.Amount),
!
2419
                        Outpoint:       report.Outpoint.String(),
!
2420
                        MaturityHeight: report.MaturityHeight,
!
2421
                        Stage:          report.Stage,
!
2422
                }
!
2423

!
2424
                if htlc.MaturityHeight != 0 {
!
2425
                        htlc.BlocksTilMaturity =
!
2426
                                int32(htlc.MaturityHeight) - currentHeight
!
2427
                }
!
2428

2429
                forceClose.LimboBalance += int64(report.LimboBalance)
!
2430
                forceClose.RecoveredBalance += int64(report.RecoveredBalance)
!
2431

!
2432
                forceClose.PendingHtlcs = append(forceClose.PendingHtlcs, htlc)
!
2433
        }
2434

2435
        return nil
!
2436
}
2437

2438
// nurseryPopulateForceCloseResp populates the pending channels response
2439
// message with contract resolution information from utxonursery.
2440
func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint,
2441
        currentHeight int32,
2442
        forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {
!
2443

!
2444
        // Query for the maturity state for this force closed channel. If we
!
2445
        // didn't have any time-locked outputs, then the nursery may not know of
!
2446
        // the contract.
!
2447
        nurseryInfo, err := r.server.utxoNursery.NurseryReport(chanPoint)
!
2448
        if err == ErrContractNotFound {
!
2449
                return nil
!
2450
        }
!
2451
        if err != nil {
!
2452
                return fmt.Errorf("unable to obtain "+
!
2453
                        "nursery report for ChannelPoint(%v): %v",
!
2454
                        chanPoint, err)
!
2455
        }
!
2456

2457
        // If the nursery knows of this channel, then we can populate
2458
        // information detailing exactly how much funds are time locked and also
2459
        // the height in which we can ultimately sweep the funds into the
2460
        // wallet.
2461
        forceClose.LimboBalance = int64(nurseryInfo.limboBalance)
!
2462
        forceClose.RecoveredBalance = int64(nurseryInfo.recoveredBalance)
!
2463
        forceClose.MaturityHeight = nurseryInfo.maturityHeight
!
2464

!
2465
        // If the transaction has been confirmed, then we can compute how many
!
2466
        // blocks it has left.
!
2467
        if forceClose.MaturityHeight != 0 {
!
2468
                forceClose.BlocksTilMaturity =
!
2469
                        int32(forceClose.MaturityHeight) -
!
2470
                                currentHeight
!
2471
        }
!
2472

2473
        for _, htlcReport := range nurseryInfo.htlcs {
!
2474
                // TODO(conner) set incoming flag appropriately after handling
!
2475
                // incoming incubation
!
2476
                htlc := &lnrpc.PendingHTLC{
!
2477
                        Incoming:       false,
!
2478
                        Amount:         int64(htlcReport.amount),
!
2479
                        Outpoint:       htlcReport.outpoint.String(),
!
2480
                        MaturityHeight: htlcReport.maturityHeight,
!
2481
                        Stage:          htlcReport.stage,
!
2482
                }
!
2483

!
2484
                if htlc.MaturityHeight != 0 {
!
2485
                        htlc.BlocksTilMaturity =
!
2486
                                int32(htlc.MaturityHeight) -
!
2487
                                        currentHeight
!
2488
                }
!
2489

2490
                forceClose.PendingHtlcs = append(forceClose.PendingHtlcs,
!
2491
                        htlc)
!
2492
        }
2493

2494
        return nil
!
2495
}
2496

2497
// ClosedChannels returns a list of all the channels have been closed.
2498
// This does not include channels that are still in the process of closing.
2499
func (r *rpcServer) ClosedChannels(ctx context.Context,
2500
        in *lnrpc.ClosedChannelsRequest) (*lnrpc.ClosedChannelsResponse,
2501
        error) {
!
2502

!
2503
        // Show all channels when no filter flags are set.
!
2504
        filterResults := in.Cooperative || in.LocalForce ||
!
2505
                in.RemoteForce || in.Breach || in.FundingCanceled ||
!
2506
                in.Abandoned
!
2507

!
2508
        resp := &lnrpc.ClosedChannelsResponse{}
!
2509

!
2510
        dbChannels, err := r.server.chanDB.FetchClosedChannels(false)
!
2511
        if err != nil {
!
2512
                return nil, err
!
2513
        }
!
2514

2515
        // In order to make the response easier to parse for clients, we'll
2516
        // sort the set of closed channels by their closing height before
2517
        // serializing the proto response.
2518
        sort.Slice(dbChannels, func(i, j int) bool {
!
2519
                return dbChannels[i].CloseHeight < dbChannels[j].CloseHeight
!
2520
        })
!
2521

2522
        for _, dbChannel := range dbChannels {
!
2523
                if dbChannel.IsPending {
!
2524
                        continue
!
2525
                }
2526

2527
                switch dbChannel.CloseType {
!
2528
                case channeldb.CooperativeClose:
!
2529
                        if filterResults && !in.Cooperative {
!
2530
                                continue
!
2531
                        }
2532
                case channeldb.LocalForceClose:
!
2533
                        if filterResults && !in.LocalForce {
!
2534
                                continue
!
2535
                        }
2536
                case channeldb.RemoteForceClose:
!
2537
                        if filterResults && !in.RemoteForce {
!
2538
                                continue
!
2539
                        }
2540
                case channeldb.BreachClose:
!
2541
                        if filterResults && !in.Breach {
!
2542
                                continue
!
2543
                        }
2544
                case channeldb.FundingCanceled:
!
2545
                        if filterResults && !in.FundingCanceled {
!
2546
                                continue
!
2547
                        }
2548
                case channeldb.Abandoned:
!
2549
                        if filterResults && !in.Abandoned {
!
2550
                                continue
!
2551
                        }
2552
                }
2553

2554
                channel := createRPCClosedChannel(dbChannel)
!
2555
                resp.Channels = append(resp.Channels, channel)
!
2556
        }
2557

2558
        return resp, nil
!
2559
}
2560

2561
// ListChannels returns a description of all the open channels that this node
2562
// is a participant in.
2563
func (r *rpcServer) ListChannels(ctx context.Context,
2564
        in *lnrpc.ListChannelsRequest) (*lnrpc.ListChannelsResponse, error) {
!
2565

!
2566
        if in.ActiveOnly && in.InactiveOnly {
!
2567
                return nil, fmt.Errorf("either `active_only` or " +
!
2568
                        "`inactive_only` can be set, but not both")
!
2569
        }
!
2570

2571
        if in.PublicOnly && in.PrivateOnly {
!
2572
                return nil, fmt.Errorf("either `public_only` or " +
!
2573
                        "`private_only` can be set, but not both")
!
2574
        }
!
2575

2576
        resp := &lnrpc.ListChannelsResponse{}
!
2577

!
2578
        graph := r.server.chanDB.ChannelGraph()
!
2579

!
2580
        dbChannels, err := r.server.chanDB.FetchAllOpenChannels()
!
2581
        if err != nil {
!
2582
                return nil, err
!
2583
        }
!
2584

2585
        rpcsLog.Debugf("[listchannels] fetched %v channels from DB",
!
2586
                len(dbChannels))
!
2587

!
2588
        for _, dbChannel := range dbChannels {
!
2589
                nodePub := dbChannel.IdentityPub
!
2590
                chanPoint := dbChannel.FundingOutpoint
!
2591

!
2592
                var peerOnline bool
!
2593
                if _, err := r.server.FindPeer(nodePub); err == nil {
!
2594
                        peerOnline = true
!
2595
                }
!
2596

2597
                channelID := lnwire.NewChanIDFromOutPoint(&chanPoint)
!
2598
                var linkActive bool
!
2599
                if link, err := r.server.htlcSwitch.GetLink(channelID); err == nil {
!
2600
                        // A channel is only considered active if it is known
!
2601
                        // by the switch *and* able to forward
!
2602
                        // incoming/outgoing payments.
!
2603
                        linkActive = link.EligibleToForward()
!
2604
                }
!
2605

2606
                // Next, we'll determine whether we should add this channel to
2607
                // our list depending on the type of channels requested to us.
2608
                isActive := peerOnline && linkActive
!
2609
                channel := createRPCOpenChannel(r, graph, dbChannel, isActive)
!
2610

!
2611
                // We'll only skip returning this channel if we were requested
!
2612
                // for a specific kind and this channel doesn't satisfy it.
!
2613
                switch {
!
2614
                case in.ActiveOnly && !isActive:
!
2615
                        continue
!
2616
                case in.InactiveOnly && isActive:
!
2617
                        continue
!
2618
                case in.PublicOnly && channel.Private:
!
2619
                        continue
!
2620
                case in.PrivateOnly && !channel.Private:
!
2621
                        continue
!
2622
                }
2623

2624
                resp.Channels = append(resp.Channels, channel)
!
2625
        }
2626

2627
        return resp, nil
!
2628
}
2629

2630
// createRPCOpenChannel creates an *lnrpc.Channel from the *channeldb.Channel.
2631
func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph,
2632
        dbChannel *channeldb.OpenChannel, isActive bool) *lnrpc.Channel {
!
2633

!
2634
        nodePub := dbChannel.IdentityPub
!
2635
        nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
!
2636
        chanPoint := dbChannel.FundingOutpoint
!
2637

!
2638
        // Next, we'll determine whether the channel is public or not.
!
2639
        isPublic := dbChannel.ChannelFlags&lnwire.FFAnnounceChannel != 0
!
2640

!
2641
        // As this is required for display purposes, we'll calculate
!
2642
        // the weight of the commitment transaction. We also add on the
!
2643
        // estimated weight of the witness to calculate the weight of
!
2644
        // the transaction if it were to be immediately unilaterally
!
2645
        // broadcast.
!
2646
        localCommit := dbChannel.LocalCommitment
!
2647
        utx := btcutil.NewTx(localCommit.CommitTx)
!
2648
        commitBaseWeight := blockchain.GetTransactionWeight(utx)
!
2649
        commitWeight := commitBaseWeight + input.WitnessCommitmentTxWeight
!
2650

!
2651
        localBalance := localCommit.LocalBalance
!
2652
        remoteBalance := localCommit.RemoteBalance
!
2653

!
2654
        // As an artifact of our usage of mSAT internally, either party
!
2655
        // may end up in a state where they're holding a fractional
!
2656
        // amount of satoshis which can't be expressed within the
!
2657
        // actual commitment output. Since we round down when going
!
2658
        // from mSAT -> SAT, we may at any point be adding an
!
2659
        // additional SAT to miners fees. As a result, we display a
!
2660
        // commitment fee that accounts for this externally.
!
2661
        var sumOutputs btcutil.Amount
!
2662
        for _, txOut := range localCommit.CommitTx.TxOut {
!
2663
                sumOutputs += btcutil.Amount(txOut.Value)
!
2664
        }
!
2665
        externalCommitFee := dbChannel.Capacity - sumOutputs
!
2666

!
2667
        channel := &lnrpc.Channel{
!
2668
                Active:                isActive,
!
2669
                Private:               !isPublic,
!
2670
                RemotePubkey:          nodeID,
!
2671
                ChannelPoint:          chanPoint.String(),
!
2672
                ChanId:                dbChannel.ShortChannelID.ToUint64(),
!
2673
                Capacity:              int64(dbChannel.Capacity),
!
2674
                LocalBalance:          int64(localBalance.ToSatoshis()),
!
2675
                RemoteBalance:         int64(remoteBalance.ToSatoshis()),
!
2676
                CommitFee:             int64(externalCommitFee),
!
2677
                CommitWeight:          commitWeight,
!
2678
                FeePerKw:              int64(localCommit.FeePerKw),
!
2679
                TotalSatoshisSent:     int64(dbChannel.TotalMSatSent.ToSatoshis()),
!
2680
                TotalSatoshisReceived: int64(dbChannel.TotalMSatReceived.ToSatoshis()),
!
2681
                NumUpdates:            localCommit.CommitHeight,
!
2682
                PendingHtlcs:          make([]*lnrpc.HTLC, len(localCommit.Htlcs)),
!
2683
                CsvDelay:              uint32(dbChannel.LocalChanCfg.CsvDelay),
!
2684
                Initiator:             dbChannel.IsInitiator,
!
2685
                ChanStatusFlags:       dbChannel.ChanStatus().String(),
!
2686
                LocalChanReserveSat:   int64(dbChannel.LocalChanCfg.ChanReserve),
!
2687
                RemoteChanReserveSat:  int64(dbChannel.RemoteChanCfg.ChanReserve),
!
2688
                StaticRemoteKey:       dbChannel.ChanType.IsTweakless(),
!
2689
        }
!
2690

!
2691
        for i, htlc := range localCommit.Htlcs {
!
2692
                var rHash [32]byte
!
2693
                copy(rHash[:], htlc.RHash[:])
!
2694
                channel.PendingHtlcs[i] = &lnrpc.HTLC{
!
2695
                        Incoming:         htlc.Incoming,
!
2696
                        Amount:           int64(htlc.Amt.ToSatoshis()),
!
2697
                        HashLock:         rHash[:],
!
2698
                        ExpirationHeight: htlc.RefundTimeout,
!
2699
                }
!
2700

!
2701
                // Add the Pending Htlc Amount to UnsettledBalance field.
!
2702
                channel.UnsettledBalance += channel.PendingHtlcs[i].Amount
!
2703
        }
!
2704

2705
        return channel
!
2706
}
2707

2708
// createRPCClosedChannel creates an *lnrpc.ClosedChannelSummary from a
2709
// *channeldb.ChannelCloseSummary.
2710
func createRPCClosedChannel(
2711
        dbChannel *channeldb.ChannelCloseSummary) *lnrpc.ChannelCloseSummary {
!
2712

!
2713
        nodePub := dbChannel.RemotePub
!
2714
        nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
!
2715

!
2716
        var closeType lnrpc.ChannelCloseSummary_ClosureType
!
2717
        switch dbChannel.CloseType {
!
2718
        case channeldb.CooperativeClose:
!
2719
                closeType = lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE
!
2720
        case channeldb.LocalForceClose:
!
2721
                closeType = lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE
!
2722
        case channeldb.RemoteForceClose:
!
2723
                closeType = lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE
!
2724
        case channeldb.BreachClose:
!
2725
                closeType = lnrpc.ChannelCloseSummary_BREACH_CLOSE
!
2726
        case channeldb.FundingCanceled:
!
2727
                closeType = lnrpc.ChannelCloseSummary_FUNDING_CANCELED
!
2728
        case channeldb.Abandoned:
!
2729
                closeType = lnrpc.ChannelCloseSummary_ABANDONED
!
2730
        }
2731

2732
        return &lnrpc.ChannelCloseSummary{
!
2733
                Capacity:          int64(dbChannel.Capacity),
!
2734
                RemotePubkey:      nodeID,
!
2735
                CloseHeight:       dbChannel.CloseHeight,
!
2736
                CloseType:         closeType,
!
2737
                ChannelPoint:      dbChannel.ChanPoint.String(),
!
2738
                ChanId:            dbChannel.ShortChanID.ToUint64(),
!
2739
                SettledBalance:    int64(dbChannel.SettledBalance),
!
2740
                TimeLockedBalance: int64(dbChannel.TimeLockedBalance),
!
2741
                ChainHash:         dbChannel.ChainHash.String(),
!
2742
                ClosingTxHash:     dbChannel.ClosingTXID.String(),
!
2743
        }
!
2744
}
2745

2746
// SubscribeChannelEvents returns a uni-directional stream (server -> client)
// for notifying the client of newly active, inactive or closed channels.
func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription,
	updateStream lnrpc.Lightning_SubscribeChannelEventsServer) error {

	channelEventSub, err := r.server.channelNotifier.SubscribeChannelEvents()
	if err != nil {
		return err
	}

	// Ensure that the resources for the client is cleaned up once either
	// the server, or client exits.
	defer channelEventSub.Cancel()

	graph := r.server.chanDB.ChannelGraph()

	// Stream events until the client disconnects (Send fails), the
	// server shuts down, or an unknown event type is observed.
	for {
		select {
		// A new update has been sent by the channel router, we'll
		// marshal it into the form expected by the gRPC client, then
		// send it off to the client(s).
		case e := <-channelEventSub.Updates():
			var update *lnrpc.ChannelEventUpdate
			switch event := e.(type) {
			// A channel has been opened: marshal the full channel
			// into the RPC representation before sending.
			case channelnotifier.OpenChannelEvent:
				channel := createRPCOpenChannel(r, graph,
					event.Channel, true)
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_OPEN_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_OpenChannel{
						OpenChannel: channel,
					},
				}

			// A channel has been closed: forward the close
			// summary to the client.
			case channelnotifier.ClosedChannelEvent:
				closedChannel := createRPCClosedChannel(event.CloseSummary)
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_ClosedChannel{
						ClosedChannel: closedChannel,
					},
				}

			// A channel has become active: only the channel point
			// is needed to identify it.
			case channelnotifier.ActiveChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_ActiveChannel{
						ActiveChannel: &lnrpc.ChannelPoint{
							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
								FundingTxidBytes: event.ChannelPoint.Hash[:],
							},
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}

			// A channel has become inactive: mirror of the active
			// case above.
			case channelnotifier.InactiveChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_InactiveChannel{
						InactiveChannel: &lnrpc.ChannelPoint{
							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
								FundingTxidBytes: event.ChannelPoint.Hash[:],
							},
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}

			default:
				return fmt.Errorf("unexpected channel event update: %v", event)
			}

			if err := updateStream.Send(update); err != nil {
				return err
			}

		// The server is shutting down; terminate the stream cleanly.
		case <-r.quit:
			return nil
		}
	}
}
2823

2824
// paymentStream enables different types of payment streams, such as:
// lnrpc.Lightning_SendPaymentServer and lnrpc.Lightning_SendToRouteServer to
// execute sendPayment. We use this struct as a sort of bridge to enable code
// re-use between SendPayment and SendToRoute.
type paymentStream struct {
	// recv reads the next payment request from the underlying gRPC
	// stream, translated into the common rpcPaymentRequest form.
	recv func() (*rpcPaymentRequest, error)

	// send writes a payment response back to the client. Callers may
	// invoke this concurrently; implementations serialize the underlying
	// stream.Send with a mutex (see SendPayment / SendToRoute).
	send func(*lnrpc.SendResponse) error
}
2832

2833
// rpcPaymentRequest wraps lnrpc.SendRequest so that routes from
// lnrpc.SendToRouteRequest can be passed to sendPayment.
type rpcPaymentRequest struct {
	*lnrpc.SendRequest

	// route, if non-nil, is a fully specified route that should be used
	// directly instead of performing path finding.
	route *route.Route
}
2839

2840
// calculateFeeLimit returns the fee limit in millisatoshis. If a percentage
2841
// based fee limit has been requested, we'll factor in the ratio provided with
2842
// the amount of the payment.
2843
func calculateFeeLimit(feeLimit *lnrpc.FeeLimit,
2844
        amount lnwire.MilliSatoshi) lnwire.MilliSatoshi {
!
2845

!
2846
        switch feeLimit.GetLimit().(type) {
!
2847
        case *lnrpc.FeeLimit_Fixed:
!
2848
                return lnwire.NewMSatFromSatoshis(
!
2849
                        btcutil.Amount(feeLimit.GetFixed()),
!
2850
                )
!
2851
        case *lnrpc.FeeLimit_Percent:
!
2852
                return amount * lnwire.MilliSatoshi(feeLimit.GetPercent()) / 100
!
2853
        default:
!
2854
                // If a fee limit was not specified, we'll use the payment's
!
2855
                // amount as an upper bound in order to avoid payment attempts
!
2856
                // from incurring fees higher than the payment amount itself.
!
2857
                return amount
!
2858
        }
2859
}
2860

2861
// SendPayment dispatches a bi-directional streaming RPC for sending payments
2862
// through the Lightning Network. A single RPC invocation creates a persistent
2863
// bi-directional stream allowing clients to rapidly send payments through the
2864
// Lightning Network with a single persistent connection.
2865
func (r *rpcServer) SendPayment(stream lnrpc.Lightning_SendPaymentServer) error {
!
2866
        var lock sync.Mutex
!
2867

!
2868
        return r.sendPayment(&paymentStream{
!
2869
                recv: func() (*rpcPaymentRequest, error) {
!
2870
                        req, err := stream.Recv()
!
2871
                        if err != nil {
!
2872
                                return nil, err
!
2873
                        }
!
2874

2875
                        return &rpcPaymentRequest{
!
2876
                                SendRequest: req,
!
2877
                        }, nil
!
2878
                },
2879
                send: func(r *lnrpc.SendResponse) error {
!
2880
                        // Calling stream.Send concurrently is not safe.
!
2881
                        lock.Lock()
!
2882
                        defer lock.Unlock()
!
2883
                        return stream.Send(r)
!
2884
                },
!
2885
        })
2886
}
2887

2888
// SendToRoute dispatches a bi-directional streaming RPC for sending payments
2889
// through the Lightning Network via predefined routes passed in. A single RPC
2890
// invocation creates a persistent bi-directional stream allowing clients to
2891
// rapidly send payments through the Lightning Network with a single persistent
2892
// connection.
2893
func (r *rpcServer) SendToRoute(stream lnrpc.Lightning_SendToRouteServer) error {
!
2894
        var lock sync.Mutex
!
2895

!
2896
        return r.sendPayment(&paymentStream{
!
2897
                recv: func() (*rpcPaymentRequest, error) {
!
2898
                        req, err := stream.Recv()
!
2899
                        if err != nil {
!
2900
                                return nil, err
!
2901
                        }
!
2902

2903
                        return r.unmarshallSendToRouteRequest(req)
!
2904
                },
2905
                send: func(r *lnrpc.SendResponse) error {
!
2906
                        // Calling stream.Send concurrently is not safe.
!
2907
                        lock.Lock()
!
2908
                        defer lock.Unlock()
!
2909
                        return stream.Send(r)
!
2910
                },
!
2911
        })
2912
}
2913

2914
// unmarshallSendToRouteRequest unmarshalls an rpc sendtoroute request
2915
func (r *rpcServer) unmarshallSendToRouteRequest(
2916
        req *lnrpc.SendToRouteRequest) (*rpcPaymentRequest, error) {
!
2917

!
2918
        if req.Route == nil {
!
2919
                return nil, fmt.Errorf("unable to send, no route provided")
!
2920
        }
!
2921

2922
        route, err := r.routerBackend.UnmarshallRoute(req.Route)
!
2923
        if err != nil {
!
2924
                return nil, err
!
2925
        }
!
2926

2927
        return &rpcPaymentRequest{
!
2928
                SendRequest: &lnrpc.SendRequest{
!
2929
                        PaymentHash:       req.PaymentHash,
!
2930
                        PaymentHashString: req.PaymentHashString,
!
2931
                },
!
2932
                route: route,
!
2933
        }, nil
!
2934
}
2935

2936
// rpcPaymentIntent is a small wrapper struct around the of values we can
// receive from a client over RPC if they wish to send a payment. We'll either
// extract these fields from a payment request (which may include routing
// hints), or we'll get a fully populated route from the user that we'll pass
// directly to the channel router for dispatching.
type rpcPaymentIntent struct {
	// msat is the payment amount in millisatoshis.
	msat lnwire.MilliSatoshi

	// feeLimit is the maximum routing fee allowed, in millisatoshis.
	feeLimit lnwire.MilliSatoshi

	// cltvLimit bounds the total time lock of the route, in blocks.
	cltvLimit uint32

	// dest is the public key of the payment's final destination.
	dest route.Vertex

	// rHash is the payment hash to use for the HTLCs.
	rHash [32]byte

	// cltvDelta is the CLTV delta required at the final hop.
	cltvDelta uint16

	// routeHints are private routing hints extracted from the invoice,
	// if any.
	routeHints [][]zpay32.HopHint

	// outgoingChannelID optionally restricts the first hop to a specific
	// channel.
	outgoingChannelID *uint64

	// payReq is the raw serialized payment request, if one was supplied.
	payReq []byte

	// destTLV holds custom TLV records destined for the final hop.
	destTLV []tlv.Record

	// route, if non-nil, is a fully specified route to use directly,
	// bypassing path finding. When set, most other fields are unused.
	route *route.Route
}
2956

2957
// extractPaymentIntent attempts to parse the complete details required to
// dispatch a client from the information presented by an RPC client. There are
// three ways a client can specify their payment details: a payment request,
// via manual details, or via a complete route.
func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) {
	payIntent := rpcPaymentIntent{}

	// If a route was specified, then we can use that directly.
	if rpcPayReq.route != nil {
		// If the user is using the REST interface, then they'll be
		// passing the payment hash as a hex encoded string.
		if rpcPayReq.PaymentHashString != "" {
			paymentHash, err := hex.DecodeString(
				rpcPayReq.PaymentHashString,
			)
			if err != nil {
				return payIntent, err
			}

			copy(payIntent.rHash[:], paymentHash)
		} else {
			copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
		}

		// With a pre-built route only the hash and the route itself
		// are needed; no further parsing is performed.
		payIntent.route = rpcPayReq.route
		return payIntent, nil
	}

	// If there are no routes specified, pass along a outgoing channel
	// restriction if specified.
	if rpcPayReq.OutgoingChanId != 0 {
		payIntent.outgoingChannelID = &rpcPayReq.OutgoingChanId
	}

	// Take the CLTV limit from the request if set, otherwise use the max.
	cltvLimit, err := routerrpc.ValidateCLTVLimit(
		rpcPayReq.CltvLimit, cfg.MaxOutgoingCltvExpiry,
	)
	if err != nil {
		return payIntent, err
	}
	payIntent.cltvLimit = cltvLimit

	// Convert any custom destination TLV records from their map form to
	// typed records.
	if len(rpcPayReq.DestTlv) != 0 {
		var err error
		payIntent.destTLV, err = tlv.MapToRecords(
			rpcPayReq.DestTlv,
		)
		if err != nil {
			return payIntent, err
		}
	}

	// If the payment request field isn't blank, then the details of the
	// invoice are encoded entirely within the encoded payReq.  So we'll
	// attempt to decode it, populating the payment accordingly.
	if rpcPayReq.PaymentRequest != "" {
		payReq, err := zpay32.Decode(
			rpcPayReq.PaymentRequest, activeNetParams.Params,
		)
		if err != nil {
			return payIntent, err
		}

		// Next, we'll ensure that this payreq hasn't already expired.
		err = routerrpc.ValidatePayReqExpiry(payReq)
		if err != nil {
			return payIntent, err
		}

		// If the amount was not included in the invoice, then we let
		// the payee specify the amount of satoshis they wish to send.
		// We override the amount to pay with the amount provided from
		// the payment request.
		if payReq.MilliSat == nil {
			if rpcPayReq.Amt == 0 {
				return payIntent, errors.New("amount must be " +
					"specified when paying a zero amount " +
					"invoice")
			}

			payIntent.msat = lnwire.NewMSatFromSatoshis(
				btcutil.Amount(rpcPayReq.Amt),
			)
		} else {
			payIntent.msat = *payReq.MilliSat
		}

		// Calculate the fee limit that should be used for this payment.
		payIntent.feeLimit = calculateFeeLimit(
			rpcPayReq.FeeLimit, payIntent.msat,
		)

		// The remaining payment details (hash, destination, final
		// CLTV delta, route hints) all come from the decoded invoice.
		copy(payIntent.rHash[:], payReq.PaymentHash[:])
		destKey := payReq.Destination.SerializeCompressed()
		copy(payIntent.dest[:], destKey)
		payIntent.cltvDelta = uint16(payReq.MinFinalCLTVExpiry())
		payIntent.routeHints = payReq.RouteHints
		payIntent.payReq = []byte(rpcPayReq.PaymentRequest)

		return payIntent, nil
	}

	// At this point, a destination MUST be specified, so we'll convert it
	// into the proper representation now. The destination will either be
	// encoded as raw bytes, or via a hex string.
	var pubBytes []byte
	if len(rpcPayReq.Dest) != 0 {
		pubBytes = rpcPayReq.Dest
	} else {
		var err error
		pubBytes, err = hex.DecodeString(rpcPayReq.DestString)
		if err != nil {
			return payIntent, err
		}
	}
	// A compressed secp256k1 public key is always 33 bytes.
	if len(pubBytes) != 33 {
		return payIntent, errors.New("invalid key length")
	}
	copy(payIntent.dest[:], pubBytes)

	// Otherwise, If the payment request field was not specified
	// (and a custom route wasn't specified), construct the payment
	// from the other fields.
	payIntent.msat = lnwire.NewMSatFromSatoshis(
		btcutil.Amount(rpcPayReq.Amt),
	)

	// Calculate the fee limit that should be used for this payment.
	payIntent.feeLimit = calculateFeeLimit(
		rpcPayReq.FeeLimit, payIntent.msat,
	)

	// Fall back to the default final CLTV delta when the caller did not
	// supply one explicitly.
	if rpcPayReq.FinalCltvDelta != 0 {
		payIntent.cltvDelta = uint16(rpcPayReq.FinalCltvDelta)
	} else {
		payIntent.cltvDelta = zpay32.DefaultFinalCLTVDelta
	}

	// If the user is manually specifying payment details, then the payment
	// hash may be encoded as a string.
	switch {
	case rpcPayReq.PaymentHashString != "":
		paymentHash, err := hex.DecodeString(
			rpcPayReq.PaymentHashString,
		)
		if err != nil {
			return payIntent, err
		}

		copy(payIntent.rHash[:], paymentHash)

	default:
		copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
	}

	// Currently, within the bootstrap phase of the network, we limit the
	// largest payment size allotted to (2^32) - 1 mSAT or 4.29 million
	// satoshis.
	if payIntent.msat > MaxPaymentMSat {
		// In this case, we'll send an error to the caller, but
		// continue our loop for the next payment.
		return payIntent, fmt.Errorf("payment of %v is too large, "+
			"max payment allowed is %v", payIntent.msat,
			MaxPaymentMSat)

	}

	return payIntent, nil
}
3127

3128
// paymentIntentResponse holds the outcome of dispatching a single payment
// intent: the route used and preimage on success, or the routing error that
// caused the payment to fail.
type paymentIntentResponse struct {
	// Route is the route the payment ultimately traversed.
	Route *route.Route

	// Preimage is the revealed payment preimage on success.
	Preimage [32]byte

	// Err is the routing error, if the payment did not succeed.
	Err error
}
3133

3134
// dispatchPaymentIntent attempts to fully dispatch an RPC payment intent.
// We'll either pass the payment as a whole to the channel router, or give it a
// pre-built route. The first error this method returns denotes if we were
// unable to save the payment. The second error returned denotes if the payment
// didn't succeed.
func (r *rpcServer) dispatchPaymentIntent(
	payIntent *rpcPaymentIntent) (*paymentIntentResponse, error) {

	// Construct a payment request to send to the channel router. If the
	// payment is successful, the route chosen will be returned. Otherwise,
	// we'll get a non-nil error.
	var (
		preImage  [32]byte
		route     *route.Route
		routerErr error
	)

	// If a route was specified, then we'll pass the route directly to the
	// router, otherwise we'll create a payment session to execute it.
	if payIntent.route == nil {
		payment := &routing.LightningPayment{
			Target:            payIntent.dest,
			Amount:            payIntent.msat,
			FinalCLTVDelta:    payIntent.cltvDelta,
			FeeLimit:          payIntent.feeLimit,
			CltvLimit:         payIntent.cltvLimit,
			PaymentHash:       payIntent.rHash,
			RouteHints:        payIntent.routeHints,
			OutgoingChannelID: payIntent.outgoingChannelID,
			PaymentRequest:    payIntent.payReq,
			PayAttemptTimeout: routing.DefaultPayAttemptTimeout,
			FinalDestRecords:  payIntent.destTLV,
		}

		preImage, route, routerErr = r.server.chanRouter.SendPayment(
			payment,
		)
	} else {
		// A pre-built route bypasses path finding entirely; the route
		// used in the response is the caller-supplied one.
		preImage, routerErr = r.server.chanRouter.SendToRoute(
			payIntent.rHash, payIntent.route,
		)

		route = payIntent.route
	}

	// If the route failed, then we'll return a nil save err, but a non-nil
	// routing err.
	if routerErr != nil {
		rpcsLog.Warnf("Unable to send payment: %v", routerErr)

		return &paymentIntentResponse{
			Err: routerErr,
		}, nil
	}

	return &paymentIntentResponse{
		Route:    route,
		Preimage: preImage,
	}, nil
}
3194

3195
// sendPayment takes a paymentStream (a source of pre-built routes or payment
// requests) and continually attempt to dispatch payment requests written to
// the write end of the stream. Responses will also be streamed back to the
// client via the write end of the stream. This method is by both SendToRoute
// and SendPayment as the logic is virtually identical.
func (r *rpcServer) sendPayment(stream *paymentStream) error {
	payChan := make(chan *rpcPaymentIntent)
	errChan := make(chan error, 1)

	// We don't allow payments to be sent while the daemon itself is still
	// syncing as we may be trying to sent a payment over a "stale"
	// channel.
	if !r.server.Started() {
		return fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// TODO(roasbeef): check payment filter to see if already used?

	// In order to limit the level of concurrency and prevent a client from
	// attempting to OOM the server, we'll set up a semaphore to create an
	// upper ceiling on the number of outstanding payments.
	const numOutstandingPayments = 2000
	htlcSema := make(chan struct{}, numOutstandingPayments)
	for i := 0; i < numOutstandingPayments; i++ {
		htlcSema <- struct{}{}
	}

	// Launch a new goroutine to handle reading new payment requests from
	// the client. This way we can handle errors independently of blocking
	// and waiting for the next payment request to come through.
	reqQuit := make(chan struct{})
	defer func() {
		close(reqQuit)
	}()

	// TODO(joostjager): Callers expect result to come in in the same order
	// as the request were sent, but this is far from guarantueed in the
	// code below.
	go func() {
		for {
			select {
			// The outer loop returned, so this reader is no longer
			// needed.
			case <-reqQuit:
				return
			case <-r.quit:
				errChan <- nil
				return
			default:
				// Receive the next pending payment within the
				// stream sent by the client. If we read the
				// EOF sentinel, then the client has closed the
				// stream, and we can exit normally.
				nextPayment, err := stream.recv()
				if err == io.EOF {
					errChan <- nil
					return
				} else if err != nil {
					// Guard the send with reqQuit so this
					// goroutine cannot block forever once
					// the outer loop has exited.
					select {
					case errChan <- err:
					case <-reqQuit:
						return
					}
					return
				}

				// Populate the next payment, either from the
				// payment request, or from the explicitly set
				// fields. If the payment proto wasn't well
				// formed, then we'll send an error reply and
				// wait for the next payment.
				payIntent, err := extractPaymentIntent(nextPayment)
				if err != nil {
					if err := stream.send(&lnrpc.SendResponse{
						PaymentError: err.Error(),
						PaymentHash:  payIntent.rHash[:],
					}); err != nil {
						select {
						case errChan <- err:
						case <-reqQuit:
							return
						}
					}
					continue
				}

				// If the payment was well formed, then we'll
				// send to the dispatch goroutine, or exit,
				// which ever comes first
				select {
				case payChan <- &payIntent:
				case <-reqQuit:
					return
				}
			}
		}
	}()

	// Main dispatch loop: terminate on the first error (or clean EOF),
	// otherwise launch one goroutine per incoming payment intent.
	for {
		select {
		case err := <-errChan:
			return err

		case payIntent := <-payChan:
			// We launch a new goroutine to execute the current
			// payment so we can continue to serve requests while
			// this payment is being dispatched.
			go func() {
				// Attempt to grab a free semaphore slot, using
				// a defer to eventually release the slot
				// regardless of payment success.
				<-htlcSema
				defer func() {
					htlcSema <- struct{}{}
				}()

				resp, saveErr := r.dispatchPaymentIntent(
					payIntent,
				)

				switch {
				// If we were unable to save the state of the
				// payment, then we'll return the error to the
				// user, and terminate.
				case saveErr != nil:
					errChan <- saveErr
					return

				// If we receive payment error than, instead of
				// terminating the stream, send error response
				// to the user.
				case resp.Err != nil:
					err := stream.send(&lnrpc.SendResponse{
						PaymentError: resp.Err.Error(),
						PaymentHash:  payIntent.rHash[:],
					})
					if err != nil {
						errChan <- err
					}
					return
				}

				// On success, marshal the route taken and
				// stream the result (hash, preimage, route)
				// back to the client.
				backend := r.routerBackend
				marshalledRouted, err := backend.MarshallRoute(
					resp.Route,
				)
				if err != nil {
					errChan <- err
					return
				}

				err = stream.send(&lnrpc.SendResponse{
					PaymentHash:     payIntent.rHash[:],
					PaymentPreimage: resp.Preimage[:],
					PaymentRoute:    marshalledRouted,
				})
				if err != nil {
					errChan <- err
					return
				}
			}()
		}
	}
}
3358

3359
// SendPaymentSync is the synchronous non-streaming version of SendPayment.
3360
// This RPC is intended to be consumed by clients of the REST proxy.
3361
// Additionally, this RPC expects the destination's public key and the payment
3362
// hash (if any) to be encoded as hex strings.
3363
func (r *rpcServer) SendPaymentSync(ctx context.Context,
3364
        nextPayment *lnrpc.SendRequest) (*lnrpc.SendResponse, error) {
!
3365

!
3366
        return r.sendPaymentSync(ctx, &rpcPaymentRequest{
!
3367
                SendRequest: nextPayment,
!
3368
        })
!
3369
}
!
3370

3371
// SendToRouteSync is the synchronous non-streaming version of SendToRoute.
3372
// This RPC is intended to be consumed by clients of the REST proxy.
3373
// Additionally, this RPC expects the payment hash (if any) to be encoded as
3374
// hex strings.
3375
func (r *rpcServer) SendToRouteSync(ctx context.Context,
3376
        req *lnrpc.SendToRouteRequest) (*lnrpc.SendResponse, error) {
!
3377

!
3378
        if req.Route == nil {
!
3379
                return nil, fmt.Errorf("unable to send, no routes provided")
!
3380
        }
!
3381

3382
        paymentRequest, err := r.unmarshallSendToRouteRequest(req)
!
3383
        if err != nil {
!
3384
                return nil, err
!
3385
        }
!
3386

3387
        return r.sendPaymentSync(ctx, paymentRequest)
!
3388
}
3389

3390
// sendPaymentSync is the synchronous variant of sendPayment. It will block and
3391
// wait until the payment has been fully completed.
3392
func (r *rpcServer) sendPaymentSync(ctx context.Context,
3393
        nextPayment *rpcPaymentRequest) (*lnrpc.SendResponse, error) {
!
3394

!
3395
        // We don't allow payments to be sent while the daemon itself is still
!
3396
        // syncing as we may be trying to sent a payment over a "stale"
!
3397
        // channel.
!
3398
        if !r.server.Started() {
!
3399
                return nil, fmt.Errorf("chain backend is still syncing, server " +
!
3400
                        "not active yet")
!
3401
        }
!
3402

3403
        // First we'll attempt to map the proto describing the next payment to
3404
        // an intent that we can pass to local sub-systems.
3405
        payIntent, err := extractPaymentIntent(nextPayment)
!
3406
        if err != nil {
!
3407
                return nil, err
!
3408
        }
!
3409

3410
        // With the payment validated, we'll now attempt to dispatch the
3411
        // payment.
3412
        resp, saveErr := r.dispatchPaymentIntent(&payIntent)
!
3413
        switch {
!
3414
        case saveErr != nil:
!
3415
                return nil, saveErr
!
3416

3417
        case resp.Err != nil:
!
3418
                return &lnrpc.SendResponse{
!
3419
                        PaymentError: resp.Err.Error(),
!
3420
                        PaymentHash:  payIntent.rHash[:],
!
3421
                }, nil
!
3422
        }
3423

3424
        rpcRoute, err := r.routerBackend.MarshallRoute(resp.Route)
!
3425
        if err != nil {
!
3426
                return nil, err
!
3427
        }
!
3428

3429
        return &lnrpc.SendResponse{
!
3430
                PaymentHash:     payIntent.rHash[:],
!
3431
                PaymentPreimage: resp.Preimage[:],
!
3432
                PaymentRoute:    rpcRoute,
!
3433
        }, nil
!
3434
}
3435

3436
// AddInvoice attempts to add a new invoice to the invoice database. Any
3437
// duplicated invoices are rejected, therefore all invoices *must* have a
3438
// unique payment preimage.
3439
func (r *rpcServer) AddInvoice(ctx context.Context,
3440
        invoice *lnrpc.Invoice) (*lnrpc.AddInvoiceResponse, error) {
!
3441

!
3442
        defaultDelta := cfg.Bitcoin.TimeLockDelta
!
3443
        if registeredChains.PrimaryChain() == litecoinChain {
!
3444
                defaultDelta = cfg.Litecoin.TimeLockDelta
!
3445
        }
!
3446

3447
        addInvoiceCfg := &invoicesrpc.AddInvoiceConfig{
!
3448
                AddInvoice:        r.server.invoices.AddInvoice,
!
3449
                IsChannelActive:   r.server.htlcSwitch.HasActiveLink,
!
3450
                ChainParams:       activeNetParams.Params,
!
3451
                NodeSigner:        r.server.nodeSigner,
!
3452
                MaxPaymentMSat:    MaxPaymentMSat,
!
3453
                DefaultCLTVExpiry: defaultDelta,
!
3454
                ChanDB:            r.server.chanDB,
!
3455
        }
!
3456

!
3457
        addInvoiceData := &invoicesrpc.AddInvoiceData{
!
3458
                Memo:            invoice.Memo,
!
3459
                Receipt:         invoice.Receipt,
!
3460
                Value:           btcutil.Amount(invoice.Value),
!
3461
                DescriptionHash: invoice.DescriptionHash,
!
3462
                Expiry:          invoice.Expiry,
!
3463
                FallbackAddr:    invoice.FallbackAddr,
!
3464
                CltvExpiry:      invoice.CltvExpiry,
!
3465
                Private:         invoice.Private,
!
3466
        }
!
3467

!
3468
        if invoice.RPreimage != nil {
!
3469
                preimage, err := lntypes.MakePreimage(invoice.RPreimage)
!
3470
                if err != nil {
!
3471
                        return nil, err
!
3472
                }
!
3473
                addInvoiceData.Preimage = &preimage
!
3474
        }
3475

3476
        hash, dbInvoice, err := invoicesrpc.AddInvoice(
!
3477
                ctx, addInvoiceCfg, addInvoiceData,
!
3478
        )
!
3479
        if err != nil {
!
3480
                return nil, err
!
3481
        }
!
3482

3483
        return &lnrpc.AddInvoiceResponse{
!
3484
                AddIndex:       dbInvoice.AddIndex,
!
3485
                PaymentRequest: string(dbInvoice.PaymentRequest),
!
3486
                RHash:          hash[:],
!
3487
        }, nil
!
3488
}
3489

3490
// LookupInvoice attempts to look up an invoice according to its payment hash.
3491
// The passed payment hash *must* be exactly 32 bytes, if not an error is
3492
// returned.
3493
func (r *rpcServer) LookupInvoice(ctx context.Context,
3494
        req *lnrpc.PaymentHash) (*lnrpc.Invoice, error) {
!
3495

!
3496
        var (
!
3497
                payHash [32]byte
!
3498
                rHash   []byte
!
3499
                err     error
!
3500
        )
!
3501

!
3502
        // If the RHash as a raw string was provided, then decode that and use
!
3503
        // that directly. Otherwise, we use the raw bytes provided.
!
3504
        if req.RHashStr != "" {
!
3505
                rHash, err = hex.DecodeString(req.RHashStr)
!
3506
                if err != nil {
!
3507
                        return nil, err
!
3508
                }
!
3509
        } else {
!
3510
                rHash = req.RHash
!
3511
        }
!
3512

3513
        // Ensure that the payment hash is *exactly* 32-bytes.
3514
        if len(rHash) != 0 && len(rHash) != 32 {
!
3515
                return nil, fmt.Errorf("payment hash must be exactly "+
!
3516
                        "32 bytes, is instead %v", len(rHash))
!
3517
        }
!
3518
        copy(payHash[:], rHash)
!
3519

!
3520
        rpcsLog.Tracef("[lookupinvoice] searching for invoice %x", payHash[:])
!
3521

!
3522
        invoice, err := r.server.invoices.LookupInvoice(payHash)
!
3523
        if err != nil {
!
3524
                return nil, err
!
3525
        }
!
3526

3527
        rpcsLog.Tracef("[lookupinvoice] located invoice %v",
!
3528
                newLogClosure(func() string {
!
3529
                        return spew.Sdump(invoice)
!
3530
                }))
!
3531

3532
        rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
!
3533
                &invoice, activeNetParams.Params,
!
3534
        )
!
3535
        if err != nil {
!
3536
                return nil, err
!
3537
        }
!
3538

3539
        return rpcInvoice, nil
!
3540
}
3541

3542
// ListInvoices returns a list of all the invoices currently stored within the
3543
// database. Any active debug invoices are ignored.
3544
func (r *rpcServer) ListInvoices(ctx context.Context,
3545
        req *lnrpc.ListInvoiceRequest) (*lnrpc.ListInvoiceResponse, error) {
!
3546

!
3547
        // If the number of invoices was not specified, then we'll default to
!
3548
        // returning the latest 100 invoices.
!
3549
        if req.NumMaxInvoices == 0 {
!
3550
                req.NumMaxInvoices = 100
!
3551
        }
!
3552

3553
        // Next, we'll map the proto request into a format that is understood by
3554
        // the database.
3555
        q := channeldb.InvoiceQuery{
!
3556
                IndexOffset:    req.IndexOffset,
!
3557
                NumMaxInvoices: req.NumMaxInvoices,
!
3558
                PendingOnly:    req.PendingOnly,
!
3559
                Reversed:       req.Reversed,
!
3560
        }
!
3561
        invoiceSlice, err := r.server.chanDB.QueryInvoices(q)
!
3562
        if err != nil {
!
3563
                return nil, fmt.Errorf("unable to query invoices: %v", err)
!
3564
        }
!
3565

3566
        // Before returning the response, we'll need to convert each invoice
3567
        // into it's proto representation.
3568
        resp := &lnrpc.ListInvoiceResponse{
!
3569
                Invoices:         make([]*lnrpc.Invoice, len(invoiceSlice.Invoices)),
!
3570
                FirstIndexOffset: invoiceSlice.FirstIndexOffset,
!
3571
                LastIndexOffset:  invoiceSlice.LastIndexOffset,
!
3572
        }
!
3573
        for i, invoice := range invoiceSlice.Invoices {
!
3574
                resp.Invoices[i], err = invoicesrpc.CreateRPCInvoice(
!
3575
                        &invoice, activeNetParams.Params,
!
3576
                )
!
3577
                if err != nil {
!
3578
                        return nil, err
!
3579
                }
!
3580
        }
3581

3582
        return resp, nil
!
3583
}
3584

3585
// SubscribeInvoices returns a uni-directional stream (server -> client) for
3586
// notifying the client of newly added/settled invoices.
3587
func (r *rpcServer) SubscribeInvoices(req *lnrpc.InvoiceSubscription,
3588
        updateStream lnrpc.Lightning_SubscribeInvoicesServer) error {
!
3589

!
3590
        invoiceClient := r.server.invoices.SubscribeNotifications(
!
3591
                req.AddIndex, req.SettleIndex,
!
3592
        )
!
3593
        defer invoiceClient.Cancel()
!
3594

!
3595
        for {
!
3596
                select {
!
3597
                case newInvoice := <-invoiceClient.NewInvoices:
!
3598
                        rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
!
3599
                                newInvoice, activeNetParams.Params,
!
3600
                        )
!
3601
                        if err != nil {
!
3602
                                return err
!
3603
                        }
!
3604

3605
                        if err := updateStream.Send(rpcInvoice); err != nil {
!
3606
                                return err
!
3607
                        }
!
3608

3609
                case settledInvoice := <-invoiceClient.SettledInvoices:
!
3610
                        rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
!
3611
                                settledInvoice, activeNetParams.Params,
!
3612
                        )
!
3613
                        if err != nil {
!
3614
                                return err
!
3615
                        }
!
3616

3617
                        if err := updateStream.Send(rpcInvoice); err != nil {
!
3618
                                return err
!
3619
                        }
!
3620

3621
                case <-r.quit:
!
3622
                        return nil
!
3623
                }
3624
        }
3625
}
3626

3627
// SubscribeTransactions creates a uni-directional stream (server -> client) in
3628
// which any newly discovered transactions relevant to the wallet are sent
3629
// over.
3630
func (r *rpcServer) SubscribeTransactions(req *lnrpc.GetTransactionsRequest,
3631
        updateStream lnrpc.Lightning_SubscribeTransactionsServer) error {
!
3632

!
3633
        txClient, err := r.server.cc.wallet.SubscribeTransactions()
!
3634
        if err != nil {
!
3635
                return err
!
3636
        }
!
3637
        defer txClient.Cancel()
!
3638

!
3639
        for {
!
3640
                select {
!
3641
                case tx := <-txClient.ConfirmedTransactions():
!
3642
                        destAddresses := make([]string, 0, len(tx.DestAddresses))
!
3643
                        for _, destAddress := range tx.DestAddresses {
!
3644
                                destAddresses = append(destAddresses, destAddress.EncodeAddress())
!
3645
                        }
!
3646
                        detail := &lnrpc.Transaction{
!
3647
                                TxHash:           tx.Hash.String(),
!
3648
                                Amount:           int64(tx.Value),