lightningnetwork / lnd / build 10203737448 (push, github, web-flow)

01 Aug 2024 06:26PM UTC coverage: 58.674% (+0.05%) from 58.627%

Merge pull request #8938 from bhandras/etcd-leader-election-fixups

multi: check leader status with our health checker to correctly shut down LND if network partitions

28 of 73 new or added lines in 6 files covered (38.36%).

117 existing lines in 18 files now uncovered.

125392 of 213710 relevant lines covered (58.67%).

28078.2 hits per line.

Source File: /channeldb/channel.go (77.58% covered)
1
package channeldb
2

3
import (
4
        "bytes"
5
        "crypto/hmac"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "net"
12
        "strconv"
13
        "strings"
14
        "sync"
15

16
        "github.com/btcsuite/btcd/btcec/v2"
17
        "github.com/btcsuite/btcd/btcec/v2/schnorr/musig2"
18
        "github.com/btcsuite/btcd/btcutil"
19
        "github.com/btcsuite/btcd/chaincfg/chainhash"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/channeldb/models"
23
        "github.com/lightningnetwork/lnd/fn"
24
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
25
        "github.com/lightningnetwork/lnd/input"
26
        "github.com/lightningnetwork/lnd/keychain"
27
        "github.com/lightningnetwork/lnd/kvdb"
28
        "github.com/lightningnetwork/lnd/lntypes"
29
        "github.com/lightningnetwork/lnd/lnwire"
30
        "github.com/lightningnetwork/lnd/shachain"
31
        "github.com/lightningnetwork/lnd/tlv"
32
)
33

34
const (
35
        // AbsoluteThawHeightThreshold is the threshold at which a thaw height
36
        // begins to be interpreted as an absolute block height, rather than a
37
        // relative one.
38
        AbsoluteThawHeightThreshold uint32 = 500000
39

40
        // HTLCBlindingPointTLV is the tlv type used for storing blinding
41
        // points with HTLCs.
42
        HTLCBlindingPointTLV tlv.Type = 0
43
)
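// --- Illustrative example (not part of channel.go): how a thaw height is
// interpreted using AbsoluteThawHeightThreshold above. Values below the
// threshold are treated as relative (assumed here to be measured from the
// funding confirmation height), anything else as an absolute block height.
func exampleEffectiveThawHeight(thawHeight, fundingHeight uint32) uint32 {
        if thawHeight < AbsoluteThawHeightThreshold {
                // Relative thaw height: an offset from the funding height.
                return fundingHeight + thawHeight
        }

        // Absolute thaw height: already a block height.
        return thawHeight
}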
44

45
var (
46
        // closedChannelBucket stores summarization information concerning
47
        // previously open, but now closed channels.
48
        closedChannelBucket = []byte("closed-chan-bucket")
49

50
        // openChannelBucket stores all the currently open channels. This bucket
51
        // has a second, nested bucket which is keyed by a node's ID. Within
52
        // that node ID bucket, all attributes required to track, update, and
53
        // close a channel are stored.
54
        //
55
        // openChan -> nodeID -> chanPoint
56
        //
57
        // TODO(roasbeef): flesh out comment
58
        openChannelBucket = []byte("open-chan-bucket")
59

60
        // outpointBucket stores all of our channel outpoints and a tlv
61
        // stream containing channel data.
62
        //
63
        // outpoint -> tlv stream.
64
        //
65
        outpointBucket = []byte("outpoint-bucket")
66

67
        // chanIDBucket stores all of the 32-byte channel IDs we know about.
68
        // These could be derived from outpointBucket, but it is more
69
        // convenient to have these in their own bucket.
70
        //
71
        // chanID -> tlv stream.
72
        //
73
        chanIDBucket = []byte("chan-id-bucket")
74

75
        // historicalChannelBucket stores all channels that have seen their
76
        // commitment tx confirm. All information from their previous open state
77
        // is retained.
78
        historicalChannelBucket = []byte("historical-chan-bucket")
79

80
        // chanInfoKey can be accessed within the bucket for a channel
81
        // (identified by its chanPoint). This key stores all the static
82
        // information for a channel which is decided at the end of the
83
        // funding flow.
84
        chanInfoKey = []byte("chan-info-key")
85

86
        // localUpfrontShutdownKey can be accessed within the bucket for a channel
87
        // (identified by its chanPoint). This key stores an optional upfront
88
        // shutdown script for the local peer.
89
        localUpfrontShutdownKey = []byte("local-upfront-shutdown-key")
90

91
        // remoteUpfrontShutdownKey can be accessed within the bucket for a channel
92
        // (identified by its chanPoint). This key stores an optional upfront
93
        // shutdown script for the remote peer.
94
        remoteUpfrontShutdownKey = []byte("remote-upfront-shutdown-key")
95

96
        // chanCommitmentKey can be accessed within the sub-bucket for a
97
        // particular channel. This key stores the up to date commitment state
98
        // for a particular channel party. Appending a 0 to the end of this key
99
        // indicates it's the commitment for the local party, and appending a 1
100
        // to the end of this key indicates it's the commitment for the remote
101
        // party.
102
        chanCommitmentKey = []byte("chan-commitment-key")
103

104
        // unsignedAckedUpdatesKey is an entry in the channel bucket that
105
        // contains the remote updates that we have acked, but not yet signed
106
        // for in one of our remote commits.
107
        unsignedAckedUpdatesKey = []byte("unsigned-acked-updates-key")
108

109
        // remoteUnsignedLocalUpdatesKey is an entry in the channel bucket that
110
        // contains the local updates that the remote party has acked, but
111
        // has not yet signed for in one of their local commits.
112
        remoteUnsignedLocalUpdatesKey = []byte("remote-unsigned-local-updates-key")
113

114
        // revocationStateKey stores their current revocation hash, our
115
        // preimage producer and their preimage store.
116
        revocationStateKey = []byte("revocation-state-key")
117

118
        // dataLossCommitPointKey stores the commitment point received from the
119
        // remote peer during a channel sync in case we have lost channel state.
120
        dataLossCommitPointKey = []byte("data-loss-commit-point-key")
121

122
        // forceCloseTxKey points to the unilateral closing tx that we
123
        // broadcasted when moving the channel to state CommitBroadcasted.
124
        forceCloseTxKey = []byte("closing-tx-key")
125

126
        // coopCloseTxKey points to the cooperative closing tx that we
127
        // broadcasted when moving the channel to state CoopBroadcasted.
128
        coopCloseTxKey = []byte("coop-closing-tx-key")
129

130
        // shutdownInfoKey points to the serialised shutdown info that has been
131
        // persisted for a channel. The existence of this info means that we
132
        // have sent the Shutdown message before and so should re-initiate the
133
        // shutdown on re-establish.
134
        shutdownInfoKey = []byte("shutdown-info-key")
135

136
        // commitDiffKey stores the current pending commitment state we've
137
        // extended to the remote party (if any). Each time we propose a new
138
        // state, we store the information necessary to reconstruct this state
139
        // from the prior commitment. This allows us to resync the remote party
140
        // to their expected state in the case of message loss.
141
        //
142
        // TODO(roasbeef): rename to commit chain?
143
        commitDiffKey = []byte("commit-diff-key")
144

145
        // frozenChanKey is the key where we store the information for any
146
        // active "frozen" channels. This key is present only in the leaf
147
        // bucket for a given channel.
148
        frozenChanKey = []byte("frozen-chans")
149

150
        // lastWasRevokeKey is a key that stores true when the last update we
151
        // sent was a revocation and false when it was a commitment signature.
152
        // This is nil in the case of new channels with no updates exchanged.
153
        lastWasRevokeKey = []byte("last-was-revoke")
154

155
        // finalHtlcsBucket contains the htlcs that have been resolved
156
        // definitively. Within this bucket, there is a sub-bucket for each
157
        // channel. In each channel bucket, the htlc indices are stored along
158
        // with final outcome.
159
        //
160
        // final-htlcs -> chanID -> htlcIndex -> outcome
161
        //
162
        // 'outcome' is a byte value that encodes:
163
        //
164
        //       | true      false
165
        // ------+------------------
166
        // bit 0 | settled   failed
167
        // bit 1 | offchain  onchain
168
        //
169
        // This bucket is positioned at the root level, because its contents
170
        // will be kept independent of the channel lifecycle. This is to avoid
171
        // the situation where a channel force-closes autonomously and the user
172
        // is no longer able to query for htlc outcomes.
173
        finalHtlcsBucket = []byte("final-htlcs")
174
)
175

176
var (
177
        // ErrNoCommitmentsFound is returned when a channel has not set
178
        // commitment states.
179
        ErrNoCommitmentsFound = fmt.Errorf("no commitments found")
180

181
        // ErrNoChanInfoFound is returned when a particular channel does not
182
        // have any channel state.
183
        ErrNoChanInfoFound = fmt.Errorf("no chan info found")
184

185
        // ErrNoRevocationsFound is returned when revocation state for a
186
        // particular channel cannot be found.
187
        ErrNoRevocationsFound = fmt.Errorf("no revocations found")
188

189
        // ErrNoPendingCommit is returned when there is not a pending
190
        // commitment for a remote party. A new commitment is written to disk
191
        // each time we write a new state in order to be properly fault
192
        // tolerant.
193
        ErrNoPendingCommit = fmt.Errorf("no pending commits found")
194

195
        // ErrNoCommitPoint is returned when no data loss commit point is found
196
        // in the database.
197
        ErrNoCommitPoint = fmt.Errorf("no commit point found")
198

199
        // ErrNoCloseTx is returned when no closing tx is found for a channel
200
        // in the state CommitBroadcasted.
201
        ErrNoCloseTx = fmt.Errorf("no closing tx found")
202

203
        // ErrNoShutdownInfo is returned when no shutdown info has been
204
        // persisted for a channel.
205
        ErrNoShutdownInfo = errors.New("no shutdown info")
206

207
        // ErrNoRestoredChannelMutation is returned when a caller attempts to
208
        // mutate a channel that's been recovered.
209
        ErrNoRestoredChannelMutation = fmt.Errorf("cannot mutate restored " +
210
                "channel state")
211

212
        // ErrChanBorked is returned when a caller attempts to mutate a borked
213
        // channel.
214
        ErrChanBorked = fmt.Errorf("cannot mutate borked channel")
215

216
        // ErrMissingIndexEntry is returned when a caller attempts to close a
217
        // channel and the outpoint is missing from the index.
218
        ErrMissingIndexEntry = fmt.Errorf("missing outpoint from index")
219

220
        // ErrOnionBlobLength is returned when an onion blob with an incorrect
221
        // length is read from disk.
222
        ErrOnionBlobLength = errors.New("onion blob < 1366 bytes")
223
)
224

225
const (
226
        // A tlv type definition used to serialize an outpoint's indexStatus
227
        // for use in the outpoint index.
228
        indexStatusType tlv.Type = 0
229

230
        // A tlv type definition used to serialize and deserialize a KeyLocator
231
        // from the database.
232
        keyLocType tlv.Type = 1
233

234
        // A tlv type used to serialize and deserialize the
235
        // `InitialLocalBalance` field.
236
        initialLocalBalanceType tlv.Type = 2
237

238
        // A tlv type used to serialize and deserialize the
239
        // `InitialRemoteBalance` field.
240
        initialRemoteBalanceType tlv.Type = 3
241

242
        // A tlv type definition used to serialize and deserialize the
243
        // confirmed ShortChannelID for a zero-conf channel.
244
        realScidType tlv.Type = 4
245

246
        // A tlv type definition used to serialize and deserialize the
247
        // Memo for the channel.
248
        channelMemoType tlv.Type = 5
249
)
250

251
// indexStatus is an enum-like type that describes what state the
252
// outpoint is in. Currently there are only two possible values.
253
type indexStatus uint8
254

255
const (
256
        // outpointOpen represents an outpoint that is open in the outpoint index.
257
        outpointOpen indexStatus = 0
258

259
        // outpointClosed represents an outpoint that is closed in the outpoint
260
        // index.
261
        outpointClosed indexStatus = 1
262
)
263

264
// ChannelType is an enum-like type that describes one of several possible
265
// channel types. Each open channel is associated with a particular type as the
266
// channel type may determine how higher level operations are conducted such as
267
// fee negotiation, channel closing, the format of HTLCs, etc. Structure-wise,
268
// a ChannelType is a bit field, with each bit denoting a modification from the
269
// base channel type of single funder.
270
type ChannelType uint64
271

272
const (
273
        // NOTE: iota isn't used here as this enum needs to be stable
274
        // long-term as it will be persisted to the database.
275

276
        // SingleFunderBit represents a channel wherein one party solely funds
277
        // the entire capacity of the channel.
278
        SingleFunderBit ChannelType = 0
279

280
        // DualFunderBit represents a channel wherein both parties contribute
281
        // funds towards the total capacity of the channel. The channel may be
282
        // funded symmetrically or asymmetrically.
283
        DualFunderBit ChannelType = 1 << 0
284

285
        // SingleFunderTweaklessBit is similar to the basic SingleFunder channel
286
        // type, but it omits the tweak for one's key in the commitment
287
        // transaction of the remote party.
288
        SingleFunderTweaklessBit ChannelType = 1 << 1
289

290
        // NoFundingTxBit denotes if we have the funding transaction locally on
291
        // disk. This bit may be on if the funding transaction was crafted by a
292
        // wallet external to the primary daemon.
293
        NoFundingTxBit ChannelType = 1 << 2
294

295
        // AnchorOutputsBit indicates that the channel makes use of anchor
296
        // outputs to bump the commitment transaction's effective feerate. This
297
        // channel type also uses a delayed to_remote output script.
298
        AnchorOutputsBit ChannelType = 1 << 3
299

300
        // FrozenBit indicates that the channel is a frozen channel, meaning
301
        // that only the responder can decide to cooperatively close the
302
        // channel.
303
        FrozenBit ChannelType = 1 << 4
304

305
        // ZeroHtlcTxFeeBit indicates that the channel should use zero-fee
306
        // second-level HTLC transactions.
307
        ZeroHtlcTxFeeBit ChannelType = 1 << 5
308

309
        // LeaseExpirationBit indicates that the channel has been leased for a
310
        // period of time, constraining every output that pays to the channel
311
        // initiator with an additional CLTV of the lease maturity.
312
        LeaseExpirationBit ChannelType = 1 << 6
313

314
        // ZeroConfBit indicates that the channel is a zero-conf channel.
315
        ZeroConfBit ChannelType = 1 << 7
316

317
        // ScidAliasChanBit indicates that the channel has negotiated the
318
        // scid-alias channel type.
319
        ScidAliasChanBit ChannelType = 1 << 8
320

321
        // ScidAliasFeatureBit indicates that the scid-alias feature bit was
322
        // negotiated during the lifetime of this channel.
323
        ScidAliasFeatureBit ChannelType = 1 << 9
324

325
        // SimpleTaprootFeatureBit indicates that the simple-taproot-chans
326
        // feature bit was negotiated during the lifetime of the channel.
327
        SimpleTaprootFeatureBit ChannelType = 1 << 10
328
)
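// --- Illustrative example (not part of channel.go): ChannelType is a bit
// field, so a single channel combines several of the bits above. A tweakless
// anchor channel with zero-fee second-level HTLC transactions satisfies all
// of the checks below (see the accessor methods that follow).
func exampleChannelTypeBits() bool {
        chanType := SingleFunderTweaklessBit | AnchorOutputsBit | ZeroHtlcTxFeeBit

        return chanType.IsSingleFunder() && chanType.IsTweakless() &&
                chanType.HasAnchors() && chanType.ZeroHtlcTxFee()
}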
329

330
// IsSingleFunder returns true if the channel type is one of the known single
331
// funder variants.
332
func (c ChannelType) IsSingleFunder() bool {
14,174✔
333
        return c&DualFunderBit == 0
14,174✔
334
}
14,174✔
335

336
// IsDualFunder returns true if the ChannelType has the DualFunderBit set.
337
func (c ChannelType) IsDualFunder() bool {
×
338
        return c&DualFunderBit == DualFunderBit
×
339
}
×
340

341
// IsTweakless returns true if the target channel uses a commitment that
342
// doesn't tweak the key for the remote party.
343
func (c ChannelType) IsTweakless() bool {
13,242✔
344
        return c&SingleFunderTweaklessBit == SingleFunderTweaklessBit
13,242✔
345
}
13,242✔
346

347
// HasFundingTx returns true if this channel type is one that has a funding
348
// transaction stored locally.
349
func (c ChannelType) HasFundingTx() bool {
14,184✔
350
        return c&NoFundingTxBit == 0
14,184✔
351
}
14,184✔
352

353
// HasAnchors returns true if this channel type has anchor outputs on its
354
// commitment.
355
func (c ChannelType) HasAnchors() bool {
8,230,962✔
356
        return c&AnchorOutputsBit == AnchorOutputsBit
8,230,962✔
357
}
8,230,962✔
358

359
// ZeroHtlcTxFee returns true if this channel type uses second-level HTLC
360
// transactions signed with zero-fee.
361
func (c ChannelType) ZeroHtlcTxFee() bool {
7,235,733✔
362
        return c&ZeroHtlcTxFeeBit == ZeroHtlcTxFeeBit
7,235,733✔
363
}
7,235,733✔
364

365
// IsFrozen returns true if the channel is considered to be "frozen". A frozen
366
// channel means that only the responder can initiate a cooperative channel
367
// closure.
368
func (c ChannelType) IsFrozen() bool {
11,283✔
369
        return c&FrozenBit == FrozenBit
11,283✔
370
}
11,283✔
371

372
// HasLeaseExpiration returns true if the channel originated from a lease.
373
func (c ChannelType) HasLeaseExpiration() bool {
275,169✔
374
        return c&LeaseExpirationBit == LeaseExpirationBit
275,169✔
375
}
275,169✔
376

377
// HasZeroConf returns true if the channel is a zero-conf channel.
378
func (c ChannelType) HasZeroConf() bool {
644✔
379
        return c&ZeroConfBit == ZeroConfBit
644✔
380
}
644✔
381

382
// HasScidAliasChan returns true if the scid-alias channel type was negotiated.
383
func (c ChannelType) HasScidAliasChan() bool {
×
384
        return c&ScidAliasChanBit == ScidAliasChanBit
×
385
}
×
386

387
// HasScidAliasFeature returns true if the scid-alias feature bit was
388
// negotiated during the lifetime of this channel.
389
func (c ChannelType) HasScidAliasFeature() bool {
483✔
390
        return c&ScidAliasFeatureBit == ScidAliasFeatureBit
483✔
391
}
483✔
392

393
// IsTaproot returns true if the channel is using taproot features.
394
func (c ChannelType) IsTaproot() bool {
8,961,438✔
395
        return c&SimpleTaprootFeatureBit == SimpleTaprootFeatureBit
8,961,438✔
396
}
8,961,438✔
397

398
// ChannelConstraints represents a set of constraints meant to allow a node to
399
// limit their exposure, enact flow control and ensure that all HTLCs are
400
// economically relevant. This struct will be mirrored for both sides of the
401
// channel, as each side will enforce various constraints that MUST be adhered
402
// to for the lifetime of the channel. The parameters for each of these
403
// constraints are static for the duration of the channel, meaning the channel
404
// must be torn down for them to change.
405
type ChannelConstraints struct {
406
        // DustLimit is the threshold (in satoshis) below which any outputs
407
        // should be trimmed. When an output is trimmed, it isn't materialized
408
        // as an actual output, but is instead burned to miner's fees.
409
        DustLimit btcutil.Amount
410

411
        // ChanReserve is an absolute reservation on the channel for the
412
        // owner of this set of constraints. This means that the current
413
        // settled balance for this node CANNOT dip below the reservation
414
        // amount. This acts as a defense against costless attacks when
415
        // either side no longer has any skin in the game.
416
        ChanReserve btcutil.Amount
417

418
        // MaxPendingAmount is the maximum pending HTLC value that the
419
        // owner of these constraints can offer the remote node at a
420
        // particular time.
421
        MaxPendingAmount lnwire.MilliSatoshi
422

423
        // MinHTLC is the minimum HTLC value that the owner of these
424
        // constraints can offer the remote node. If any HTLCs below this
425
        // amount are offered, then the HTLC will be rejected. This, in
426
        // tandem with the dust limit allows a node to regulate the
427
        // smallest HTLC that it deems economically relevant.
428
        MinHTLC lnwire.MilliSatoshi
429

430
        // MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of
431
        // this set of constraints can offer the remote node. This allows each
432
        // node to limit their overall exposure to HTLCs that may need to be
433
        // acted upon in the case of a unilateral channel closure or a contract
434
        // breach.
435
        MaxAcceptedHtlcs uint16
436

437
        // CsvDelay is the relative time lock delay expressed in blocks. Any
438
        // settled outputs that pay to the owner of this channel configuration
439
        // MUST ensure that the delay branch uses this value as the relative
440
        // time lock. Similarly, any HTLC's offered by this node should use
441
        // this value as well.
442
        CsvDelay uint16
443
}
444

445
// ChannelConfig is a struct that houses the various configuration options for
446
// channels. Each side maintains an instance of this configuration file as it
447
// governs: how the funding and commitment transactions are to be created, the
448
// nature of HTLC's allotted, the keys to be used for delivery, and relative
449
// time lock parameters.
450
type ChannelConfig struct {
451
        // ChannelConstraints is the set of constraints that must be upheld for
452
        // the duration of the channel for the owner of this channel
453
        // configuration. Constraints govern a number of flow control related
454
        // parameters, also including the smallest HTLC that will be accepted
455
        // by a participant.
456
        ChannelConstraints
457

458
        // MultiSigKey is the key to be used within the 2-of-2 output script
459
        // for the owner of this channel config.
460
        MultiSigKey keychain.KeyDescriptor
461

462
        // RevocationBasePoint is the base public key to be used when deriving
463
        // revocation keys for the remote node's commitment transaction. This
464
        // will be combined along with a per commitment secret to derive a
465
        // unique revocation key for each state.
466
        RevocationBasePoint keychain.KeyDescriptor
467

468
        // PaymentBasePoint is the base public key to be used when deriving
469
        // the key used within the non-delayed pay-to-self output on the
470
        // commitment transaction for a node. This will be combined with a
471
        // tweak derived from the per-commitment point to ensure unique keys
472
        // for each commitment transaction.
473
        PaymentBasePoint keychain.KeyDescriptor
474

475
        // DelayBasePoint is the base public key to be used when deriving the
476
        // key used within the delayed pay-to-self output on the commitment
477
        // transaction for a node. This will be combined with a tweak derived
478
        // from the per-commitment point to ensure unique keys for each
479
        // commitment transaction.
480
        DelayBasePoint keychain.KeyDescriptor
481

482
        // HtlcBasePoint is the base public key to be used when deriving the
483
        // local HTLC key. The derived key (combined with the tweak derived
484
        // from the per-commitment point) is used within the "to self" clause
485
        // within any HTLC output scripts.
486
        HtlcBasePoint keychain.KeyDescriptor
487
}
488

489
// ChannelCommitment is a snapshot of the commitment state at a particular
490
// point in the commitment chain. With each state transition, a snapshot of the
491
// current state along with all non-settled HTLCs are recorded. These snapshots
492
// detail the state of the _remote_ party's commitment at a particular state
493
// number.  For ourselves (the local node) we ONLY store our most recent
494
// (unrevoked) state for safety purposes.
495
type ChannelCommitment struct {
496
        // CommitHeight is the update number that this ChannelDelta represents
497
        // the total number of commitment updates to this point. This can be
498
        // viewed as sort of a "commitment height" as this number is
499
        // monotonically increasing.
500
        CommitHeight uint64
501

502
        // LocalLogIndex is the cumulative log index of the local node at
503
        // this point in the commitment chain. This value will be incremented
504
        // for each _update_ added to the local update log.
505
        LocalLogIndex uint64
506

507
        // LocalHtlcIndex is the current local running HTLC index. This value
508
        // will be incremented for each outgoing HTLC the local node offers.
509
        LocalHtlcIndex uint64
510

511
        // RemoteLogIndex is the cumulative log index of the remote node
512
        // at this point in the commitment chain. This value will be
513
        // incremented for each _update_ added to the remote update log.
514
        RemoteLogIndex uint64
515

516
        // RemoteHtlcIndex is the current remote running HTLC index. This value
517
        // will be incremented for each outgoing HTLC the remote node offers.
518
        RemoteHtlcIndex uint64
519

520
        // LocalBalance is the current available settled balance within the
521
        // channel directly spendable by us.
522
        //
523
        // NOTE: This is the balance *after* subtracting any commitment fee,
524
        // AND anchor output values.
525
        LocalBalance lnwire.MilliSatoshi
526

527
        // RemoteBalance is the current available settled balance within the
528
        // channel directly spendable by the remote node.
529
        //
530
        // NOTE: This is the balance *after* subtracting any commitment fee,
531
        // AND anchor output values.
532
        RemoteBalance lnwire.MilliSatoshi
533

534
        // CommitFee is the amount calculated to be paid in fees for the
535
        // current set of commitment transactions. The fee amount is persisted
536
        // with the channel in order to allow the fee amount to be removed and
537
        // recalculated with each channel state update, including updates that
538
        // happen after a system restart.
539
        CommitFee btcutil.Amount
540

541
        // FeePerKw is the min satoshis/kilo-weight that should be paid within
542
        // the commitment transaction for the entire duration of the channel's
543
        // lifetime. This field may be updated during normal operation of the
544
        // channel as on-chain conditions change.
545
        //
546
        // TODO(halseth): make this SatPerKWeight. Cannot be done atm because
547
        // this will cause the import cycle lnwallet<->channeldb. Fee
548
        // estimation stuff should be in its own package.
549
        FeePerKw btcutil.Amount
550

551
        // CommitTx is the latest version of the commitment state,
552
        // broadcastable by us.
553
        CommitTx *wire.MsgTx
554

555
        // CommitSig is one half of the signature required to fully complete
556
        // the script for the commitment transaction above. This is the
557
        // signature signed by the remote party for our version of the
558
        // commitment transactions.
559
        CommitSig []byte
560

561
        // Htlcs is the set of HTLC's that are pending at this particular
562
        // commitment height.
563
        Htlcs []HTLC
564

565
        // TODO(roasbeef): pending commit pointer?
566
        //  * lets just walk through
567
}
568

569
// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in
570
// the default usable state, or a state where it shouldn't be used.
571
type ChannelStatus uint64
572

573
var (
574
        // ChanStatusDefault is the normal state of an open channel.
575
        ChanStatusDefault ChannelStatus
576

577
        // ChanStatusBorked indicates that the channel has entered an
578
        // irreconcilable state, triggered by a state desynchronization or
579
        // channel breach.  Channels in this state should never be added to the
580
        // htlc switch.
581
        ChanStatusBorked ChannelStatus = 1
582

583
        // ChanStatusCommitBroadcasted indicates that a commitment for this
584
        // channel has been broadcasted.
585
        ChanStatusCommitBroadcasted ChannelStatus = 1 << 1
586

587
        // ChanStatusLocalDataLoss indicates that we have lost channel state
588
        // for this channel, and broadcasting our latest commitment might be
589
        // considered a breach.
590
        //
591
        // TODO(halseh): actually enforce that we are not force closing such a
592
        // channel.
593
        ChanStatusLocalDataLoss ChannelStatus = 1 << 2
594

595
        // ChanStatusRestored is a status flag that signals that the channel
596
        // has been restored, and doesn't have all the fields a typical channel
597
        // will have.
598
        ChanStatusRestored ChannelStatus = 1 << 3
599

600
        // ChanStatusCoopBroadcasted indicates that a cooperative close for
601
        // this channel has been broadcasted. Older cooperatively closed
602
        // channels will only have this status set. Newer ones will also have
603
        // close initiator information stored using the local/remote initiator
604
        // status. This status is set in conjunction with the initiator status
605
        // so that we do not need to check multiple channel statuses for
606
        // cooperative closes.
607
        ChanStatusCoopBroadcasted ChannelStatus = 1 << 4
608

609
        // ChanStatusLocalCloseInitiator indicates that we initiated closing
610
        // the channel.
611
        ChanStatusLocalCloseInitiator ChannelStatus = 1 << 5
612

613
        // ChanStatusRemoteCloseInitiator indicates that the remote node
614
        // initiated closing the channel.
615
        ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6
616
)
617

618
// chanStatusStrings maps a ChannelStatus to a human friendly string that
619
// describes that status.
620
var chanStatusStrings = map[ChannelStatus]string{
621
        ChanStatusDefault:              "ChanStatusDefault",
622
        ChanStatusBorked:               "ChanStatusBorked",
623
        ChanStatusCommitBroadcasted:    "ChanStatusCommitBroadcasted",
624
        ChanStatusLocalDataLoss:        "ChanStatusLocalDataLoss",
625
        ChanStatusRestored:             "ChanStatusRestored",
626
        ChanStatusCoopBroadcasted:      "ChanStatusCoopBroadcasted",
627
        ChanStatusLocalCloseInitiator:  "ChanStatusLocalCloseInitiator",
628
        ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator",
629
}
630

631
// orderedChanStatusFlags is an in-order list of all the channel status flags.
632
var orderedChanStatusFlags = []ChannelStatus{
633
        ChanStatusBorked,
634
        ChanStatusCommitBroadcasted,
635
        ChanStatusLocalDataLoss,
636
        ChanStatusRestored,
637
        ChanStatusCoopBroadcasted,
638
        ChanStatusLocalCloseInitiator,
639
        ChanStatusRemoteCloseInitiator,
640
}
641

642
// String returns a human-readable representation of the ChannelStatus.
643
func (c ChannelStatus) String() string {
5✔
644
        // If no flags are set, then this is the default case.
5✔
645
        if c == ChanStatusDefault {
9✔
646
                return chanStatusStrings[ChanStatusDefault]
4✔
647
        }
4✔
648

649
        // Add individual bit flags.
650
        statusStr := ""
5✔
651
        for _, flag := range orderedChanStatusFlags {
16✔
652
                if c&flag == flag {
16✔
653
                        statusStr += chanStatusStrings[flag] + "|"
5✔
654
                        c -= flag
5✔
655
                }
5✔
656
        }
657

658
        // Remove anything to the right of the final bar, including it as well.
659
        statusStr = strings.TrimRight(statusStr, "|")
5✔
660

5✔
661
        // Add any remaining flags which aren't accounted for as hex.
5✔
662
        if c != 0 {
5✔
663
                statusStr += "|0x" + strconv.FormatUint(uint64(c), 16)
×
664
        }
×
665

666
        // If this was purely an unknown flag, then remove the extra bar at the
667
        // start of the string.
668
        statusStr = strings.TrimLeft(statusStr, "|")
5✔
669

5✔
670
        return statusStr
5✔
671
}
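// --- Illustrative example (not part of channel.go): statuses are bit flags,
// so a channel whose commitment was broadcast after we initiated the close
// reports both flags, joined by "|".
func exampleChanStatusString() string {
        status := ChanStatusCommitBroadcasted | ChanStatusLocalCloseInitiator

        // Yields "ChanStatusCommitBroadcasted|ChanStatusLocalCloseInitiator".
        return status.String()
}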
672

673
// FinalHtlcByte defines a byte type that encodes information about the final
674
// htlc resolution.
675
type FinalHtlcByte byte
676

677
const (
678
        // FinalHtlcSettledBit is the bit that encodes whether the htlc was
679
        // settled or failed.
680
        FinalHtlcSettledBit FinalHtlcByte = 1 << 0
681

682
        // FinalHtlcOffchainBit is the bit that encodes whether the htlc was
683
        // resolved offchain or onchain.
684
        FinalHtlcOffchainBit FinalHtlcByte = 1 << 1
685
)
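// --- Illustrative example (not part of channel.go): decoding the single
// outcome byte stored in the final-htlcs bucket using the bits above, per
// the truth table in the finalHtlcsBucket comment (bit 0: settled/failed,
// bit 1: offchain/onchain).
func exampleDecodeFinalHtlc(outcome FinalHtlcByte) (settled, offchain bool) {
        settled = outcome&FinalHtlcSettledBit == FinalHtlcSettledBit
        offchain = outcome&FinalHtlcOffchainBit == FinalHtlcOffchainBit

        return settled, offchain
}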
686

687
// OpenChannel encapsulates the persistent and dynamic state of an open channel
688
// with a remote node. An open channel supports several options for on-disk
689
// serialization depending on the exact context. Full (upon channel creation)
690
// state commitments, and partial (due to a commitment update) writes are
691
// supported. Each partial write due to a state update appends the new update
692
// to an on-disk log, which can then subsequently be queried in order to
693
// "time-travel" to a prior state.
694
type OpenChannel struct {
695
        // ChanType denotes which type of channel this is.
696
        ChanType ChannelType
697

698
        // ChainHash is a hash which represents the blockchain that this
699
        // channel will be opened within. This value is typically the genesis
700
        // hash. In the case that the original chain went through a contentious
701
        // hard-fork, then this value will be tweaked using the unique fork
702
        // point on each branch.
703
        ChainHash chainhash.Hash
704

705
        // FundingOutpoint is the outpoint of the final funding transaction.
706
        // This value uniquely and globally identifies the channel within the
707
        // target blockchain as specified by the chain hash parameter.
708
        FundingOutpoint wire.OutPoint
709

710
        // ShortChannelID encodes the exact location in the chain in which the
711
        // channel was initially confirmed. This includes: the block height,
712
        // transaction index, and the output within the target transaction.
713
        //
714
        // If IsZeroConf(), then this will be the "base" (very first) ALIAS scid
715
        // and the confirmed SCID will be stored in ConfirmedScid.
716
        ShortChannelID lnwire.ShortChannelID
717

718
        // IsPending indicates whether a channel's funding transaction has been
719
        // confirmed.
720
        IsPending bool
721

722
        // IsInitiator is a bool which indicates if we were the original
723
        // initiator for the channel. This value may affect how higher levels
724
        // negotiate fees, or close the channel.
725
        IsInitiator bool
726

727
        // chanStatus is the current status of this channel. If it is not in
728
        // the state Default, it should not be used for forwarding payments.
729
        chanStatus ChannelStatus
730

731
        // FundingBroadcastHeight is the height in which the funding
732
        // transaction was broadcast. This value can be used by higher level
733
        // sub-systems to determine if a channel is stale and/or should have
734
        // been confirmed before a certain height.
735
        FundingBroadcastHeight uint32
736

737
        // NumConfsRequired is the number of confirmations a channel's funding
738
        // transaction must have received in order to be considered available
739
        // for normal transactional use.
740
        NumConfsRequired uint16
741

742
        // ChannelFlags holds the flags that were sent as part of the
743
        // open_channel message.
744
        ChannelFlags lnwire.FundingFlag
745

746
        // IdentityPub is the identity public key of the remote node this
747
        // channel has been established with.
748
        IdentityPub *btcec.PublicKey
749

750
        // Capacity is the total capacity of this channel.
751
        Capacity btcutil.Amount
752

753
        // TotalMSatSent is the total number of milli-satoshis we've sent
754
        // within this channel.
755
        TotalMSatSent lnwire.MilliSatoshi
756

757
        // TotalMSatReceived is the total number of milli-satoshis we've
758
        // received within this channel.
759
        TotalMSatReceived lnwire.MilliSatoshi
760

761
        // InitialLocalBalance is the balance we have during the channel
762
        // opening. When we are not the initiator, this value represents the
763
        // push amount.
764
        InitialLocalBalance lnwire.MilliSatoshi
765

766
        // InitialRemoteBalance is the balance they have during the channel
767
        // opening.
768
        InitialRemoteBalance lnwire.MilliSatoshi
769

770
        // LocalChanCfg is the channel configuration for the local node.
771
        LocalChanCfg ChannelConfig
772

773
        // RemoteChanCfg is the channel configuration for the remote node.
774
        RemoteChanCfg ChannelConfig
775

776
        // LocalCommitment is the current local commitment state for the local
777
        // party. This is stored distinct from the state of the remote party
778
        // as there are certain asymmetric parameters which affect the
779
        // structure of each commitment.
780
        LocalCommitment ChannelCommitment
781

782
        // RemoteCommitment is the current remote commitment state for the
783
        // remote party. This is stored distinct from the state of the local
784
        // party as there are certain asymmetric parameters which affect the
785
        // structure of each commitment.
786
        RemoteCommitment ChannelCommitment
787

788
        // RemoteCurrentRevocation is the current revocation for their
789
        // commitment transaction. However, since this is the derived public key,
790
        // we don't yet have the private key so we aren't yet able to verify
791
        // that it's actually in the hash chain.
792
        RemoteCurrentRevocation *btcec.PublicKey
793

794
        // RemoteNextRevocation is the revocation key to be used for the *next*
795
        // commitment transaction we create for the local node. Within the
796
        // specification, this value is referred to as the
797
        // per-commitment-point.
798
        RemoteNextRevocation *btcec.PublicKey
799

800
        // RevocationProducer is used to generate the revocation in such a way
801
        // that remote side might store it efficiently and have the ability to
802
        // restore the revocation by index if needed. Current implementation of
803
        // secret producer is shachain producer.
804
        RevocationProducer shachain.Producer
805

806
        // RevocationStore is used to efficiently store the revocations for
807
        // previous channels states sent to us by remote side. Current
808
        // implementation of secret store is shachain store.
809
        RevocationStore shachain.Store
810

811
        // Packager is used to create and update forwarding packages for this
812
        // channel, which encodes all necessary information to recover from
813
        // failures and reforward HTLCs that were not fully processed.
814
        Packager FwdPackager
815

816
        // FundingTxn is the transaction containing this channel's funding
817
        // outpoint. Upon restarts, this txn will be rebroadcast if the channel
818
        // is found to be pending.
819
        //
820
        // NOTE: This value will only be populated for single-funder channels
821
        // for which we are the initiator, and that we also have the funding
822
        // transaction for. One can check this by using the HasFundingTx()
823
        // method on the ChanType field.
824
        FundingTxn *wire.MsgTx
825

826
        // LocalShutdownScript is set to a pre-set script if the channel was opened
827
        // by the local node with option_upfront_shutdown_script set. If the option
828
        // was not set, the field is empty.
829
        LocalShutdownScript lnwire.DeliveryAddress
830

831
        // RemoteShutdownScript is set to a pre-set script if the channel was opened
832
        // by the remote node with option_upfront_shutdown_script set. If the option
833
        // was not set, the field is empty.
834
        RemoteShutdownScript lnwire.DeliveryAddress
835

836
        // ThawHeight is the height when a frozen channel once again becomes a
837
        // normal channel. If this is zero, then there are no restrictions on
838
        // this channel. If the value is lower than 500,000, then it's
839
        // interpreted as a relative height, or an absolute height otherwise.
840
        ThawHeight uint32
841

842
        // LastWasRevoke is a boolean that determines if the last update we sent
843
        // was a revocation (true) or a commitment signature (false).
844
        LastWasRevoke bool
845

846
        // RevocationKeyLocator stores the KeyLocator information that we will
847
        // need to derive the shachain root for this channel. This allows us to
848
        // have private key isolation from lnd.
849
        RevocationKeyLocator keychain.KeyLocator
850

851
        // confirmedScid is the confirmed ShortChannelID for a zero-conf
852
        // channel. If the channel is unconfirmed, then this will be the
853
        // default ShortChannelID. This is only set for zero-conf channels.
854
        confirmedScid lnwire.ShortChannelID
855

856
        // Memo is any arbitrary information we wish to store locally about the
857
        // channel that will be useful to our future selves.
858
        Memo []byte
859

860
        // TODO(roasbeef): eww
861
        Db *ChannelStateDB
862

863
        // TODO(roasbeef): just need to store local and remote HTLC's?
864

865
        sync.RWMutex
866
}
867

868
// String returns a string representation of the channel.
869
func (c *OpenChannel) String() string {
4✔
870
        indexStr := "height=%v, local_htlc_index=%v, local_log_index=%v, " +
4✔
871
                "remote_htlc_index=%v, remote_log_index=%v"
4✔
872

4✔
873
        commit := c.LocalCommitment
4✔
874
        local := fmt.Sprintf(indexStr, commit.CommitHeight,
4✔
875
                commit.LocalHtlcIndex, commit.LocalLogIndex,
4✔
876
                commit.RemoteHtlcIndex, commit.RemoteLogIndex,
4✔
877
        )
4✔
878

4✔
879
        commit = c.RemoteCommitment
4✔
880
        remote := fmt.Sprintf(indexStr, commit.CommitHeight,
4✔
881
                commit.LocalHtlcIndex, commit.LocalLogIndex,
4✔
882
                commit.RemoteHtlcIndex, commit.RemoteLogIndex,
4✔
883
        )
4✔
884

4✔
885
        return fmt.Sprintf("SCID=%v, status=%v, initiator=%v, pending=%v, "+
4✔
886
                "local commitment has %s, remote commitment has %s",
4✔
887
                c.ShortChannelID, c.chanStatus, c.IsInitiator, c.IsPending,
4✔
888
                local, remote,
4✔
889
        )
4✔
890
}
4✔
891

892
// ShortChanID returns the current ShortChannelID of this channel.
893
func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID {
13,966✔
894
        c.RLock()
13,966✔
895
        defer c.RUnlock()
13,966✔
896

13,966✔
897
        return c.ShortChannelID
13,966✔
898
}
13,966✔
899

900
// ZeroConfRealScid returns the zero-conf channel's confirmed scid. This should
901
// only be called if IsZeroConf returns true.
902
func (c *OpenChannel) ZeroConfRealScid() lnwire.ShortChannelID {
13✔
903
        c.RLock()
13✔
904
        defer c.RUnlock()
13✔
905

13✔
906
        return c.confirmedScid
13✔
907
}
13✔
908

909
// ZeroConfConfirmed returns whether the zero-conf channel has confirmed. This
910
// should only be called if IsZeroConf returns true.
911
func (c *OpenChannel) ZeroConfConfirmed() bool {
9✔
912
        c.RLock()
9✔
913
        defer c.RUnlock()
9✔
914

9✔
915
        return c.confirmedScid != hop.Source
9✔
916
}
9✔
917

918
// IsZeroConf returns whether the option_zeroconf channel type was negotiated.
919
func (c *OpenChannel) IsZeroConf() bool {
644✔
920
        c.RLock()
644✔
921
        defer c.RUnlock()
644✔
922

644✔
923
        return c.ChanType.HasZeroConf()
644✔
924
}
644✔
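// --- Illustrative example (not part of channel.go): for a zero-conf channel,
// ShortChanID returns the "base" alias SCID, while ZeroConfRealScid exposes
// the confirmed SCID once ZeroConfConfirmed reports true.
func exampleEffectiveScid(c *OpenChannel) lnwire.ShortChannelID {
        if c.IsZeroConf() && c.ZeroConfConfirmed() {
                return c.ZeroConfRealScid()
        }

        return c.ShortChanID()
}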
925

926
// IsOptionScidAlias returns whether the option_scid_alias channel type was
927
// negotiated.
928
func (c *OpenChannel) IsOptionScidAlias() bool {
×
929
        c.RLock()
×
930
        defer c.RUnlock()
×
931

×
932
        return c.ChanType.HasScidAliasChan()
×
933
}
×
934

935
// NegotiatedAliasFeature returns whether the option-scid-alias feature bit was
936
// negotiated.
937
func (c *OpenChannel) NegotiatedAliasFeature() bool {
482✔
938
        c.RLock()
482✔
939
        defer c.RUnlock()
482✔
940

482✔
941
        return c.ChanType.HasScidAliasFeature()
482✔
942
}
482✔
943

944
// ChanStatus returns the current ChannelStatus of this channel.
945
func (c *OpenChannel) ChanStatus() ChannelStatus {
212✔
946
        c.RLock()
212✔
947
        defer c.RUnlock()
212✔
948

212✔
949
        return c.chanStatus
212✔
950
}
212✔
951

952
// ApplyChanStatus allows the caller to modify the internal channel state in a
953
// thread-safe manner.
954
func (c *OpenChannel) ApplyChanStatus(status ChannelStatus) error {
3✔
955
        c.Lock()
3✔
956
        defer c.Unlock()
3✔
957

3✔
958
        return c.putChanStatus(status)
3✔
959
}
3✔
960

961
// ClearChanStatus allows the caller to clear a particular channel status from
962
// the primary channel status bit field. After this method returns, a call to
963
// HasChanStatus(status) should return false.
964
func (c *OpenChannel) ClearChanStatus(status ChannelStatus) error {
4✔
965
        c.Lock()
4✔
966
        defer c.Unlock()
4✔
967

4✔
968
        return c.clearChanStatus(status)
4✔
969
}
4✔
970

971
// HasChanStatus returns true if the internal bitfield channel status of the
972
// target channel has the specified status bit set.
973
func (c *OpenChannel) HasChanStatus(status ChannelStatus) bool {
350✔
974
        c.RLock()
350✔
975
        defer c.RUnlock()
350✔
976

350✔
977
        return c.hasChanStatus(status)
350✔
978
}
350✔
979

980
func (c *OpenChannel) hasChanStatus(status ChannelStatus) bool {
28,112✔
981
        // Special case ChanStatusDefault since it isn't actually a flag, but a
28,112✔
982
        // particular combination (or lack thereof) of flags.
28,112✔
983
        if status == ChanStatusDefault {
28,120✔
984
                return c.chanStatus == ChanStatusDefault
8✔
985
        }
8✔
986

987
        return c.chanStatus&status == status
28,108✔
988
}
989

990
// BroadcastHeight returns the height at which the funding tx was broadcast.
991
func (c *OpenChannel) BroadcastHeight() uint32 {
107✔
992
        c.RLock()
107✔
993
        defer c.RUnlock()
107✔
994

107✔
995
        return c.FundingBroadcastHeight
107✔
996
}
107✔
997

998
// SetBroadcastHeight sets the FundingBroadcastHeight.
999
func (c *OpenChannel) SetBroadcastHeight(height uint32) {
4✔
1000
        c.Lock()
4✔
1001
        defer c.Unlock()
4✔
1002

4✔
1003
        c.FundingBroadcastHeight = height
4✔
1004
}
4✔
1005

1006
// Refresh updates the in-memory channel state using the latest state observed
1007
// on disk.
1008
func (c *OpenChannel) Refresh() error {
9✔
1009
        c.Lock()
9✔
1010
        defer c.Unlock()
9✔
1011

9✔
1012
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
18✔
1013
                chanBucket, err := fetchChanBucket(
9✔
1014
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
9✔
1015
                )
9✔
1016
                if err != nil {
13✔
1017
                        return err
4✔
1018
                }
4✔
1019

1020
                // We'll re-populate the in-memory channel with the info
1021
                // fetched from disk.
1022
                if err := fetchChanInfo(chanBucket, c); err != nil {
9✔
1023
                        return fmt.Errorf("unable to fetch chan info: %w", err)
×
1024
                }
×
1025

1026
                // Also populate the channel's commitment states for both sides
1027
                // of the channel.
1028
                if err := fetchChanCommitments(chanBucket, c); err != nil {
9✔
1029
                        return fmt.Errorf("unable to fetch chan commitments: "+
×
1030
                                "%v", err)
×
1031
                }
×
1032

1033
                // Also retrieve the current revocation state.
1034
                if err := fetchChanRevocationState(chanBucket, c); err != nil {
9✔
1035
                        return fmt.Errorf("unable to fetch chan revocations: "+
×
1036
                                "%v", err)
×
1037
                }
×
1038

1039
                return nil
9✔
1040
        }, func() {})
9✔
1041
        if err != nil {
13✔
1042
                return err
4✔
1043
        }
4✔
1044

1045
        return nil
9✔
1046
}
1047

1048
// fetchChanBucket is a helper function that returns the bucket where a
1049
// channel's data resides, given: the public key for the node, the outpoint,
1050
// and the chainhash that the channel resides on.
1051
func fetchChanBucket(tx kvdb.RTx, nodeKey *btcec.PublicKey,
1052
        outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RBucket, error) {
2,799✔
1053

2,799✔
1054
        // First fetch the top level bucket which stores all data related to
2,799✔
1055
        // current, active channels.
2,799✔
1056
        openChanBucket := tx.ReadBucket(openChannelBucket)
2,799✔
1057
        if openChanBucket == nil {
2,799✔
1058
                return nil, ErrNoChanDBExists
×
1059
        }
×
1060

1061
        // TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
1062
        // CreateIfNotExists, will return error
1063

1064
        // Within this top level bucket, fetch the bucket dedicated to storing
1065
        // open channel data specific to the remote node.
1066
        nodePub := nodeKey.SerializeCompressed()
2,799✔
1067
        nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
2,799✔
1068
        if nodeChanBucket == nil {
3,819✔
1069
                return nil, ErrNoActiveChannels
1,020✔
1070
        }
1,020✔
1071

1072
        // We'll then recurse down an additional layer in order to fetch the
1073
        // bucket for this particular chain.
1074
        chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:])
1,779✔
1075
        if chainBucket == nil {
1,779✔
1076
                return nil, ErrNoActiveChannels
×
1077
        }
×
1078

1079
        // With the bucket for the node and chain fetched, we can now go down
1080
        // another level, for this channel itself.
1081
        var chanPointBuf bytes.Buffer
1,779✔
1082
        if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
1,779✔
1083
                return nil, err
×
1084
        }
×
1085
        chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes())
1,779✔
1086
        if chanBucket == nil {
1,784✔
1087
                return nil, ErrChannelNotFound
5✔
1088
        }
5✔
1089

1090
        return chanBucket, nil
1,778✔
1091
}
1092

1093
// fetchChanBucketRw is a helper function that returns the bucket where a
1094
// channel's data resides in given: the public key for the node, the outpoint,
1095
// and the chainhash that the channel resides on. This differs from
1096
// fetchChanBucket in that it returns a writeable bucket.
1097
func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey,
1098
        outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket,
1099
        error) {
9,670✔
1100

9,670✔
1101
        // First fetch the top level bucket which stores all data related to
9,670✔
1102
        // current, active channels.
9,670✔
1103
        openChanBucket := tx.ReadWriteBucket(openChannelBucket)
9,670✔
1104
        if openChanBucket == nil {
9,670✔
1105
                return nil, ErrNoChanDBExists
×
1106
        }
×
1107

1108
        // TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
1109
        // CreateIfNotExists, will return error
1110

1111
        // Within this top level bucket, fetch the bucket dedicated to storing
1112
        // open channel data specific to the remote node.
1113
        nodePub := nodeKey.SerializeCompressed()
9,670✔
1114
        nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
9,670✔
1115
        if nodeChanBucket == nil {
9,670✔
1116
                return nil, ErrNoActiveChannels
×
1117
        }
×
1118

1119
        // We'll then recurse down an additional layer in order to fetch the
1120
        // bucket for this particular chain.
1121
        chainBucket := nodeChanBucket.NestedReadWriteBucket(chainHash[:])
9,670✔
1122
        if chainBucket == nil {
9,670✔
1123
                return nil, ErrNoActiveChannels
×
1124
        }
×
1125

1126
        // With the bucket for the node and chain fetched, we can now go down
1127
        // another level, for this channel itself.
1128
        var chanPointBuf bytes.Buffer
9,670✔
1129
        if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
9,670✔
1130
                return nil, err
×
1131
        }
×
1132
        chanBucket := chainBucket.NestedReadWriteBucket(chanPointBuf.Bytes())
9,670✔
1133
        if chanBucket == nil {
9,670✔
1134
                return nil, ErrChannelNotFound
×
1135
        }
×
1136

1137
        return chanBucket, nil
9,670✔
1138
}
1139

1140
func fetchFinalHtlcsBucketRw(tx kvdb.RwTx,
1141
        chanID lnwire.ShortChannelID) (kvdb.RwBucket, error) {
7✔
1142

7✔
1143
        finalHtlcsBucket, err := tx.CreateTopLevelBucket(finalHtlcsBucket)
7✔
1144
        if err != nil {
7✔
1145
                return nil, err
×
1146
        }
×
1147

1148
        var chanIDBytes [8]byte
7✔
1149
        byteOrder.PutUint64(chanIDBytes[:], chanID.ToUint64())
7✔
1150
        chanBucket, err := finalHtlcsBucket.CreateBucketIfNotExists(
7✔
1151
                chanIDBytes[:],
7✔
1152
        )
7✔
1153
        if err != nil {
7✔
1154
                return nil, err
×
1155
        }
×
1156

1157
        return chanBucket, nil
7✔
1158
}
1159

1160
// fullSync syncs the contents of an OpenChannel while re-using an existing
1161
// database transaction.
1162
func (c *OpenChannel) fullSync(tx kvdb.RwTx) error {
847✔
1163
        // Fetch the outpoint bucket and check if the outpoint already exists.
847✔
1164
        opBucket := tx.ReadWriteBucket(outpointBucket)
847✔
1165
        if opBucket == nil {
847✔
1166
                return ErrNoChanDBExists
×
1167
        }
×
1168
        cidBucket := tx.ReadWriteBucket(chanIDBucket)
847✔
1169
        if cidBucket == nil {
847✔
1170
                return ErrNoChanDBExists
×
1171
        }
×
1172

1173
        var chanPointBuf bytes.Buffer
847✔
1174
        if err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint); err != nil {
847✔
1175
                return err
×
1176
        }
×
1177

1178
        // Now, check if the outpoint exists in our index.
1179
        if opBucket.Get(chanPointBuf.Bytes()) != nil {
851✔
1180
                return ErrChanAlreadyExists
4✔
1181
        }
4✔
1182

1183
        cid := lnwire.NewChanIDFromOutPoint(c.FundingOutpoint)
847✔
1184
        if cidBucket.Get(cid[:]) != nil {
847✔
1185
                return ErrChanAlreadyExists
×
1186
        }
×
1187

1188
        status := uint8(outpointOpen)
847✔
1189

847✔
1190
        // Write the status of this outpoint as the first entry in a tlv
847✔
1191
        // stream.
847✔
1192
        statusRecord := tlv.MakePrimitiveRecord(indexStatusType, &status)
847✔
1193
        opStream, err := tlv.NewStream(statusRecord)
847✔
1194
        if err != nil {
847✔
1195
                return err
×
1196
        }
×
1197

1198
        var b bytes.Buffer
847✔
1199
        if err := opStream.Encode(&b); err != nil {
847✔
1200
                return err
×
1201
        }
×
1202

1203
        // Add the outpoint to our outpoint index with the tlv stream.
1204
        if err := opBucket.Put(chanPointBuf.Bytes(), b.Bytes()); err != nil {
847✔
1205
                return err
×
1206
        }
×
1207

1208
        if err := cidBucket.Put(cid[:], []byte{}); err != nil {
847✔
1209
                return err
×
1210
        }
×
1211

1212
        // First fetch the top level bucket which stores all data related to
1213
        // current, active channels.
1214
        openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
847✔
1215
        if err != nil {
847✔
1216
                return err
×
1217
        }
×
1218

1219
        // Within this top level bucket, fetch the bucket dedicated to storing
1220
        // open channel data specific to the remote node.
1221
        nodePub := c.IdentityPub.SerializeCompressed()
847✔
1222
        nodeChanBucket, err := openChanBucket.CreateBucketIfNotExists(nodePub)
847✔
1223
        if err != nil {
847✔
1224
                return err
×
1225
        }
×
1226

1227
        // We'll then recurse down an additional layer in order to fetch the
1228
        // bucket for this particular chain.
1229
        chainBucket, err := nodeChanBucket.CreateBucketIfNotExists(c.ChainHash[:])
847✔
1230
        if err != nil {
847✔
1231
                return err
×
1232
        }
×
1233

1234
        // With the bucket for the node fetched, we can now go down another
1235
        // level, creating the bucket for this channel itself.
1236
        chanBucket, err := chainBucket.CreateBucket(
847✔
1237
                chanPointBuf.Bytes(),
847✔
1238
        )
847✔
1239
        switch {
847✔
1240
        case err == kvdb.ErrBucketExists:
×
1241
                // If this channel already exists, then in order to avoid
×
1242
                // overriding it, we'll return an error back up to the caller.
×
1243
                return ErrChanAlreadyExists
×
1244
        case err != nil:
×
1245
                return err
×
1246
        }
1247

1248
        return putOpenChannel(chanBucket, c)
847✔
1249
}
1250

1251
// MarkAsOpen marks a channel as fully open given a locator that uniquely
1252
// describes its location within the chain.
1253
func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
167✔
1254
        c.Lock()
167✔
1255
        defer c.Unlock()
167✔
1256

167✔
1257
        if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
334✔
1258
                chanBucket, err := fetchChanBucketRw(
167✔
1259
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
167✔
1260
                )
167✔
1261
                if err != nil {
167✔
1262
                        return err
×
1263
                }
×
1264

1265
                channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
167✔
1266
                if err != nil {
167✔
1267
                        return err
×
1268
                }
×
1269

1270
                channel.IsPending = false
167✔
1271
                channel.ShortChannelID = openLoc
167✔
1272

167✔
1273
                return putOpenChannel(chanBucket, channel)
167✔
1274
        }, func() {}); err != nil {
167✔
1275
                return err
×
1276
        }
×
1277

1278
        c.IsPending = false
167✔
1279
        c.ShortChannelID = openLoc
167✔
1280
        c.Packager = NewChannelPackager(openLoc)
167✔
1281

167✔
1282
        return nil
167✔
1283
}
1284

1285
// MarkRealScid marks the zero-conf channel's confirmed ShortChannelID. This
1286
// should only be done if IsZeroConf returns true.
1287
func (c *OpenChannel) MarkRealScid(realScid lnwire.ShortChannelID) error {
11✔
1288
        c.Lock()
11✔
1289
        defer c.Unlock()
11✔
1290

11✔
1291
        if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
22✔
1292
                chanBucket, err := fetchChanBucketRw(
11✔
1293
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
11✔
1294
                )
11✔
1295
                if err != nil {
11✔
1296
                        return err
×
1297
                }
×
1298

1299
                channel, err := fetchOpenChannel(
11✔
1300
                        chanBucket, &c.FundingOutpoint,
11✔
1301
                )
11✔
1302
                if err != nil {
11✔
1303
                        return err
×
1304
                }
×
1305

1306
                channel.confirmedScid = realScid
11✔
1307

11✔
1308
                return putOpenChannel(chanBucket, channel)
11✔
1309
        }, func() {}); err != nil {
11✔
1310
                return err
×
1311
        }
×
1312

1313
        c.confirmedScid = realScid
11✔
1314

11✔
1315
        return nil
11✔
1316
}
1317

1318
// MarkScidAliasNegotiated adds ScidAliasFeatureBit to ChanType in-memory and
1319
// in the database.
1320
func (c *OpenChannel) MarkScidAliasNegotiated() error {
4✔
1321
        c.Lock()
4✔
1322
        defer c.Unlock()
4✔
1323

4✔
1324
        if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
8✔
1325
                chanBucket, err := fetchChanBucketRw(
4✔
1326
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
4✔
1327
                )
4✔
1328
                if err != nil {
4✔
1329
                        return err
×
1330
                }
×
1331

1332
                channel, err := fetchOpenChannel(
4✔
1333
                        chanBucket, &c.FundingOutpoint,
4✔
1334
                )
4✔
1335
                if err != nil {
4✔
1336
                        return err
×
1337
                }
×
1338

1339
                channel.ChanType |= ScidAliasFeatureBit
4✔
1340
                return putOpenChannel(chanBucket, channel)
4✔
1341
        }, func() {}); err != nil {
4✔
1342
                return err
×
1343
        }
×
1344

1345
        c.ChanType |= ScidAliasFeatureBit
4✔
1346

4✔
1347
        return nil
4✔
1348
}
1349

1350
// MarkDataLoss sets the channel status to LocalDataLoss and stores the
1351
// passed commitPoint for use to retrieve funds in case the remote force closes
1352
// the channel.
1353
func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
8✔
1354
        c.Lock()
8✔
1355
        defer c.Unlock()
8✔
1356

8✔
1357
        var b bytes.Buffer
8✔
1358
        if err := WriteElement(&b, commitPoint); err != nil {
8✔
1359
                return err
×
1360
        }
×
1361

1362
        putCommitPoint := func(chanBucket kvdb.RwBucket) error {
16✔
1363
                return chanBucket.Put(dataLossCommitPointKey, b.Bytes())
8✔
1364
        }
8✔
1365

1366
        return c.putChanStatus(ChanStatusLocalDataLoss, putCommitPoint)
8✔
1367
}
1368

1369
// DataLossCommitPoint retrieves the stored commit point set during
1370
// MarkDataLoss. If not found ErrNoCommitPoint is returned.
1371
func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, error) {
4✔
1372
        var commitPoint *btcec.PublicKey
4✔
1373

4✔
1374
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
8✔
1375
                chanBucket, err := fetchChanBucket(
4✔
1376
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
4✔
1377
                )
4✔
1378
                switch err {
4✔
1379
                case nil:
4✔
1380
                case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
×
1381
                        return ErrNoCommitPoint
×
1382
                default:
×
1383
                        return err
×
1384
                }
1385

1386
                bs := chanBucket.Get(dataLossCommitPointKey)
4✔
1387
                if bs == nil {
4✔
1388
                        return ErrNoCommitPoint
×
1389
                }
×
1390
                r := bytes.NewReader(bs)
4✔
1391
                if err := ReadElements(r, &commitPoint); err != nil {
4✔
1392
                        return err
×
1393
                }
×
1394

1395
                return nil
4✔
1396
        }, func() {
4✔
1397
                commitPoint = nil
4✔
1398
        })
4✔
1399
        if err != nil {
4✔
1400
                return nil, err
×
1401
        }
×
1402

1403
        return commitPoint, nil
4✔
1404
}
1405

1406
// MarkBorked marks the event when the channel has reached an irreconcilable
1407
// state, such as a channel breach or state desynchronization. Borked channels
1408
// should never be added to the switch.
1409
func (c *OpenChannel) MarkBorked() error {
5✔
1410
        c.Lock()
5✔
1411
        defer c.Unlock()
5✔
1412

5✔
1413
        return c.putChanStatus(ChanStatusBorked)
5✔
1414
}
5✔
1415

1416
// SecondCommitmentPoint returns the second per-commitment-point for use in the
1417
// channel_ready message.
1418
func (c *OpenChannel) SecondCommitmentPoint() (*btcec.PublicKey, error) {
4✔
1419
        c.RLock()
4✔
1420
        defer c.RUnlock()
4✔
1421

4✔
1422
        // Since we start at commitment height = 0, the second per commitment
4✔
1423
        // point is actually at the 1st index.
4✔
1424
        revocation, err := c.RevocationProducer.AtIndex(1)
4✔
1425
        if err != nil {
4✔
1426
                return nil, err
×
1427
        }
×
1428

1429
        return input.ComputeCommitmentPoint(revocation[:]), nil
4✔
1430
}
1431

1432
var (
1433
        // taprootRevRootKey is the key used to derive the revocation root for
1434
        // the taproot nonces. This is done via HMAC of the existing revocation
1435
        // root.
1436
        taprootRevRootKey = []byte("taproot-rev-root")
1437
)
1438

1439
// DeriveMusig2Shachain derives a shachain producer for the taproot channel
1440
// from normal shachain revocation root.
1441
func DeriveMusig2Shachain(revRoot shachain.Producer) (shachain.Producer, error) { //nolint:lll
923✔
1442
        // In order to obtain the revocation root hash to create the taproot
923✔
1443
        // revocation, we'll encode the producer into a buffer, then use that
923✔
1444
        // to derive the shachain root needed.
923✔
1445
        var rootHashBuf bytes.Buffer
923✔
1446
        if err := revRoot.Encode(&rootHashBuf); err != nil {
923✔
1447
                return nil, fmt.Errorf("unable to encode producer: %w", err)
×
1448
        }
×
1449

1450
        revRootHash := chainhash.HashH(rootHashBuf.Bytes())
923✔
1451

923✔
1452
        // For taproot channel types, we'll also generate a distinct shachain
923✔
1453
        // root using the same seed information. We'll use this to generate
923✔
1454
        // verification nonces for the channel. We'll bind with this a simple
923✔
1455
        // hmac.
923✔
1456
        taprootRevHmac := hmac.New(sha256.New, taprootRevRootKey)
923✔
1457
        if _, err := taprootRevHmac.Write(revRootHash[:]); err != nil {
923✔
1458
                return nil, err
×
1459
        }
×
1460

1461
        taprootRevRoot := taprootRevHmac.Sum(nil)
923✔
1462

923✔
1463
        // Once we have the root, we can then generate our shachain producer
923✔
1464
        // and from that generate the per-commitment point.
923✔
1465
        return shachain.NewRevocationProducerFromBytes(
923✔
1466
                taprootRevRoot,
923✔
1467
        )
923✔
1468
}
1469

1470
// NewMusigVerificationNonce generates the local or verification nonce for
1471
// another musig2 session. In order to permit our implementation to not have to
1472
// write any secret nonce state to disk, we'll use the _next_ shachain
1473
// pre-image as our primary randomness source. When used to generate the nonce
1474
// again to broadcast our commitment, the current height will be used.
1475
func NewMusigVerificationNonce(pubKey *btcec.PublicKey, targetHeight uint64,
1476
        shaGen shachain.Producer) (*musig2.Nonces, error) {
90✔
1477

90✔
1478
        // Now that we know what height we need, we'll grab the shachain
90✔
1479
        // pre-image at the target destination.
90✔
1480
        nextPreimage, err := shaGen.AtIndex(targetHeight)
90✔
1481
        if err != nil {
90✔
1482
                return nil, err
×
1483
        }
×
1484

1485
        shaChainRand := musig2.WithCustomRand(bytes.NewBuffer(nextPreimage[:]))
90✔
1486
        pubKeyOpt := musig2.WithPublicKey(pubKey)
90✔
1487

90✔
1488
        return musig2.GenNonces(pubKeyOpt, shaChainRand)
90✔
1489
}
1490
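A hedged usage sketch of the two helpers above, showing how the verification nonce for a given commitment height could be re-derived after a restart; regenVerificationNonce and its parameters are illustrative assumptions, not part of channeldb.

// regenVerificationNonce derives the taproot shachain producer from the
// channel's revocation root and regenerates the musig2 verification nonce
// for the given commitment height. The result is deterministic in its
// inputs, so no secret nonce state needs to be persisted across restarts.
func regenVerificationNonce(pubKey *btcec.PublicKey, height uint64,
	revRoot shachain.Producer) (*musig2.Nonces, error) {

	taprootProducer, err := DeriveMusig2Shachain(revRoot)
	if err != nil {
		return nil, err
	}

	return NewMusigVerificationNonce(pubKey, height, taprootProducer)
}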

1491
// ChanSyncMsg returns the ChannelReestablish message that should be sent upon
1492
// reconnection with the remote peer that we're maintaining this channel with.
1493
// The information contained within this message is necessary to re-sync our
1494
// commitment chains in the case of a lost or only partially processed message.
1495
// When the remote party receives this message one of three things may happen:
1496
//
1497
//  1. We're fully synced and no messages need to be sent.
1498
//  2. We didn't get the last CommitSig message they sent, so they'll re-send
1499
//     it.
1500
//  3. We didn't get the last RevokeAndAck message they sent, so they'll
1501
//     re-send it.
1502
//
1503
// If this is a restored channel, having status ChanStatusRestored, then we'll
1504
// modify our typical chan sync message to ensure they force close even if
1505
// we're on the very first state.
1506
func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) {
240✔
1507
        c.Lock()
240✔
1508
        defer c.Unlock()
240✔
1509

240✔
1510
        // The remote commitment height that we'll send in the
240✔
1511
        // ChannelReestablish message is our current commitment height plus
240✔
1512
        // one. If the receiver thinks that our commitment height is actually
240✔
1513
        // *equal* to this value, then they'll re-send the last commitment that
240✔
1514
        // they sent but we never fully processed.
240✔
1515
        localHeight := c.LocalCommitment.CommitHeight
240✔
1516
        nextLocalCommitHeight := localHeight + 1
240✔
1517

240✔
1518
        // The second value we'll send is the height of the remote commitment
240✔
1519
        // from our PoV. If the receiver thinks that their height is actually
240✔
1520
        // *one plus* this value, then they'll re-send their last revocation.
240✔
1521
        remoteChainTipHeight := c.RemoteCommitment.CommitHeight
240✔
1522

240✔
1523
        // If this channel has undergone a commitment update, then in order to
240✔
1524
        // prove to the remote party our knowledge of their prior commitment
240✔
1525
        // state, we'll also send over the last commitment secret that the
240✔
1526
        // remote party sent.
240✔
1527
        var lastCommitSecret [32]byte
240✔
1528
        if remoteChainTipHeight != 0 {
298✔
1529
                remoteSecret, err := c.RevocationStore.LookUp(
58✔
1530
                        remoteChainTipHeight - 1,
58✔
1531
                )
58✔
1532
                if err != nil {
58✔
1533
                        return nil, err
×
1534
                }
×
1535
                lastCommitSecret = [32]byte(*remoteSecret)
58✔
1536
        }
1537

1538
        // Additionally, we'll send over the current unrevoked commitment on
1539
        // our local commitment transaction.
1540
        currentCommitSecret, err := c.RevocationProducer.AtIndex(
240✔
1541
                localHeight,
240✔
1542
        )
240✔
1543
        if err != nil {
240✔
1544
                return nil, err
×
1545
        }
×
1546

1547
        // If we've restored this channel, then we'll purposefully give them an
1548
        // invalid LocalUnrevokedCommitPoint so they'll force close the channel
1549
        // allowing us to sweep our funds.
1550
        if c.hasChanStatus(ChanStatusRestored) {
244✔
1551
                currentCommitSecret[0] ^= 1
4✔
1552

4✔
1553
                // If this is a tweakless channel, then we'll purposefully send
4✔
1554
                // a next local height that's invalid to trigger a force close
4✔
1555
                // on their end. We do this as tweakless channels don't require
4✔
1556
                // that the commitment point is valid, only that it's present.
4✔
1557
                if c.ChanType.IsTweakless() {
8✔
1558
                        nextLocalCommitHeight = 0
4✔
1559
                }
4✔
1560
        }
1561

1562
        // If this is a taproot channel, then we'll need to generate our next
1563
        // verification nonce to send to the remote party. They'll use this to
1564
        // sign the next update to our commitment transaction.
1565
        var nextTaprootNonce lnwire.OptMusig2NonceTLV
240✔
1566
        if c.ChanType.IsTaproot() {
252✔
1567
                taprootRevProducer, err := DeriveMusig2Shachain(
12✔
1568
                        c.RevocationProducer,
12✔
1569
                )
12✔
1570
                if err != nil {
12✔
1571
                        return nil, err
×
1572
                }
×
1573

1574
                nextNonce, err := NewMusigVerificationNonce(
12✔
1575
                        c.LocalChanCfg.MultiSigKey.PubKey,
12✔
1576
                        nextLocalCommitHeight, taprootRevProducer,
12✔
1577
                )
12✔
1578
                if err != nil {
12✔
1579
                        return nil, fmt.Errorf("unable to gen next "+
×
1580
                                "nonce: %w", err)
×
1581
                }
×
1582

1583
                nextTaprootNonce = lnwire.SomeMusig2Nonce(nextNonce.PubNonce)
12✔
1584
        }
1585

1586
        return &lnwire.ChannelReestablish{
240✔
1587
                ChanID: lnwire.NewChanIDFromOutPoint(
240✔
1588
                        c.FundingOutpoint,
240✔
1589
                ),
240✔
1590
                NextLocalCommitHeight:  nextLocalCommitHeight,
240✔
1591
                RemoteCommitTailHeight: remoteChainTipHeight,
240✔
1592
                LastRemoteCommitSecret: lastCommitSecret,
240✔
1593
                LocalUnrevokedCommitPoint: input.ComputeCommitmentPoint(
240✔
1594
                        currentCommitSecret[:],
240✔
1595
                ),
240✔
1596
                LocalNonce: nextTaprootNonce,
240✔
1597
        }, nil
240✔
1598
}
1599
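A hedged sketch of consuming ChanSyncMsg on reconnect; logChanSync is illustrative only and simply reads back the two heights described in the comment above.

// logChanSync builds the ChannelReestablish message for a channel and prints
// the heights the remote peer will compare against its own state.
func logChanSync(channel *OpenChannel) error {
	reestablish, err := channel.ChanSyncMsg()
	if err != nil {
		return err
	}

	// Our local commit height plus one: if the peer believes our height
	// already equals this, it re-sends its last CommitSig.
	fmt.Println("next local commit height:", reestablish.NextLocalCommitHeight)

	// The remote commitment tail from our PoV: if the peer believes its
	// height is one above this, it re-sends its last RevokeAndAck.
	fmt.Println("remote commit tail height:", reestablish.RemoteCommitTailHeight)

	return nil
}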

1600
// MarkShutdownSent serialises and persists the given ShutdownInfo for this
1601
// channel. Persisting this info represents the fact that we have sent the
1602
// Shutdown message to the remote side and hence that we should re-transmit the
1603
// same Shutdown message on re-establish.
1604
func (c *OpenChannel) MarkShutdownSent(info *ShutdownInfo) error {
15✔
1605
        c.Lock()
15✔
1606
        defer c.Unlock()
15✔
1607

15✔
1608
        return c.storeShutdownInfo(info)
15✔
1609
}
15✔
1610

1611
// storeShutdownInfo serialises the ShutdownInfo and persists it under the
1612
// shutdownInfoKey.
1613
func (c *OpenChannel) storeShutdownInfo(info *ShutdownInfo) error {
15✔
1614
        var b bytes.Buffer
15✔
1615
        err := info.encode(&b)
15✔
1616
        if err != nil {
15✔
1617
                return err
×
1618
        }
×
1619

1620
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
30✔
1621
                chanBucket, err := fetchChanBucketRw(
15✔
1622
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
15✔
1623
                )
15✔
1624
                if err != nil {
15✔
1625
                        return err
×
1626
                }
×
1627

1628
                return chanBucket.Put(shutdownInfoKey, b.Bytes())
15✔
1629
        }, func() {})
15✔
1630
}
1631

1632
// ShutdownInfo decodes the shutdown info stored for this channel and returns
1633
// the result. If no shutdown info has been persisted for this channel then the
1634
// ErrNoShutdownInfo error is returned.
1635
func (c *OpenChannel) ShutdownInfo() (fn.Option[ShutdownInfo], error) {
8✔
1636
        c.RLock()
8✔
1637
        defer c.RUnlock()
8✔
1638

8✔
1639
        var shutdownInfo *ShutdownInfo
8✔
1640
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
16✔
1641
                chanBucket, err := fetchChanBucket(
8✔
1642
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
8✔
1643
                )
8✔
1644
                switch {
8✔
1645
                case err == nil:
8✔
1646
                case errors.Is(err, ErrNoChanDBExists),
1647
                        errors.Is(err, ErrNoActiveChannels),
1648
                        errors.Is(err, ErrChannelNotFound):
3✔
1649

3✔
1650
                        return ErrNoShutdownInfo
3✔
1651
                default:
×
1652
                        return err
×
1653
                }
1654

1655
                shutdownInfoBytes := chanBucket.Get(shutdownInfoKey)
8✔
1656
                if shutdownInfoBytes == nil {
14✔
1657
                        return ErrNoShutdownInfo
6✔
1658
                }
6✔
1659

1660
                shutdownInfo, err = decodeShutdownInfo(shutdownInfoBytes)
6✔
1661

6✔
1662
                return err
6✔
1663
        }, func() {
8✔
1664
                shutdownInfo = nil
8✔
1665
        })
8✔
1666
        if err != nil {
14✔
1667
                return fn.None[ShutdownInfo](), err
6✔
1668
        }
6✔
1669

1670
        return fn.Some[ShutdownInfo](*shutdownInfo), nil
6✔
1671
}
1672

1673
// isBorked returns true if the channel has been marked as borked in the
1674
// database. This requires an existing database transaction to already be
1675
// active.
1676
//
1677
// NOTE: The primary mutex should already be held before this method is called.
1678
func (c *OpenChannel) isBorked(chanBucket kvdb.RBucket) (bool, error) {
8,841✔
1679
        channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
8,841✔
1680
        if err != nil {
8,841✔
1681
                return false, err
×
1682
        }
×
1683

1684
        return channel.chanStatus != ChanStatusDefault, nil
8,841✔
1685
}
1686

1687
// MarkCommitmentBroadcasted marks the channel to indicate that a commitment transaction has
1688
// been broadcast, either our own or the remote, and we should watch the chain
1689
// for it to confirm before taking any further action. It takes as argument the
1690
// closing tx _we believe_ will appear in the chain. This is only used to
1691
// republish this tx at startup to ensure propagation, and we should still
1692
// handle the case where a different tx actually hits the chain.
1693
func (c *OpenChannel) MarkCommitmentBroadcasted(closeTx *wire.MsgTx,
1694
        closer lntypes.ChannelParty) error {
12✔
1695

12✔
1696
        return c.markBroadcasted(
12✔
1697
                ChanStatusCommitBroadcasted, forceCloseTxKey, closeTx,
12✔
1698
                closer,
12✔
1699
        )
12✔
1700
}
12✔
1701

1702
// MarkCoopBroadcasted marks the channel to indicate that a cooperative close
1703
// transaction has been broadcast, either our own or the remote, and that we
1704
// should watch the chain for it to confirm before taking further action. It
1705
// takes as argument a cooperative close tx that could appear on chain, and
1706
// should be rebroadcast upon startup. This is only used to republish and
1707
// ensure propagation, and we should still handle the case where a different tx
1708
// actually hits the chain.
1709
func (c *OpenChannel) MarkCoopBroadcasted(closeTx *wire.MsgTx,
1710
        closer lntypes.ChannelParty) error {
42✔
1711

42✔
1712
        return c.markBroadcasted(
42✔
1713
                ChanStatusCoopBroadcasted, coopCloseTxKey, closeTx,
42✔
1714
                closer,
42✔
1715
        )
42✔
1716
}
42✔
1717

1718
// markBroadcasted is a helper function which modifies the channel status of the
1719
// receiving channel and inserts a close transaction under the requested key,
1720
// which should specify either a coop or force close. It adds a status which
1721
// indicates the party that initiated the channel close.
1722
func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte,
1723
        closeTx *wire.MsgTx, closer lntypes.ChannelParty) error {
50✔
1724

50✔
1725
        c.Lock()
50✔
1726
        defer c.Unlock()
50✔
1727

50✔
1728
        // If a closing tx is provided, we'll generate a closure to write the
50✔
1729
        // transaction in the appropriate bucket under the given key.
50✔
1730
        var putClosingTx func(kvdb.RwBucket) error
50✔
1731
        if closeTx != nil {
75✔
1732
                var b bytes.Buffer
25✔
1733
                if err := WriteElement(&b, closeTx); err != nil {
25✔
1734
                        return err
×
1735
                }
×
1736

1737
                putClosingTx = func(chanBucket kvdb.RwBucket) error {
50✔
1738
                        return chanBucket.Put(key, b.Bytes())
25✔
1739
                }
25✔
1740
        }
1741

1742
        // Add the initiator status to the status provided. These statuses are
1743
        // set in addition to the broadcast status so that we do not need to
1744
        // migrate the original logic which does not store initiator.
1745
        if closer.IsLocal() {
94✔
1746
                status |= ChanStatusLocalCloseInitiator
44✔
1747
        } else {
54✔
1748
                status |= ChanStatusRemoteCloseInitiator
10✔
1749
        }
10✔
1750

1751
        return c.putChanStatus(status, putClosingTx)
50✔
1752
}
1753

1754
// BroadcastedCommitment retrieves the stored unilateral closing tx set during
1755
// MarkCommitmentBroadcasted. If not found ErrNoCloseTx is returned.
1756
func (c *OpenChannel) BroadcastedCommitment() (*wire.MsgTx, error) {
11✔
1757
        return c.getClosingTx(forceCloseTxKey)
11✔
1758
}
11✔
1759

1760
// BroadcastedCooperative retrieves the stored cooperative closing tx set during
1761
// MarkCoopBroadcasted. If not found ErrNoCloseTx is returned.
1762
func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, error) {
13✔
1763
        return c.getClosingTx(coopCloseTxKey)
13✔
1764
}
13✔
1765

1766
// getClosingTx is a helper method which returns the stored closing transaction
1767
// for key. The caller should use either the force or coop closing keys.
1768
func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
20✔
1769
        var closeTx *wire.MsgTx
20✔
1770

20✔
1771
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
40✔
1772
                chanBucket, err := fetchChanBucket(
20✔
1773
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
20✔
1774
                )
20✔
1775
                switch err {
20✔
1776
                case nil:
20✔
1777
                case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
×
1778
                        return ErrNoCloseTx
×
1779
                default:
×
1780
                        return err
×
1781
                }
1782

1783
                bs := chanBucket.Get(key)
20✔
1784
                if bs == nil {
26✔
1785
                        return ErrNoCloseTx
6✔
1786
                }
6✔
1787
                r := bytes.NewReader(bs)
18✔
1788
                return ReadElement(r, &closeTx)
18✔
1789
        }, func() {
20✔
1790
                closeTx = nil
20✔
1791
        })
20✔
1792
        if err != nil {
26✔
1793
                return nil, err
6✔
1794
        }
6✔
1795

1796
        return closeTx, nil
18✔
1797
}
1798

1799
// putChanStatus appends the given status to the channel. fs is an optional
1800
// list of closures that are given the chanBucket in order to atomically add
1801
// extra information together with the new status.
1802
func (c *OpenChannel) putChanStatus(status ChannelStatus,
1803
        fs ...func(kvdb.RwBucket) error) error {
58✔
1804

58✔
1805
        if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
116✔
1806
                chanBucket, err := fetchChanBucketRw(
58✔
1807
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
58✔
1808
                )
58✔
1809
                if err != nil {
58✔
1810
                        return err
×
1811
                }
×
1812

1813
                channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
58✔
1814
                if err != nil {
58✔
1815
                        return err
×
1816
                }
×
1817

1818
                // Add this status to the existing bitvector found in the DB.
1819
                status = channel.chanStatus | status
58✔
1820
                channel.chanStatus = status
58✔
1821

58✔
1822
                if err := putOpenChannel(chanBucket, channel); err != nil {
58✔
1823
                        return err
×
1824
                }
×
1825

1826
                for _, f := range fs {
112✔
1827
                        // Skip execution of nil closures.
54✔
1828
                        if f == nil {
83✔
1829
                                continue
29✔
1830
                        }
1831

1832
                        if err := f(chanBucket); err != nil {
29✔
1833
                                return err
×
1834
                        }
×
1835
                }
1836

1837
                return nil
58✔
1838
        }, func() {}); err != nil {
58✔
1839
                return err
×
1840
        }
×
1841

1842
        // Update the in-memory representation to keep it in sync with the DB.
1843
        c.chanStatus = status
58✔
1844

58✔
1845
        return nil
58✔
1846
}
1847

1848
func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
4✔
1849
        if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
8✔
1850
                chanBucket, err := fetchChanBucketRw(
4✔
1851
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
4✔
1852
                )
4✔
1853
                if err != nil {
4✔
1854
                        return err
×
1855
                }
×
1856

1857
                channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
4✔
1858
                if err != nil {
4✔
1859
                        return err
×
1860
                }
×
1861

1862
                // Unset this bit in the bitvector on disk.
1863
                status = channel.chanStatus & ^status
4✔
1864
                channel.chanStatus = status
4✔
1865

4✔
1866
                return putOpenChannel(chanBucket, channel)
4✔
1867
        }, func() {}); err != nil {
4✔
1868
                return err
×
1869
        }
×
1870

1871
        // Update the in-memory representation to keep it in sync with the DB.
1872
        c.chanStatus = status
4✔
1873

4✔
1874
        return nil
4✔
1875
}
1876
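A hedged illustration of the status bitvector semantics above: putChanStatus ORs new bits into the stored status, clearChanStatus removes them, and markBroadcasted adds an initiator bit alongside the broadcast bit. hasStatusBits is illustrative only, not part of the package.

// hasStatusBits reports whether every bit set in query is also set in
// current, the same kind of check used when testing a channel's status.
func hasStatusBits(current, query ChannelStatus) bool {
	return current&query == query
}

// For example, a locally initiated cooperative close carries both bits:
//
//	status := ChanStatusCoopBroadcasted | ChanStatusLocalCloseInitiator
//	hasStatusBits(status, ChanStatusCoopBroadcasted)     // true
//	hasStatusBits(status, ChanStatusLocalCloseInitiator) // true
//
// and clearing ChanStatusCoopBroadcasted leaves only the initiator bit set.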

1877
// putOpenChannel serializes and stores the current state of the channel in its
1878
// entirety.
1879
func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
1,189✔
1880
        // First, we'll write out all the relatively static fields, that are
1,189✔
1881
        // decided upon initial channel creation.
1,189✔
1882
        if err := putChanInfo(chanBucket, channel); err != nil {
1,189✔
1883
                return fmt.Errorf("unable to store chan info: %w", err)
×
1884
        }
×
1885

1886
        // With the static channel info written out, we'll now write out the
1887
        // current commitment state for both parties.
1888
        if err := putChanCommitments(chanBucket, channel); err != nil {
1,189✔
1889
                return fmt.Errorf("unable to store chan commitments: %w", err)
×
1890
        }
×
1891

1892
        // Next, if this is a frozen channel, we'll add in the axillary
1893
        // information we need to store.
1894
        if channel.ChanType.IsFrozen() || channel.ChanType.HasLeaseExpiration() {
1,621✔
1895
                err := storeThawHeight(
432✔
1896
                        chanBucket, channel.ThawHeight,
432✔
1897
                )
432✔
1898
                if err != nil {
432✔
1899
                        return fmt.Errorf("unable to store thaw height: %w",
×
1900
                                err)
×
1901
                }
×
1902
        }
1903

1904
        // Finally, we'll write out the revocation state for both parties
1905
        // within a distinct key space.
1906
        if err := putChanRevocationState(chanBucket, channel); err != nil {
1,189✔
1907
                return fmt.Errorf("unable to store chan revocations: %w", err)
×
1908
        }
×
1909

1910
        return nil
1,189✔
1911
}
1912

1913
// fetchOpenChannel retrieves and deserializes (including decrypting
1914
// sensitive material) the complete channel currently stored under the passed chanPoint.
1915
func fetchOpenChannel(chanBucket kvdb.RBucket,
1916
        chanPoint *wire.OutPoint) (*OpenChannel, error) {
9,952✔
1917

9,952✔
1918
        channel := &OpenChannel{
9,952✔
1919
                FundingOutpoint: *chanPoint,
9,952✔
1920
        }
9,952✔
1921

9,952✔
1922
        // First, we'll read all the static information that changes less
9,952✔
1923
        // frequently from disk.
9,952✔
1924
        if err := fetchChanInfo(chanBucket, channel); err != nil {
9,952✔
1925
                return nil, fmt.Errorf("unable to fetch chan info: %w", err)
×
1926
        }
×
1927

1928
        // With the static information read, we'll now read the current
1929
        // commitment state for both sides of the channel.
1930
        if err := fetchChanCommitments(chanBucket, channel); err != nil {
9,952✔
1931
                return nil, fmt.Errorf("unable to fetch chan commitments: %w",
×
1932
                        err)
×
1933
        }
×
1934

1935
        // Next, if this is a frozen channel, we'll read out the auxiliary
1936
        // information that was stored for it.
1937
        if channel.ChanType.IsFrozen() || channel.ChanType.HasLeaseExpiration() {
10,290✔
1938
                thawHeight, err := fetchThawHeight(chanBucket)
338✔
1939
                if err != nil {
338✔
1940
                        return nil, fmt.Errorf("unable to fetch thaw "+
×
1941
                                "height: %v", err)
×
1942
                }
×
1943

1944
                channel.ThawHeight = thawHeight
338✔
1945
        }
1946

1947
        // Finally, we'll retrieve the current revocation state so we can
1948
        // properly restore the in-memory channel state.
1949
        if err := fetchChanRevocationState(chanBucket, channel); err != nil {
9,952✔
1950
                return nil, fmt.Errorf("unable to fetch chan revocations: %w",
×
1951
                        err)
×
1952
        }
×
1953

1954
        channel.Packager = NewChannelPackager(channel.ShortChannelID)
9,952✔
1955

9,952✔
1956
        return channel, nil
9,952✔
1957
}
1958

1959
// SyncPending writes the contents of the channel to the database while it's in
1960
// the pending (waiting for funding confirmation) state. The IsPending flag
1961
// will be set to true. When the channel's funding transaction is confirmed,
1962
// the channel should be marked as "open" and the IsPending flag set to false.
1963
// Note that this function also creates a LinkNode relationship between this
1964
// newly created channel and a new LinkNode instance. This allows listing all
1965
// channels in the database globally, or according to the LinkNode they were
1966
// created with.
1967
//
1968
// TODO(roasbeef): addr param should eventually be an lnwire.NetAddress type
1969
// that includes service bits.
1970
func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) error {
846✔
1971
        c.Lock()
846✔
1972
        defer c.Unlock()
846✔
1973

846✔
1974
        c.FundingBroadcastHeight = pendingHeight
846✔
1975

846✔
1976
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
1,692✔
1977
                return syncNewChannel(tx, c, []net.Addr{addr})
846✔
1978
        }, func() {})
1,692✔
1979
}
1980

1981
// syncNewChannel will write the passed channel to disk, and also create a
1982
// LinkNode (if needed) for the channel peer.
1983
func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) error {
847✔
1984
        // First, sync all the persistent channel state to disk.
847✔
1985
        if err := c.fullSync(tx); err != nil {
851✔
1986
                return err
4✔
1987
        }
4✔
1988

1989
        nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket)
847✔
1990
        if err != nil {
847✔
1991
                return err
×
1992
        }
×
1993

1994
        // If a LinkNode for this identity public key already exists,
1995
        // then we can exit early.
1996
        nodePub := c.IdentityPub.SerializeCompressed()
847✔
1997
        if nodeInfoBucket.Get(nodePub) != nil {
994✔
1998
                return nil
147✔
1999
        }
147✔
2000

2001
        // Next, we need to establish a (possibly) new LinkNode relationship
2002
        // for this channel. The LinkNode metadata contains reachability,
2003
        // up-time, and service bits related information.
2004
        linkNode := NewLinkNode(
704✔
2005
                &LinkNodeDB{backend: c.Db.backend},
704✔
2006
                wire.MainNet, c.IdentityPub, addrs...,
704✔
2007
        )
704✔
2008

704✔
2009
        // TODO(roasbeef): do away with link node all together?
704✔
2010

704✔
2011
        return putLinkNode(nodeInfoBucket, linkNode)
704✔
2012
}
2013

2014
// UpdateCommitment updates the local commitment state. It locks in the pending
2015
// local updates that were received by us from the remote party. The commitment
2016
// state completely describes the balance state at this point in the commitment
2017
// chain. In addition to that, it persists all the remote log updates that we
2018
// have acked, but not signed a remote commitment for yet. These need to be
2019
// persisted to be able to produce a valid commit signature if a restart would
2020
// occur. This method is to be called when we revoke our prior commitment
2021
// state.
2022
//
2023
// A map is returned of all the htlc resolutions that were locked in this
2024
// commitment. Keys correspond to htlc indices and values indicate whether the
2025
// htlc was settled or failed.
2026
func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
2027
        unsignedAckedUpdates []LogUpdate) (map[uint64]bool, error) {
2,952✔
2028

2,952✔
2029
        c.Lock()
2,952✔
2030
        defer c.Unlock()
2,952✔
2031

2,952✔
2032
        // If this is a restored channel, then we want to avoid mutating the
2,952✔
2033
        // state as all, as it's impossible to do so in a protocol compliant
2,952✔
2034
        // manner.
2,952✔
2035
        if c.hasChanStatus(ChanStatusRestored) {
2,953✔
2036
                return nil, ErrNoRestoredChannelMutation
1✔
2037
        }
1✔
2038

2039
        var finalHtlcs = make(map[uint64]bool)
2,951✔
2040

2,951✔
2041
        err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
5,902✔
2042
                chanBucket, err := fetchChanBucketRw(
2,951✔
2043
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
2,951✔
2044
                )
2,951✔
2045
                if err != nil {
2,951✔
2046
                        return err
×
2047
                }
×
2048

2049
                // If the channel is marked as borked, then for safety reasons,
2050
                // we shouldn't attempt any further updates.
2051
                isBorked, err := c.isBorked(chanBucket)
2,951✔
2052
                if err != nil {
2,951✔
2053
                        return err
×
2054
                }
×
2055
                if isBorked {
2,952✔
2056
                        return ErrChanBorked
1✔
2057
                }
1✔
2058

2059
                if err = putChanInfo(chanBucket, c); err != nil {
2,950✔
2060
                        return fmt.Errorf("unable to store chan info: %w", err)
×
2061
                }
×
2062

2063
                // With the proper bucket fetched, we'll now write the latest
2064
                // commitment state to disk for the target party.
2065
                err = putChanCommitment(
2,950✔
2066
                        chanBucket, newCommitment, true,
2,950✔
2067
                )
2,950✔
2068
                if err != nil {
2,950✔
2069
                        return fmt.Errorf("unable to store chan "+
×
2070
                                "commitment: %v", err)
×
2071
                }
×
2072

2073
                // Persist unsigned but acked remote updates that need to be
2074
                // restored after a restart.
2075
                var b bytes.Buffer
2,950✔
2076
                err = serializeLogUpdates(&b, unsignedAckedUpdates)
2,950✔
2077
                if err != nil {
2,950✔
2078
                        return err
×
2079
                }
×
2080

2081
                err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes())
2,950✔
2082
                if err != nil {
2,950✔
2083
                        return fmt.Errorf("unable to store dangling remote "+
×
2084
                                "updates: %v", err)
×
2085
                }
×
2086

2087
                // Since we have just sent the counterparty a revocation, store true
2088
                // under lastWasRevokeKey.
2089
                var b2 bytes.Buffer
2,950✔
2090
                if err := WriteElements(&b2, true); err != nil {
2,950✔
2091
                        return err
×
2092
                }
×
2093

2094
                if err := chanBucket.Put(lastWasRevokeKey, b2.Bytes()); err != nil {
2,950✔
2095
                        return err
×
2096
                }
×
2097

2098
                // Persist the remote unsigned local updates that are not included
2099
                // in our new commitment.
2100
                updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey)
2,950✔
2101
                if updateBytes == nil {
3,432✔
2102
                        return nil
482✔
2103
                }
482✔
2104

2105
                r := bytes.NewReader(updateBytes)
2,472✔
2106
                updates, err := deserializeLogUpdates(r)
2,472✔
2107
                if err != nil {
2,472✔
2108
                        return err
×
2109
                }
×
2110

2111
                // Get the bucket where settled htlcs are recorded if the user
2112
                // opted in to storing this information.
2113
                var finalHtlcsBucket kvdb.RwBucket
2,472✔
2114
                if c.Db.parent.storeFinalHtlcResolutions {
2,477✔
2115
                        bucket, err := fetchFinalHtlcsBucketRw(
5✔
2116
                                tx, c.ShortChannelID,
5✔
2117
                        )
5✔
2118
                        if err != nil {
5✔
2119
                                return err
×
2120
                        }
×
2121

2122
                        finalHtlcsBucket = bucket
5✔
2123
                }
2124

2125
                var unsignedUpdates []LogUpdate
2,472✔
2126
                for _, upd := range updates {
3,317✔
2127
                        // Gather updates that are not on our local commitment.
845✔
2128
                        if upd.LogIndex >= newCommitment.LocalLogIndex {
845✔
2129
                                unsignedUpdates = append(unsignedUpdates, upd)
×
2130

×
2131
                                continue
×
2132
                        }
2133

2134
                        // The update was locked in. If the update was a
2135
                        // resolution, then store it in the database.
2136
                        err := processFinalHtlc(
845✔
2137
                                finalHtlcsBucket, upd, finalHtlcs,
845✔
2138
                        )
845✔
2139
                        if err != nil {
845✔
2140
                                return err
×
2141
                        }
×
2142
                }
2143

2144
                var b3 bytes.Buffer
2,472✔
2145
                err = serializeLogUpdates(&b3, unsignedUpdates)
2,472✔
2146
                if err != nil {
2,472✔
2147
                        return fmt.Errorf("unable to serialize log updates: %w",
×
2148
                                err)
×
2149
                }
×
2150

2151
                err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b3.Bytes())
2,472✔
2152
                if err != nil {
2,472✔
2153
                        return fmt.Errorf("unable to store unsigned local updates: %w",
×
2154
                                err)
×
2155
                }
×
2156

2157
                return nil
2,472✔
2158
        }, func() {
2,951✔
2159
                finalHtlcs = make(map[uint64]bool)
2,951✔
2160
        })
2,951✔
2161
        if err != nil {
2,952✔
2162
                return nil, err
1✔
2163
        }
1✔
2164

2165
        c.LocalCommitment = *newCommitment
2,950✔
2166

2,950✔
2167
        return finalHtlcs, nil
2,950✔
2168
}
2169
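A hedged sketch of consuming the map returned by UpdateCommitment above; lockInCommitment and the surrounding variables are illustrative assumptions, not part of the package.

// lockInCommitment persists the new local commitment and reports the final
// off-chain outcome of every HTLC that was locked in by it.
func lockInCommitment(channel *OpenChannel, newCommitment *ChannelCommitment,
	unsignedAckedUpdates []LogUpdate) error {

	finalHtlcs, err := channel.UpdateCommitment(
		newCommitment, unsignedAckedUpdates,
	)
	if err != nil {
		return err
	}

	// Keys are HTLC indices; true means settled, false means failed.
	for htlcIndex, settled := range finalHtlcs {
		fmt.Printf("htlc %d locked in, settled=%v\n", htlcIndex, settled)
	}

	return nil
}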

2170
// processFinalHtlc stores a final htlc outcome in the database if signaled via
2171
// the supplied log update. An in-memory htlcs map is updated too.
2172
func processFinalHtlc(finalHtlcsBucket walletdb.ReadWriteBucket, upd LogUpdate,
2173
        finalHtlcs map[uint64]bool) error {
845✔
2174

845✔
2175
        var (
845✔
2176
                settled bool
845✔
2177
                id      uint64
845✔
2178
        )
845✔
2179

845✔
2180
        switch msg := upd.UpdateMsg.(type) {
845✔
2181
        case *lnwire.UpdateFulfillHTLC:
703✔
2182
                settled = true
703✔
2183
                id = msg.ID
703✔
2184

2185
        case *lnwire.UpdateFailHTLC:
135✔
2186
                settled = false
135✔
2187
                id = msg.ID
135✔
2188

2189
        case *lnwire.UpdateFailMalformedHTLC:
7✔
2190
                settled = false
7✔
2191
                id = msg.ID
7✔
2192

2193
        default:
8✔
2194
                return nil
8✔
2195
        }
2196

2197
        // Store the final resolution in the database if a bucket is provided.
2198
        if finalHtlcsBucket != nil {
842✔
2199
                err := putFinalHtlc(
5✔
2200
                        finalHtlcsBucket, id,
5✔
2201
                        FinalHtlcInfo{
5✔
2202
                                Settled:  settled,
5✔
2203
                                Offchain: true,
5✔
2204
                        },
5✔
2205
                )
5✔
2206
                if err != nil {
5✔
2207
                        return err
×
2208
                }
×
2209
        }
2210

2211
        finalHtlcs[id] = settled
837✔
2212

837✔
2213
        return nil
837✔
2214
}
2215

2216
// ActiveHtlcs returns a slice of HTLC's which are currently active on *both*
2217
// commitment transactions.
2218
func (c *OpenChannel) ActiveHtlcs() []HTLC {
3,360✔
2219
        c.RLock()
3,360✔
2220
        defer c.RUnlock()
3,360✔
2221

3,360✔
2222
        // We'll only return HTLC's that are locked into *both* commitment
3,360✔
2223
        // transactions. So we'll iterate through their set of HTLC's to note
3,360✔
2224
        // which ones are present on their commitment.
3,360✔
2225
        remoteHtlcs := make(map[[32]byte]struct{})
3,360✔
2226
        for _, htlc := range c.RemoteCommitment.Htlcs {
432,676✔
2227
                log.Tracef("RemoteCommitment has htlc: id=%v, update=%v "+
429,316✔
2228
                        "incoming=%v", htlc.HtlcIndex, htlc.LogIndex,
429,316✔
2229
                        htlc.Incoming)
429,316✔
2230

429,316✔
2231
                onionHash := sha256.Sum256(htlc.OnionBlob[:])
429,316✔
2232
                remoteHtlcs[onionHash] = struct{}{}
429,316✔
2233
        }
429,316✔
2234

2235
        // Now that we know which HTLC's they have, we'll only mark the HTLC's
2236
        // as active if *we* know them as well.
2237
        activeHtlcs := make([]HTLC, 0, len(remoteHtlcs))
3,360✔
2238
        for _, htlc := range c.LocalCommitment.Htlcs {
432,937✔
2239
                log.Tracef("LocalCommitment has htlc: id=%v, update=%v "+
429,577✔
2240
                        "incoming=%v", htlc.HtlcIndex, htlc.LogIndex,
429,577✔
2241
                        htlc.Incoming)
429,577✔
2242

429,577✔
2243
                onionHash := sha256.Sum256(htlc.OnionBlob[:])
429,577✔
2244
                if _, ok := remoteHtlcs[onionHash]; !ok {
429,866✔
2245
                        log.Tracef("Skipped htlc due to onion mismatch: "+
289✔
2246
                                "id=%v, update=%v incoming=%v",
289✔
2247
                                htlc.HtlcIndex, htlc.LogIndex, htlc.Incoming)
289✔
2248

289✔
2249
                        continue
289✔
2250
                }
2251

2252
                activeHtlcs = append(activeHtlcs, htlc)
429,292✔
2253
        }
2254

2255
        return activeHtlcs
3,360✔
2256
}
2257

2258
// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are
2259
// contained within ChannelDeltas which encode the current state of the
2260
// commitment between state updates.
2261
//
2262
// TODO(roasbeef): save space by using smaller ints at tail end?
2263
type HTLC struct {
2264
        // TODO(yy): can embed an HTLCEntry here.
2265

2266
        // Signature is the signature for the second level covenant transaction
2267
        // for this HTLC. The second level transaction is a timeout tx in the
2268
        // case that this is an outgoing HTLC, and a success tx in the case
2269
        // that this is an incoming HTLC.
2270
        //
2271
        // TODO(roasbeef): make [64]byte instead?
2272
        Signature []byte
2273

2274
        // RHash is the payment hash of the HTLC.
2275
        RHash [32]byte
2276

2277
        // Amt is the amount of milli-satoshis this HTLC escrows.
2278
        Amt lnwire.MilliSatoshi
2279

2280
        // RefundTimeout is the absolute timeout on the HTLC that the sender
2281
        // must wait before reclaiming the funds in limbo.
2282
        RefundTimeout uint32
2283

2284
        // OutputIndex is the output index for this particular HTLC output
2285
        // within the commitment transaction.
2286
        OutputIndex int32
2287

2288
        // Incoming denotes whether we're the receiver or the sender of this
2289
        // HTLC.
2290
        Incoming bool
2291

2292
        // OnionBlob is an opaque blob which is used to complete multi-hop
2293
        // routing.
2294
        OnionBlob [lnwire.OnionPacketSize]byte
2295

2296
        // HtlcIndex is the HTLC counter index of this active, outstanding
2297
        // HTLC. This differs from the LogIndex, as the HtlcIndex is only
2298
        // incremented for each offered HTLC, while the LogIndex is
2299
        // incremented for each update (includes settle+fail).
2300
        HtlcIndex uint64
2301

2302
        // LogIndex is the cumulative log index of this HTLC. This differs
2303
        // from the HtlcIndex as this will be incremented for each new log
2304
        // update added.
2305
        LogIndex uint64
2306

2307
        // ExtraData contains any additional information that was transmitted
2308
        // with the HTLC via TLVs. This data *must* already be encoded as a
2309
        // TLV stream, and may be empty. The length of this data is naturally
2310
        // limited by the space available to TLVs in update_add_htlc:
2311
        // = 65535 bytes (bolt 8 maximum message size):
2312
        // - 2 bytes (bolt 1 message_type)
2313
        // - 32 bytes (channel_id)
2314
        // - 8 bytes (id)
2315
        // - 8 bytes (amount_msat)
2316
        // - 32 bytes (payment_hash)
2317
        // - 4 bytes (cltv_expiry)
2318
        // - 1366 bytes (onion_routing_packet)
2319
        // = 64083 bytes maximum possible TLV stream
2320
        //
2321
        // Note that this extra data is stored inline with the OnionBlob for
2322
        // legacy reasons, see serialization/deserialization functions for
2323
        // detail.
2324
        ExtraData lnwire.ExtraOpaqueData
2325

2326
        // BlindingPoint is an optional blinding point included with the HTLC.
2327
        //
2328
        // Note: this field is not a part of on-disk representation of the
2329
        // HTLC. It is stored in the ExtraData field, which is used to store
2330
        // a TLV stream of additional information associated with the HTLC.
2331
        BlindingPoint lnwire.BlindingPointRecord
2332
}
2333
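The arithmetic from the ExtraData comment above, restated as compile-time constants for readability; the names are illustrative and not defined by channeldb.

const (
	// illustrativeBoltMaxMsgSize is the bolt 8 maximum message size.
	illustrativeBoltMaxMsgSize = 65535

	// illustrativeUpdateAddOverhead sums the fixed update_add_htlc fields:
	// message_type (2) + channel_id (32) + id (8) + amount_msat (8) +
	// payment_hash (32) + cltv_expiry (4) + onion_routing_packet (1366).
	illustrativeUpdateAddOverhead = 2 + 32 + 8 + 8 + 32 + 4 + 1366

	// illustrativeMaxExtraDataLen is the space left for the TLV stream,
	// i.e. 64083 bytes.
	illustrativeMaxExtraDataLen = illustrativeBoltMaxMsgSize -
		illustrativeUpdateAddOverhead
)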

2334
// serializeExtraData encodes a TLV stream of extra data to be stored with a
2335
// HTLC. It uses the update_add_htlc TLV types, because this is where extra
2336
// data is passed with a HTLC. At present blinding points are the only extra
2337
// data that we will store, and the function is a no-op if a nil blinding
2338
// point is provided.
2339
//
2340
// This function MUST be called to persist all HTLC values when they are
2341
// serialized.
2342
func (h *HTLC) serializeExtraData() error {
825,010✔
2343
        var records []tlv.RecordProducer
825,010✔
2344
        h.BlindingPoint.WhenSome(func(b tlv.RecordT[lnwire.BlindingPointTlvType,
825,010✔
2345
                *btcec.PublicKey]) {
825,017✔
2346

7✔
2347
                records = append(records, &b)
7✔
2348
        })
7✔
2349

2350
        return h.ExtraData.PackRecords(records...)
825,010✔
2351
}
2352

2353
// deserializeExtraData extracts TLVs from the extra data persisted for the
2354
// htlc and populates values in the struct accordingly.
2355
//
2356
// This function MUST be called to populate the struct properly when HTLCs
2357
// are deserialized.
2358
func (h *HTLC) deserializeExtraData() error {
1,916,918✔
2359
        if len(h.ExtraData) == 0 {
3,833,833✔
2360
                return nil
1,916,915✔
2361
        }
1,916,915✔
2362

2363
        blindingPoint := h.BlindingPoint.Zero()
7✔
2364
        tlvMap, err := h.ExtraData.ExtractRecords(&blindingPoint)
7✔
2365
        if err != nil {
7✔
2366
                return err
×
2367
        }
×
2368

2369
        if val, ok := tlvMap[h.BlindingPoint.TlvType()]; ok && val == nil {
14✔
2370
                h.BlindingPoint = tlv.SomeRecordT(blindingPoint)
7✔
2371
        }
7✔
2372

2373
        return nil
7✔
2374
}
2375

2376
// SerializeHtlcs writes out the passed set of HTLC's into the passed writer
2377
// using the current default on-disk serialization format.
2378
//
2379
// This inline serialization has been extended to allow storage of extra data
2380
// associated with a HTLC in the following way:
2381
//   - The known-length onion blob (1366 bytes) is serialized as var bytes in
2382
//     WriteElements (ie, the length 1366 was written, followed by the 1366
2383
//     onion bytes).
2384
//   - To include extra data, we append any extra data present to this one
2385
//     variable length of data. Since we know that the onion is strictly 1366
2386
//     bytes, any length after that should be considered to be extra data.
2387
//
2388
// NOTE: This API is NOT stable, the on-disk format will likely change in the
2389
// future.
2390
func SerializeHtlcs(b io.Writer, htlcs ...HTLC) error {
11,227✔
2391
        numHtlcs := uint16(len(htlcs))
11,227✔
2392
        if err := WriteElement(b, numHtlcs); err != nil {
11,227✔
2393
                return err
×
2394
        }
×
2395

2396
        for _, htlc := range htlcs {
836,237✔
2397
                // Populate TLV stream for any additional fields contained
825,010✔
2398
                // in the TLV.
825,010✔
2399
                if err := htlc.serializeExtraData(); err != nil {
825,010✔
2400
                        return err
×
2401
                }
×
2402

2403
                // The onion blob and htlc extra data are stored as a single var
2404
                // bytes blob.
2405
                onionAndExtraData := make(
825,010✔
2406
                        []byte, lnwire.OnionPacketSize+len(htlc.ExtraData),
825,010✔
2407
                )
825,010✔
2408
                copy(onionAndExtraData, htlc.OnionBlob[:])
825,010✔
2409
                copy(onionAndExtraData[lnwire.OnionPacketSize:], htlc.ExtraData)
825,010✔
2410

825,010✔
2411
                if err := WriteElements(b,
825,010✔
2412
                        htlc.Signature, htlc.RHash, htlc.Amt, htlc.RefundTimeout,
825,010✔
2413
                        htlc.OutputIndex, htlc.Incoming, onionAndExtraData,
825,010✔
2414
                        htlc.HtlcIndex, htlc.LogIndex,
825,010✔
2415
                ); err != nil {
825,010✔
2416
                        return err
×
2417
                }
×
2418
        }
2419

2420
        return nil
11,227✔
2421
}
2422
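A hedged sketch of the combined var-bytes layout used by SerializeHtlcs above and re-parsed by DeserializeHtlcs below: the first lnwire.OnionPacketSize (1366) bytes are the onion packet, and any remainder is the TLV-encoded extra data. splitOnionAndExtraData is illustrative only.

// splitOnionAndExtraData separates a stored blob into the fixed-size onion
// packet and the optional trailing extra data.
func splitOnionAndExtraData(blob []byte) ([lnwire.OnionPacketSize]byte,
	[]byte, error) {

	var onion [lnwire.OnionPacketSize]byte

	// At minimum a full onion packet must be present.
	if len(blob) < lnwire.OnionPacketSize {
		return onion, nil, ErrOnionBlobLength
	}
	copy(onion[:], blob[:lnwire.OnionPacketSize])

	// Any remaining bytes are the HTLC's TLV-encoded extra data.
	var extra []byte
	if extraLen := len(blob) - lnwire.OnionPacketSize; extraLen > 0 {
		extra = make([]byte, extraLen)
		copy(extra, blob[lnwire.OnionPacketSize:])
	}

	return onion, extra, nil
}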

2423
// DeserializeHtlcs attempts to read out a slice of HTLC's from the passed
2424
// io.Reader. The bytes within the passed reader MUST have been previously
2425
// written to using the SerializeHtlcs function.
2426
//
2427
// This inline deserialization has been extended to allow storage of extra data
2428
// associated with a HTLC in the following way:
2429
//   - The known-length onion blob (1366 bytes) and any additional data present
2430
//     are read out as a single blob of variable byte data.
2431
//   - They are stored like this to take advantage of the variable space
2432
//     available for extension without migration (see SerializeHtlcs).
2433
//   - The first 1366 bytes are interpreted as the onion blob, and any remaining
2434
//     bytes as extra HTLC data.
2435
//   - This extra HTLC data is expected to be serialized as a TLV stream, and
2436
//     its parsing is left to higher layers.
2437
//
2438
// NOTE: This API is NOT stable, the on-disk format will likely change in the
2439
// future.
2440
func DeserializeHtlcs(r io.Reader) ([]HTLC, error) {
22,925✔
2441
        var numHtlcs uint16
22,925✔
2442
        if err := ReadElement(r, &numHtlcs); err != nil {
22,925✔
2443
                return nil, err
×
2444
        }
×
2445

2446
        var htlcs []HTLC
22,925✔
2447
        if numHtlcs == 0 {
30,174✔
2448
                return htlcs, nil
7,249✔
2449
        }
7,249✔
2450

2451
        htlcs = make([]HTLC, numHtlcs)
15,680✔
2452
        for i := uint16(0); i < numHtlcs; i++ {
1,932,599✔
2453
                var onionAndExtraData []byte
1,916,919✔
2454
                if err := ReadElements(r,
1,916,919✔
2455
                        &htlcs[i].Signature, &htlcs[i].RHash, &htlcs[i].Amt,
1,916,919✔
2456
                        &htlcs[i].RefundTimeout, &htlcs[i].OutputIndex,
1,916,919✔
2457
                        &htlcs[i].Incoming, &onionAndExtraData,
1,916,919✔
2458
                        &htlcs[i].HtlcIndex, &htlcs[i].LogIndex,
1,916,919✔
2459
                ); err != nil {
1,916,919✔
2460
                        return htlcs, err
×
2461
                }
×
2462

2463
                // Sanity check that we have at least the onion blob size we
2464
                // expect.
2465
                if len(onionAndExtraData) < lnwire.OnionPacketSize {
1,916,920✔
2466
                        return nil, ErrOnionBlobLength
1✔
2467
                }
1✔
2468

2469
                // First OnionPacketSize bytes are our fixed length onion
2470
                // packet.
2471
                copy(
1,916,918✔
2472
                        htlcs[i].OnionBlob[:],
1,916,918✔
2473
                        onionAndExtraData[0:lnwire.OnionPacketSize],
1,916,918✔
2474
                )
1,916,918✔
2475

1,916,918✔
2476
                // Any additional bytes belong to extra data. ExtraDataLen
1,916,918✔
2477
                // will be >= 0, because we know that we always have a fixed
1,916,918✔
2478
                // length onion packet.
1,916,918✔
2479
                extraDataLen := len(onionAndExtraData) - lnwire.OnionPacketSize
1,916,918✔
2480
                if extraDataLen > 0 {
1,916,925✔
2481
                        htlcs[i].ExtraData = make([]byte, extraDataLen)
7✔
2482

7✔
2483
                        copy(
7✔
2484
                                htlcs[i].ExtraData,
7✔
2485
                                onionAndExtraData[lnwire.OnionPacketSize:],
7✔
2486
                        )
7✔
2487
                }
7✔
2488

2489
                // Finally, deserialize any TLVs contained in that extra data
2490
                // if they are present.
2491
                if err := htlcs[i].deserializeExtraData(); err != nil {
1,916,918✔
2492
                        return nil, err
×
2493
                }
×
2494
        }
2495

2496
        return htlcs, nil
15,679✔
2497
}
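
// Editor's illustrative sketch, not part of channel.go: splitting a combined
// onion-and-extra-data blob along the fixed onion-packet boundary described
// above. The helper name is hypothetical.
func splitOnionAndExtraData(blob []byte) ([lnwire.OnionPacketSize]byte,
        []byte, error) {

        var onion [lnwire.OnionPacketSize]byte
        if len(blob) < lnwire.OnionPacketSize {
                return onion, nil, ErrOnionBlobLength
        }
        copy(onion[:], blob[:lnwire.OnionPacketSize])

        // Everything after the fixed-size onion packet is extra TLV data,
        // which higher layers parse as a TLV stream.
        var extra []byte
        if len(blob) > lnwire.OnionPacketSize {
                extra = append(extra, blob[lnwire.OnionPacketSize:]...)
        }

        return onion, extra, nil
}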
2498

2499
// Copy returns a full copy of the target HTLC.
2500
func (h *HTLC) Copy() HTLC {
4,616✔
2501
        clone := HTLC{
4,616✔
2502
                Incoming:      h.Incoming,
4,616✔
2503
                Amt:           h.Amt,
4,616✔
2504
                RefundTimeout: h.RefundTimeout,
4,616✔
2505
                OutputIndex:   h.OutputIndex,
4,616✔
2506
        }
4,616✔
2507
        clone.Signature = append([]byte(nil), h.Signature...)
4,616✔
2508
        copy(clone.RHash[:], h.RHash[:])
4,616✔
2509
        clone.ExtraData = append([]byte(nil), h.ExtraData...)
4,616✔
2510

4,616✔
2511
        return clone
4,616✔
2512
}
4,616✔
2513

2514
// LogUpdate represents a pending update to the remote commitment chain. The
2515
// log update may be an add, fail, or settle entry. We maintain this data in
2516
// order to be able to properly retransmit our proposed state if necessary.
2517
type LogUpdate struct {
2518
        // LogIndex is the log index of this proposed commitment update entry.
2519
        LogIndex uint64
2520

2521
        // UpdateMsg is the update message that was included within our
2522
        // local update log. The LogIndex value denotes the log index of this
2523
        // update which will be used when restoring our local update log if
2524
        // we're left with a dangling update on restart.
2525
        UpdateMsg lnwire.Message
2526
}
2527

2528
// serializeLogUpdate writes a log update to the provided io.Writer.
2529
func serializeLogUpdate(w io.Writer, l *LogUpdate) error {
2,675✔
2530
        return WriteElements(w, l.LogIndex, l.UpdateMsg)
2,675✔
2531
}
2,675✔
2532

2533
// deserializeLogUpdate reads a log update from the provided io.Reader.
2534
func deserializeLogUpdate(r io.Reader) (*LogUpdate, error) {
3,279✔
2535
        l := &LogUpdate{}
3,279✔
2536
        if err := ReadElements(r, &l.LogIndex, &l.UpdateMsg); err != nil {
3,279✔
2537
                return nil, err
×
2538
        }
×
2539

2540
        return l, nil
3,279✔
2541
}
2542

2543
// CommitDiff represents the delta needed to apply the state transition between
2544
// two subsequent commitment states. Given state N and state N+1, one is able
2545
// to apply the set of messages contained within the CommitDiff to N to arrive
2546
// at state N+1. Each time a new commitment is extended, we'll write a new
2547
// commitment (along with the full commitment state) to disk so we can
2548
// re-transmit the state in the case of a connection loss or message drop.
2549
type CommitDiff struct {
2550
        // ChannelCommitment is the full commitment state that one would arrive
2551
        // at by applying the set of messages contained in the UpdateDiff to
2552
        // the prior accepted commitment.
2553
        Commitment ChannelCommitment
2554

2555
        // LogUpdates is the set of messages sent prior to the commitment state
2556
        // transition in question. Upon reconnection, if we detect that they
2557
        // don't have the commitment, then we re-send this along with the
2558
        // proper signature.
2559
        LogUpdates []LogUpdate
2560

2561
        // CommitSig is the exact CommitSig message that should be sent after
2562
        // the set of LogUpdates above has been retransmitted. The signatures
2563
        // within this message should properly cover the new commitment state
2564
        // and also the HTLC's within the new commitment state.
2565
        CommitSig *lnwire.CommitSig
2566

2567
        // OpenedCircuitKeys is a set of unique identifiers for any downstream
2568
        // Add packets included in this commitment txn. After a restart, this
2569
        // set of htlcs is acked from the link's incoming mailbox to ensure
2570
        // there isn't an attempt to re-add them to this commitment txn.
2571
        OpenedCircuitKeys []models.CircuitKey
2572

2573
        // ClosedCircuitKeys records the unique identifiers for any settle/fail
2574
        // packets that were resolved by this commitment txn. After a restart,
2575
        // this is used to ensure those circuits are removed from the circuit
2576
        // map, and the downstream packets in the link's mailbox are removed.
2577
        ClosedCircuitKeys []models.CircuitKey
2578

2579
        // AddAcks specifies the locations (commit height, pkg index) of any
2580
        // Adds that were failed/settled in this commit diff. This will ack
2581
        // entries in *this* channel's forwarding packages.
2582
        //
2583
        // NOTE: This value is not serialized, it is used to atomically mark the
2584
        // resolution of adds, such that they will not be reprocessed after a
2585
        // restart.
2586
        AddAcks []AddRef
2587

2588
        // SettleFailAcks specifies the locations (chan id, commit height, pkg
2589
        // index) of any Settles or Fails that were locked into this commit
2590
        // diff, and originate from *another* channel, i.e. the outgoing link.
2591
        //
2592
        // NOTE: This value is not serialized, it is used to atomically acks
2593
        // settles and fails from the forwarding packages of other channels,
2594
        // such that they will not be reforwarded internally after a restart.
2595
        SettleFailAcks []SettleFailRef
2596
}
2597

2598
// serializeLogUpdates serializes provided list of updates to a stream.
2599
func serializeLogUpdates(w io.Writer, logUpdates []LogUpdate) error {
13,813✔
2600
        numUpdates := uint16(len(logUpdates))
13,813✔
2601
        if err := binary.Write(w, byteOrder, numUpdates); err != nil {
13,813✔
2602
                return err
×
2603
        }
×
2604

2605
        for _, diff := range logUpdates {
22,895✔
2606
                err := WriteElements(w, diff.LogIndex, diff.UpdateMsg)
9,082✔
2607
                if err != nil {
9,082✔
2608
                        return err
×
2609
                }
×
2610
        }
2611

2612
        return nil
13,813✔
2613
}
2614

2615
// deserializeLogUpdates deserializes a list of updates from a stream.
2616
func deserializeLogUpdates(r io.Reader) ([]LogUpdate, error) {
8,244✔
2617
        var numUpdates uint16
8,244✔
2618
        if err := binary.Read(r, byteOrder, &numUpdates); err != nil {
8,244✔
2619
                return nil, err
×
2620
        }
×
2621

2622
        logUpdates := make([]LogUpdate, numUpdates)
8,244✔
2623
        for i := 0; i < int(numUpdates); i++ {
15,935✔
2624
                err := ReadElements(r,
7,691✔
2625
                        &logUpdates[i].LogIndex, &logUpdates[i].UpdateMsg,
7,691✔
2626
                )
7,691✔
2627
                if err != nil {
7,691✔
2628
                        return nil, err
×
2629
                }
×
2630
        }
2631
        return logUpdates, nil
8,244✔
2632
}
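
// Editor's illustrative sketch, not part of channel.go: round-tripping a
// small set of updates through the count-prefixed encoding used above (a
// uint16 count followed by each update's LogIndex and UpdateMsg). The
// function name is hypothetical.
func exampleLogUpdateRoundTrip() ([]LogUpdate, error) {
        updates := []LogUpdate{{
                LogIndex:  7,
                UpdateMsg: &lnwire.UpdateAddHTLC{},
        }}

        var b bytes.Buffer
        if err := serializeLogUpdates(&b, updates); err != nil {
                return nil, err
        }

        return deserializeLogUpdates(&b)
}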
2633

2634
func serializeCommitDiff(w io.Writer, diff *CommitDiff) error { // nolint: dupl
2,991✔
2635
        if err := serializeChanCommit(w, &diff.Commitment); err != nil {
2,991✔
2636
                return err
×
2637
        }
×
2638

2639
        if err := WriteElements(w, diff.CommitSig); err != nil {
2,991✔
2640
                return err
×
2641
        }
×
2642

2643
        if err := serializeLogUpdates(w, diff.LogUpdates); err != nil {
2,991✔
2644
                return err
×
2645
        }
×
2646

2647
        numOpenRefs := uint16(len(diff.OpenedCircuitKeys))
2,991✔
2648
        if err := binary.Write(w, byteOrder, numOpenRefs); err != nil {
2,991✔
2649
                return err
×
2650
        }
×
2651

2652
        for _, openRef := range diff.OpenedCircuitKeys {
4,496✔
2653
                err := WriteElements(w, openRef.ChanID, openRef.HtlcID)
1,505✔
2654
                if err != nil {
1,505✔
2655
                        return err
×
2656
                }
×
2657
        }
2658

2659
        numClosedRefs := uint16(len(diff.ClosedCircuitKeys))
2,991✔
2660
        if err := binary.Write(w, byteOrder, numClosedRefs); err != nil {
2,991✔
2661
                return err
×
2662
        }
×
2663

2664
        for _, closedRef := range diff.ClosedCircuitKeys {
3,034✔
2665
                err := WriteElements(w, closedRef.ChanID, closedRef.HtlcID)
43✔
2666
                if err != nil {
43✔
2667
                        return err
×
2668
                }
×
2669
        }
2670

2671
        return nil
2,991✔
2672
}
2673

2674
func deserializeCommitDiff(r io.Reader) (*CommitDiff, error) {
2,972✔
2675
        var (
2,972✔
2676
                d   CommitDiff
2,972✔
2677
                err error
2,972✔
2678
        )
2,972✔
2679

2,972✔
2680
        d.Commitment, err = deserializeChanCommit(r)
2,972✔
2681
        if err != nil {
2,972✔
2682
                return nil, err
×
2683
        }
×
2684

2685
        var msg lnwire.Message
2,972✔
2686
        if err := ReadElements(r, &msg); err != nil {
2,972✔
2687
                return nil, err
×
2688
        }
×
2689
        commitSig, ok := msg.(*lnwire.CommitSig)
2,972✔
2690
        if !ok {
2,972✔
2691
                return nil, fmt.Errorf("expected lnwire.CommitSig, instead "+
×
2692
                        "read: %T", msg)
×
2693
        }
×
2694
        d.CommitSig = commitSig
2,972✔
2695

2,972✔
2696
        d.LogUpdates, err = deserializeLogUpdates(r)
2,972✔
2697
        if err != nil {
2,972✔
2698
                return nil, err
×
2699
        }
×
2700

2701
        var numOpenRefs uint16
2,972✔
2702
        if err := binary.Read(r, byteOrder, &numOpenRefs); err != nil {
2,972✔
2703
                return nil, err
×
2704
        }
×
2705

2706
        d.OpenedCircuitKeys = make([]models.CircuitKey, numOpenRefs)
2,972✔
2707
        for i := 0; i < int(numOpenRefs); i++ {
4,504✔
2708
                err := ReadElements(r,
1,532✔
2709
                        &d.OpenedCircuitKeys[i].ChanID,
1,532✔
2710
                        &d.OpenedCircuitKeys[i].HtlcID)
1,532✔
2711
                if err != nil {
1,532✔
2712
                        return nil, err
×
2713
                }
×
2714
        }
2715

2716
        var numClosedRefs uint16
2,972✔
2717
        if err := binary.Read(r, byteOrder, &numClosedRefs); err != nil {
2,972✔
2718
                return nil, err
×
2719
        }
×
2720

2721
        d.ClosedCircuitKeys = make([]models.CircuitKey, numClosedRefs)
2,972✔
2722
        for i := 0; i < int(numClosedRefs); i++ {
3,013✔
2723
                err := ReadElements(r,
41✔
2724
                        &d.ClosedCircuitKeys[i].ChanID,
41✔
2725
                        &d.ClosedCircuitKeys[i].HtlcID)
41✔
2726
                if err != nil {
41✔
2727
                        return nil, err
×
2728
                }
×
2729
        }
2730

2731
        return &d, nil
2,972✔
2732
}
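
// Editor's note: the on-disk layout produced by serializeCommitDiff above is,
// in order, the serialized ChannelCommitment, the CommitSig message, the
// count-prefixed log updates, a uint16 count of opened circuit keys followed
// by each (ChanID, HtlcID) pair, and finally the closed circuit keys in the
// same count-prefixed form. deserializeCommitDiff reads the fields back in
// exactly this order.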
2733

2734
// AppendRemoteCommitChain appends a new CommitDiff to the end of the
2735
// commitment chain for the remote party. This method is to be used once we
2736
// have prepared a new commitment state for the remote party, but before we
2737
// transmit it to the remote party. The contents of the argument should be
2738
// sufficient to retransmit the updates and signature needed to reconstruct the
2739
// state in full, in the case that we need to retransmit.
2740
func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
2,993✔
2741
        c.Lock()
2,993✔
2742
        defer c.Unlock()
2,993✔
2743

2,993✔
2744
        // If this is a restored channel, then we want to avoid mutating the
2,993✔
2745
        // state at all, as it's impossible to do so in a protocol compliant
2,993✔
2746
        // manner.
2,993✔
2747
        if c.hasChanStatus(ChanStatusRestored) {
2,994✔
2748
                return ErrNoRestoredChannelMutation
1✔
2749
        }
1✔
2750

2751
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
5,984✔
2752
                // First, we'll grab the writable bucket where this channel's
2,992✔
2753
                // data resides.
2,992✔
2754
                chanBucket, err := fetchChanBucketRw(
2,992✔
2755
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
2,992✔
2756
                )
2,992✔
2757
                if err != nil {
2,992✔
2758
                        return err
×
2759
                }
×
2760

2761
                // If the channel is marked as borked, then for safety reasons,
2762
                // we shouldn't attempt any further updates.
2763
                isBorked, err := c.isBorked(chanBucket)
2,992✔
2764
                if err != nil {
2,992✔
2765
                        return err
×
2766
                }
×
2767
                if isBorked {
2,993✔
2768
                        return ErrChanBorked
1✔
2769
                }
1✔
2770

2771
                // Any outgoing settles and fails necessarily have
2772
                // corresponding adds in this channel's forwarding packages.
2773
                // Mark all of these as being fully processed in our forwarding
2774
                // package, which prevents us from reprocessing them after
2775
                // startup.
2776
                err = c.Packager.AckAddHtlcs(tx, diff.AddAcks...)
2,991✔
2777
                if err != nil {
2,991✔
2778
                        return err
×
2779
                }
×
2780

2781
                // Additionally, we ack any fails or settles that are
2782
                // persisted in another channel's forwarding package. This
2783
                // prevents the same fails and settles from being retransmitted
2784
                // after restarts. The actual fail or settle we need to
2785
                // propagate to the remote party is now in the commit diff.
2786
                err = c.Packager.AckSettleFails(tx, diff.SettleFailAcks...)
2,991✔
2787
                if err != nil {
2,991✔
2788
                        return err
×
2789
                }
×
2790

2791
                // We are sending a commitment signature so lastWasRevokeKey should
2792
                // store false.
2793
                var b bytes.Buffer
2,991✔
2794
                if err := WriteElements(&b, false); err != nil {
2,991✔
2795
                        return err
×
2796
                }
×
2797
                if err := chanBucket.Put(lastWasRevokeKey, b.Bytes()); err != nil {
2,991✔
2798
                        return err
×
2799
                }
×
2800

2801
                // TODO(roasbeef): use seqno to derive key for later LCP
2802

2803
                // With the bucket retrieved, we'll now serialize the commit
2804
                // diff itself, and write it to disk.
2805
                var b2 bytes.Buffer
2,991✔
2806
                if err := serializeCommitDiff(&b2, diff); err != nil {
2,991✔
2807
                        return err
×
2808
                }
×
2809
                return chanBucket.Put(commitDiffKey, b2.Bytes())
2,991✔
2810
        }, func() {})
2,992✔
2811
}
2812

2813
// RemoteCommitChainTip returns the "tip" of the current remote commitment
2814
// chain. This value will be non-nil iff, we've created a new commitment for
2815
// the remote party that they haven't yet ACK'd. In this case, their commitment
2816
// chain will have a length of two: their current unrevoked commitment, and
2817
// this new pending commitment. Once they revoked their prior state, we'll swap
2818
// these pointers, causing the tip and the tail to point to the same entry.
2819
func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
1,185✔
2820
        var cd *CommitDiff
1,185✔
2821
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
2,370✔
2822
                chanBucket, err := fetchChanBucket(
1,185✔
2823
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
1,185✔
2824
                )
1,185✔
2825
                switch err {
1,185✔
2826
                case nil:
845✔
2827
                case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
343✔
2828
                        return ErrNoPendingCommit
343✔
2829
                default:
×
2830
                        return err
×
2831
                }
2832

2833
                tipBytes := chanBucket.Get(commitDiffKey)
845✔
2834
                if tipBytes == nil {
1,623✔
2835
                        return ErrNoPendingCommit
778✔
2836
                }
778✔
2837

2838
                tipReader := bytes.NewReader(tipBytes)
71✔
2839
                dcd, err := deserializeCommitDiff(tipReader)
71✔
2840
                if err != nil {
71✔
2841
                        return err
×
2842
                }
×
2843

2844
                cd = dcd
71✔
2845
                return nil
71✔
2846
        }, func() {
1,185✔
2847
                cd = nil
1,185✔
2848
        })
1,185✔
2849
        if err != nil {
2,303✔
2850
                return nil, err
1,118✔
2851
        }
1,118✔
2852

2853
        return cd, err
71✔
2854
}
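
// Editor's illustrative sketch, not part of channel.go: how a caller might
// use RemoteCommitChainTip on reconnect to decide whether the log updates
// and CommitSig stored by AppendRemoteCommitChain need to be retransmitted.
// The function name is hypothetical.
func retransmitPendingCommit(c *OpenChannel) (*CommitDiff, error) {
        diff, err := c.RemoteCommitChainTip()
        switch {
        case err == ErrNoPendingCommit:
                // Nothing is in flight: the remote party has ACK'd our most
                // recent commitment state.
                return nil, nil

        case err != nil:
                return nil, err
        }

        // diff.LogUpdates and diff.CommitSig are what would be re-sent.
        return diff, nil
}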
2855

2856
// UnsignedAckedUpdates retrieves the persisted unsigned acked remote log
2857
// updates that still need to be signed for.
2858
func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) {
765✔
2859
        var updates []LogUpdate
765✔
2860
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
1,530✔
2861
                chanBucket, err := fetchChanBucket(
765✔
2862
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
765✔
2863
                )
765✔
2864
                switch err {
765✔
2865
                case nil:
425✔
2866
                case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
343✔
2867
                        return nil
343✔
2868
                default:
×
2869
                        return err
×
2870
                }
2871

2872
                updateBytes := chanBucket.Get(unsignedAckedUpdatesKey)
425✔
2873
                if updateBytes == nil {
786✔
2874
                        return nil
361✔
2875
                }
361✔
2876

2877
                r := bytes.NewReader(updateBytes)
68✔
2878
                updates, err = deserializeLogUpdates(r)
68✔
2879
                return err
68✔
2880
        }, func() {
765✔
2881
                updates = nil
765✔
2882
        })
765✔
2883
        if err != nil {
765✔
2884
                return nil, err
×
2885
        }
×
2886

2887
        return updates, nil
765✔
2888
}
2889

2890
// RemoteUnsignedLocalUpdates retrieves the persisted, unsigned local log
2891
// updates that the remote still needs to sign for.
2892
func (c *OpenChannel) RemoteUnsignedLocalUpdates() ([]LogUpdate, error) {
764✔
2893
        var updates []LogUpdate
764✔
2894
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
1,528✔
2895
                chanBucket, err := fetchChanBucket(
764✔
2896
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
764✔
2897
                )
764✔
2898
                switch err {
764✔
2899
                case nil:
424✔
2900
                        break
424✔
2901
                case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
343✔
2902
                        return nil
343✔
2903
                default:
×
2904
                        return err
×
2905
                }
2906

2907
                updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey)
424✔
2908
                if updateBytes == nil {
812✔
2909
                        return nil
388✔
2910
                }
388✔
2911

2912
                r := bytes.NewReader(updateBytes)
40✔
2913
                updates, err = deserializeLogUpdates(r)
40✔
2914
                return err
40✔
2915
        }, func() {
764✔
2916
                updates = nil
764✔
2917
        })
764✔
2918
        if err != nil {
764✔
2919
                return nil, err
×
2920
        }
×
2921

2922
        return updates, nil
764✔
2923
}
2924

2925
// InsertNextRevocation inserts the _next_ commitment point (revocation) into
2926
// the database, and also modifies the internal RemoteNextRevocation attribute
2927
// to point to the passed key. This method is to be used during final channel
2928
// set up, _after_ the channel has been fully confirmed.
2929
//
2930
// NOTE: If this method isn't called, then the target channel won't be able to
2931
// propose new states for the commitment state of the remote party.
2932
func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) error {
594✔
2933
        c.Lock()
594✔
2934
        defer c.Unlock()
594✔
2935

594✔
2936
        c.RemoteNextRevocation = revKey
594✔
2937

594✔
2938
        err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
1,188✔
2939
                chanBucket, err := fetchChanBucketRw(
594✔
2940
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
594✔
2941
                )
594✔
2942
                if err != nil {
594✔
2943
                        return err
×
2944
                }
×
2945

2946
                return putChanRevocationState(chanBucket, c)
594✔
2947
        }, func() {})
594✔
2948
        if err != nil {
594✔
2949
                return err
×
2950
        }
×
2951

2952
        return nil
594✔
2953
}
2954

2955
// AdvanceCommitChainTail records the new state transition within an on-disk
2956
// append-only log which records all state transitions by the remote peer. In
2957
// the case of an uncooperative broadcast of a prior state by the remote peer,
2958
// this log can be consulted in order to reconstruct the state needed to
2959
// rectify the situation. This method will add the current commitment for the
2960
// remote party to the revocation log, and promote the current pending
2961
// commitment to the current remote commitment. The updates parameter is the
2962
// set of local updates that the peer still needs to send us a signature for.
2963
// We store this set of updates in case we go down.
2964
func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg,
2965
        updates []LogUpdate, ourOutputIndex, theirOutputIndex uint32) error {
2,907✔
2966

2,907✔
2967
        c.Lock()
2,907✔
2968
        defer c.Unlock()
2,907✔
2969

2,907✔
2970
        // If this is a restored channel, then we want to avoid mutating the
2,907✔
2971
        // state at all, as it's impossible to do so in a protocol compliant
2,907✔
2972
        // manner.
2,907✔
2973
        if c.hasChanStatus(ChanStatusRestored) {
2,908✔
2974
                return ErrNoRestoredChannelMutation
1✔
2975
        }
1✔
2976

2977
        var newRemoteCommit *ChannelCommitment
2,906✔
2978

2,906✔
2979
        err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
5,812✔
2980
                chanBucket, err := fetchChanBucketRw(
2,906✔
2981
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
2,906✔
2982
                )
2,906✔
2983
                if err != nil {
2,906✔
2984
                        return err
×
2985
                }
×
2986

2987
                // If the channel is marked as borked, then for safety reasons,
2988
                // we shouldn't attempt any further updates.
2989
                isBorked, err := c.isBorked(chanBucket)
2,906✔
2990
                if err != nil {
2,906✔
2991
                        return err
×
2992
                }
×
2993
                if isBorked {
2,907✔
2994
                        return ErrChanBorked
1✔
2995
                }
1✔
2996

2997
                // Persist the latest preimage state to disk as the remote peer
2998
                // has just added to our local preimage store, and given us a
2999
                // new pending revocation key.
3000
                if err := putChanRevocationState(chanBucket, c); err != nil {
2,905✔
3001
                        return err
×
3002
                }
×
3003

3004
                // With the current preimage producer/store state updated,
3005
                // append a new log entry recording this the delta of this
3006
                // state transition.
3007
                //
3008
                // TODO(roasbeef): could make the deltas relative, would save
3009
                // space, but then tradeoff for more disk-seeks to recover the
3010
                // full state.
3011
                logKey := revocationLogBucket
2,905✔
3012
                logBucket, err := chanBucket.CreateBucketIfNotExists(logKey)
2,905✔
3013
                if err != nil {
2,905✔
3014
                        return err
×
3015
                }
×
3016

3017
                // Before we append this revoked state to the revocation log,
3018
                // we'll swap out what's currently the tail of the commit tip,
3019
                // with the current locked-in commitment for the remote party.
3020
                tipBytes := chanBucket.Get(commitDiffKey)
2,905✔
3021
                tipReader := bytes.NewReader(tipBytes)
2,905✔
3022
                newCommit, err := deserializeCommitDiff(tipReader)
2,905✔
3023
                if err != nil {
2,905✔
3024
                        return err
×
3025
                }
×
3026
                err = putChanCommitment(
2,905✔
3027
                        chanBucket, &newCommit.Commitment, false,
2,905✔
3028
                )
2,905✔
3029
                if err != nil {
2,905✔
3030
                        return err
×
3031
                }
×
3032
                if err := chanBucket.Delete(commitDiffKey); err != nil {
2,905✔
3033
                        return err
×
3034
                }
×
3035

3036
                // With the commitment pointer swapped, we can now add the
3037
                // revoked (prior) state to the revocation log.
3038
                err = putRevocationLog(
2,905✔
3039
                        logBucket, &c.RemoteCommitment, ourOutputIndex,
2,905✔
3040
                        theirOutputIndex, c.Db.parent.noRevLogAmtData,
2,905✔
3041
                )
2,905✔
3042
                if err != nil {
2,905✔
3043
                        return err
×
3044
                }
×
3045

3046
                // Lastly, we write the forwarding package to disk so that we
3047
                // can properly recover from failures and reforward HTLCs that
3048
                // have not received a corresponding settle/fail.
3049
                if err := c.Packager.AddFwdPkg(tx, fwdPkg); err != nil {
2,905✔
3050
                        return err
×
3051
                }
×
3052

3053
                // Persist the unsigned acked updates that are not included
3054
                // in their new commitment.
3055
                updateBytes := chanBucket.Get(unsignedAckedUpdatesKey)
2,905✔
3056
                if updateBytes == nil {
3,106✔
3057
                        // This shouldn't normally happen as we always store
201✔
3058
                        // the number of updates, but could still be
201✔
3059
                        // encountered by nodes that are upgrading.
201✔
3060
                        newRemoteCommit = &newCommit.Commitment
201✔
3061
                        return nil
201✔
3062
                }
201✔
3063

3064
                r := bytes.NewReader(updateBytes)
2,708✔
3065
                unsignedUpdates, err := deserializeLogUpdates(r)
2,708✔
3066
                if err != nil {
2,708✔
3067
                        return err
×
3068
                }
×
3069

3070
                var validUpdates []LogUpdate
2,708✔
3071
                for _, upd := range unsignedUpdates {
6,623✔
3072
                        lIdx := upd.LogIndex
3,915✔
3073

3,915✔
3074
                        // Filter for updates that are not on the remote
3,915✔
3075
                        // commitment.
3,915✔
3076
                        if lIdx >= newCommit.Commitment.RemoteLogIndex {
5,149✔
3077
                                validUpdates = append(validUpdates, upd)
1,234✔
3078
                        }
1,234✔
3079
                }
3080

3081
                var b bytes.Buffer
2,708✔
3082
                err = serializeLogUpdates(&b, validUpdates)
2,708✔
3083
                if err != nil {
2,708✔
3084
                        return fmt.Errorf("unable to serialize log updates: %w",
×
3085
                                err)
×
3086
                }
×
3087

3088
                err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes())
2,708✔
3089
                if err != nil {
2,708✔
3090
                        return fmt.Errorf("unable to store under "+
×
3091
                                "unsignedAckedUpdatesKey: %w", err)
×
3092
                }
×
3093

3094
                // Persist the local updates the peer hasn't yet signed so they
3095
                // can be restored after restart.
3096
                var b2 bytes.Buffer
2,708✔
3097
                err = serializeLogUpdates(&b2, updates)
2,708✔
3098
                if err != nil {
2,708✔
3099
                        return err
×
3100
                }
×
3101

3102
                err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes())
2,708✔
3103
                if err != nil {
2,708✔
3104
                        return fmt.Errorf("unable to restore remote unsigned "+
×
3105
                                "local updates: %v", err)
×
3106
                }
×
3107

3108
                newRemoteCommit = &newCommit.Commitment
2,708✔
3109

2,708✔
3110
                return nil
2,708✔
3111
        }, func() {
2,906✔
3112
                newRemoteCommit = nil
2,906✔
3113
        })
2,906✔
3114
        if err != nil {
2,907✔
3115
                return err
1✔
3116
        }
1✔
3117

3118
        // With the db transaction complete, we'll swap over the in-memory
3119
        // pointer of the new remote commitment, which was previously the tip
3120
        // of the commit chain.
3121
        c.RemoteCommitment = *newRemoteCommit
2,905✔
3122

2,905✔
3123
        return nil
2,905✔
3124
}
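
// Editor's note: within the single database transaction above, the method
// (1) persists the latest revocation/preimage state, (2) promotes the stored
// commit diff to the new remote commitment and deletes the diff key, (3)
// appends the just-revoked remote commitment to the revocation log, (4)
// writes the new forwarding package, and (5) rewrites the unsigned acked
// updates and remote-unsigned local updates so they survive a restart.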
3125

3126
// FinalHtlcInfo contains information about the final outcome of an htlc.
3127
type FinalHtlcInfo struct {
3128
        // Settled is true if the htlc was settled. If false, the htlc was
3129
        // failed.
3130
        Settled bool
3131

3132
        // Offchain indicates whether the htlc was resolved off-chain or
3133
        // on-chain.
3134
        Offchain bool
3135
}
3136

3137
// putFinalHtlc writes the final htlc outcome to the database. Additionally it
3138
// records whether the htlc was resolved off-chain or on-chain.
3139
func putFinalHtlc(finalHtlcsBucket kvdb.RwBucket, id uint64,
3140
        info FinalHtlcInfo) error {
7✔
3141

7✔
3142
        var key [8]byte
7✔
3143
        byteOrder.PutUint64(key[:], id)
7✔
3144

7✔
3145
        var finalHtlcByte FinalHtlcByte
7✔
3146
        if info.Settled {
14✔
3147
                finalHtlcByte |= FinalHtlcSettledBit
7✔
3148
        }
7✔
3149
        if info.Offchain {
13✔
3150
                finalHtlcByte |= FinalHtlcOffchainBit
6✔
3151
        }
6✔
3152

3153
        return finalHtlcsBucket.Put(key[:], []byte{byte(finalHtlcByte)})
7✔
3154
}
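
// Editor's illustrative sketch, not part of channel.go: decoding the single
// status byte written by putFinalHtlc back into a FinalHtlcInfo. The helper
// name is hypothetical and only mirrors the bit layout used above.
func decodeFinalHtlcByte(b byte) FinalHtlcInfo {
        flags := FinalHtlcByte(b)
        return FinalHtlcInfo{
                Settled:  flags&FinalHtlcSettledBit != 0,
                Offchain: flags&FinalHtlcOffchainBit != 0,
        }
}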
3155

3156
// NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure
3157
// this always returns the next index that has not yet been allocated, this
3158
// will first try to examine any pending commitments, before falling back to the
3159
// last locked-in remote commitment.
3160
func (c *OpenChannel) NextLocalHtlcIndex() (uint64, error) {
379✔
3161
        // First, load the most recent commit diff that we initiated for the
379✔
3162
        // remote party. If no pending commit is found, this is not treated as
379✔
3163
        // a critical error, since we can always fall back.
379✔
3164
        pendingRemoteCommit, err := c.RemoteCommitChainTip()
379✔
3165
        if err != nil && err != ErrNoPendingCommit {
379✔
3166
                return 0, err
×
3167
        }
×
3168

3169
        // If a pending commit was found, its local htlc index will be at least
3170
        // as large as the one on our local commitment.
3171
        if pendingRemoteCommit != nil {
395✔
3172
                return pendingRemoteCommit.Commitment.LocalHtlcIndex, nil
16✔
3173
        }
16✔
3174

3175
        // Otherwise, fallback to using the local htlc index of their commitment.
3176
        return c.RemoteCommitment.LocalHtlcIndex, nil
363✔
3177
}
3178

3179
// LoadFwdPkgs scans the forwarding log for any packages that haven't been
3180
// processed, and returns their deserialized log updates grouped by the
3181
// remote commitment height at which the updates were locked in.
3182
func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, error) {
501✔
3183
        c.RLock()
501✔
3184
        defer c.RUnlock()
501✔
3185

501✔
3186
        var fwdPkgs []*FwdPkg
501✔
3187
        if err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
945✔
3188
                var err error
444✔
3189
                fwdPkgs, err = c.Packager.LoadFwdPkgs(tx)
444✔
3190
                return err
444✔
3191
        }, func() {
945✔
3192
                fwdPkgs = nil
501✔
3193
        }); err != nil {
558✔
3194
                return nil, err
57✔
3195
        }
57✔
3196

3197
        return fwdPkgs, nil
444✔
3198
}
3199

3200
// AckAddHtlcs updates the AckAddFilter containing any of the provided AddRefs
3201
// indicating that a response to this Add has been committed to the remote party.
3202
// Doing so will prevent these Add HTLCs from being reforwarded internally.
3203
func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) error {
1✔
3204
        c.Lock()
1✔
3205
        defer c.Unlock()
1✔
3206

1✔
3207
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
2✔
3208
                return c.Packager.AckAddHtlcs(tx, addRefs...)
1✔
3209
        }, func() {})
2✔
3210
}
3211

3212
// AckSettleFails updates the SettleFailFilter containing any of the provided
3213
// SettleFailRefs, indicating that the response has been delivered to the
3214
// incoming link, corresponding to a particular AddRef. Doing so will prevent
3215
// the responses from being retransmitted internally.
3216
func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) error {
×
3217
        c.Lock()
×
3218
        defer c.Unlock()
×
3219

×
3220
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
×
3221
                return c.Packager.AckSettleFails(tx, settleFailRefs...)
×
3222
        }, func() {})
×
3223
}
3224

3225
// SetFwdFilter atomically sets the forwarding filter for the forwarding package
3226
// identified by `height`.
3227
func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) error {
2,179✔
3228
        c.Lock()
2,179✔
3229
        defer c.Unlock()
2,179✔
3230

2,179✔
3231
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
4,358✔
3232
                return c.Packager.SetFwdFilter(tx, height, fwdFilter)
2,179✔
3233
        }, func() {})
4,358✔
3234
}
3235

3236
// RemoveFwdPkgs atomically removes forwarding packages specified by the remote
3237
// commitment heights. If one of the intermediate RemovePkg calls fails, then the
3238
// later packages won't be removed.
3239
//
3240
// NOTE: This method should only be called on packages marked FwdStateCompleted.
3241
func (c *OpenChannel) RemoveFwdPkgs(heights ...uint64) error {
17✔
3242
        c.Lock()
17✔
3243
        defer c.Unlock()
17✔
3244

17✔
3245
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
34✔
3246
                for _, height := range heights {
911✔
3247
                        err := c.Packager.RemovePkg(tx, height)
894✔
3248
                        if err != nil {
894✔
3249
                                return err
×
3250
                        }
×
3251
                }
3252

3253
                return nil
17✔
3254
        }, func() {})
17✔
3255
}
3256

3257
// revocationLogTailCommitHeight returns the commit height at the end of the
3258
// revocation log. This entry represents the last previous state for the remote
3259
// node's commitment chain. The commit height returned by this method will
3260
// always lag one state behind the most current (unrevoked) state of the remote
3261
// node's commitment chain.
3262
// NOTE: used in unit test only.
3263
func (c *OpenChannel) revocationLogTailCommitHeight() (uint64, error) {
2✔
3264
        c.RLock()
2✔
3265
        defer c.RUnlock()
2✔
3266

2✔
3267
        var height uint64
2✔
3268

2✔
3269
        // If we haven't created any state updates yet, then we'll exit early as
2✔
3270
        // there's nothing to be found on disk in the revocation bucket.
2✔
3271
        if c.RemoteCommitment.CommitHeight == 0 {
2✔
3272
                return height, nil
×
3273
        }
×
3274

3275
        if err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
4✔
3276
                chanBucket, err := fetchChanBucket(
2✔
3277
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
2✔
3278
                )
2✔
3279
                if err != nil {
2✔
3280
                        return err
×
3281
                }
×
3282

3283
                logBucket, err := fetchLogBucket(chanBucket)
2✔
3284
                if err != nil {
2✔
3285
                        return err
×
3286
                }
×
3287

3288
                // Once we have the bucket that stores the revocation log from
3289
                // this channel, we'll jump to the _last_ key in bucket. Since
3290
                // the key is the commit height, we'll decode the bytes and
3291
                // return it.
3292
                cursor := logBucket.ReadCursor()
2✔
3293
                rawHeight, _ := cursor.Last()
2✔
3294
                height = byteOrder.Uint64(rawHeight)
2✔
3295

2✔
3296
                return nil
2✔
3297
        }, func() {}); err != nil {
2✔
3298
                return height, err
×
3299
        }
×
3300

3301
        return height, nil
2✔
3302
}
3303

3304
// CommitmentHeight returns the current commitment height. The commitment
3305
// height represents the number of updates to the commitment state to date.
3306
// This value is always monotonically increasing. This method is provided in
3307
// order to allow multiple instances of a particular open channel to obtain a
3308
// consistent view of the number of channel updates to date.
3309
func (c *OpenChannel) CommitmentHeight() (uint64, error) {
1✔
3310
        c.RLock()
1✔
3311
        defer c.RUnlock()
1✔
3312

1✔
3313
        var height uint64
1✔
3314
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
2✔
3315
                // Get the bucket dedicated to storing the metadata for open
1✔
3316
                // channels.
1✔
3317
                chanBucket, err := fetchChanBucket(
1✔
3318
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
1✔
3319
                )
1✔
3320
                if err != nil {
1✔
3321
                        return err
×
3322
                }
×
3323

3324
                commit, err := fetchChanCommitment(chanBucket, true)
1✔
3325
                if err != nil {
1✔
3326
                        return err
×
3327
                }
×
3328

3329
                height = commit.CommitHeight
1✔
3330
                return nil
1✔
3331
        }, func() {
1✔
3332
                height = 0
1✔
3333
        })
1✔
3334
        if err != nil {
1✔
3335
                return 0, err
×
3336
        }
×
3337

3338
        return height, nil
1✔
3339
}
3340

3341
// FindPreviousState scans through the append-only log in an attempt to recover
3342
// the previous channel state indicated by the update number. This method is
3343
// intended to be used for obtaining the relevant data needed to claim all
3344
// funds rightfully spendable in the case of an on-chain broadcast of the
3345
// commitment transaction.
3346
func (c *OpenChannel) FindPreviousState(
3347
        updateNum uint64) (*RevocationLog, *ChannelCommitment, error) {
35✔
3348

35✔
3349
        c.RLock()
35✔
3350
        defer c.RUnlock()
35✔
3351

35✔
3352
        commit := &ChannelCommitment{}
35✔
3353
        rl := &RevocationLog{}
35✔
3354

35✔
3355
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
70✔
3356
                chanBucket, err := fetchChanBucket(
35✔
3357
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
35✔
3358
                )
35✔
3359
                if err != nil {
36✔
3360
                        return err
1✔
3361
                }
1✔
3362

3363
                // Find the revocation log from both the new and the old
3364
                // bucket.
3365
                r, c, err := fetchRevocationLogCompatible(chanBucket, updateNum)
34✔
3366
                if err != nil {
57✔
3367
                        return err
23✔
3368
                }
23✔
3369

3370
                rl = r
15✔
3371
                commit = c
15✔
3372
                return nil
15✔
3373
        }, func() {})
35✔
3374
        if err != nil {
59✔
3375
                return nil, nil, err
24✔
3376
        }
24✔
3377

3378
        // Either the `rl` or the `commit` is nil here. We return them as-is
3379
        // and leave it to the caller to decide its following action.
3380
        return rl, commit, nil
15✔
3381
}
3382

3383
// ClosureType is an enum like structure that details exactly _how_ a channel
3384
// was closed. Five closure types are currently possible: none, cooperative,
3385
// local force close, remote force close, and (remote) breach.
3386
type ClosureType uint8
3387

3388
const (
3389
        // CooperativeClose indicates that a channel has been closed
3390
        // cooperatively.  This means that both channel peers were online and
3391
        // signed a new transaction paying out the settled balance of the
3392
        // contract.
3393
        CooperativeClose ClosureType = 0
3394

3395
        // LocalForceClose indicates that we have unilaterally broadcast our
3396
        // current commitment state on-chain.
3397
        LocalForceClose ClosureType = 1
3398

3399
        // RemoteForceClose indicates that the remote peer has unilaterally
3400
        // broadcast their current commitment state on-chain.
3401
        RemoteForceClose ClosureType = 4
3402

3403
        // BreachClose indicates that the remote peer attempted to broadcast a
3404
        // prior _revoked_ channel state.
3405
        BreachClose ClosureType = 2
3406

3407
        // FundingCanceled indicates that the channel never was fully opened
3408
        // before it was marked as closed in the database. This can happen if
3409
        // we or the remote fail at some point during the opening workflow, or
3410
        // we timeout waiting for the funding transaction to be confirmed.
3411
        FundingCanceled ClosureType = 3
3412

3413
        // Abandoned indicates that the channel state was removed without
3414
        // any further actions. This is intended to clean up unusable
3415
        // channels during development.
3416
        Abandoned ClosureType = 5
3417
)
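
// Editor's note: the numeric values above are persisted as part of a
// channel's close summary, which is presumably why they are not sequential;
// renumbering or reordering them would change the meaning of previously
// stored summaries.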
3418

3419
// ChannelCloseSummary contains the final state of a channel at the point it
3420
// was closed. Once a channel is closed, all the information pertaining to that
3421
// channel within the openChannelBucket is deleted, and a compact summary is
3422
// put in place instead.
3423
type ChannelCloseSummary struct {
3424
        // ChanPoint is the outpoint for this channel's funding transaction,
3425
        // and is used as a unique identifier for the channel.
3426
        ChanPoint wire.OutPoint
3427

3428
        // ShortChanID encodes the exact location in the chain in which the
3429
        // channel was initially confirmed. This includes: the block height,
3430
        // transaction index, and the output within the target transaction.
3431
        ShortChanID lnwire.ShortChannelID
3432

3433
        // ChainHash is the hash of the genesis block that this channel resides
3434
        // within.
3435
        ChainHash chainhash.Hash
3436

3437
        // ClosingTXID is the txid of the transaction which ultimately closed
3438
        // this channel.
3439
        ClosingTXID chainhash.Hash
3440

3441
        // RemotePub is the public key of the remote peer that we formerly had
3442
        // a channel with.
3443
        RemotePub *btcec.PublicKey
3444

3445
        // Capacity was the total capacity of the channel.
3446
        Capacity btcutil.Amount
3447

3448
        // CloseHeight is the height at which the funding transaction was
3449
        // spent.
3450
        CloseHeight uint32
3451

3452
        // SettledBalance is our total settled balance at the time of
3453
        // channel closure. This _does not_ include the sum of any outputs that
3454
        // have been time-locked as a result of the unilateral channel closure.
3455
        SettledBalance btcutil.Amount
3456

3457
        // TimeLockedBalance is the sum of all the time-locked outputs at the
3458
        // time of channel closure. If we triggered the force closure of this
3459
        // channel, then this value will be non-zero if our settled output is
3460
        // above the dust limit. If we were on the receiving side of a channel
3461
        // force closure, then this value will be non-zero if we had any
3462
        // outstanding outgoing HTLC's at the time of channel closure.
3463
        TimeLockedBalance btcutil.Amount
3464

3465
        // CloseType details exactly _how_ the channel was closed. Five closure
3466
        // types are possible: cooperative, local force, remote force, breach
3467
        // and funding canceled.
3468
        CloseType ClosureType
3469

3470
        // IsPending indicates whether this channel is in the 'pending close'
3471
        // state, which means the channel closing transaction has been
3472
        // confirmed, but not yet fully resolved. In the case of a channel
3473
        // that has been cooperatively closed, it will go straight into the
3474
        // fully resolved state as soon as the closing transaction has been
3475
        // confirmed. However, channels that have been force closed will
3476
        // stay marked as "pending" until _all_ the pending funds have been
3477
        // swept.
3478
        IsPending bool
3479

3480
        // RemoteCurrentRevocation is the current revocation for their
3481
        // commitment transaction. However, since this is the derived public key,
3482
        // we don't yet have the private key so we aren't yet able to verify
3483
        // that it's actually in the hash chain.
3484
        RemoteCurrentRevocation *btcec.PublicKey
3485

3486
        // RemoteNextRevocation is the revocation key to be used for the *next*
3487
        // commitment transaction we create for the local node. Within the
3488
        // specification, this value is referred to as the
3489
        // per-commitment-point.
3490
        RemoteNextRevocation *btcec.PublicKey
3491

3492
        // LocalChanConfig is the channel configuration for the local node.
3493
        LocalChanConfig ChannelConfig
3494

3495
        // LastChanSyncMsg is the ChannelReestablish message for this channel
3496
        // for the state at the point where it was closed.
3497
        LastChanSyncMsg *lnwire.ChannelReestablish
3498
}
3499

3500
// CloseChannel closes a previously active Lightning channel. Closing a channel
3501
// entails deleting all saved state within the database concerning this
3502
// channel. This method also takes a struct that summarizes the state of the
3503
// channel at closing, this compact representation will be the only component
3504
// of a channel left over after a full closing. It takes an optional set of
3505
// channel statuses which will be written to the historical channel bucket.
3506
// These statuses are used to record close initiators.
3507
func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
3508
        statuses ...ChannelStatus) error {
118✔
3509

118✔
3510
        c.Lock()
118✔
3511
        defer c.Unlock()
118✔
3512

118✔
3513
        return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
236✔
3514
                openChanBucket := tx.ReadWriteBucket(openChannelBucket)
118✔
3515
                if openChanBucket == nil {
118✔
3516
                        return ErrNoChanDBExists
×
3517
                }
×
3518

3519
                nodePub := c.IdentityPub.SerializeCompressed()
118✔
3520
                nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
118✔
3521
                if nodeChanBucket == nil {
118✔
3522
                        return ErrNoActiveChannels
×
3523
                }
×
3524

3525
                chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:])
118✔
3526
                if chainBucket == nil {
118✔
3527
                        return ErrNoActiveChannels
×
3528
                }
×
3529

3530
                var chanPointBuf bytes.Buffer
118✔
3531
                err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint)
118✔
3532
                if err != nil {
118✔
3533
                        return err
×
3534
                }
×
3535
                chanKey := chanPointBuf.Bytes()
118✔
3536
                chanBucket := chainBucket.NestedReadWriteBucket(
118✔
3537
                        chanKey,
118✔
3538
                )
118✔
3539
                if chanBucket == nil {
118✔
3540
                        return ErrNoActiveChannels
×
3541
                }
×
3542

3543
                // Before we delete the channel state, we'll read out the full
3544
                // details, as we'll also store portions of this information
3545
                // for record keeping.
3546
                chanState, err := fetchOpenChannel(
118✔
3547
                        chanBucket, &c.FundingOutpoint,
118✔
3548
                )
118✔
3549
                if err != nil {
118✔
3550
                        return err
×
3551
                }
×
3552

3553
                // Delete all the forwarding packages stored for this particular
3554
                // channel.
3555
                if err = chanState.Packager.Wipe(tx); err != nil {
118✔
3556
                        return err
×
3557
                }
×
3558

3559
                // Now that the index to this channel has been deleted, purge
3560
                // the remaining channel metadata from the database.
3561
                err = deleteOpenChannel(chanBucket)
118✔
3562
                if err != nil {
118✔
3563
                        return err
×
3564
                }
×
3565

3566
                // We'll also remove the channel from the frozen channel bucket
3567
                // if we need to.
3568
                if c.ChanType.IsFrozen() || c.ChanType.HasLeaseExpiration() {
229✔
3569
                        err := deleteThawHeight(chanBucket)
111✔
3570
                        if err != nil {
111✔
3571
                                return err
×
3572
                        }
×
3573
                }
3574

3575
                // With the base channel data deleted, attempt to delete the
3576
                // information stored within the revocation log.
3577
                if err := deleteLogBucket(chanBucket); err != nil {
118✔
3578
                        return err
×
3579
                }
×
3580

3581
                err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes())
118✔
3582
                if err != nil {
118✔
3583
                        return err
×
3584
                }
×
3585

3586
                // Fetch the outpoint bucket to see if the outpoint exists or
3587
                // not.
3588
                opBucket := tx.ReadWriteBucket(outpointBucket)
118✔
3589
                if opBucket == nil {
118✔
3590
                        return ErrNoChanDBExists
×
3591
                }
×
3592

3593
                // Add the closed outpoint to our outpoint index. This should
3594
                // replace an open outpoint in the index.
3595
                if opBucket.Get(chanPointBuf.Bytes()) == nil {
118✔
3596
                        return ErrMissingIndexEntry
×
3597
                }
×
3598

3599
                status := uint8(outpointClosed)
118✔
3600

118✔
3601
                // Write the IndexStatus of this outpoint as the first entry in a tlv
118✔
3602
                // stream.
118✔
3603
                statusRecord := tlv.MakePrimitiveRecord(indexStatusType, &status)
118✔
3604
                opStream, err := tlv.NewStream(statusRecord)
118✔
3605
                if err != nil {
118✔
3606
                        return err
×
3607
                }
×
3608

3609
                var b bytes.Buffer
118✔
3610
                if err := opStream.Encode(&b); err != nil {
118✔
3611
                        return err
×
3612
                }
×
3613

3614
                // Finally add the closed outpoint and tlv stream to the index.
3615
                if err := opBucket.Put(chanPointBuf.Bytes(), b.Bytes()); err != nil {
118✔
3616
                        return err
×
3617
                }
×
3618

3619
                // Add channel state to the historical channel bucket.
3620
                historicalBucket, err := tx.CreateTopLevelBucket(
118✔
3621
                        historicalChannelBucket,
118✔
3622
                )
118✔
3623
                if err != nil {
118✔
3624
                        return err
×
3625
                }
×
3626

3627
                historicalChanBucket, err :=
118✔
3628
                        historicalBucket.CreateBucketIfNotExists(chanKey)
118✔
3629
                if err != nil {
118✔
3630
                        return err
×
3631
                }
×
3632

3633
                // Apply any additional statuses to the channel state.
3634
                for _, status := range statuses {
127✔
3635
                        chanState.chanStatus |= status
9✔
3636
                }
9✔
3637

3638
                err = putOpenChannel(historicalChanBucket, chanState)
118✔
3639
                if err != nil {
118✔
3640
                        return err
×
3641
                }
×
3642

3643
                // Finally, create a summary of this channel in the closed
3644
                // channel bucket for this node.
3645
                return putChannelCloseSummary(
118✔
3646
                        tx, chanPointBuf.Bytes(), summary, chanState,
118✔
3647
                )
118✔
3648
        }, func() {})
118✔
3649
}
3650

3651
// ChannelSnapshot is a frozen snapshot of the current channel state. A
3652
// snapshot is detached from the original channel that generated it, providing
3653
// read-only access to the current or prior state of an active channel.
3654
//
3655
// TODO(roasbeef): remove altogether? pretty much just commitment
3656
type ChannelSnapshot struct {
3657
        // RemoteIdentity is the identity public key of the remote node that we
3658
        // are maintaining the open channel with.
3659
        RemoteIdentity btcec.PublicKey
3660

3661
        // ChannelPoint is the outpoint that created the channel. This output is
3662
        // found within the funding transaction and uniquely identifies the
3663
        // channel on the resident chain.
3664
        ChannelPoint wire.OutPoint
3665

3666
        // ChainHash is the genesis hash of the chain that the channel resides
3667
        // within.
3668
        ChainHash chainhash.Hash
3669

3670
        // Capacity is the total capacity of the channel.
3671
        Capacity btcutil.Amount
3672

3673
        // TotalMSatSent is the total number of milli-satoshis we've sent
3674
        // within this channel.
3675
        TotalMSatSent lnwire.MilliSatoshi
3676

3677
        // TotalMSatReceived is the total number of milli-satoshis we've
3678
        // received within this channel.
3679
        TotalMSatReceived lnwire.MilliSatoshi
3680

3681
        // ChannelCommitment is the current up-to-date commitment for the
3682
        // target channel.
3683
        ChannelCommitment
3684
}
3685

3686
// Snapshot returns a read-only snapshot of the current channel state. This
3687
// snapshot includes information concerning the current settled balance within
3688
// the channel, metadata detailing total flows, and any outstanding HTLCs.
3689
func (c *OpenChannel) Snapshot() *ChannelSnapshot {
90✔
3690
        c.RLock()
90✔
3691
        defer c.RUnlock()
90✔
3692

90✔
3693
        localCommit := c.LocalCommitment
90✔
3694
        snapshot := &ChannelSnapshot{
90✔
3695
                RemoteIdentity:    *c.IdentityPub,
90✔
3696
                ChannelPoint:      c.FundingOutpoint,
90✔
3697
                Capacity:          c.Capacity,
90✔
3698
                TotalMSatSent:     c.TotalMSatSent,
90✔
3699
                TotalMSatReceived: c.TotalMSatReceived,
90✔
3700
                ChainHash:         c.ChainHash,
90✔
3701
                ChannelCommitment: ChannelCommitment{
90✔
3702
                        LocalBalance:  localCommit.LocalBalance,
90✔
3703
                        RemoteBalance: localCommit.RemoteBalance,
90✔
3704
                        CommitHeight:  localCommit.CommitHeight,
90✔
3705
                        CommitFee:     localCommit.CommitFee,
90✔
3706
                },
90✔
3707
        }
90✔
3708

90✔
3709
        // Copy over the current set of HTLCs to ensure the caller can't mutate
90✔
3710
        // our internal state.
90✔
3711
        snapshot.Htlcs = make([]HTLC, len(localCommit.Htlcs))
90✔
3712
        for i, h := range localCommit.Htlcs {
4,706✔
3713
                snapshot.Htlcs[i] = h.Copy()
4,616✔
3714
        }
4,616✔
3715

3716
        return snapshot
90✔
3717
}
3718

3719
// LatestCommitments returns the two latest commitments for both the local and
3720
// remote party. These commitments are read from disk to ensure that only the
3721
// latest fully committed state is returned. The first commitment returned is
3722
// the local commitment, and the second returned is the remote commitment.
3723
func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, error) {
19✔
3724
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
38✔
3725
                chanBucket, err := fetchChanBucket(
19✔
3726
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
19✔
3727
                )
19✔
3728
                if err != nil {
19✔
3729
                        return err
×
3730
                }
×
3731

3732
                return fetchChanCommitments(chanBucket, c)
19✔
3733
        }, func() {})
19✔
3734
        if err != nil {
19✔
3735
                return nil, nil, err
×
3736
        }
×
3737

3738
        return &c.LocalCommitment, &c.RemoteCommitment, nil
19✔
3739
}
3740

3741
// RemoteRevocationStore returns the most up to date commitment version of the
3742
// revocation storage tree for the remote party. This method can be used when
3743
// acting on a possible contract breach to ensure, that the caller has the most
3744
// up to date information required to deliver justice.
3745
func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
19✔
3746
        err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
38✔
3747
                chanBucket, err := fetchChanBucket(
19✔
3748
                        tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
19✔
3749
                )
19✔
3750
                if err != nil {
19✔
3751
                        return err
×
3752
                }
×
3753

3754
                return fetchChanRevocationState(chanBucket, c)
19✔
3755
        }, func() {})
19✔
3756
        if err != nil {
19✔
3757
                return nil, err
×
3758
        }
×
3759

3760
        return c.RevocationStore, nil
19✔
3761
}
3762

3763
// AbsoluteThawHeight determines a frozen channel's absolute thaw height. If the
3764
// channel is not frozen, then 0 is returned.
3765
func (c *OpenChannel) AbsoluteThawHeight() (uint32, error) {
4✔
3766
        // Only frozen channels have a thaw height.
4✔
3767
        if !c.ChanType.IsFrozen() && !c.ChanType.HasLeaseExpiration() {
8✔
3768
                return 0, nil
4✔
3769
        }
4✔
3770

3771
        // If the channel has the frozen bit set and its thaw height is below
3772
        // the absolute threshold, then it's interpreted as a height relative
3773
        // to the height at which the channel confirmed.
3774
        if c.ChanType.IsFrozen() && c.ThawHeight < AbsoluteThawHeightThreshold {
8✔
3775
                // We'll only know of the channel's short ID once it's
4✔
3776
                // confirmed.
4✔
3777
                if c.IsPending {
4✔
3778
                        return 0, errors.New("cannot use relative thaw " +
×
3779
                                "height for unconfirmed channel")
×
3780
                }
×
3781

3782
                // For non-zero-conf channels, this is the base height to use.
3783
                blockHeightBase := c.ShortChannelID.BlockHeight
4✔
3784

4✔
3785
                // If this is a zero-conf channel, the ShortChannelID will be
4✔
3786
                // an alias.
4✔
3787
                if c.IsZeroConf() {
4✔
3788
                        if !c.ZeroConfConfirmed() {
×
3789
                                return 0, errors.New("cannot use relative " +
×
3790
                                        "height for unconfirmed zero-conf " +
×
3791
                                        "channel")
×
3792
                        }
×
3793

3794
                        // Use the confirmed SCID's BlockHeight.
3795
                        blockHeightBase = c.confirmedScid.BlockHeight
×
3796
                }
3797

3798
                return blockHeightBase + c.ThawHeight, nil
4✔
3799
        }
3800

3801
        return c.ThawHeight, nil
4✔
3802
}
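
// exampleAbsoluteThawHeight is a minimal illustrative sketch (hypothetical,
// not part of this package's API) of the rule implemented above. It ignores
// the zero-conf and lease-expiration branches and only shows how a thaw
// height below AbsoluteThawHeightThreshold acts as an offset from the
// channel's confirmation height, while anything at or above the threshold is
// already an absolute block height.
func exampleAbsoluteThawHeight(confirmationHeight, thawHeight uint32) uint32 {
        if thawHeight < AbsoluteThawHeightThreshold {
                // Relative: e.g. confirmation height 700,000 with a thaw
                // height of 144 yields 700,144.
                return confirmationHeight + thawHeight
        }

        // Absolute: e.g. 800,000 is used as-is.
        return thawHeight
}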
3803

3804
func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte,
3805
        summary *ChannelCloseSummary, lastChanState *OpenChannel) error {
118✔
3806

118✔
3807
        closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket)
118✔
3808
        if err != nil {
118✔
3809
                return err
×
3810
        }
×
3811

3812
        summary.RemoteCurrentRevocation = lastChanState.RemoteCurrentRevocation
118✔
3813
        summary.RemoteNextRevocation = lastChanState.RemoteNextRevocation
118✔
3814
        summary.LocalChanConfig = lastChanState.LocalChanCfg
118✔
3815

118✔
3816
        var b bytes.Buffer
118✔
3817
        if err := serializeChannelCloseSummary(&b, summary); err != nil {
118✔
3818
                return err
×
3819
        }
×
3820

3821
        return closedChanBucket.Put(chanID, b.Bytes())
118✔
3822
}
3823

3824
func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
125✔
3825
        err := WriteElements(w,
125✔
3826
                cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID,
125✔
3827
                cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance,
125✔
3828
                cs.TimeLockedBalance, cs.CloseType, cs.IsPending,
125✔
3829
        )
125✔
3830
        if err != nil {
125✔
3831
                return err
×
3832
        }
×
3833

3834
        // If this is a close channel summary created before the addition of
3835
        // the new fields, then we can exit here.
3836
        if cs.RemoteCurrentRevocation == nil {
125✔
3837
                return WriteElements(w, false)
×
3838
        }
×
3839

3840
        // If fields are present, write boolean to indicate this, and continue.
3841
        if err := WriteElements(w, true); err != nil {
125✔
3842
                return err
×
3843
        }
×
3844

3845
        if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil {
125✔
3846
                return err
×
3847
        }
×
3848

3849
        if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil {
125✔
3850
                return err
×
3851
        }
×
3852

3853
        // The RemoteNextRevocation field is optional, as it's possible for a
3854
        // channel to be closed before we learn of the next unrevoked
3855
        // revocation point for the remote party. Write a boolean indicating
3856
        // whether this field is present or not.
3857
        if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
125✔
3858
                return err
×
3859
        }
×
3860

3861
        // Write the field, if present.
3862
        if cs.RemoteNextRevocation != nil {
248✔
3863
                if err = WriteElements(w, cs.RemoteNextRevocation); err != nil {
123✔
3864
                        return err
×
3865
                }
×
3866
        }
3867

3868
        // Write whether the channel sync message is present.
3869
        if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil {
125✔
3870
                return err
×
3871
        }
×
3872

3873
        // Write the channel sync message, if present.
3874
        if cs.LastChanSyncMsg != nil {
129✔
3875
                if err := WriteElements(w, cs.LastChanSyncMsg); err != nil {
4✔
3876
                        return err
×
3877
                }
×
3878
        }
3879

3880
        return nil
125✔
3881
}
3882

3883
func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {
134✔
3884
        c := &ChannelCloseSummary{}
134✔
3885

134✔
3886
        err := ReadElements(r,
134✔
3887
                &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
134✔
3888
                &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
134✔
3889
                &c.TimeLockedBalance, &c.CloseType, &c.IsPending,
134✔
3890
        )
134✔
3891
        if err != nil {
134✔
3892
                return nil, err
×
3893
        }
×
3894

3895
        // We'll now check to see if the channel close summary was encoded with
3896
        // any of the additional optional fields.
3897
        var hasNewFields bool
134✔
3898
        err = ReadElements(r, &hasNewFields)
134✔
3899
        if err != nil {
134✔
3900
                return nil, err
×
3901
        }
×
3902

3903
        // If fields are not present, we can return.
3904
        if !hasNewFields {
142✔
3905
                return c, nil
8✔
3906
        }
8✔
3907

3908
        // Otherwise read the new fields.
3909
        if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil {
126✔
3910
                return nil, err
×
3911
        }
×
3912

3913
        if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
126✔
3914
                return nil, err
×
3915
        }
×
3916

3917
        // Finally, we'll attempt to read the next unrevoked commitment point
3918
        // for the remote party. If we closed the channel before receiving a
3919
        // channel_ready message then this might not be present. A boolean
3920
        // indicating whether the field is present will come first.
3921
        var hasRemoteNextRevocation bool
126✔
3922
        err = ReadElements(r, &hasRemoteNextRevocation)
126✔
3923
        if err != nil {
126✔
3924
                return nil, err
×
3925
        }
×
3926

3927
        // If this field was written, read it.
3928
        if hasRemoteNextRevocation {
252✔
3929
                err = ReadElements(r, &c.RemoteNextRevocation)
126✔
3930
                if err != nil {
126✔
3931
                        return nil, err
×
3932
                }
×
3933
        }
3934

3935
        // Check if we have a channel sync message to read.
3936
        var hasChanSyncMsg bool
126✔
3937
        err = ReadElements(r, &hasChanSyncMsg)
126✔
3938
        if err == io.EOF {
126✔
3939
                return c, nil
×
3940
        } else if err != nil {
126✔
3941
                return nil, err
×
3942
        }
×
3943

3944
        // If a chan sync message is present, read it.
3945
        if hasChanSyncMsg {
130✔
3946
                // We must pass in a reference to an lnwire.Message for the codec
4✔
3947
                // to support it.
4✔
3948
                var msg lnwire.Message
4✔
3949
                if err := ReadElements(r, &msg); err != nil {
4✔
3950
                        return nil, err
×
3951
                }
×
3952

3953
                chanSync, ok := msg.(*lnwire.ChannelReestablish)
4✔
3954
                if !ok {
4✔
3955
                        return nil, errors.New("unable cast db Message to " +
×
3956
                                "ChannelReestablish")
×
3957
                }
×
3958
                c.LastChanSyncMsg = chanSync
4✔
3959
        }
3960

3961
        return c, nil
126✔
3962
}
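
// encodeOptionalRevocationPoint is a minimal in-package sketch (hypothetical,
// e.g. for a test) of the presence-flag pattern used by the close summary
// codec above: a boolean is written first, and the optional value only
// follows when that boolean is true, which is what lets older, shorter
// records decode without error.
func encodeOptionalRevocationPoint(point *btcec.PublicKey) ([]byte, error) {
        var b bytes.Buffer

        // First record whether the optional field is present at all.
        if err := WriteElements(&b, point != nil); err != nil {
                return nil, err
        }

        // Only write the field itself when it is present.
        if point != nil {
                if err := WriteElements(&b, point); err != nil {
                        return nil, err
                }
        }

        return b.Bytes(), nil
}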
3963

3964
func writeChanConfig(b io.Writer, c *ChannelConfig) error {
8,387✔
3965
        return WriteElements(b,
8,387✔
3966
                c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC,
8,387✔
3967
                c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey,
8,387✔
3968
                c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint,
8,387✔
3969
                c.HtlcBasePoint,
8,387✔
3970
        )
8,387✔
3971
}
8,387✔
3972

3973
// fundingTxPresent returns true if we expect the funding transaction to be found
3974
// on disk or already populated within the passed open channel struct.
3975
func fundingTxPresent(channel *OpenChannel) bool {
14,088✔
3976
        chanType := channel.ChanType
14,088✔
3977

14,088✔
3978
        return chanType.IsSingleFunder() && chanType.HasFundingTx() &&
14,088✔
3979
                channel.IsInitiator &&
14,088✔
3980
                !channel.hasChanStatus(ChanStatusRestored)
14,088✔
3981
}
14,088✔
3982

3983
func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
4,135✔
3984
        var w bytes.Buffer
4,135✔
3985
        if err := WriteElements(&w,
4,135✔
3986
                channel.ChanType, channel.ChainHash, channel.FundingOutpoint,
4,135✔
3987
                channel.ShortChannelID, channel.IsPending, channel.IsInitiator,
4,135✔
3988
                channel.chanStatus, channel.FundingBroadcastHeight,
4,135✔
3989
                channel.NumConfsRequired, channel.ChannelFlags,
4,135✔
3990
                channel.IdentityPub, channel.Capacity, channel.TotalMSatSent,
4,135✔
3991
                channel.TotalMSatReceived,
4,135✔
3992
        ); err != nil {
4,135✔
3993
                return err
×
3994
        }
×
3995

3996
        // For single funder channels that we initiated and for which we have
3997
        // the funding transaction, write the funding txn.
3998
        if fundingTxPresent(channel) {
6,445✔
3999
                if err := WriteElement(&w, channel.FundingTxn); err != nil {
2,310✔
4000
                        return err
×
4001
                }
×
4002
        }
4003

4004
        if err := writeChanConfig(&w, &channel.LocalChanCfg); err != nil {
4,135✔
4005
                return err
×
4006
        }
×
4007
        if err := writeChanConfig(&w, &channel.RemoteChanCfg); err != nil {
4,135✔
4008
                return err
×
4009
        }
×
4010

4011
        // Convert balance fields into uint64.
4012
        localBalance := uint64(channel.InitialLocalBalance)
4,135✔
4013
        remoteBalance := uint64(channel.InitialRemoteBalance)
4,135✔
4014

4,135✔
4015
        // Create the tlv stream.
4,135✔
4016
        tlvStream, err := tlv.NewStream(
4,135✔
4017
                // Write the RevocationKeyLocator as the first entry in a tlv
4,135✔
4018
                // stream.
4,135✔
4019
                MakeKeyLocRecord(
4,135✔
4020
                        keyLocType, &channel.RevocationKeyLocator,
4,135✔
4021
                ),
4,135✔
4022
                tlv.MakePrimitiveRecord(
4,135✔
4023
                        initialLocalBalanceType, &localBalance,
4,135✔
4024
                ),
4,135✔
4025
                tlv.MakePrimitiveRecord(
4,135✔
4026
                        initialRemoteBalanceType, &remoteBalance,
4,135✔
4027
                ),
4,135✔
4028
                MakeScidRecord(realScidType, &channel.confirmedScid),
4,135✔
4029
                tlv.MakePrimitiveRecord(channelMemoType, &channel.Memo),
4,135✔
4030
        )
4,135✔
4031
        if err != nil {
4,135✔
4032
                return err
×
4033
        }
×
4034

4035
        if err := tlvStream.Encode(&w); err != nil {
4,135✔
4036
                return err
×
4037
        }
×
4038

4039
        if err := chanBucket.Put(chanInfoKey, w.Bytes()); err != nil {
4,135✔
4040
                return err
×
4041
        }
×
4042

4043
        // Finally, add optional shutdown scripts for the local and remote peer if
4044
        // they are present.
4045
        if err := putOptionalUpfrontShutdownScript(
4,135✔
4046
                chanBucket, localUpfrontShutdownKey, channel.LocalShutdownScript,
4,135✔
4047
        ); err != nil {
4,135✔
4048
                return err
×
4049
        }
×
4050

4051
        return putOptionalUpfrontShutdownScript(
4,135✔
4052
                chanBucket, remoteUpfrontShutdownKey, channel.RemoteShutdownScript,
4,135✔
4053
        )
4,135✔
4054
}
4055

4056
// putOptionalUpfrontShutdownScript adds a shutdown script under the key
4057
// provided if it has a non-zero length.
4058
func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
4059
        script []byte) error {
8,266✔
4060
        // If the script is empty, we do not need to add anything.
8,266✔
4061
        if len(script) == 0 {
16,522✔
4062
                return nil
8,256✔
4063
        }
8,256✔
4064

4065
        var w bytes.Buffer
14✔
4066
        if err := WriteElement(&w, script); err != nil {
14✔
4067
                return err
×
4068
        }
×
4069

4070
        return chanBucket.Put(key, w.Bytes())
14✔
4071
}
4072

4073
// getOptionalUpfrontShutdownScript reads the shutdown script stored under the
4074
// key provided if it is present. Upfront shutdown scripts are optional, so the
4075
// function returns with no error if the key is not present.
4076
func getOptionalUpfrontShutdownScript(chanBucket kvdb.RBucket, key []byte,
4077
        script *lnwire.DeliveryAddress) error {
19,910✔
4078

19,910✔
4079
        // Return early if the key is not present; a shutdown script was not set.
19,910✔
4080
        bs := chanBucket.Get(key)
19,910✔
4081
        if bs == nil {
39,816✔
4082
                return nil
19,906✔
4083
        }
19,906✔
4084

4085
        var tempScript []byte
8✔
4086
        r := bytes.NewReader(bs)
8✔
4087
        if err := ReadElement(r, &tempScript); err != nil {
8✔
4088
                return err
×
4089
        }
×
4090
        *script = tempScript
8✔
4091

8✔
4092
        return nil
8✔
4093
}
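
// upfrontShutdownRoundTrip is a minimal in-package sketch (hypothetical, e.g.
// for a test) of the two helpers above: a non-empty script is stored under
// the given key and read back verbatim, while an empty script would store
// nothing and leave the target untouched.
func upfrontShutdownRoundTrip(chanBucket kvdb.RwBucket) error {
        script := lnwire.DeliveryAddress{0x00, 0x14}

        err := putOptionalUpfrontShutdownScript(
                chanBucket, localUpfrontShutdownKey, script,
        )
        if err != nil {
                return err
        }

        var read lnwire.DeliveryAddress
        err = getOptionalUpfrontShutdownScript(
                chanBucket, localUpfrontShutdownKey, &read,
        )
        if err != nil {
                return err
        }

        if !bytes.Equal(read, script) {
                return errors.New("shutdown script mismatch")
        }

        return nil
}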
4094

4095
func serializeChanCommit(w io.Writer, c *ChannelCommitment) error {
11,209✔
4096
        if err := WriteElements(w,
11,209✔
4097
                c.CommitHeight, c.LocalLogIndex, c.LocalHtlcIndex,
11,209✔
4098
                c.RemoteLogIndex, c.RemoteHtlcIndex, c.LocalBalance,
11,209✔
4099
                c.RemoteBalance, c.CommitFee, c.FeePerKw, c.CommitTx,
11,209✔
4100
                c.CommitSig,
11,209✔
4101
        ); err != nil {
11,209✔
4102
                return err
×
4103
        }
×
4104

4105
        return SerializeHtlcs(w, c.Htlcs...)
11,209✔
4106
}
4107

4108
func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment,
4109
        local bool) error {
8,219✔
4110

8,219✔
4111
        var commitKey []byte
8,219✔
4112
        if local {
12,353✔
4113
                commitKey = append(chanCommitmentKey, byte(0x00))
4,134✔
4114
        } else {
8,223✔
4115
                commitKey = append(chanCommitmentKey, byte(0x01))
4,089✔
4116
        }
4,089✔
4117

4118
        var b bytes.Buffer
8,219✔
4119
        if err := serializeChanCommit(&b, c); err != nil {
8,219✔
4120
                return err
×
4121
        }
×
4122

4123
        return chanBucket.Put(commitKey, b.Bytes())
8,219✔
4124
}
4125

4126
func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
1,189✔
4127
        // If this is a restored channel, then we don't have any commitments to
1,189✔
4128
        // write.
1,189✔
4129
        if channel.hasChanStatus(ChanStatusRestored) {
1,194✔
4130
                return nil
5✔
4131
        }
5✔
4132

4133
        err := putChanCommitment(
1,188✔
4134
                chanBucket, &channel.LocalCommitment, true,
1,188✔
4135
        )
1,188✔
4136
        if err != nil {
1,188✔
4137
                return err
×
4138
        }
×
4139

4140
        return putChanCommitment(
1,188✔
4141
                chanBucket, &channel.RemoteCommitment, false,
1,188✔
4142
        )
1,188✔
4143
}
4144

4145
func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
4,680✔
4146
        var b bytes.Buffer
4,680✔
4147
        err := WriteElements(
4,680✔
4148
                &b, channel.RemoteCurrentRevocation, channel.RevocationProducer,
4,680✔
4149
                channel.RevocationStore,
4,680✔
4150
        )
4,680✔
4151
        if err != nil {
4,680✔
4152
                return err
×
4153
        }
×
4154

4155
        // If the next revocation is present, which is only the case after the
4156
        // ChannelReady message has been sent, then we'll write it to disk.
4157
        if channel.RemoteNextRevocation != nil {
8,638✔
4158
                err = WriteElements(&b, channel.RemoteNextRevocation)
3,958✔
4159
                if err != nil {
3,958✔
4160
                        return err
×
4161
                }
×
4162
        }
4163

4164
        return chanBucket.Put(revocationStateKey, b.Bytes())
4,680✔
4165
}
4166

4167
func readChanConfig(b io.Reader, c *ChannelConfig) error {
20,032✔
4168
        return ReadElements(b,
20,032✔
4169
                &c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve,
20,032✔
4170
                &c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay,
20,032✔
4171
                &c.MultiSigKey, &c.RevocationBasePoint,
20,032✔
4172
                &c.PaymentBasePoint, &c.DelayBasePoint,
20,032✔
4173
                &c.HtlcBasePoint,
20,032✔
4174
        )
20,032✔
4175
}
20,032✔
4176

4177
func fetchChanInfo(chanBucket kvdb.RBucket, channel *OpenChannel) error {
9,957✔
4178
        infoBytes := chanBucket.Get(chanInfoKey)
9,957✔
4179
        if infoBytes == nil {
9,957✔
4180
                return ErrNoChanInfoFound
×
4181
        }
×
4182
        r := bytes.NewReader(infoBytes)
9,957✔
4183

9,957✔
4184
        if err := ReadElements(r,
9,957✔
4185
                &channel.ChanType, &channel.ChainHash, &channel.FundingOutpoint,
9,957✔
4186
                &channel.ShortChannelID, &channel.IsPending, &channel.IsInitiator,
9,957✔
4187
                &channel.chanStatus, &channel.FundingBroadcastHeight,
9,957✔
4188
                &channel.NumConfsRequired, &channel.ChannelFlags,
9,957✔
4189
                &channel.IdentityPub, &channel.Capacity, &channel.TotalMSatSent,
9,957✔
4190
                &channel.TotalMSatReceived,
9,957✔
4191
        ); err != nil {
9,957✔
4192
                return err
×
4193
        }
×
4194

4195
        // For single funder channels that we initiated and for which we have
4196
        // the funding transaction, read the funding txn.
4197
        if fundingTxPresent(channel) {
15,188✔
4198
                if err := ReadElement(r, &channel.FundingTxn); err != nil {
5,231✔
4199
                        return err
×
4200
                }
×
4201
        }
4202

4203
        if err := readChanConfig(r, &channel.LocalChanCfg); err != nil {
9,957✔
4204
                return err
×
4205
        }
×
4206
        if err := readChanConfig(r, &channel.RemoteChanCfg); err != nil {
9,957✔
4207
                return err
×
4208
        }
×
4209

4210
        // Retrieve the boolean stored under lastWasRevokeKey.
4211
        lastWasRevokeBytes := chanBucket.Get(lastWasRevokeKey)
9,957✔
4212
        if lastWasRevokeBytes == nil {
11,257✔
4213
                // If nothing has been stored under this key, we store false in the
1,300✔
4214
                // OpenChannel struct.
1,300✔
4215
                channel.LastWasRevoke = false
1,300✔
4216
        } else {
9,961✔
4217
                // Otherwise, read the value into the LastWasRevoke field.
8,661✔
4218
                revokeReader := bytes.NewReader(lastWasRevokeBytes)
8,661✔
4219
                err := ReadElements(revokeReader, &channel.LastWasRevoke)
8,661✔
4220
                if err != nil {
8,661✔
4221
                        return err
×
4222
                }
×
4223
        }
4224

4225
        // Create the balance fields as uint64, and the Memo field as a byte slice.
4226
        var (
9,957✔
4227
                localBalance  uint64
9,957✔
4228
                remoteBalance uint64
9,957✔
4229
                memo          []byte
9,957✔
4230
        )
9,957✔
4231

9,957✔
4232
        // Create the tlv stream.
9,957✔
4233
        tlvStream, err := tlv.NewStream(
9,957✔
4234
                // Write the RevocationKeyLocator as the first entry in a tlv
9,957✔
4235
                // stream.
9,957✔
4236
                MakeKeyLocRecord(
9,957✔
4237
                        keyLocType, &channel.RevocationKeyLocator,
9,957✔
4238
                ),
9,957✔
4239
                tlv.MakePrimitiveRecord(
9,957✔
4240
                        initialLocalBalanceType, &localBalance,
9,957✔
4241
                ),
9,957✔
4242
                tlv.MakePrimitiveRecord(
9,957✔
4243
                        initialRemoteBalanceType, &remoteBalance,
9,957✔
4244
                ),
9,957✔
4245
                MakeScidRecord(realScidType, &channel.confirmedScid),
9,957✔
4246
                tlv.MakePrimitiveRecord(channelMemoType, &memo),
9,957✔
4247
        )
9,957✔
4248
        if err != nil {
9,957✔
4249
                return err
×
4250
        }
×
4251

4252
        if err := tlvStream.Decode(r); err != nil {
9,957✔
4253
                return err
×
4254
        }
×
4255

4256
        // Attach the balance fields.
4257
        channel.InitialLocalBalance = lnwire.MilliSatoshi(localBalance)
9,957✔
4258
        channel.InitialRemoteBalance = lnwire.MilliSatoshi(remoteBalance)
9,957✔
4259

9,957✔
4260
        // Attach the memo field if non-empty.
9,957✔
4261
        if len(memo) > 0 {
9,961✔
4262
                channel.Memo = memo
4✔
4263
        }
4✔
4264

4265
        channel.Packager = NewChannelPackager(channel.ShortChannelID)
9,957✔
4266

9,957✔
4267
        // Finally, read the optional shutdown scripts.
9,957✔
4268
        if err := getOptionalUpfrontShutdownScript(
9,957✔
4269
                chanBucket, localUpfrontShutdownKey, &channel.LocalShutdownScript,
9,957✔
4270
        ); err != nil {
9,957✔
4271
                return err
×
4272
        }
×
4273

4274
        return getOptionalUpfrontShutdownScript(
9,957✔
4275
                chanBucket, remoteUpfrontShutdownKey, &channel.RemoteShutdownScript,
9,957✔
4276
        )
9,957✔
4277
}
4278

4279
func deserializeChanCommit(r io.Reader) (ChannelCommitment, error) {
22,906✔
4280
        var c ChannelCommitment
22,906✔
4281

22,906✔
4282
        err := ReadElements(r,
22,906✔
4283
                &c.CommitHeight, &c.LocalLogIndex, &c.LocalHtlcIndex, &c.RemoteLogIndex,
22,906✔
4284
                &c.RemoteHtlcIndex, &c.LocalBalance, &c.RemoteBalance,
22,906✔
4285
                &c.CommitFee, &c.FeePerKw, &c.CommitTx, &c.CommitSig,
22,906✔
4286
        )
22,906✔
4287
        if err != nil {
22,906✔
4288
                return c, err
×
4289
        }
×
4290

4291
        c.Htlcs, err = DeserializeHtlcs(r)
22,906✔
4292
        if err != nil {
22,906✔
4293
                return c, err
×
4294
        }
×
4295

4296
        return c, nil
22,906✔
4297
}
4298

4299
func fetchChanCommitment(chanBucket kvdb.RBucket, local bool) (ChannelCommitment, error) {
19,937✔
4300
        var commitKey []byte
19,937✔
4301
        if local {
29,908✔
4302
                commitKey = append(chanCommitmentKey, byte(0x00))
9,971✔
4303
        } else {
19,941✔
4304
                commitKey = append(chanCommitmentKey, byte(0x01))
9,970✔
4305
        }
9,970✔
4306

4307
        commitBytes := chanBucket.Get(commitKey)
19,937✔
4308
        if commitBytes == nil {
19,937✔
4309
                return ChannelCommitment{}, ErrNoCommitmentsFound
×
4310
        }
×
4311

4312
        r := bytes.NewReader(commitBytes)
19,937✔
4313
        return deserializeChanCommit(r)
19,937✔
4314
}
4315

4316
func fetchChanCommitments(chanBucket kvdb.RBucket, channel *OpenChannel) error {
9,972✔
4317
        var err error
9,972✔
4318

9,972✔
4319
        // If this is a restored channel, then we don't have any commitments to
9,972✔
4320
        // read.
9,972✔
4321
        if channel.hasChanStatus(ChanStatusRestored) {
9,978✔
4322
                return nil
6✔
4323
        }
6✔
4324

4325
        channel.LocalCommitment, err = fetchChanCommitment(chanBucket, true)
9,970✔
4326
        if err != nil {
9,970✔
4327
                return err
×
4328
        }
×
4329
        channel.RemoteCommitment, err = fetchChanCommitment(chanBucket, false)
9,970✔
4330
        if err != nil {
9,970✔
4331
                return err
×
4332
        }
×
4333

4334
        return nil
9,970✔
4335
}
4336

4337
func fetchChanRevocationState(chanBucket kvdb.RBucket, channel *OpenChannel) error {
9,972✔
4338
        revBytes := chanBucket.Get(revocationStateKey)
9,972✔
4339
        if revBytes == nil {
9,972✔
4340
                return ErrNoRevocationsFound
×
4341
        }
×
4342
        r := bytes.NewReader(revBytes)
9,972✔
4343

9,972✔
4344
        err := ReadElements(
9,972✔
4345
                r, &channel.RemoteCurrentRevocation, &channel.RevocationProducer,
9,972✔
4346
                &channel.RevocationStore,
9,972✔
4347
        )
9,972✔
4348
        if err != nil {
9,972✔
4349
                return err
×
4350
        }
×
4351

4352
        // If there aren't any bytes left in the buffer, then we don't yet have
4353
        // the next remote revocation, so we can exit early here.
4354
        if r.Len() == 0 {
10,183✔
4355
                return nil
211✔
4356
        }
211✔
4357

4358
        // Otherwise we'll read the next revocation for the remote party which
4359
        // is always the last item within the buffer.
4360
        return ReadElements(r, &channel.RemoteNextRevocation)
9,765✔
4361
}
4362

4363
func deleteOpenChannel(chanBucket kvdb.RwBucket) error {
118✔
4364
        if err := chanBucket.Delete(chanInfoKey); err != nil {
118✔
4365
                return err
×
4366
        }
×
4367

4368
        err := chanBucket.Delete(append(chanCommitmentKey, byte(0x00)))
118✔
4369
        if err != nil {
118✔
4370
                return err
×
4371
        }
×
4372
        err = chanBucket.Delete(append(chanCommitmentKey, byte(0x01)))
118✔
4373
        if err != nil {
118✔
4374
                return err
×
4375
        }
×
4376

4377
        if err := chanBucket.Delete(revocationStateKey); err != nil {
118✔
4378
                return err
×
4379
        }
×
4380

4381
        if diff := chanBucket.Get(commitDiffKey); diff != nil {
122✔
4382
                return chanBucket.Delete(commitDiffKey)
4✔
4383
        }
4✔
4384

4385
        return nil
118✔
4386
}
4387

4388
// makeLogKey converts a uint64 into an 8 byte array.
4389
func makeLogKey(updateNum uint64) [8]byte {
19,792✔
4390
        var key [8]byte
19,792✔
4391
        byteOrder.PutUint64(key[:], updateNum)
19,792✔
4392
        return key
19,792✔
4393
}
19,792✔
4394

4395
func fetchThawHeight(chanBucket kvdb.RBucket) (uint32, error) {
338✔
4396
        var height uint32
338✔
4397

338✔
4398
        heightBytes := chanBucket.Get(frozenChanKey)
338✔
4399
        heightReader := bytes.NewReader(heightBytes)
338✔
4400

338✔
4401
        if err := ReadElements(heightReader, &height); err != nil {
338✔
4402
                return 0, err
×
4403
        }
×
4404

4405
        return height, nil
338✔
4406
}
4407

4408
func storeThawHeight(chanBucket kvdb.RwBucket, height uint32) error {
432✔
4409
        var heightBuf bytes.Buffer
432✔
4410
        if err := WriteElements(&heightBuf, height); err != nil {
432✔
4411
                return err
×
4412
        }
×
4413

4414
        return chanBucket.Put(frozenChanKey, heightBuf.Bytes())
432✔
4415
}
4416

4417
func deleteThawHeight(chanBucket kvdb.RwBucket) error {
111✔
4418
        return chanBucket.Delete(frozenChanKey)
111✔
4419
}
111✔
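
// thawHeightRoundTrip is a minimal in-package sketch (hypothetical, e.g. for
// a test) of the three helpers above: store a height under frozenChanKey,
// read it back, then delete it again.
func thawHeightRoundTrip(chanBucket kvdb.RwBucket) error {
        if err := storeThawHeight(chanBucket, 144); err != nil {
                return err
        }

        height, err := fetchThawHeight(chanBucket)
        if err != nil {
                return err
        }
        if height != 144 {
                return errors.New("unexpected thaw height")
        }

        return deleteThawHeight(chanBucket)
}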
4420

4421
// EKeyLocator is an encoder for keychain.KeyLocator.
4422
func EKeyLocator(w io.Writer, val interface{}, buf *[8]byte) error {
4,136✔
4423
        if v, ok := val.(*keychain.KeyLocator); ok {
8,272✔
4424
                err := tlv.EUint32T(w, uint32(v.Family), buf)
4,136✔
4425
                if err != nil {
4,136✔
4426
                        return err
×
4427
                }
×
4428

4429
                return tlv.EUint32T(w, v.Index, buf)
4,136✔
4430
        }
4431
        return tlv.NewTypeForEncodingErr(val, "keychain.KeyLocator")
×
4432
}
4433

4434
// DKeyLocator is a decoder for keychain.KeyLocator.
4435
func DKeyLocator(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
9,958✔
4436
        if v, ok := val.(*keychain.KeyLocator); ok {
19,916✔
4437
                var family uint32
9,958✔
4438
                err := tlv.DUint32(r, &family, buf, 4)
9,958✔
4439
                if err != nil {
9,958✔
4440
                        return err
×
4441
                }
×
4442
                v.Family = keychain.KeyFamily(family)
9,958✔
4443

9,958✔
4444
                return tlv.DUint32(r, &v.Index, buf, 4)
9,958✔
4445
        }
4446
        return tlv.NewTypeForDecodingErr(val, "keychain.KeyLocator", l, 8)
×
4447
}
4448

4449
// MakeKeyLocRecord creates a Record out of a KeyLocator using the passed
4450
// Type and the EKeyLocator and DKeyLocator functions. The size will always be
4451
// 8 as KeyFamily is uint32 and the Index is uint32.
4452
func MakeKeyLocRecord(typ tlv.Type, keyLoc *keychain.KeyLocator) tlv.Record {
14,088✔
4453
        return tlv.MakeStaticRecord(typ, keyLoc, 8, EKeyLocator, DKeyLocator)
14,088✔
4454
}
14,088✔
4455

4456
// MakeScidRecord creates a Record out of a ShortChannelID using the passed
4457
// Type and the EShortChannelID and DShortChannelID functions. The size will
4458
// always be 8 for the ShortChannelID.
4459
func MakeScidRecord(typ tlv.Type, scid *lnwire.ShortChannelID) tlv.Record {
14,088✔
4460
        return tlv.MakeStaticRecord(
14,088✔
4461
                typ, scid, 8, lnwire.EShortChannelID, lnwire.DShortChannelID,
14,088✔
4462
        )
14,088✔
4463
}
14,088✔
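
// keyLocatorRoundTrip is a minimal in-package sketch (hypothetical, e.g. for
// a test) of MakeKeyLocRecord: the same record constructor is used for both
// encoding and decoding, and the 8-byte value (family + index, both uint32)
// round-trips unchanged. The record type 1 is chosen arbitrarily for
// illustration; the package itself uses the unexported keyLocType.
func keyLocatorRoundTrip() (bool, error) {
        loc := keychain.KeyLocator{Family: keychain.KeyFamily(6), Index: 3}

        // Encode the locator as a single TLV record.
        var b bytes.Buffer
        enc, err := tlv.NewStream(MakeKeyLocRecord(1, &loc))
        if err != nil {
                return false, err
        }
        if err := enc.Encode(&b); err != nil {
                return false, err
        }

        // Decode it back into a fresh KeyLocator using the same record type.
        var decoded keychain.KeyLocator
        dec, err := tlv.NewStream(MakeKeyLocRecord(1, &decoded))
        if err != nil {
                return false, err
        }
        if err := dec.Decode(&b); err != nil {
                return false, err
        }

        return decoded == loc, nil
}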
4464

4465
// ShutdownInfo contains various info about the shutdown initiation of a
4466
// channel.
4467
type ShutdownInfo struct {
4468
        // DeliveryScript is the address that we have included in any previous
4469
        // Shutdown message for a particular channel and so should include in
4470
        // any future re-sends of the Shutdown message.
4471
        DeliveryScript tlv.RecordT[tlv.TlvType0, lnwire.DeliveryAddress]
4472

4473
        // LocalInitiator is true if we sent a Shutdown message before ever
4474
        // receiving a Shutdown message from the remote peer.
4475
        LocalInitiator tlv.RecordT[tlv.TlvType1, bool]
4476
}
4477

4478
// NewShutdownInfo constructs a new ShutdownInfo object.
4479
func NewShutdownInfo(deliveryScript lnwire.DeliveryAddress,
4480
        locallyInitiated bool) *ShutdownInfo {
17✔
4481

17✔
4482
        return &ShutdownInfo{
17✔
4483
                DeliveryScript: tlv.NewRecordT[tlv.TlvType0](deliveryScript),
17✔
4484
                LocalInitiator: tlv.NewPrimitiveRecord[tlv.TlvType1](
17✔
4485
                        locallyInitiated,
17✔
4486
                ),
17✔
4487
        }
17✔
4488
}
17✔
4489

4490
// Closer identifies the ChannelParty that initiated the coop-closure process.
4491
func (s ShutdownInfo) Closer() lntypes.ChannelParty {
4✔
4492
        if s.LocalInitiator.Val {
8✔
4493
                return lntypes.Local
4✔
4494
        }
4✔
4495

4496
        return lntypes.Remote
4✔
4497
}
4498

4499
// encode serialises the ShutdownInfo to the given io.Writer.
4500
func (s *ShutdownInfo) encode(w io.Writer) error {
15✔
4501
        records := []tlv.Record{
15✔
4502
                s.DeliveryScript.Record(),
15✔
4503
                s.LocalInitiator.Record(),
15✔
4504
        }
15✔
4505

15✔
4506
        stream, err := tlv.NewStream(records...)
15✔
4507
        if err != nil {
15✔
4508
                return err
×
4509
        }
×
4510

4511
        return stream.Encode(w)
15✔
4512
}
4513

4514
// decodeShutdownInfo constructs a ShutdownInfo struct by decoding the given
4515
// byte slice.
4516
func decodeShutdownInfo(b []byte) (*ShutdownInfo, error) {
6✔
4517
        tlvStream := lnwire.ExtraOpaqueData(b)
6✔
4518

6✔
4519
        var info ShutdownInfo
6✔
4520
        records := []tlv.RecordProducer{
6✔
4521
                &info.DeliveryScript,
6✔
4522
                &info.LocalInitiator,
6✔
4523
        }
6✔
4524

6✔
4525
        _, err := tlvStream.ExtractRecords(records...)
6✔
4526

6✔
4527
        return &info, err
6✔
4528
}
6✔
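
// shutdownInfoRoundTrip is a minimal in-package sketch (hypothetical, e.g.
// for a test) of the ShutdownInfo codec above: build the struct via
// NewShutdownInfo, serialise it with encode, and recover it with
// decodeShutdownInfo.
func shutdownInfoRoundTrip() error {
        info := NewShutdownInfo(lnwire.DeliveryAddress{0x00, 0x14}, true)

        var b bytes.Buffer
        if err := info.encode(&b); err != nil {
                return err
        }

        decoded, err := decodeShutdownInfo(b.Bytes())
        if err != nil {
                return err
        }

        // The locally-initiated flag determines which party Closer reports.
        if decoded.Closer() != lntypes.Local {
                return errors.New("expected local closer")
        }

        return nil
}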