package itest

import (
"bytes"
"fmt"
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/node"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/routing"
"github.com/stretchr/testify/require"
)

// testChannelForceClosure performs a test to exercise the behavior of "force"
// closing a channel or unilaterally broadcasting the latest local commitment
// state on-chain. The test creates a new channel between Alice and Carol, then
// force closes the channel after some cursory assertions. Within the test, a
// total of 3 + n transactions will be broadcast, representing the commitment
// transaction, a transaction sweeping the local CSV delayed output, a
// transaction sweeping the CSV delayed 2nd-layer htlc outputs, and n
// htlc timeout transactions, where n is the number of payments Alice attempted
// to send to Carol. This test includes several restarts to ensure that the
// transaction output states are persisted throughout the forced closure
// process.
//
// TODO(roasbeef): also add an unsettled HTLC before force closing.
func testChannelForceClosure(ht *lntest.HarnessTest) {
// We'll test the scenario for some of the commitment types, to ensure
// outputs can be swept.
commitTypes := []lnrpc.CommitmentType{
lnrpc.CommitmentType_ANCHORS,
lnrpc.CommitmentType_SIMPLE_TAPROOT,
}
for _, channelType := range commitTypes {
testName := fmt.Sprintf("committype=%v", channelType)
channelType := channelType
success := ht.Run(testName, func(t *testing.T) {
st := ht.Subtest(t)
args := lntest.NodeArgsForCommitType(channelType)
alice := st.NewNode("Alice", args)
defer st.Shutdown(alice)
// Since we'd like to test failure scenarios with
// outstanding htlcs, we'll introduce another node into
// our test network: Carol.
carolArgs := []string{"--hodl.exit-settle"}
carolArgs = append(carolArgs, args...)
carol := st.NewNode("Carol", carolArgs)
defer st.Shutdown(carol)
// Each time, we'll send Alice a new set of coins in
// order to fund the channel.
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// NOTE: Alice needs 3 more UTXOs to sweep her
// second-layer txns after a restart - after a restart
// all the time-sensitive sweeps are swept immediately
// without being aggregated.
//
// TODO(yy): remove this once the sweeper can recover its state
// from restart.
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// Also give Carol some coins to allow her to sweep her
// anchor.
st.FundCoins(btcutil.SatoshiPerBitcoin, carol)
channelForceClosureTest(st, alice, carol, channelType)
})
if !success {
return
}
}
}

func channelForceClosureTest(ht *lntest.HarnessTest,
alice, carol *node.HarnessNode, channelType lnrpc.CommitmentType) {
const (
chanAmt = btcutil.Amount(10e6)
pushAmt = btcutil.Amount(5e6)
paymentAmt = 100000
numInvoices = 6
)
const commitFeeRate = 20000
ht.SetFeeEstimate(commitFeeRate)
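// NOTE: the fee estimate passed to the harness is expressed in sat/kw
// (chainfee.SatPerKWeight), so the 20000 above corresponds to roughly
// 80 sat/vbyte.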
// TODO(roasbeef): should check default value in config here
// instead, or make delay a param
defaultCLTV := uint32(chainreg.DefaultBitcoinTimeLockDelta)
// We must let Alice have an open channel before she can send a node
// announcement, so we open a channel with Carol.
ht.ConnectNodes(alice, carol)
// We need one additional UTXO for sweeping the remote anchor.
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// Before we start, obtain Carol's current wallet balance; we'll check
// to ensure that at the end of the force closure by Alice, Carol
// recognizes her new on-chain output.
carolBalResp := carol.RPC.WalletBalance()
carolStartingBalance := carolBalResp.ConfirmedBalance
// If the channel is a taproot channel, then we'll need to create a
// private channel.
//
// TODO(roasbeef): lift after G175
var privateChan bool
if channelType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
privateChan = true
}
chanPoint := ht.OpenChannel(
alice, carol, lntest.OpenChannelParams{
Private: privateChan,
Amt: chanAmt,
PushAmt: pushAmt,
CommitmentType: channelType,
},
)
// Send payments from Alice to Carol. Since Carol is in hodl mode, the
// htlc outputs will be left unsettled and should be swept by the
// utxo nursery.
carolPubKey := carol.PubKey[:]
for i := 0; i < numInvoices; i++ {
req := &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(paymentAmt),
PaymentHash: ht.Random32Bytes(),
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
alice.RPC.SendPayment(req)
}
// Once the HTLCs have cleared, all the nodes in our mini network
// should show that the HTLCs have been locked in.
ht.AssertNumActiveHtlcs(alice, numInvoices)
ht.AssertNumActiveHtlcs(carol, numInvoices)
// Fetch starting height of this test so we can compute the block
// heights we expect certain events to take place.
curHeight := int32(ht.CurrentHeight())
// Using the current height of the chain, derive the relevant heights
// for incubating two-stage htlcs.
var (
startHeight = uint32(curHeight)
commCsvMaturityHeight = startHeight + 1 + defaultCSV
htlcExpiryHeight = padCLTV(startHeight + defaultCLTV)
htlcCsvMaturityHeight = padCLTV(
startHeight + defaultCLTV + 1 + defaultCSV,
)
)
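// As a rough illustration (assuming the harness defaults of
// defaultCSV = 4 and chainreg.DefaultBitcoinTimeLockDelta = 80): with a
// startHeight of 100, the commitment output matures at
// 100 + 1 + 4 = 105, the htlc timeout txns become spendable at
// padCLTV(180), and the second-stage htlc outputs mature at
// padCLTV(185).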
aliceChan := ht.QueryChannelByChanPoint(alice, chanPoint)
require.NotZero(ht, aliceChan.NumUpdates,
"alice should see at least one update to her channel")
// Now that the channel is open and we have unsettled htlcs,
// immediately execute a force closure of the channel. This will also
// assert that the commitment transaction was immediately broadcast in
// order to fulfill the force closure request.
ht.CloseChannelAssertPending(alice, chanPoint, true)
// Now that the channel has been force closed, it should show up in the
// PendingChannels RPC under the waiting close section.
waitingClose := ht.AssertChannelWaitingClose(alice, chanPoint)
// Immediately after force closing, all of the funds should be in
// limbo.
require.NotZero(ht, waitingClose.LimboBalance,
"all funds should still be in limbo")
// Create a map of outpoints to expected resolutions for alice and
// carol which we will add reports to as we sweep outputs.
var (
aliceReports = make(map[string]*lnrpc.Resolution)
carolReports = make(map[string]*lnrpc.Resolution)
)
// The several restarts in this test are intended to ensure that when a
// channel is force-closed, the UTXO nursery has persisted the state of
// the channel in the closure process and will recover the correct
// state when the system comes back on line. This restart tests state
// persistence at the beginning of the process, when the commitment
// transaction has been broadcast but not yet confirmed in a block.
ht.RestartNode(alice)
// To give the neutrino backend some time to catch up with the chain,
// we wait here until we have enough UTXOs to actually sweep the local
// and remote anchor.
const expectedUtxos = 6
ht.AssertNumUTXOs(alice, expectedUtxos)
// We expect to see Alice's force close tx in the mempool.
ht.GetNumTxsFromMempool(1)
// Mine a block which should confirm the commitment transaction
// broadcast as a result of the force closure. Once mined, we also
// expect Alice's anchor sweeping tx being published.
ht.MineBlocksAndAssertNumTxes(1, 1)
// Assert Alice has one pending anchor output - because she doesn't
// have incoming HTLCs, her outgoing HTLC won't have a deadline, thus
// she won't use the anchor to perform CPFP.
aliceAnchor := ht.AssertNumPendingSweeps(alice, 1)[0]
require.Equal(ht, aliceAnchor.Outpoint.TxidStr,
waitingClose.Commitments.LocalTxid)
// Now that the commitment has been confirmed, the channel should be
// marked as force closed.
err := wait.NoError(func() error {
forceClose := ht.AssertChannelPendingForceClose(
alice, chanPoint,
)
// Now that the channel has been force closed, it should now
// have the height and number of blocks to confirm populated.
err := checkCommitmentMaturity(
forceClose, commCsvMaturityHeight, int32(defaultCSV),
)
if err != nil {
return err
}
// None of our outputs have been swept, so they should all be
// in limbo.
if forceClose.LimboBalance == 0 {
return errors.New("all funds should still be in limbo")
}
if forceClose.RecoveredBalance != 0 {
return errors.New("no funds should be recovered")
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
// The following restart is intended to ensure that outputs from the
// force close commitment transaction have been persisted once the
// transaction has been confirmed, but before the outputs are spendable
// (the "kindergarten" bucket.)
ht.RestartNode(alice)
// Carol should offer her commit and anchor outputs to the sweeper.
sweepTxns := ht.AssertNumPendingSweeps(carol, 2)
// Find Carol's anchor sweep.
var carolAnchor, carolCommit = sweepTxns[0], sweepTxns[1]
if carolAnchor.AmountSat != uint32(anchorSize) {
carolAnchor, carolCommit = carolCommit, carolAnchor
}
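// The anchor output carries the fixed anchor value (anchorSize, 330
// sats at the time of writing), so whichever pending sweep matches
// that amount is the anchor; the other sweep must be Carol's commit
// output holding the pushed amount.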
// Mine a block to trigger Carol's sweeper to make decisions on the
// anchor sweeping.
ht.MineEmptyBlocks(1)
// Carol's sweep tx should be in the mempool already, as her output is
// not timelocked.
carolTx := ht.GetNumTxsFromMempool(1)[0]
// Carol's sweeping tx should have a 2-input, 1-output shape.
require.Len(ht, carolTx.TxIn, 2)
require.Len(ht, carolTx.TxOut, 1)
// Calculate the total fee Carol paid.
totalFeeCarol := ht.CalculateTxFee(carolTx)
// If we have anchors, add an anchor resolution for carol.
op := fmt.Sprintf("%v:%v", carolAnchor.Outpoint.TxidStr,
carolAnchor.Outpoint.OutputIndex)
carolReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_ANCHOR,
Outcome: lnrpc.ResolutionOutcome_CLAIMED,
SweepTxid: carolTx.TxHash().String(),
AmountSat: anchorSize,
Outpoint: carolAnchor.Outpoint,
}
op = fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr,
carolCommit.Outpoint.OutputIndex)
carolReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_COMMIT,
Outcome: lnrpc.ResolutionOutcome_CLAIMED,
Outpoint: carolCommit.Outpoint,
AmountSat: uint64(pushAmt),
SweepTxid: carolTx.TxHash().String(),
}
// Currently within the codebase, the default CSV is 4 relative blocks.
// For the persistence test, we generate two blocks, then trigger a
// restart and then generate the final block that should trigger the
// creation of the sweep transaction.
//
// We also expect Carol to broadcast her sweeping tx which spends her
// commit and anchor outputs.
ht.MineBlocksAndAssertNumTxes(1, 1)
// Alice should still have the anchor sweeping request.
ht.AssertNumPendingSweeps(alice, 1)
// The following restart checks to ensure that outputs in the
// kindergarten bucket are persisted while waiting for the required
// number of confirmations to be reported.
ht.RestartNode(alice)
// Alice should see the channel in her set of pending force closed
// channels with her funds still in limbo.
var aliceBalance int64
var closingTxID *chainhash.Hash
err = wait.NoError(func() error {
forceClose := ht.AssertChannelPendingForceClose(
alice, chanPoint,
)
// Get the closing txid.
txid, err := chainhash.NewHashFromStr(forceClose.ClosingTxid)
if err != nil {
return err
}
closingTxID = txid
// Make a record of the balances we expect for alice and carol.
aliceBalance = forceClose.Channel.LocalBalance
// At this point, the nursery should show that the commitment
// output has 2 blocks left before its CSV delay expires. In
// total, we have mined exactly defaultCSV blocks, so the htlc
// outputs should also reflect that this many blocks have
// passed.
err = checkCommitmentMaturity(
forceClose, commCsvMaturityHeight, 2,
)
if err != nil {
return err
}
// All funds should still be shown in limbo.
if forceClose.LimboBalance == 0 {
return errors.New("all funds should still be in " +
"limbo")
}
if forceClose.RecoveredBalance != 0 {
return errors.New("no funds should be recovered")
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
// Generate an additional block, which should cause the CSV delayed
// output from the commitment txn to expire.
ht.MineEmptyBlocks(1)
// At this point, the CSV will expire in the next block, meaning that
// the output should be offered to the sweeper.
sweeps := ht.AssertNumPendingSweeps(alice, 2)
commitSweep, anchorSweep := sweeps[0], sweeps[1]
if commitSweep.AmountSat < anchorSweep.AmountSat {
commitSweep, anchorSweep = anchorSweep, commitSweep
}
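// The two sweeps are told apart by value alone here: the commit output
// holds roughly half the channel capacity, which dwarfs the tiny
// anchor output, so the larger pending sweep is the commit sweep.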
// Restart Alice to ensure that she resumes watching the finalized
// commitment sweep txid.
ht.RestartNode(alice)
// Mine one block and the sweeping transaction should now be broadcast.
// So we fetch the node's mempool to ensure it has been properly
// broadcast.
ht.MineEmptyBlocks(1)
sweepingTXID := ht.AssertNumTxsInMempool(1)[0]
// Fetch the sweep transaction; all inputs it's spending should be from
// the commitment transaction which was broadcast on-chain.
sweepTx := ht.GetRawTransaction(sweepingTXID)
for _, txIn := range sweepTx.MsgTx().TxIn {
require.Equal(ht, &txIn.PreviousOutPoint.Hash, closingTxID,
"sweep transaction not spending from commit")
}
// We expect a resolution which spends our commit output.
op = fmt.Sprintf("%v:%v", commitSweep.Outpoint.TxidStr,
commitSweep.Outpoint.OutputIndex)
aliceReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_COMMIT,
Outcome: lnrpc.ResolutionOutcome_CLAIMED,
SweepTxid: sweepingTXID.String(),
Outpoint: commitSweep.Outpoint,
AmountSat: uint64(aliceBalance),
}
// Add alice's anchor to our expected set of reports.
op = fmt.Sprintf("%v:%v", aliceAnchor.Outpoint.TxidStr,
aliceAnchor.Outpoint.OutputIndex)
aliceReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_ANCHOR,
Outcome: lnrpc.ResolutionOutcome_CLAIMED,
SweepTxid: sweepingTXID.String(),
Outpoint: aliceAnchor.Outpoint,
AmountSat: uint64(anchorSize),
}
// Check that we can find the commitment sweep in our set of known
// sweeps, using the simple transaction id ListSweeps output.
ht.AssertSweepFound(alice, sweepingTXID.String(), false, 0)
// Next, we mine an additional block which should include the sweep
// transaction as the input scripts and the sequence locks on the
// inputs should be properly met.
ht.MineBlocksAndAssertNumTxes(1, 1)
// Update current height
curHeight = int32(ht.CurrentHeight())
// checkPendingChannelNumHtlcs verifies that a force closed channel
// has the proper number of pending htlcs.
checkPendingChannelNumHtlcs := func(
forceClose lntest.PendingForceClose) error {
if len(forceClose.PendingHtlcs) != numInvoices {
return fmt.Errorf("expected force closed channel to "+
"have %d pending htlcs, found %d instead",
numInvoices, len(forceClose.PendingHtlcs))
}
return nil
}
err = wait.NoError(func() error {
// Now that the commit output has been fully swept, check to
// see that the channel remains open for the pending htlc
// outputs.
forceClose := ht.AssertChannelPendingForceClose(
alice, chanPoint,
)
// The commitment funds will have been recovered after the
// commit txn was included in the last block. The htlc funds
// will be shown in limbo.
err := checkPendingChannelNumHtlcs(forceClose)
if err != nil {
return err
}
err = checkPendingHtlcStageAndMaturity(
forceClose, 1, htlcExpiryHeight,
int32(htlcExpiryHeight)-curHeight,
)
if err != nil {
return err
}
if forceClose.LimboBalance == 0 {
return fmt.Errorf("expected funds in limbo, found 0")
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "timeout checking pending force close channel")
// Compute the height preceding that which will cause the htlc CLTV
// timeouts to expire. The outputs entered the nursery at the same
// height as the output spending from the commitment txn, so we must
// deduct the number of blocks we have generated since adding it to the
// nursery, and take an additional block off so that we end up one
// block shy of the expiry height, after adding the block padding.
currentHeight := int32(ht.CurrentHeight())
cltvHeightDelta := int(htlcExpiryHeight - uint32(currentHeight) - 1)
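// As an illustrative example only: if the padded expiry height is 183
// and the chain is currently at height 120, we mine
// 183 - 120 - 1 = 62 blocks, stopping exactly one block short of the
// expiry height.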
// Advance the blockchain until just before the CLTV expires, nothing
// exciting should have happened during this time.
ht.MineEmptyBlocks(cltvHeightDelta)
// We now restart Alice, to ensure that she will broadcast the
// presigned htlc timeout txns after the delay expires, even after
// experiencing a restart while waiting for the htlc outputs to
// incubate.
ht.RestartNode(alice)
// To give the neutrino backend some time to catch up with the chain,
// we would wait here until we have enough UTXOs to sweep with:
// ht.AssertNumUTXOs(alice, expectedUtxos)
// Alice should now see the channel in her set of pending force closed
// channels with one pending HTLC.
err = wait.NoError(func() error {
forceClose := ht.AssertChannelPendingForceClose(
alice, chanPoint,
)
// We should now be at the block just before the utxo nursery
// will attempt to broadcast the htlc timeout transactions.
err = checkPendingChannelNumHtlcs(forceClose)
if err != nil {
return err
}
err = checkPendingHtlcStageAndMaturity(
forceClose, 1, htlcExpiryHeight, 1,
)
if err != nil {
return err
}
// Now that our commitment confirmation depth has been
// surpassed, we should now see a non-zero recovered balance.
// All htlc outputs are still left in limbo, so it should be
// non-zero as well.
if forceClose.LimboBalance == 0 {
return errors.New("htlc funds should still be in limbo")
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
// Now, generate the block which will cause Alice to offer the
// presigned htlc timeout txns to the sweeper.
ht.MineEmptyBlocks(1)
// Since Alice had numInvoices (6) htlcs extended to Carol before force
// closing, we expect Alice to broadcast an htlc timeout txn for each
// one.
ht.AssertNumPendingSweeps(alice, numInvoices)
// Wait for them all to show up in the mempool
//
// NOTE: after restart, all the htlc timeout txns will be offered to
// the sweeper with `Immediate` set to true, so they won't be
// aggregated.
htlcTxIDs := ht.AssertNumTxsInMempool(numInvoices)
// Retrieve each htlc timeout txn from the mempool, and ensure it is
// well-formed. This entails verifying that each only spends from one
// output, and that the output is from the commitment txn.
numInputs := 2
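// NOTE: the second input is expected because anchor-type channels sign
// their second-level htlc transactions with SIGHASH_SINGLE|ANYONECANPAY
// and zero fee, so the sweeper attaches an extra wallet input to each
// timeout tx to pay for its fees.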
// Construct a map of the already confirmed htlc timeout outpoints,
// that will count the number of times each is spent by the sweep txn.
// We prepopulate it in this way so that we can later detect if we are
// spending from an output that was not a confirmed htlc timeout txn.
var htlcTxOutpointSet = make(map[wire.OutPoint]int)
var htlcLessFees uint64
for _, htlcTxID := range htlcTxIDs {
// Fetch the htlc timeout transaction; all inputs it's spending should
// be from the commitment transaction which was broadcast
// on-chain. In case of an anchor type channel, we expect one
// extra input that is not spending from the commitment, that
// is added for fees.
htlcTx := ht.GetRawTransaction(htlcTxID)
// Ensure the htlc transaction has the expected number of
// inputs.
inputs := htlcTx.MsgTx().TxIn
require.Len(ht, inputs, numInputs, "num inputs mismatch")
// The number of outputs should be the same.
outputs := htlcTx.MsgTx().TxOut
require.Len(ht, outputs, numInputs, "num outputs mismatch")
// Ensure all the htlc transaction inputs are spending from the
// commitment transaction, except if this is an extra input
// added to pay for fees for anchor channels.
nonCommitmentInputs := 0
for i, txIn := range inputs {
if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) {
nonCommitmentInputs++
require.Lessf(ht, nonCommitmentInputs, 2,
"htlc transaction not "+
"spending from commit "+
"tx %v, instead spending %v",
closingTxID, txIn.PreviousOutPoint)
// This was an extra input added to pay fees,
// continue to the next one.
continue
}
// For each htlc timeout transaction, we expect a
// resolver report recording this on chain resolution
// for both alice and carol.
outpoint := txIn.PreviousOutPoint
resolutionOutpoint := &lnrpc.OutPoint{
TxidBytes: outpoint.Hash[:],
TxidStr: outpoint.Hash.String(),
OutputIndex: outpoint.Index,
}
// We expect alice to have a timeout tx resolution with
// an amount equal to the payment amount.
//nolint:lll
aliceReports[outpoint.String()] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC,
Outcome: lnrpc.ResolutionOutcome_FIRST_STAGE,
SweepTxid: htlcTx.Hash().String(),
Outpoint: resolutionOutpoint,
AmountSat: uint64(paymentAmt),
}
// We expect carol to have a resolution with an
// incoming htlc timeout which reflects the full amount
// of the htlc. It has no spend tx, because carol stops
// monitoring the htlc once it has timed out.
//nolint:lll
carolReports[outpoint.String()] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_INCOMING_HTLC,
Outcome: lnrpc.ResolutionOutcome_TIMEOUT,
SweepTxid: "",
Outpoint: resolutionOutpoint,
AmountSat: uint64(paymentAmt),
}
// Record the HTLC outpoint, so that we can later
// check whether it gets swept.
op := wire.OutPoint{
Hash: htlcTxID,
Index: uint32(i),
}
htlcTxOutpointSet[op] = 0
}
// We record the htlc amount less fees here, so that we know
// what value to expect for the second stage of our htlc
// resolution.
htlcLessFees = uint64(outputs[0].Value)
}
// With the htlc timeout txns still in the mempool, we restart Alice to
// verify that she can resume watching the htlc txns she broadcasted
// before crashing.
ht.RestartNode(alice)
// Generate a block that mines the htlc timeout txns. Doing so now
// activates the 2nd-stage CSV delayed outputs.
ht.MineBlocksAndAssertNumTxes(1, numInvoices)
// Alice is restarted here to ensure that she promptly moved the crib
// outputs to the kindergarten bucket after the htlc timeout txns were
// confirmed.
ht.RestartNode(alice)
// Advance the chain until just before the 2nd-layer CSV delays expire.
// For anchor channels this is one block earlier.
currentHeight = int32(ht.CurrentHeight())
ht.Logf("current height: %v, htlcCsvMaturityHeight=%v", currentHeight,
htlcCsvMaturityHeight)
numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 2)
ht.MineEmptyBlocks(numBlocks)
// Restart Alice to ensure that she can recover from a failure before
// having graduated the htlc outputs in the kindergarten bucket.
ht.RestartNode(alice)
// Now that the channel has been fully swept, it should no longer show
// incubated, check to see that Alice's node still reports the channel
// as pending force closed.
err = wait.NoError(func() error {
forceClose := ht.AssertChannelPendingForceClose(
alice, chanPoint,
)
if forceClose.LimboBalance == 0 {
return fmt.Errorf("htlc funds should still be in limbo")
}
return checkPendingChannelNumHtlcs(forceClose)
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
// Generate a block that causes Alice to sweep the htlc outputs in the
// kindergarten bucket.
ht.MineEmptyBlocks(1)
ht.AssertNumPendingSweeps(alice, numInvoices)
// Mine a block to trigger the sweep.
ht.MineEmptyBlocks(1)
// A temp hack to ensure the CI is not blocking the current
// development. There's a known issue in block sync among different
// subsystems, which is scheduled to be fixed in 0.18.1.
if ht.IsNeutrinoBackend() {
// We expect the htlcs to be aggregated into one tx. However,
// due to block sync issue, they may end up in two txns. Here
// we assert that there are two txns found in the mempool - if that
// assertion succeeds, it means the aggregation failed, and we won't
// continue the test.
//
// NOTE: we don't check `len(mempool) == 1` because it will
// give us false positive.
err := wait.NoError(func() error {
mempool := ht.Miner().GetRawMempool()
if len(mempool) == 2 {
return nil
}
return fmt.Errorf("expected 2 txes in mempool, found "+
"%d", len(mempool))
}, lntest.DefaultTimeout)
ht.Logf("Assert num of txns got %v", err)
// If there are indeed two txns found in the mempool, we won't
// continue the test.
if err == nil {
ht.Log("Neutrino backend failed to aggregate htlc " +
"sweeps!")
// Clean the mempool.
ht.MineBlocksAndAssertNumTxes(1, 2)
return
}
}
// Wait for the single sweep txn to appear in the mempool.
htlcSweepTxID := ht.AssertNumTxsInMempool(1)[0]
// Fetch the htlc sweep transaction from the mempool.
htlcSweepTx := ht.GetRawTransaction(htlcSweepTxID)
// Ensure the htlc sweep transaction only has one input for each htlc
// Alice extended before force closing.
require.Len(ht, htlcSweepTx.MsgTx().TxIn, numInvoices,
"htlc transaction has wrong num of inputs")
require.Len(ht, htlcSweepTx.MsgTx().TxOut, 1,
"htlc sweep transaction should have one output")
// Ensure that each input spends from exactly one htlc timeout output.
for _, txIn := range htlcSweepTx.MsgTx().TxIn {
outpoint := txIn.PreviousOutPoint
// Check that the input is a confirmed htlc timeout txn.
_, ok := htlcTxOutpointSet[outpoint]
require.Truef(ht, ok, "htlc sweep output not spending from "+
"htlc tx, instead spending output %v", outpoint)
// Increment our count for how many times this output was spent.
htlcTxOutpointSet[outpoint]++
// Check that each is only spent once.
require.Lessf(ht, htlcTxOutpointSet[outpoint], 2,
"htlc sweep tx has multiple spends from "+
"outpoint %v", outpoint)
// Since we have now swept our htlc timeout tx, we expect to
// have timeout resolutions for each of our htlcs.
output := txIn.PreviousOutPoint
aliceReports[output.String()] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC,
Outcome: lnrpc.ResolutionOutcome_TIMEOUT,
SweepTxid: htlcSweepTx.Hash().String(),
Outpoint: &lnrpc.OutPoint{
TxidBytes: output.Hash[:],
TxidStr: output.Hash.String(),
OutputIndex: output.Index,
},
AmountSat: htlcLessFees,
}
}
// Check that each HTLC output was spent exactly once.
for op, num := range htlcTxOutpointSet {
require.Equalf(ht, 1, num,
"HTLC outpoint %s was spent %d times", op, num)
}
// Check that we can find the htlc sweep in our set of sweeps using
// the verbose output of the listsweeps output.
ht.AssertSweepFound(alice, htlcSweepTx.Hash().String(), true, 0)
// The following restart checks to ensure that the nursery store is
// storing the txid of the previously broadcast htlc sweep txn, and
// that it begins watching that txid after restarting.
ht.RestartNode(alice)
// Now that the channel has been fully swept, it should no longer show
// incubated, check to see that Alice's node still reports the channel
// as pending force closed.
err = wait.NoError(func() error {
forceClose := ht.AssertChannelPendingForceClose(
alice, chanPoint,
)
err := checkPendingChannelNumHtlcs(forceClose)
if err != nil {
return err
}
err = checkPendingHtlcStageAndMaturity(
forceClose, 2, htlcCsvMaturityHeight-1, -1,
)
if err != nil {
return err
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
// Generate the final block that sweeps all htlc funds into the user's
// wallet, and make sure the sweep is in this block.
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.AssertTxInBlock(block, htlcSweepTxID)
// Now that the channel has been fully swept, it should no longer show
// up within the pending channels RPC.
err = wait.NoError(func() error {
ht.AssertNumPendingForceClose(alice, 0)
// In addition to there being no pending channels, we verify
// that pending channels does not report any money still in
// limbo.
pendingChanResp := alice.RPC.PendingChannels()
if pendingChanResp.TotalLimboBalance != 0 {
return errors.New("no user funds should be left " +
"in limbo after incubation")
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "timeout checking limbo balance")
// At this point, Carol should now be aware of her new immediately
// spendable on-chain balance, as it was Alice who broadcast the
// commitment transaction.
carolBalResp = carol.RPC.WalletBalance()
// Carol's expected balance should be her starting balance plus the
// push amount sent by Alice and minus the miner fee paid.
carolExpectedBalance := btcutil.Amount(carolStartingBalance) +
pushAmt - totalFeeCarol
// In addition, if this is an anchor-enabled channel, further add the
// anchor size.
if lntest.CommitTypeHasAnchors(channelType) {
carolExpectedBalance += btcutil.Amount(anchorSize)
}
require.Equal(ht, carolExpectedBalance,
btcutil.Amount(carolBalResp.ConfirmedBalance),
"carol's balance is incorrect")
// Finally, we check that alice and carol have the set of resolutions
// we expect.
assertReports(ht, alice, chanPoint, aliceReports)
assertReports(ht, carol, chanPoint, carolReports)
}
// padCLTV is a small helper function that pads a cltv value with a block
// padding.
func padCLTV(cltv uint32) uint32 {
return cltv + uint32(routing.BlockPadding)
}

// testFailingChannel tests that we will fail the channel by force closing it
// in the case where a counterparty tries to settle an HTLC with the wrong
// preimage.
func testFailingChannel(ht *lntest.HarnessTest) {
const paymentAmt = 10000
chanAmt := lnd.MaxFundingAmount
// We'll introduce Carol, who will settle any incoming invoice with a
// totally unrelated preimage.
carol := ht.NewNode("Carol", []string{"--hodl.bogus-settle"})
alice := ht.Alice
ht.ConnectNodes(alice, carol)
// Let Alice connect and open a channel to Carol.
ht.OpenChannel(alice, carol, lntest.OpenChannelParams{Amt: chanAmt})
// With the channel open, we'll create an invoice for Carol that Alice
// will attempt to pay.
preimage := bytes.Repeat([]byte{byte(192)}, 32)
invoice := &lnrpc.Invoice{
Memo: "testing",
RPreimage: preimage,
Value: paymentAmt,
}
resp := carol.RPC.AddInvoice(invoice)
// Send the payment from Alice to Carol. We expect Carol to attempt to
// settle this payment with the wrong preimage.
//
// NOTE: cannot use `CompletePaymentRequestsNoWait` here as the channel
// will be force closed, so the num of updates check in that function
// won't work as the channel cannot be found.
req := &routerrpc.SendPaymentRequest{
PaymentRequest: resp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
ht.SendPaymentAndAssertStatus(alice, req, lnrpc.Payment_IN_FLIGHT)
// Since Alice detects that Carol is trying to trick her by providing a
// fake preimage, she should fail and force close the channel.
ht.AssertNumWaitingClose(alice, 1)
// Mine a block to confirm the broadcasted commitment.
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
require.Len(ht, block.Transactions, 2, "transaction wasn't mined")
// The channel should now show up as force closed both for Alice and
// Carol.
ht.AssertNumPendingForceClose(alice, 1)
ht.AssertNumPendingForceClose(carol, 1)
// Carol will use the correct preimage to resolve the HTLC on-chain.
ht.AssertNumPendingSweeps(carol, 1)
// Bring down the fee rate estimation, otherwise the following sweep
// won't happen.
ht.SetFeeEstimate(chainfee.FeePerKwFloor)
// Mine a block to trigger Carol's sweeper to broadcast the sweeping
// tx.
ht.MineEmptyBlocks(1)
// Carol should have broadcast her sweeping tx.
ht.AssertNumTxsInMempool(1)
// Mine two blocks to confirm Carol's sweeping tx, by which point
// Alice's commit output should be offered to her sweeper.
ht.MineBlocksAndAssertNumTxes(2, 1)
// Alice should have one pending sweep request for her commit output.
ht.AssertNumPendingSweeps(alice, 1)
// Mine a block to trigger the sweep.
ht.MineEmptyBlocks(1)
// Mine Alice's sweeping tx.
ht.MineBlocksAndAssertNumTxes(1, 1)
// No pending channels should be left.
ht.AssertNumPendingForceClose(alice, 0)
}

// assertReports checks that the count of resolutions we have present per
// type matches a set of expected resolutions.
//
// NOTE: only used in current test file.
func assertReports(ht *lntest.HarnessTest, hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint, expected map[string]*lnrpc.Resolution) {
op := ht.OutPointFromChannelPoint(chanPoint)
// Get our node's closed channels.
req := &lnrpc.ClosedChannelsRequest{Abandoned: false}
closed := hn.RPC.ClosedChannels(req)
var resolutions []*lnrpc.Resolution
for _, close := range closed.Channels {
if close.ChannelPoint == op.String() {
resolutions = close.Resolutions
break
}
}
require.NotNil(ht, resolutions)
// Copy the expected resolutions so we can remove them as we find them.
notFound := make(map[string]*lnrpc.Resolution)
for k, v := range expected {
notFound[k] = v
}
for _, res := range resolutions {
outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr,
res.Outpoint.OutputIndex)
require.Contains(ht, expected, outPointStr)
require.Equal(ht, expected[outPointStr], res)
delete(notFound, outPointStr)
}
// We should have found all the resolutions.
require.Empty(ht, notFound)
}

// checkCommitmentMaturity checks that both the maturity height and the
// number of blocks till maturity are as expected.