Merge pull request lightningnetwork#2744 from cfromknecht/disable-before-close

cnct+chancloser: disable channel before closing
Roasbeef authored Mar 22, 2019
2 parents e58fa33 + 714e42f commit c7ca387
Showing 5 changed files with 123 additions and 38 deletions.
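For context, the sketch below illustrates the ordering this commit enforces: the disable update is sent synchronously before the closing transaction is broadcast, since the edge policy is no longer retrievable from the graph once the close completes, whereas previously the disable was attempted in a background goroutine after the broadcast. It is a minimal, self-contained sketch only; the names (chanPoint, closerCfg, finalizeClose, broadcastTx) are hypothetical stand-ins and not part of lnd's API or of the diff below.

package main

import "fmt"

// chanPoint stands in for the real wire.OutPoint channel identifier.
type chanPoint string

// closerCfg loosely mirrors the dependency-injected hooks used by the
// channel closer; both functions here are hypothetical stand-ins.
type closerCfg struct {
	disableChannel func(chanPoint) error
	broadcastTx    func(chanPoint) error
}

// finalizeClose sends the disable update before broadcasting the closing
// transaction, mirroring the ordering introduced by this commit.
func finalizeClose(cfg closerCfg, cp chanPoint) error {
	// Disable first: once the close is processed, the edge policy can no
	// longer be fetched from the graph. A failure is only logged so that
	// it never blocks the close itself.
	if err := cfg.disableChannel(cp); err != nil {
		fmt.Printf("unable to disable channel %v on close: %v\n", cp, err)
	}

	// With the disable update sent, broadcast the closing transaction.
	return cfg.broadcastTx(cp)
}

func main() {
	cfg := closerCfg{
		disableChannel: func(cp chanPoint) error {
			fmt.Println("disabling", cp)
			return nil
		},
		broadcastTx: func(cp chanPoint) error {
			fmt.Println("broadcasting close for", cp)
			return nil
		},
	}
	_ = finalizeClose(cfg, "deadbeef:0")
}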
18 changes: 8 additions & 10 deletions chancloser.go
@@ -427,6 +427,14 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b
}
c.closingTx = closeTx

// Before closing, we'll attempt to send a disable update for
// the channel. We do so before closing the channel as otherwise
// the current edge policy won't be retrievable from the graph.
if err := c.cfg.disableChannel(c.chanPoint); err != nil {
peerLog.Warnf("Unable to disable channel %v on "+
"close: %v", c.chanPoint, err)
}

// With the closing transaction crafted, we'll now broadcast it
// to the network.
peerLog.Infof("Broadcasting cooperative close tx: %v",
@@ -440,16 +448,6 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b
return nil, false, err
}

// We'll attempt to disable the channel in the background to
// avoid blocking due to sending the update message to all
// active peers.
go func() {
if err := c.cfg.disableChannel(c.chanPoint); err != nil {
peerLog.Errorf("Unable to disable channel %v on "+
"close: %v", c.chanPoint, err)
}
}()

// Finally, we'll transition to the closeFinished state, and
// also return the final close signed message we sent.
// Additionally, we return true for the second argument to
18 changes: 8 additions & 10 deletions contractcourt/chain_arbitrator.go
@@ -635,6 +635,14 @@ func (c *ChainArbitrator) ForceCloseContract(chanPoint wire.OutPoint) (*wire.Msg

log.Infof("Attempting to force close ChannelPoint(%v)", chanPoint)

// Before closing, we'll attempt to send a disable update for the
// channel. We do so before closing the channel as otherwise the current
// edge policy won't be retrievable from the graph.
if err := c.cfg.DisableChannel(chanPoint); err != nil {
log.Warnf("Unable to disable channel %v on "+
"close: %v", chanPoint, err)
}

errChan := make(chan error, 1)
respChan := make(chan *wire.MsgTx, 1)

@@ -667,16 +675,6 @@ func (c *ChainArbitrator) ForceCloseContract(chanPoint wire.OutPoint) (*wire.Msg
return nil, ErrChainArbExiting
}

// We'll attempt to disable the channel in the background to
// avoid blocking due to sending the update message to all
// active peers.
go func() {
if err := c.cfg.DisableChannel(chanPoint); err != nil {
log.Errorf("Unable to disable channel %v on "+
"close: %v", chanPoint, err)
}
}()

return closeTx, nil
}

110 changes: 92 additions & 18 deletions lnd_test.go
@@ -223,16 +223,80 @@ func openChannelAndAssert(ctx context.Context, t *harnessTest,
// closure is attempted, therefore the passed context should be a child derived
// via timeout from a base parent. Additionally, once the channel has been
// detected as closed, an assertion checks that the transaction is found within
// a block.
// a block. Finally, this assertion verifies that the node always sends out a
// disable update when closing the channel if the channel was previously enabled.
//
// NOTE: This method assumes that the provided funding point is confirmed
// on-chain AND that the edge exists in the node's channel graph. If the funding
transaction was reorged out at some point, use closeReorgedChannelAndAssert.
func closeChannelAndAssert(ctx context.Context, t *harnessTest,
net *lntest.NetworkHarness, node *lntest.HarnessNode,
fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash {

// Fetch the current channel policy. If the channel is currently
// enabled, we will register for graph notifications before closing to
// assert that the node sends out a disabling update as a result of the
// channel being closed.
curPolicy := getChannelPolicies(t, node, node.PubKeyStr, fundingChanPoint)[0]
expectDisable := !curPolicy.Disabled

// If the current channel policy is enabled, begin subscribing to graph
// updates before initiating the channel closure.
var graphSub *graphSubscription
if expectDisable {
sub := subscribeGraphNotifications(t, ctx, node)
graphSub = &sub
defer close(graphSub.quit)
}

closeUpdates, _, err := net.CloseChannel(ctx, node, fundingChanPoint, force)
if err != nil {
t.Fatalf("unable to close channel: %v", err)
}

// If the channel policy was enabled prior to the closure, wait until we
// receive the disable update.
if expectDisable {
curPolicy.Disabled = true
waitForChannelUpdate(
t, *graphSub,
[]expectedChanUpdate{
{node.PubKeyStr, curPolicy, fundingChanPoint},
},
)
}

return assertChannelClosed(ctx, t, net, node, fundingChanPoint, closeUpdates)
}

// closeReorgedChannelAndAssert attempts to close a channel identified by the
// passed channel point owned by the passed Lightning node. A fully blocking
// channel closure is attempted, therefore the passed context should be a child
// derived via timeout from a base parent. Additionally, once the channel has
// been detected as closed, an assertion checks that the transaction is found
// within a block.
//
// NOTE: This method does not verify that the node sends a disable update for
// the closed channel.
func closeReorgedChannelAndAssert(ctx context.Context, t *harnessTest,
net *lntest.NetworkHarness, node *lntest.HarnessNode,
fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash {

closeUpdates, _, err := net.CloseChannel(ctx, node, fundingChanPoint, force)
if err != nil {
t.Fatalf("unable to close channel: %v", err)
}

return assertChannelClosed(ctx, t, net, node, fundingChanPoint, closeUpdates)
}

// assertChannelClosed asserts that the channel is properly cleaned up after
// initiating a cooperative or local close.
func assertChannelClosed(ctx context.Context, t *harnessTest,
net *lntest.NetworkHarness, node *lntest.HarnessNode,
fundingChanPoint *lnrpc.ChannelPoint,
closeUpdates lnrpc.Lightning_CloseChannelClient) *chainhash.Hash {

txidHash, err := getChanPointFundingTxid(fundingChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
@@ -989,7 +1053,6 @@ out:
select {
case graphUpdate := <-subscription.updateChan:
for _, update := range graphUpdate.ChannelUpdates {

// For each expected update, check if it matches
// the update we just received.
for i, exp := range expUpdates {
@@ -1070,11 +1133,12 @@ func assertNoChannelUpdates(t *harnessTest, subscription graphSubscription,
}
}

// assertChannelPolicy asserts that the passed node's known channel policy for
// the passed chanPoint is consistent with the expected policy values.
func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
advertisingNode string, expectedPolicy *lnrpc.RoutingPolicy,
chanPoints ...*lnrpc.ChannelPoint) {
// getChannelPolicies queries the channel graph and retrieves the current edge
// policies for the provided channel points.
func getChannelPolicies(t *harnessTest, node *lntest.HarnessNode,
advertisingNode string,
chanPoints ...*lnrpc.ChannelPoint) []*lnrpc.RoutingPolicy {

ctxb := context.Background()

descReq := &lnrpc.ChannelGraphRequest{
@@ -1086,25 +1150,18 @@ func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
t.Fatalf("unable to query for alice's graph: %v", err)
}

var policies []*lnrpc.RoutingPolicy
out:
for _, chanPoint := range chanPoints {
for _, e := range chanGraph.Edges {
if e.ChanPoint != txStr(chanPoint) {
continue
}

var err error
if e.Node1Pub == advertisingNode {
err = checkChannelPolicy(
e.Node1Policy, expectedPolicy,
)
policies = append(policies, e.Node1Policy)
} else {
err = checkChannelPolicy(
e.Node2Policy, expectedPolicy,
)
}
if err != nil {
t.Fatalf(err.Error())
policies = append(policies, e.Node2Policy)
}

continue out
@@ -1114,6 +1171,23 @@
// able to find this specific one, then we'll fail.
t.Fatalf("did not find edge %v", txStr(chanPoint))
}

return policies
}

// assertChannelPolicy asserts that the passed node's known channel policy for
// the passed chanPoint is consistent with the expected policy values.
func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
advertisingNode string, expectedPolicy *lnrpc.RoutingPolicy,
chanPoints ...*lnrpc.ChannelPoint) {

policies := getChannelPolicies(t, node, advertisingNode, chanPoints...)
for _, policy := range policies {
err := checkChannelPolicy(policy, expectedPolicy)
if err != nil {
t.Fatalf(err.Error())
}
}
}

// checkChannelPolicy checks that the policy matches the expected one.
@@ -1872,7 +1946,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
assertTxInBlock(t, block, fundingTxID)

ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
closeReorgedChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}

// testDisconnectingTargetPeer performs a test which
12 changes: 12 additions & 0 deletions netann/chan_status_manager.go
@@ -466,6 +466,18 @@ func (m *ChanStatusManager) disableInactiveChannels() {
if err != nil {
log.Errorf("Unable to sign update disabling "+
"channel(%v): %v", outpoint, err)

// If the edge was not found, this is a likely indicator
// that the channel has been closed. Thus we remove the
// outpoint from the set of tracked outpoints to prevent
// further attempts.
if err == channeldb.ErrEdgeNotFound {
log.Debugf("Removing channel(%v) from "+
"consideration for passive disabling",
outpoint)
delete(m.chanStates, outpoint)
}

continue
}

3 changes: 3 additions & 0 deletions test_utils.go
@@ -392,6 +392,9 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
if err != nil {
return nil, nil, nil, nil, err
}
if err = chanStatusMgr.Start(); err != nil {
return nil, nil, nil, nil, err
}
s.chanStatusMgr = chanStatusMgr

alicePeer := &peer{
