// Copyright (c) 2013-2018 The btcsuite developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"container/list"
"fmt"
"sync"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
)
const (
// maxOrphanBlocks is the maximum number of orphan blocks that can be
// queued.
maxOrphanBlocks = 100
)
// BlockLocator is used to help locate a specific block. The algorithm for
// building the block locator is to add the hashes in reverse order until
// the genesis block is reached. In order to keep the list of locator hashes
// to a reasonable number of entries, first the most recent previous 12 block
// hashes are added, then the step is doubled each loop iteration to
// exponentially decrease the number of hashes as a function of the distance
// from the block being located.
//
// For example, assume a block chain with a side chain as depicted below:
//
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
// \-> 16a -> 17a
//
// The block locator for block 17a would be the hashes of blocks:
// [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis]
type BlockLocator []*chainhash.Hash
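// For illustration only, a minimal sketch of the doubling-step walk described
// above. This is not the construction used elsewhere in this package;
// ancestorAt is a hypothetical helper returning the hash of the block at the
// given height on the chain being walked, and the caller is assumed to append
// the genesis hash afterwards:
//
//	func sketchLocator(tipHeight int32, ancestorAt func(int32) *chainhash.Hash) BlockLocator {
//		var locator BlockLocator
//		step := int32(1)
//		for height := tipHeight; height > 0; height -= step {
//			locator = append(locator, ancestorAt(height))
//			// After the most recent hashes, double the step so the
//			// locator grows logarithmically with chain length.
//			if len(locator) >= 12 {
//				step *= 2
//			}
//		}
//		return locator
//	}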
// orphanBlock represents a block that we don't yet have the parent for. It
// is a normal block plus an expiration time to prevent caching the orphan
// forever.
type orphanBlock struct {
block *btcutil.Block
expiration time.Time
}
// BestState houses information about the current best block and other info
// related to the state of the main chain as it exists from the point of view of
// the current best block.
//
// The BestSnapshot method can be used to obtain access to this information
// in a concurrent safe manner and the data will not be changed out from under
// the caller when chain state changes occur as the function name implies.
// However, the returned snapshot must be treated as immutable since it is
// shared by all callers.
type BestState struct {
Hash chainhash.Hash // The hash of the block.
Height int32 // The height of the block.
Bits uint32 // The difficulty bits of the block.
BlockSize uint64 // The size of the block.
BlockWeight uint64 // The weight of the block.
NumTxns uint64 // The number of txns in the block.
TotalTxns uint64 // The total number of txns in the chain.
MedianTime time.Time // Median time as per CalcPastMedianTime.
}
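// For illustration only, a hedged sketch of a caller reading the snapshot via
// the BestSnapshot method mentioned above. No chain lock needs to be held for
// the duration of the caller's work (logTip is a hypothetical consumer):
//
//	snap := chain.BestSnapshot()
//	logTip(snap.Hash, snap.Height, snap.TotalTxns)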
// newBestState returns a new best state instance for the given parameters.
func newBestState(node *blockNode, blockSize, blockWeight, numTxns,
totalTxns uint64, medianTime time.Time) *BestState {
return &BestState{
Hash: node.hash,
Height: node.height,
Bits: node.bits,
BlockSize: blockSize,
BlockWeight: blockWeight,
NumTxns: numTxns,
TotalTxns: totalTxns,
MedianTime: medianTime,
}
}
// BlockChain provides functions for working with the bitcoin block chain.
// It includes functionality such as rejecting duplicate blocks, ensuring blocks
// follow all rules, orphan handling, checkpoint handling, and best chain
// selection with reorganization.
type BlockChain struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
checkpoints []chaincfg.Checkpoint
checkpointsByHeight map[int32]*chaincfg.Checkpoint
db database.DB
chainParams *chaincfg.Params
timeSource MedianTimeSource
sigCache *txscript.SigCache
indexManager IndexManager
hashCache *txscript.HashCache
// The following fields are calculated based upon the provided chain
// parameters. They are also set when the instance is created and
// can't be changed afterwards, so there is no need to protect them with
// a separate mutex.
minRetargetTimespan int64 // target timespan / adjustment factor
maxRetargetTimespan int64 // target timespan * adjustment factor
blocksPerRetarget int32 // target timespan / target time per block
// chainLock protects concurrent access to the vast majority of the
// fields in this struct below this point.
chainLock sync.RWMutex
// pruneTarget is the size in bytes the database targets for when the node
// is pruned.
pruneTarget uint64
// These fields are related to the memory block index. They both have
// their own locks, however they are often also protected by the chain
// lock to help prevent logic races when blocks are being processed.
//
// index houses the entire block index in memory. The block index is
// a tree-shaped structure.
//
// bestChain tracks the current active chain by making use of an
// efficient chain view into the block index.
index *blockIndex
bestChain *chainView
// These fields are related to handling of orphan blocks. They are
// protected by a combination of the chain lock and the orphan lock.
orphanLock sync.RWMutex
orphans map[chainhash.Hash]*orphanBlock
prevOrphans map[chainhash.Hash][]*orphanBlock
oldestOrphan *orphanBlock
// These fields are related to checkpoint handling. They are protected
// by the chain lock.
nextCheckpoint *chaincfg.Checkpoint
checkpointNode *blockNode
// The state is used as a fairly efficient way to cache information
// about the current best chain state that is returned to callers when
// requested. It operates on the principle of MVCC such that any time a
// new block becomes the best block, the state pointer is replaced with
// a new struct and the old state is left untouched. In this way,
// multiple callers can be pointing to different best chain states.
// This is acceptable for most callers because the state is only being
// queried at a specific point in time.
//
// In addition, some of the fields are stored in the database so the
// chain state can be quickly reconstructed on load.
stateLock sync.RWMutex
stateSnapshot *BestState
// The following caches are used to efficiently keep track of the
// current deployment threshold state of each rule change deployment.
//
// This information is stored in the database so it can be quickly
// reconstructed on load.
//
// warningCaches caches the current deployment threshold state for blocks
// in each of the **possible** deployments. This is used in order to
// detect when new unrecognized rule changes are being voted on and/or
// have been activated such as will be the case when older versions of
// the software are being used.
//
// deploymentCaches caches the current deployment threshold state for
// blocks in each of the actively defined deployments.
warningCaches []thresholdStateCache
deploymentCaches []thresholdStateCache
// The following fields are used to determine if certain warnings have
// already been shown.
//
// unknownRulesWarned refers to warnings due to unknown rules being
// activated.
unknownRulesWarned bool
// The notifications field stores a slice of callbacks to be executed on
// certain blockchain events.
notificationsLock sync.RWMutex
notifications []NotificationCallback
}
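// For illustration only, a sketch of registering one of the notification
// callbacks stored above. It assumes the Subscribe method and Notification
// type that accompany NTBlockConnected/NTBlockDisconnected elsewhere in this
// package:
//
//	chain.Subscribe(func(n *blockchain.Notification) {
//		switch n.Type {
//		case blockchain.NTBlockConnected:
//			// React to the new tip, e.g. update wallet state.
//		case blockchain.NTBlockDisconnected:
//			// Undo any state derived from the disconnected block.
//		}
//	})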
// HaveBlock returns whether or not the chain instance has the block represented
// by the passed hash. This includes checking the various places a block can
// be, such as part of the main chain, on a side chain, or in the orphan pool.
//
// This function is safe for concurrent access.
func (b *BlockChain) HaveBlock(hash *chainhash.Hash) (bool, error) {
exists, err := b.blockExists(hash)
if err != nil {
return false, err
}
return exists || b.IsKnownOrphan(hash), nil
}
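// For illustration only, a sketch of a typical caller using HaveBlock when a
// peer announces a block hash (requestBlock is a hypothetical helper that
// fetches the block from the peer):
//
//	have, err := chain.HaveBlock(&announcedHash)
//	if err == nil && !have {
//		requestBlock(announcedHash)
//	}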
// IsKnownOrphan returns whether the passed hash is currently a known orphan.
// Keep in mind that only a limited number of orphans are held onto for a
// limited amount of time, so this function must not be used as an absolute
// way to test if a block is an orphan block. A full block (as opposed to just
// its hash) must be passed to ProcessBlock for that purpose. However, calling
// ProcessBlock with an orphan that already exists results in an error, so this
// function provides a mechanism for a caller to intelligently detect *recent*
// duplicate orphans and react accordingly.
//
// This function is safe for concurrent access.
func (b *BlockChain) IsKnownOrphan(hash *chainhash.Hash) bool {
// Protect concurrent access. Using a read lock only so multiple
// readers can query without blocking each other.
b.orphanLock.RLock()
_, exists := b.orphans[*hash]
b.orphanLock.RUnlock()
return exists
}
// GetOrphanRoot returns the head of the chain for the provided hash from the
// map of orphan blocks.
//
// This function is safe for concurrent access.
func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash {
// Protect concurrent access. Using a read lock only so multiple
// readers can query without blocking each other.
b.orphanLock.RLock()
defer b.orphanLock.RUnlock()
// Keep looping while the parent of each orphaned block is
// known and is an orphan itself.
orphanRoot := hash
prevHash := hash
for {
orphan, exists := b.orphans[*prevHash]
if !exists {
break
}
orphanRoot = prevHash
prevHash = &orphan.block.MsgBlock().Header.PrevBlock
}
return orphanRoot
}
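// For illustration only, a hedged sketch of how a caller typically reacts when
// ProcessBlock reports an orphan: find the orphan root and request the missing
// ancestry from the announcing peer. requestBlocksFrom is a hypothetical
// helper, and LatestBlockLocator is assumed to be the locator accessor
// provided elsewhere in this package:
//
//	if isOrphan {
//		root := chain.GetOrphanRoot(blockHash)
//		locator, err := chain.LatestBlockLocator()
//		if err == nil {
//			requestBlocksFrom(peer, locator, root)
//		}
//	}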
// removeOrphanBlock removes the passed orphan block from the orphan pool and
// previous orphan index.
func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) {
// Protect concurrent access.
b.orphanLock.Lock()
defer b.orphanLock.Unlock()
// Remove the orphan block from the orphan pool.
orphanHash := orphan.block.Hash()
delete(b.orphans, *orphanHash)
// Remove the reference from the previous orphan index too. An indexing
// for loop is intentionally used over a range here as range does not
// reevaluate the slice on each iteration nor does it adjust the index
// for the modified slice.
prevHash := &orphan.block.MsgBlock().Header.PrevBlock
orphans := b.prevOrphans[*prevHash]
for i := 0; i < len(orphans); i++ {
hash := orphans[i].block.Hash()
if hash.IsEqual(orphanHash) {
copy(orphans[i:], orphans[i+1:])
orphans[len(orphans)-1] = nil
orphans = orphans[:len(orphans)-1]
i--
}
}
b.prevOrphans[*prevHash] = orphans
// Remove the map entry altogether if there are no longer any orphans
// which depend on the parent hash.
if len(b.prevOrphans[*prevHash]) == 0 {
delete(b.prevOrphans, *prevHash)
}
}
// addOrphanBlock adds the passed block (which is already determined to be
// an orphan prior to calling this function) to the orphan pool. It lazily cleans
// up any expired blocks so a separate cleanup poller doesn't need to be run.
// It also imposes a maximum limit on the number of outstanding orphan
// blocks and will remove the oldest received orphan block if the limit is
// exceeded.
func (b *BlockChain) addOrphanBlock(block *btcutil.Block) {
// Remove expired orphan blocks.
for _, oBlock := range b.orphans {
if time.Now().After(oBlock.expiration) {
b.removeOrphanBlock(oBlock)
continue
}
// Update the oldest orphan block pointer so it can be discarded
// in case the orphan pool fills up.
if b.oldestOrphan == nil || oBlock.expiration.Before(b.oldestOrphan.expiration) {
b.oldestOrphan = oBlock
}
}
// Limit orphan blocks to prevent memory exhaustion.
if len(b.orphans)+1 > maxOrphanBlocks {
// Remove the oldest orphan to make room for the new one.
b.removeOrphanBlock(b.oldestOrphan)
b.oldestOrphan = nil
}
// Protect concurrent access. This is intentionally done here instead
// of near the top since removeOrphanBlock does its own locking and
// the range iterator is not invalidated by removing map entries.
b.orphanLock.Lock()
defer b.orphanLock.Unlock()
// Insert the block into the orphan map with an expiration time
// 1 hour from now.
expiration := time.Now().Add(time.Hour)
oBlock := &orphanBlock{
block: block,
expiration: expiration,
}
b.orphans[*block.Hash()] = oBlock
// Add to previous hash lookup index for faster dependency lookups.
prevHash := &block.MsgBlock().Header.PrevBlock
b.prevOrphans[*prevHash] = append(b.prevOrphans[*prevHash], oBlock)
}
// SequenceLock represents the converted relative lock-time in seconds, and
// absolute block-height for a transaction input's relative lock-times.
// According to SequenceLock, after the referenced input has been confirmed
// within a block, a transaction spending that input can be included into a
// block either after 'seconds' (according to past median time), or once the
// 'BlockHeight' has been reached.
type SequenceLock struct {
Seconds int64
BlockHeight int32
}
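// For illustration only, how a caller is typically expected to combine the two
// thresholds: the lock is satisfied only once both values lie strictly below
// the point of evaluation, i.e. the height of the block that would include the
// transaction and the median time past of its parent. A hedged sketch:
//
//	ok := lock.Seconds < medianTimePast.Unix() &&
//		lock.BlockHeight < nextBlockHeight
//
// Note the -1 defaults used below always satisfy both comparisons.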
// CalcSequenceLock computes a relative lock-time SequenceLock for the passed
// transaction using the passed UtxoViewpoint to obtain the past median time
// for blocks in which the referenced inputs of the transactions were included
// within. The generated SequenceLock can be used in conjunction with a
// block height, and adjusted median block time to determine if all the inputs
// referenced within a transaction have reached sufficient maturity allowing
// the candidate transaction to be included in a block.
//
// This function is safe for concurrent access.
func (b *BlockChain) CalcSequenceLock(tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) {
b.chainLock.Lock()
defer b.chainLock.Unlock()
return b.calcSequenceLock(b.bestChain.Tip(), tx, utxoView, mempool)
}
// calcSequenceLock computes the relative lock-times for the passed
// transaction. See the exported version, CalcSequenceLock for further details.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) {
// A value of -1 for each relative lock type represents a relative time
// lock value that will allow a transaction to be included in a block
// at any given height or time. This value is returned as the relative
// lock time in the case that BIP 68 is disabled, or has not yet been
// activated.
sequenceLock := &SequenceLock{Seconds: -1, BlockHeight: -1}
// The sequence locks semantics are always active for transactions
// within the mempool.
csvSoftforkActive := mempool
// If we're performing block validation, then we need to query the BIP9
// state.
if !csvSoftforkActive {
// Obtain the latest BIP9 version bits state for the
// CSV-package soft-fork deployment. The adherence of sequence
// locks depends on the current soft-fork state.
csvState, err := b.deploymentState(node.parent, chaincfg.DeploymentCSV)
if err != nil {
return nil, err
}
csvSoftforkActive = csvState == ThresholdActive
}
// If the transaction's version is less than 2, and BIP 68 has not yet
// been activated then sequence locks are disabled. Additionally,
// sequence locks don't apply to coinbase transactions. Therefore, we
// return sequence lock values of -1 indicating that this transaction
// can be included within a block at any given height or time.
mTx := tx.MsgTx()
sequenceLockActive := uint32(mTx.Version) >= 2 && csvSoftforkActive
if !sequenceLockActive || IsCoinBase(tx) {
return sequenceLock, nil
}
// Grab the next height from the PoV of the passed blockNode to use for
// inputs present in the mempool.
nextHeight := node.height + 1
for txInIndex, txIn := range mTx.TxIn {
utxo := utxoView.LookupEntry(txIn.PreviousOutPoint)
if utxo == nil {
str := fmt.Sprintf("output %v referenced from "+
"transaction %s:%d either does not exist or "+
"has already been spent", txIn.PreviousOutPoint,
tx.Hash(), txInIndex)
return sequenceLock, ruleError(ErrMissingTxOut, str)
}
// If the input height is set to the mempool height, then we
// assume the transaction makes it into the next block when
// evaluating its sequence locks.
inputHeight := utxo.BlockHeight()
if inputHeight == 0x7fffffff {
inputHeight = nextHeight
}
// Given a sequence number, we apply the relative time lock
// mask in order to obtain the time lock delta required before
// this input can be spent.
sequenceNum := txIn.Sequence
relativeLock := int64(sequenceNum & wire.SequenceLockTimeMask)
switch {
// Relative time locks are disabled for this input, so we can
// skip any further calculation.
case sequenceNum&wire.SequenceLockTimeDisabled == wire.SequenceLockTimeDisabled:
continue
case sequenceNum&wire.SequenceLockTimeIsSeconds == wire.SequenceLockTimeIsSeconds:
// This input requires a relative time lock expressed
// in seconds before it can be spent. Therefore, we
// need to query for the block prior to the one in
// which this input was included within so we can
// compute the past median time for the block prior to
// the one which included this referenced output.
prevInputHeight := inputHeight - 1
if prevInputHeight < 0 {
prevInputHeight = 0
}
blockNode := node.Ancestor(prevInputHeight)
medianTime := CalcPastMedianTime(blockNode)
// Time based relative time-locks as defined by BIP 68
// have a granularity of 512 seconds, so we shift left
// by wire.SequenceLockTimeGranularity to convert to the
// proper relative time-lock. We also subtract one from
// the relative lock to maintain the original lockTime
// semantics.
timeLockSeconds := (relativeLock << wire.SequenceLockTimeGranularity) - 1
timeLock := medianTime.Unix() + timeLockSeconds
if timeLock > sequenceLock.Seconds {
sequenceLock.Seconds = timeLock
}
default:
// The relative lock-time for this input is expressed
// in blocks so we calculate the relative offset from
// the input's height as its converted absolute
// lock-time. We subtract one from the relative lock in
// order to maintain the original lockTime semantics.
blockHeight := inputHeight + int32(relativeLock-1)
if blockHeight > sequenceLock.BlockHeight {
sequenceLock.BlockHeight = blockHeight
}
}
}
return sequenceLock, nil
}
// LockTimeToSequence converts the passed relative locktime to a sequence
// number in accordance with BIP-68.
// See: https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki
// - (Compatibility)
func LockTimeToSequence(isSeconds bool, locktime uint32) uint32 {
// If we're expressing the relative lock time in blocks, then the
// corresponding sequence number is simply the desired input age.
if !isSeconds {
return locktime
}
// Set the 22nd bit which indicates the lock time is in seconds, then
// shift the locktime over by 9 since the time granularity is in
// 512-second intervals (2^9). This results in a max lock-time of
// 33,553,920 seconds, or 1.1 years.
return wire.SequenceLockTimeIsSeconds |
locktime>>wire.SequenceLockTimeGranularity
}
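// For illustration only, two worked encodings under the rules above:
//
//	// Height based: a 10-block relative lock is just the raw value.
//	seqBlocks := LockTimeToSequence(false, 10) // 10
//
//	// Time based: 1024 seconds rounds down to 2 intervals of 512s and the
//	// type flag (bit 22) is set, giving 0x400002.
//	seqSeconds := LockTimeToSequence(true, 1024)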
// getReorganizeNodes finds the fork point between the main chain and the passed
// node and returns a list of block nodes that would need to be detached from
// the main chain and a list of block nodes that would need to be attached to
// the fork point (which will be the end of the main chain after detaching the
// returned list of block nodes) in order to reorganize the chain such that the
// passed node is the new end of the main chain. The lists will be empty if the
// passed node is not on a side chain.
//
// This function may modify node statuses in the block index without flushing.
//
// This function MUST be called with the chain state lock held (for reads).
func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List) {
attachNodes := list.New()
detachNodes := list.New()
// Do not reorganize to a known invalid chain. Ancestors deeper than the
// direct parent are checked below but this is a quick check before doing
// more unnecessary work.
if b.index.NodeStatus(node.parent).KnownInvalid() {
b.index.SetStatusFlags(node, statusInvalidAncestor)
return detachNodes, attachNodes
}
// Find the fork point (if any) adding each block to the list of nodes
// to attach to the main tree. Push them onto the list in reverse order
// so they are attached in the appropriate order when iterating the list
// later.
forkNode := b.bestChain.FindFork(node)
invalidChain := false
for n := node; n != nil && n != forkNode; n = n.parent {
if b.index.NodeStatus(n).KnownInvalid() {
invalidChain = true
break
}
attachNodes.PushFront(n)
}
// If any of the node's ancestors are invalid, unwind attachNodes, marking
// each one as invalid for future reference.
if invalidChain {
var next *list.Element
for e := attachNodes.Front(); e != nil; e = next {
next = e.Next()
n := attachNodes.Remove(e).(*blockNode)
b.index.SetStatusFlags(n, statusInvalidAncestor)
}
return detachNodes, attachNodes
}
// Start from the end of the main chain and work backwards until the
// common ancestor adding each block to the list of nodes to detach from
// the main chain.
for n := b.bestChain.Tip(); n != nil && n != forkNode; n = n.parent {
detachNodes.PushBack(n)
}
return detachNodes, attachNodes
}
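// For illustration, using the side chain depicted in the BlockLocator comment
// above (main chain ... -> 15 -> 16 -> 17 -> 18 with side chain 15 -> 16a ->
// 17a): asking to reorganize to a new node 18a that extends 17a would return
// detachNodes = [18, 17, 16] (current tip first, popped off the end of the
// chain) and attachNodes = [16a, 17a, 18a] (fork point first, pushed onto the
// end of the chain).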
// connectBlock handles connecting the passed node/block to the end of the main
// (best) chain.
//
// This passed utxo view must have all referenced txos the block spends marked
// as spent and all of the new txos the block creates added to it. In addition,
// the passed stxos slice must be populated with all of the information for the
// spent txos. This approach is used because the connection validation that
// must happen prior to calling this function requires the same details, so
// it would be inefficient to repeat it.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block,
view *UtxoViewpoint, stxos []SpentTxOut) error {
// Make sure it's extending the end of the best chain.
prevHash := &block.MsgBlock().Header.PrevBlock
if !prevHash.IsEqual(&b.bestChain.Tip().hash) {
return AssertError("connectBlock must be called with a block " +
"that extends the main chain")
}
// Sanity check the correct number of stxos are provided.
if len(stxos) != countSpentOutputs(block) {
return AssertError("connectBlock called with inconsistent " +
"spent transaction out information")
}
// No warnings about unknown rules until the chain is current.
if b.isCurrent() {
// Warn if any unknown new rules are either about to activate or
// have already been activated.
if err := b.warnUnknownRuleActivations(node); err != nil {
return err
}
}
// Write any block status changes to DB before updating best state.
err := b.index.flushToDB()
if err != nil {
return err
}
// Generate a new best state snapshot that will be used to update the
// database and later memory if all database updates are successful.
b.stateLock.RLock()
curTotalTxns := b.stateSnapshot.TotalTxns
b.stateLock.RUnlock()
numTxns := uint64(len(block.MsgBlock().Transactions))
blockSize := uint64(block.MsgBlock().SerializeSize())
blockWeight := uint64(GetBlockWeight(block))
state := newBestState(node, blockSize, blockWeight, numTxns,
curTotalTxns+numTxns, CalcPastMedianTime(node),
)
// Atomically insert info into the database.
err = b.db.Update(func(dbTx database.Tx) error {
// If the pruneTarget isn't 0, we should attempt to delete older blocks
// from the database.
if b.pruneTarget != 0 {
// When the total block size is under the prune target, the PruneBlocks call is
// a no-op and the deleted hashes are nil.
deletedHashes, err := dbTx.PruneBlocks(b.pruneTarget)
if err != nil {
return err
}
// Only attempt to delete if we have any deleted blocks.
if len(deletedHashes) != 0 {
// Delete the spend journals of the pruned blocks.
err = dbPruneSpendJournalEntry(dbTx, deletedHashes)
if err != nil {
return err
}
}
}
// Update best block state.
err := dbPutBestState(dbTx, state, node.workSum)
if err != nil {
return err
}
// Add the block hash and height to the block index which tracks
// the main chain.
err = dbPutBlockIndex(dbTx, block.Hash(), node.height)
if err != nil {
return err
}
// Update the utxo set using the state of the utxo view. This
// entails removing all of the utxos spent and adding the new
// ones created by the block.
err = dbPutUtxoView(dbTx, view)
if err != nil {
return err
}
// Update the transaction spend journal by adding a record for
// the block that contains all txos spent by it.
err = dbPutSpendJournalEntry(dbTx, block.Hash(), stxos)
if err != nil {
return err
}
// Allow the index manager to call each of the currently active
// optional indexes with the block being connected so they can
// update themselves accordingly.
if b.indexManager != nil {
err := b.indexManager.ConnectBlock(dbTx, block, stxos)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
// Prune fully spent entries and mark all entries in the view unmodified
// now that the modifications have been committed to the database.
view.commit()
// This node is now the end of the best chain.
b.bestChain.SetTip(node)
// Update the state for the best block. Notice how this replaces the
// entire struct instead of updating the existing one. This effectively
// allows the old version to act as a snapshot which callers can use
// freely without needing to hold a lock for the duration. See the
// comments on the state variable for more details.
b.stateLock.Lock()
b.stateSnapshot = state
b.stateLock.Unlock()
// Notify the caller that the block was connected to the main chain.
// The caller would typically want to react with actions such as
// updating wallets.
b.chainLock.Unlock()
b.sendNotification(NTBlockConnected, block)
b.chainLock.Lock()
return nil
}
// disconnectBlock handles disconnecting the passed node/block from the end of
// the main (best) chain.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error {
// Make sure the node being disconnected is the end of the best chain.
if !node.hash.IsEqual(&b.bestChain.Tip().hash) {
return AssertError("disconnectBlock must be called with the " +
"block at the end of the main chain")
}
// Load the previous block since some details for it are needed below.
prevNode := node.parent
var prevBlock *btcutil.Block
err := b.db.View(func(dbTx database.Tx) error {
var err error
prevBlock, err = dbFetchBlockByNode(dbTx, prevNode)
return err
})
if err != nil {
return err
}
// Write any block status changes to DB before updating best state.
err = b.index.flushToDB()
if err != nil {
return err
}
// Generate a new best state snapshot that will be used to update the
// database and later memory if all database updates are successful.
b.stateLock.RLock()
curTotalTxns := b.stateSnapshot.TotalTxns
b.stateLock.RUnlock()
numTxns := uint64(len(prevBlock.MsgBlock().Transactions))
blockSize := uint64(prevBlock.MsgBlock().SerializeSize())
blockWeight := uint64(GetBlockWeight(prevBlock))
newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions))
state := newBestState(prevNode, blockSize, blockWeight, numTxns,
newTotalTxns, CalcPastMedianTime(prevNode))
err = b.db.Update(func(dbTx database.Tx) error {
// Update best block state.
err := dbPutBestState(dbTx, state, node.workSum)
if err != nil {
return err
}
// Remove the block hash and height from the block index which
// tracks the main chain.
err = dbRemoveBlockIndex(dbTx, block.Hash(), node.height)
if err != nil {
return err
}
// Update the utxo set using the state of the utxo view. This
// entails restoring all of the utxos spent and removing the new
// ones created by the block.
err = dbPutUtxoView(dbTx, view)
if err != nil {
return err
}
// Before we delete the spend journal entry for this block,
// we'll fetch it as is so the indexers can utilize it if needed.
stxos, err := dbFetchSpendJournalEntry(dbTx, block)
if err != nil {
return err
}
// Update the transaction spend journal by removing the record
// that contains all txos spent by the block.
err = dbRemoveSpendJournalEntry(dbTx, block.Hash())
if err != nil {
return err
}
// Allow the index manager to call each of the currently active
// optional indexes with the block being disconnected so they
// can update themselves accordingly.
if b.indexManager != nil {
err := b.indexManager.DisconnectBlock(dbTx, block, stxos)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
// Prune fully spent entries and mark all entries in the view unmodified
// now that the modifications have been committed to the database.
view.commit()
// This node's parent is now the end of the best chain.
b.bestChain.SetTip(node.parent)
// Update the state for the best block. Notice how this replaces the
// entire struct instead of updating the existing one. This effectively
// allows the old version to act as a snapshot which callers can use
// freely without needing to hold a lock for the duration. See the
// comments on the state variable for more details.
b.stateLock.Lock()
b.stateSnapshot = state
b.stateLock.Unlock()
// Notify the caller that the block was disconnected from the main
// chain. The caller would typically want to react with actions such as
// updating wallets.
b.chainLock.Unlock()
b.sendNotification(NTBlockDisconnected, block)
b.chainLock.Lock()
return nil
}
// countSpentOutputs returns the number of utxos the passed block spends.
func countSpentOutputs(block *btcutil.Block) int {
// Exclude the coinbase transaction since it can't spend anything.
var numSpent int
for _, tx := range block.Transactions()[1:] {
numSpent += len(tx.MsgTx().TxIn)
}
return numSpent
}
// reorganizeChain reorganizes the block chain by disconnecting the nodes in the
// detachNodes list and connecting the nodes in the attach list. It expects
// that the lists are already in the correct order and are in sync with the
// end of the current best chain. Specifically, nodes that are being
// disconnected must be in reverse order (think of popping them off the end of
// the chain) and nodes that are being attached must be in forward order
// (think pushing them onto the end of the chain).
//
// This function may modify node statuses in the block index without flushing.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error {
// Nothing to do if no reorganize nodes were provided.
if detachNodes.Len() == 0 && attachNodes.Len() == 0 {
return nil
}
// Ensure the provided nodes match the current best chain.
tip := b.bestChain.Tip()
if detachNodes.Len() != 0 {
firstDetachNode := detachNodes.Front().Value.(*blockNode)
if firstDetachNode.hash != tip.hash {
return AssertError(fmt.Sprintf("reorganize nodes to detach are "+
"not for the current best chain -- first detach node %v, "+
"current chain %v", &firstDetachNode.hash, &tip.hash))
}
}
// Ensure the provided nodes are for the same fork point.
if attachNodes.Len() != 0 && detachNodes.Len() != 0 {
firstAttachNode := attachNodes.Front().Value.(*blockNode)
lastDetachNode := detachNodes.Back().Value.(*blockNode)
if firstAttachNode.parent.hash != lastDetachNode.parent.hash {
return AssertError(fmt.Sprintf("reorganize nodes do not have the "+
"same fork point -- first attach parent %v, last detach "+
"parent %v", &firstAttachNode.parent.hash,
&lastDetachNode.parent.hash))
}
}
// Track the old and new best chains heads.
oldBest := tip
newBest := tip
// All of the blocks to detach and related spend journal entries needed
// to unspend transaction outputs in the blocks being disconnected must
// be loaded from the database during the reorg check phase below and
// then they are needed again when doing the actual database updates.
// Rather than doing two loads, cache the loaded data into these slices.
detachBlocks := make([]*btcutil.Block, 0, detachNodes.Len())
detachSpentTxOuts := make([][]SpentTxOut, 0, detachNodes.Len())
attachBlocks := make([]*btcutil.Block, 0, attachNodes.Len())
// Disconnect all of the blocks back to the point of the fork. This
// entails loading the blocks and their associated spent txos from the
// database and using that information to unspend all of the spent txos
// and remove the utxos created by the blocks.
view := NewUtxoViewpoint()
view.SetBestHash(&oldBest.hash)
for e := detachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
var block *btcutil.Block
err := b.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, n)
return err
})
if err != nil {
return err
}
if n.hash != *block.Hash() {
return AssertError(fmt.Sprintf("detach block node hash %v (height "+
"%v) does not match previous parent block hash %v", &n.hash,
n.height, block.Hash()))
}
// Load all of the utxos referenced by the block that aren't
// already in the view.
err = view.fetchInputUtxos(b.db, block)
if err != nil {
return err
}
// Load all of the spent txos for the block from the spend
// journal.
var stxos []SpentTxOut
err = b.db.View(func(dbTx database.Tx) error {
stxos, err = dbFetchSpendJournalEntry(dbTx, block)
return err
})
if err != nil {
return err
}
// Store the loaded block and spend journal entry for later.
detachBlocks = append(detachBlocks, block)
detachSpentTxOuts = append(detachSpentTxOuts, stxos)
err = view.disconnectTransactions(b.db, block, stxos)
if err != nil {
return err
}
newBest = n.parent
}
// Set the fork point only if there are nodes to attach since otherwise
// blocks are only being disconnected and thus there is no fork point.
var forkNode *blockNode
if attachNodes.Len() > 0 {
forkNode = newBest
}
// Perform several checks to verify each block that needs to be attached
// to the main chain can be connected without violating any rules and
// without actually connecting the block.
//
// NOTE: These checks could be done directly when connecting a block,
// however the downside to that approach is that if any of these checks
// fail after disconnecting some blocks or attaching others, all of the
// operations have to be rolled back to get the chain back into the
// state it was before the rule violation (or other failure). There are
// at least a couple of ways to accomplish that rollback, but both involve
// tweaking the chain and/or database. This approach catches these
// issues before ever modifying the chain.
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
var block *btcutil.Block
err := b.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, n)
return err
})
if err != nil {
return err
}
// Store the loaded block for later.
attachBlocks = append(attachBlocks, block)
// Skip checks if node has already been fully validated. Although
// checkConnectBlock gets skipped, we still need to update the UTXO
// view.
if b.index.NodeStatus(n).KnownValid() {
err = view.fetchInputUtxos(b.db, block)
if err != nil {
return err
}
err = view.connectTransactions(block, nil)
if err != nil {
return err
}
newBest = n
continue
}
// Notice the spent txout details are not requested here and
// thus will not be generated. This is done because the state
// is not being immediately written to the database, so it is
// not needed.
//
// In the case the block is determined to be invalid due to a
// rule violation, mark it as invalid and mark all of its
// descendants as having an invalid ancestor.
err = b.checkConnectBlock(n, block, view, nil)
if err != nil {
if _, ok := err.(RuleError); ok {
b.index.SetStatusFlags(n, statusValidateFailed)
for de := e.Next(); de != nil; de = de.Next() {
dn := de.Value.(*blockNode)
b.index.SetStatusFlags(dn, statusInvalidAncestor)
}
}